diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c
index 7172f8ead309..18a82ca72d76 100644
--- a/sys/cam/ctl/ctl.c
+++ b/sys/cam/ctl/ctl.c
@@ -1,13610 +1,13602 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin
 * Copyright (c) 2017 Jakub Wojciech Klama
 * Copyright (c) 2018 Marcelo Araujo
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */

/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_cd.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
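 *
 * Each page has a "default" and a "changeable" flavor: the default
 * template supplies the initial page contents, while the changeable
 * template acts as a mask whose set bits mark the fields an initiator
 * may modify with MODE SELECT (e.g. only SCP_WCE and SCP_RCD in the
 * caching page below).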
*/ const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_PER, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_da_verify_recovery_page verify_er_page_default = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/SMS_VER_PER, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, 
/*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) const static struct scsi_control_ext_page control_ext_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_control_ext_page control_ext_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0xff }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC, /*mrie*/SIEP_MRIE_NO, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 1} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | SIEP_FLAGS_LOGERR, /*mrie*/0x0f, /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, /*report_count*/{0xff, 0xff, 0xff, 0xff} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/SLBPP_SITUA, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, 
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0, /*resource*/0, /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0, /*resource*/0, /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}},
	 {/*flags*/0, /*resource*/0, /*reserved*/{0, 0},
	  /*count*/{0, 0, 0, 0}}
	}
};

const static struct scsi_cddvd_capabilities_page cddvd_page_default = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0x3f,
	/*caps2*/0x00,
	/*caps3*/0xf0,
	/*caps4*/0x00,
	/*caps5*/0x29,
	/*caps6*/0x00,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{8, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = {
	/*page_code*/SMS_CDDVD_CAPS_PAGE,
	/*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2,
	/*caps1*/0,
	/*caps2*/0,
	/*caps3*/0,
	/*caps4*/0,
	/*caps5*/0,
	/*caps6*/0,
	/*obsolete*/{0, 0},
	/*nvol_levels*/{0, 0},
	/*buffer_size*/{0, 0},
	/*obsolete2*/{0, 0},
	/*reserved*/0,
	/*digital*/0,
	/*obsolete3*/0,
	/*copy_management*/0,
	/*reserved2*/0,
	/*rotation_control*/0,
	/*cur_write_speed*/0,
	/*num_speed_descr*/0,
};

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer");
static int worker_threads = -1;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");
static int ctl_lun_map_size = 1024;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN,
    &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)");
#ifdef CTL_TIME_IO
static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS;
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
    &ctl_time_io_secs, 0, "Log requests taking more than this many seconds");
#endif

/*
 * Maximum number of LUNs we support.  MUST be a power of 2.
 */
#define	CTL_DEFAULT_MAX_LUNS	1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
    &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");

/*
 * Maximum number of ports registered at one time.
 */
#define	CTL_DEFAULT_MAX_PORTS	256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
    &ctl_max_ports, CTL_DEFAULT_MAX_PORTS, "Maximum number of ports");
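/*
 * ctl_init() validates both limits: a value that is not a power of two
 * is replaced by the default with a console warning, so e.g. setting
 * the kern.cam.ctl.max_luns tunable to 1000 yields 1024 LUNs.
 */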
/*
 * Maximum number of initiators we support.
 */
#define	CTL_MAX_INITIATORS	(CTL_MAX_INIT_PER_PORT * ctl_max_ports)

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92),
 * Block limits (0xB0), Block Device Characteristics (0xB1) and
 * Logical Block Provisioning (0xB2)
 */
#define	SCSI_EVPD_NUM_SUPPORTED_PAGES	11

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest);
static int ctl_init(void);
static int ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
static int ctl_enable_lun(struct ctl_lun *lun);
static int ctl_disable_lun(struct ctl_lun *lun);
static int ctl_free_lun(struct ctl_lun *lun);
static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun,
				  union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio,
				       int alloc_len);
static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio,
					 int alloc_len);
static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio);
static int ctl_inquiry_std(struct ctl_scsiio *ctsio);
static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len);
static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2,
				   bool seq);
static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2);
static ctl_action ctl_check_for_blockage(struct ctl_lun *lun,
    union ctl_io *pending_io, union ctl_io *ooa_io);
static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
				union ctl_io **starting_io);
static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io,
    bool skip);
static int ctl_scsiio_lun_check(struct ctl_lun *lun,
				const struct ctl_cmd_entry *entry,
				struct ctl_scsiio *ctsio);
static void ctl_failover_lun(union ctl_io *io);
static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio);
static int ctl_scsiio(struct ctl_scsiio *ctsio);

static int ctl_target_reset(union
ctl_io *io); static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type); static int ctl_lun_reset(union ctl_io *io); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_query_task(union ctl_io *io, int task_set); static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type); static int ctl_i_t_nexus_reset(union ctl_io *io); static int ctl_query_async_event(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); -static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); +static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr); static void ctl_datamove_remote_write(union ctl_io *io); -static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); +static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static void ctl_process_done(union ctl_io *io); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); static int ctl_ha_init(void); static int ctl_ha_shutdown(void); static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ha_frontend = { .name = "ha", .init = ctl_ha_init, .shutdown = ctl_ha_shutdown, }; static int ctl_ha_init(void) { struct ctl_softc *softc = control_softc; if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &softc->othersc_pool) != 0) return (ENOMEM); if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { ctl_pool_free(softc->othersc_pool); return (EIO); } if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { ctl_ha_msg_destroy(softc); ctl_pool_free(softc->othersc_pool); return (EIO); } return (0); }; static int ctl_ha_shutdown(void) { struct ctl_softc *softc = control_softc; struct ctl_port *port; ctl_ha_msg_shutdown(softc); if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) return (EIO); if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) return (EIO); ctl_pool_free(softc->othersc_pool); while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { ctl_port_deregister(port); free(port->port_name, M_CTL); free(port, M_CTL); } return (0); }; static void ctl_ha_datamove(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_sg_entry *sgl; union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy, i, j; memset(&msg.dt, 0, sizeof(msg.dt)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[0].addr = (void *)vtophys(io->scsiio.kern_data_ptr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; #endif msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; do_sg_copy = 1; } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before processing. 
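	 *
	 * For example, an I/O with 40 local S/G entries and room for only
	 * 16 per message (a hypothetical CTL_HA_MAX_SG_ENTRIES value)
	 * would go out as three messages of 16, 16 and 8 entries, with
	 * sg_sequence running 0 through 2 and sg_last set only on the
	 * final one.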
*/ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; for (i = sg_entries_sent, j = 0; i < msg.dt.cur_sg_entries; i++, j++) { #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[j].addr = sgl[i].addr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[j].addr = (void *)vtophys(sgl[i].addr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[j].addr = sgl[i].addr; #endif msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.dt) - sizeof(msg.dt.sg_list) + sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, M_WAITOK) > CTL_HA_STATUS_SUCCESS) { io->io_hdr.port_status = 31341; - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); return; } msg.dt.sent_sg_entries = sg_entries_sent; } /* * Officially handover the request from us to peer. * If failover has just happened, then we must return error. * If failover happen just after, then it is not our problem. */ if (lun) mtx_lock(&lun->lun_lock); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { if (lun) mtx_unlock(&lun->lun_lock); io->io_hdr.port_status = 31342; - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); return; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; if (lun) mtx_unlock(&lun->lun_lock); } static void ctl_ha_done(union ctl_io *io) { union ctl_ha_msg msg; if (io->io_hdr.io_type == CTL_IO_SCSI) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); } ctl_free_io(io); } static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, msg_info->scsi.sense_len); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? 
*/ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } void ctl_isc_announce_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg *msg; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&lun->lun_lock); i = sizeof(msg->lun); if (lun->lun_devid) i += lun->lun_devid->len; i += sizeof(pr_key) * lun->pr_key_count; alloc: mtx_unlock(&lun->lun_lock); msg = malloc(i, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); k = sizeof(msg->lun); if (lun->lun_devid) k += lun->lun_devid->len; k += sizeof(pr_key) * lun->pr_key_count; if (i < k) { free(msg, M_CTL); i = k; goto alloc; } bzero(&msg->lun, sizeof(msg->lun)); msg->hdr.msg_type = CTL_MSG_LUN_SYNC; msg->hdr.nexus.targ_lun = lun->lun; msg->hdr.nexus.targ_mapped_lun = lun->lun; msg->lun.flags = lun->flags; msg->lun.pr_generation = lun->pr_generation; msg->lun.pr_res_idx = lun->pr_res_idx; msg->lun.pr_res_type = lun->pr_res_type; msg->lun.pr_key_count = lun->pr_key_count; i = 0; if (lun->lun_devid) { msg->lun.lun_devid_len = lun->lun_devid->len; memcpy(&msg->lun.data[i], lun->lun_devid->data, msg->lun.lun_devid_len); i += msg->lun.lun_devid_len; } for (k = 0; k < CTL_MAX_INITIATORS; k++) { if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) continue; pr_key.pr_iid = k; memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); i += sizeof(pr_key); } mtx_unlock(&lun->lun_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); if (lun->flags & CTL_LUN_PRIMARY_SC) { for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } } } void ctl_isc_announce_port(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; i = sizeof(msg->port) + strlen(port->port_name) + 1; if (port->lun_map) i += port->lun_map_size * sizeof(uint32_t); if (port->port_devid) i += port->port_devid->len; if (port->target_devid) i += port->target_devid->len; if (port->init_devid) i += port->init_devid->len; msg = malloc(i, M_CTL, M_WAITOK); bzero(&msg->port, sizeof(msg->port)); msg->hdr.msg_type = CTL_MSG_PORT_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->port.port_type = port->port_type; msg->port.physical_port = port->physical_port; msg->port.virtual_port = port->virtual_port; msg->port.status = port->status; i = 0; msg->port.name_len = sprintf(&msg->port.data[i], "%d:%s", softc->ha_id, port->port_name) + 1; i += msg->port.name_len; if (port->lun_map) { msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); memcpy(&msg->port.data[i], port->lun_map, msg->port.lun_map_len); i += msg->port.lun_map_len; } if (port->port_devid) { msg->port.port_devid_len = port->port_devid->len; memcpy(&msg->port.data[i], port->port_devid->data, msg->port.port_devid_len); i += msg->port.port_devid_len; } if (port->target_devid) { msg->port.target_devid_len = port->target_devid->len; memcpy(&msg->port.data[i], port->target_devid->data, msg->port.target_devid_len); i += msg->port.target_devid_len; } if (port->init_devid) { msg->port.init_devid_len = port->init_devid->len; memcpy(&msg->port.data[i], port->init_devid->data, msg->port.init_devid_len); i += msg->port.init_devid_len; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, 
&msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); } void ctl_isc_announce_iid(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i, l; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&softc->ctl_lock); i = sizeof(msg->iid); l = 0; if (port->wwpn_iid[iid].name) l = strlen(port->wwpn_iid[iid].name) + 1; i += l; msg = malloc(i, M_CTL, M_NOWAIT); if (msg == NULL) { mtx_unlock(&softc->ctl_lock); return; } bzero(&msg->iid, sizeof(msg->iid)); msg->hdr.msg_type = CTL_MSG_IID_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->hdr.nexus.initid = iid; msg->iid.in_use = port->wwpn_iid[iid].in_use; msg->iid.name_len = l; msg->iid.wwpn = port->wwpn_iid[iid].wwpn; if (port->wwpn_iid[iid].name) strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); mtx_unlock(&softc->ctl_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); free(msg, M_CTL); } void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg msg; u_int i; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == page && lun->mode_pages.index[i].subpage == subpage) break; } if (i == CTL_NUM_MODE_PAGES) return; /* Don't try to replicate pages not present on this device. */ if (lun->mode_pages.index[i].page_data == NULL) return; bzero(&msg.mode, sizeof(msg.mode)); msg.hdr.msg_type = CTL_MSG_MODE_SYNC; msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.mode.page_code = page; msg.mode.subpage = subpage; msg.mode.page_len = lun->mode_pages.index[i].page_len; memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, msg.mode.page_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), M_WAITOK); } static void ctl_isc_ha_link_up(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_ha_msg msg; int i; /* Announce this node parameters to peer for validation. 
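	 * The peer's ctl_isc_login() drops the HA link if CTL_HA_VERSION or
	 * ha_mode differ, if both heads claim the same ha_id, or if the
	 * max_luns/max_ports/max_init_per_port limits do not match ours.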
*/ msg.login.msg_type = CTL_MSG_LOGIN; msg.login.version = CTL_HA_VERSION; msg.login.ha_mode = softc->ha_mode; msg.login.ha_id = softc->ha_id; msg.login.max_luns = ctl_max_luns; msg.login.max_ports = ctl_max_ports; msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), M_WAITOK); STAILQ_FOREACH(port, &softc->port_list, links) { ctl_isc_announce_port(port); for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use) ctl_isc_announce_iid(port, i); } } STAILQ_FOREACH(lun, &softc->lun_list, links) ctl_isc_announce_lun(lun); } static void ctl_isc_ha_link_down(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_io *io; int i; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); io = ctl_alloc_io(softc->othersc_pool); mtx_lock(&softc->ctl_lock); ctl_zero_io(io); io->io_hdr.msg_type = CTL_MSG_FAILOVER; io->io_hdr.nexus.targ_mapped_lun = lun->lun; ctl_enqueue_isc(io); } STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port >= softc->port_min && port->targ_port < softc->port_max) continue; port->status &= ~CTL_PORT_STATUS_ONLINE; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { port->wwpn_iid[i].in_use = 0; free(port->wwpn_iid[i].name, M_CTL); port->wwpn_iid[i].name = NULL; } } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); mtx_lock(&softc->ctl_lock); if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); if (msg->ua.ua_all) { if (msg->ua.ua_set) ctl_est_ua_all(lun, iid, msg->ua.ua_type); else ctl_clr_ua_all(lun, iid, msg->ua.ua_type); } else { if (msg->ua.ua_set) ctl_est_ua(lun, iid, msg->ua.ua_type); else ctl_clr_ua(lun, iid, msg->ua.ua_type); } mtx_unlock(&lun->lun_lock); } static void ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; ctl_lun_flags oflags; uint32_t targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; if (msg->lun.lun_devid_len != i || (i > 0 && memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { mtx_unlock(&lun->lun_lock); printf("%s: Received conflicting HA LUN %d\n", __func__, targ_lun); return; } else { /* Record whether peer is primary. 
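		 * CTL_LUN_PEER_SC_PRIMARY is set only when the peer reports
		 * the LUN both primary and not disabled; any resulting flag
		 * change is surfaced to initiators as an ASYMMETRIC ACCESS
		 * CHANGED unit attention.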
*/ oflags = lun->flags; if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_DISABLED) == 0) lun->flags |= CTL_LUN_PEER_SC_PRIMARY; else lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; if (oflags != lun->flags) ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); /* If peer is primary and we are not -- use data */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { lun->pr_generation = msg->lun.pr_generation; lun->pr_res_idx = msg->lun.pr_res_idx; lun->pr_res_type = msg->lun.pr_res_type; lun->pr_key_count = msg->lun.pr_key_count; for (k = 0; k < CTL_MAX_INITIATORS; k++) ctl_clr_prkey(lun, k); for (k = 0; k < msg->lun.pr_key_count; k++) { memcpy(&pr_key, &msg->lun.data[i], sizeof(pr_key)); ctl_alloc_prkey(lun, pr_key.pr_iid); ctl_set_prkey(lun, pr_key.pr_iid, pr_key.pr_key); i += sizeof(pr_key); } } mtx_unlock(&lun->lun_lock); CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", __func__, targ_lun, (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? "primary" : "secondary")); /* If we are primary but peer doesn't know -- notify */ if ((lun->flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) ctl_isc_announce_lun(lun); } } static void ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; struct ctl_lun *lun; int i, new; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 1; port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); port->frontend = &ha_frontend; port->targ_port = msg->hdr.nexus.targ_port; port->fe_datamove = ctl_ha_datamove; port->fe_done = ctl_ha_done; } else if (port->frontend == &ha_frontend) { CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 0; } else { printf("%s: Received conflicting HA port %d\n", __func__, msg->hdr.nexus.targ_port); return; } port->port_type = msg->port.port_type; port->physical_port = msg->port.physical_port; port->virtual_port = msg->port.virtual_port; port->status = msg->port.status; i = 0; free(port->port_name, M_CTL); port->port_name = strndup(&msg->port.data[i], msg->port.name_len, M_CTL); i += msg->port.name_len; if (msg->port.lun_map_len != 0) { if (port->lun_map == NULL || port->lun_map_size * sizeof(uint32_t) < msg->port.lun_map_len) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(msg->port.lun_map_len, M_CTL, M_WAITOK); } memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); i += msg->port.lun_map_len; } else { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; } if (msg->port.port_devid_len != 0) { if (port->port_devid == NULL || port->port_devid->len < msg->port.port_devid_len) { free(port->port_devid, M_CTL); port->port_devid = malloc(sizeof(struct ctl_devid) + msg->port.port_devid_len, M_CTL, M_WAITOK); } memcpy(port->port_devid->data, &msg->port.data[i], msg->port.port_devid_len); port->port_devid->len = msg->port.port_devid_len; i += msg->port.port_devid_len; } else { free(port->port_devid, M_CTL); port->port_devid = NULL; } if (msg->port.target_devid_len != 0) { if (port->target_devid == NULL || port->target_devid->len < msg->port.target_devid_len) { free(port->target_devid, M_CTL); port->target_devid = malloc(sizeof(struct ctl_devid) + msg->port.target_devid_len, M_CTL, M_WAITOK); } memcpy(port->target_devid->data, &msg->port.data[i], msg->port.target_devid_len); 
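	/*
	 * (As on the announce side, i is a running offset into the packed
	 * variable-length payload, which carries the name, lun_map,
	 * port_devid, target_devid and init_devid in that order.)
	 */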
port->target_devid->len = msg->port.target_devid_len; i += msg->port.target_devid_len; } else { free(port->target_devid, M_CTL); port->target_devid = NULL; } if (msg->port.init_devid_len != 0) { if (port->init_devid == NULL || port->init_devid->len < msg->port.init_devid_len) { free(port->init_devid, M_CTL); port->init_devid = malloc(sizeof(struct ctl_devid) + msg->port.init_devid_len, M_CTL, M_WAITOK); } memcpy(port->init_devid->data, &msg->port.data[i], msg->port.init_devid_len); port->init_devid->len = msg->port.init_devid_len; i += msg->port.init_devid_len; } else { free(port->init_devid, M_CTL); port->init_devid = NULL; } if (new) { if (ctl_port_register(port) != 0) { printf("%s: ctl_port_register() failed with error\n", __func__); } } mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; int iid; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { printf("%s: Received IID for unknown port %d\n", __func__, msg->hdr.nexus.targ_port); return; } iid = msg->hdr.nexus.initid; if (port->wwpn_iid[iid].in_use != 0 && msg->iid.in_use == 0) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); port->wwpn_iid[iid].in_use = msg->iid.in_use; port->wwpn_iid[iid].wwpn = msg->iid.wwpn; free(port->wwpn_iid[iid].name, M_CTL); if (msg->iid.name_len) { port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], msg->iid.name_len, M_CTL); } else port->wwpn_iid[iid].name = NULL; } static void ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { if (msg->login.version != CTL_HA_VERSION) { printf("CTL HA peers have different versions %d != %d\n", msg->login.version, CTL_HA_VERSION); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_mode != softc->ha_mode) { printf("CTL HA peers have different ha_mode %d != %d\n", msg->login.ha_mode, softc->ha_mode); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_id == softc->ha_id) { printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.max_luns != ctl_max_luns || msg->login.max_ports != ctl_max_ports || msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { printf("CTL HA peers have different limits\n"); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } } static void ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; u_int i; uint32_t initidx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == msg->mode.page_code && lun->mode_pages.index[i].subpage == msg->mode.subpage) break; } if (i == CTL_NUM_MODE_PAGES) { mtx_unlock(&lun->lun_lock); return; } memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, lun->mode_pages.index[i].page_len); initidx = ctl_get_initindex(&msg->hdr.nexus); if (initidx != -1) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); } /* * ISC (Inter Shelf 
Communication) event handler. Events from the HA * subsystem come in here. */ static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) { struct ctl_softc *softc = control_softc; union ctl_io *io; struct ctl_prio *presio; ctl_ha_status isc_status; CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); if (event == CTL_HA_EVT_MSG_RECV) { union ctl_ha_msg *msg, msgbuf; if (param > sizeof(msgbuf)) msg = malloc(param, M_CTL, M_WAITOK); else msg = &msgbuf; isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, M_WAITOK); if (isc_status != CTL_HA_STATUS_SUCCESS) { printf("%s: Error receiving message: %d\n", __func__, isc_status); if (msg != &msgbuf) free(msg, M_CTL); return; } CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type)); switch (msg->hdr.msg_type) { case CTL_MSG_SERIALIZE: io = ctl_alloc_io(softc->othersc_pool); ctl_zero_io(io); // populate ctsio from msg io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.msg_type = CTL_MSG_SERIALIZE; io->io_hdr.remote_io = msg->hdr.original_sc; io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | CTL_FLAG_IO_ACTIVE; /* * If we're in serialization-only mode, we don't * want to go through full done processing. Thus * the COPY flag. * * XXX KDM add another flag that is more specific. */ if (softc->ha_mode != CTL_HA_MODE_XFER) io->io_hdr.flags |= CTL_FLAG_INT_COPY; io->io_hdr.nexus = msg->hdr.nexus; io->scsiio.priority = msg->scsi.priority; io->scsiio.tag_num = msg->scsi.tag_num; io->scsiio.tag_type = msg->scsi.tag_type; #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ io->scsiio.cdb_len = msg->scsi.cdb_len; memcpy(io->scsiio.cdb, msg->scsi.cdb, CTL_MAX_CDBLEN); if (softc->ha_mode == CTL_HA_MODE_XFER) { const struct ctl_cmd_entry *entry; entry = ctl_get_cmd_entry(&io->scsiio, NULL); io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; io->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; } ctl_enqueue_isc(io); break; /* Performed on the Originating SC, XFER mode only */ case CTL_MSG_DATAMOVE: { struct ctl_sg_entry *sgl; int i, j; io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM do something here */ break; } io->io_hdr.msg_type = CTL_MSG_DATAMOVE; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* * Keep track of this, we need to send it back over * when the datamove is complete. 
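			 * Pointers in HA messages are only dereferenceable
			 * on the SC that created them: original_sc is our
			 * ctl_io, serializing_sc is the peer's.  Each side
			 * stores the peer's pointer in remote_io and echoes
			 * it back so the peer can match the reply to its
			 * request.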
			 */
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			if (msg->hdr.status == CTL_SUCCESS)
				io->io_hdr.status = msg->hdr.status;

			if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
				getbinuptime(&io->io_hdr.dma_start_bt);
#endif
				i = msg->dt.kern_sg_entries +
				    msg->dt.kern_data_len /
				    CTL_HA_DATAMOVE_SEGMENT + 1;
				sgl = malloc(sizeof(*sgl) * i, M_CTL,
				    M_WAITOK | M_ZERO);
				CTL_RSGL(io) = sgl;
				CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries];

				io->scsiio.kern_data_ptr = (uint8_t *)sgl;

				io->scsiio.kern_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.rem_sg_entries =
					msg->dt.kern_sg_entries;
				io->scsiio.kern_data_len =
					msg->dt.kern_data_len;
				io->scsiio.kern_total_len =
					msg->dt.kern_total_len;
				io->scsiio.kern_data_resid =
					msg->dt.kern_data_resid;
				io->scsiio.kern_rel_offset =
					msg->dt.kern_rel_offset;
				io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
				io->io_hdr.flags |= msg->dt.flags &
				    CTL_FLAG_BUS_ADDR;
			} else
				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;

			for (i = msg->dt.sent_sg_entries, j = 0;
			     i < (msg->dt.sent_sg_entries +
			     msg->dt.cur_sg_entries); i++, j++) {
				sgl[i].addr = msg->dt.sg_list[j].addr;
				sgl[i].len = msg->dt.sg_list[j].len;
			}

			/*
			 * If this is the last piece of the I/O, we've got
			 * the full S/G list.  Queue processing in the thread.
			 * Otherwise wait for the next piece.
			 */
			if (msg->dt.sg_last != 0)
				ctl_enqueue_isc(io);
			break;
		}
		/* Performed on the Serializing (primary) SC, XFER mode only */
		case CTL_MSG_DATAMOVE_DONE: {
			if (msg->hdr.serializing_sc == NULL) {
				printf("%s: serializing_sc == NULL!\n",
				       __func__);
				/* XXX KDM now what? */
				break;
			}
			/*
			 * We grab the sense information here in case
			 * there was a failure, so we can return status
			 * back to the initiator.
			 */
			io = msg->hdr.serializing_sc;
			io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
			io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.port_status = msg->scsi.port_status;
			io->scsiio.kern_data_resid = msg->scsi.kern_data_resid;
			if (msg->hdr.status != CTL_STATUS_NONE) {
				io->io_hdr.status = msg->hdr.status;
				io->scsiio.scsi_status = msg->scsi.scsi_status;
				io->scsiio.sense_len = msg->scsi.sense_len;
				memcpy(&io->scsiio.sense_data,
				    &msg->scsi.sense_data,
				    msg->scsi.sense_len);
				if (msg->hdr.status == CTL_SUCCESS)
					io->io_hdr.flags |=
					    CTL_FLAG_STATUS_SENT;
			}
			ctl_enqueue_isc(io);
			break;
		}

		/* Performed on the Originating SC, SER_ONLY mode */
		case CTL_MSG_R2R:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: original_sc == NULL!\n",
				    __func__);
				break;
			}
			io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
			io->io_hdr.msg_type = CTL_MSG_R2R;
			io->io_hdr.remote_io = msg->hdr.serializing_sc;
			ctl_enqueue_isc(io);
			break;

		/*
		 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
		 * mode.
		 * Performed on the Originating (i.e. secondary) SC in XFER
		 * mode.
		 */
		case CTL_MSG_FINISH_IO:
			if (softc->ha_mode == CTL_HA_MODE_XFER)
				ctl_isc_handler_finish_xfer(softc, msg);
			else
				ctl_isc_handler_finish_ser_only(softc, msg);
			break;

		/* Performed on the Originating SC */
		case CTL_MSG_BAD_JUJU:
			io = msg->hdr.original_sc;
			if (io == NULL) {
				printf("%s: Bad JUJU!, original_sc is NULL!\n",
				       __func__);
				break;
			}
			ctl_copy_sense_data(msg, io);
			/*
			 * IO should have already been cleaned up on other
			 * SC so clear this flag so we won't send a message
			 * back to finish the IO there.
*/ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* io = msg->hdr.serializing_sc; */ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_enqueue_isc(io); break; /* Handle resets sent from the other side */ case CTL_MSG_MANAGE_TASKS: { struct ctl_taskio *taskio; taskio = (struct ctl_taskio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)taskio); taskio->io_hdr.io_type = CTL_IO_TASK; taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; taskio->io_hdr.nexus = msg->hdr.nexus; taskio->task_action = msg->task.task_action; taskio->tag_num = msg->task.tag_num; taskio->tag_type = msg->task.tag_type; #ifdef CTL_TIME_IO taskio->io_hdr.start_time = time_uptime; getbinuptime(&taskio->io_hdr.start_bt); #endif /* CTL_TIME_IO */ ctl_run_task((union ctl_io *)taskio); break; } /* Persistent Reserve action which needs attention */ case CTL_MSG_PERS_ACTION: presio = (struct ctl_prio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)presio); presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; presio->io_hdr.nexus = msg->hdr.nexus; presio->pr_msg = msg->pr; ctl_enqueue_isc((union ctl_io *)presio); break; case CTL_MSG_UA: ctl_isc_ua(softc, msg, param); break; case CTL_MSG_PORT_SYNC: ctl_isc_port_sync(softc, msg, param); break; case CTL_MSG_LUN_SYNC: ctl_isc_lun_sync(softc, msg, param); break; case CTL_MSG_IID_SYNC: ctl_isc_iid_sync(softc, msg, param); break; case CTL_MSG_LOGIN: ctl_isc_login(softc, msg, param); break; case CTL_MSG_MODE_SYNC: ctl_isc_mode_sync(softc, msg, param); break; default: printf("Received HA message of unknown type %d\n", msg->hdr.msg_type); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); break; } if (msg != &msgbuf) free(msg, M_CTL); } else if (event == CTL_HA_EVT_LINK_CHANGE) { printf("CTL: HA link status changed from %d to %d\n", softc->ha_link, param); if (param == softc->ha_link) return; if (softc->ha_link == CTL_HA_LINK_ONLINE) { softc->ha_link = param; ctl_isc_ha_link_down(softc); } else { softc->ha_link = param; if (softc->ha_link == CTL_HA_LINK_ONLINE) ctl_isc_ha_link_up(softc); } return; } else { printf("ctl_isc_event_handler: Unknown event %d\n", event); return; } } static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) { memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, src->scsi.sense_len); dest->scsiio.scsi_status = src->scsi.scsi_status; dest->scsiio.sense_len = src->scsi.sense_len; dest->io_hdr.status = src->hdr.status; } static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) { memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, src->scsiio.sense_len); dest->scsi.scsi_status = src->scsiio.scsi_status; dest->scsi.sense_len = src->scsiio.sense_len; dest->hdr.status = src->io_hdr.status; } void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; } void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) { int i; mtx_assert(&lun->lun_lock, MA_OWNED); if (lun->pending_ua[port] == NULL) return; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port * CTL_MAX_INIT_PER_PORT + i == except) continue; lun->pending_ua[port][i] |= ua; } } void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { 
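	/*
	 * Establish a unit attention for every initiator on this SC's
	 * ports except 'except'.  pending_ua is indexed per (port,
	 * initiator-on-port) pair; a flat initiator index maps to
	 * pending_ua[initidx / CTL_MAX_INIT_PER_PORT]
	 *           [initidx % CTL_MAX_INIT_PER_PORT], as in ctl_est_ua().
	 */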
struct ctl_softc *softc = lun->ctl_softc; int i; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) ctl_est_ua_port(lun, i, except, ua); } void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } } static int ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; struct ctl_lun_req ireq; int error, value; value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_unlock(&softc->ctl_lock); bzero(&ireq, sizeof(ireq)); ireq.reqtype = CTL_LUNREQ_MODIFY; ireq.reqdata.modify.lun_id = lun->lun; lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, curthread); if (ireq.status != CTL_LUN_OK) { printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", __func__, ireq.status, ireq.error_str); } mtx_lock(&softc->ctl_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { struct make_dev_args args; struct ctl_softc *softc; int i, error; softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); make_dev_args_init(&args); args.mda_devsw = &ctl_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = softc; args.mda_si_drv2 = NULL; error = make_dev_s(&args, &softc->dev, "cam/ctl"); if (error != 0) { free(softc, M_DEVBUF); control_softc = NULL; return (error); } sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->flags = 0; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", ctl_max_luns, 
CTL_DEFAULT_MAX_LUNS); ctl_max_luns = CTL_DEFAULT_MAX_LUNS; } softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_lun_mask = malloc(sizeof(uint32_t) * ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", ctl_max_ports, CTL_DEFAULT_MAX_PORTS); ctl_max_ports = CTL_DEFAULT_MAX_PORTS; } softc->ctl_port_mask = malloc(sizeof(uint32_t) * ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_cnt = ctl_max_ports; softc->port_min = 0; } else { softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; softc->port_min = (softc->ha_id - 1) * softc->port_cnt; } softc->port_max = softc->port_min + softc->port_cnt; softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, "HA link state (0 - offline, 1 - unknown, 2 - online)"); STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); return (error); } } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); return (error); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); if (softc->is_single == 0) { if (ctl_frontend_register(&ha_frontend) != 0) softc->is_single = 1; } return (0); } static int ctl_shutdown(void) { struct ctl_softc *softc = control_softc; int i; if (softc->is_single == 0) ctl_frontend_deregister(&ha_frontend); destroy_dev(softc->dev); /* Shutdown CTL threads. 
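	 * Setting softc->shutdown asks each worker thread and the threshold
	 * thread to exit; the wakeup/pause loops below then poll until each
	 * thread clears its thread pointer on the way out.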
	 */
	softc->shutdown = 1;
	for (i = 0; i < worker_threads; i++) {
		struct ctl_thread *thr = &softc->threads[i];
		while (thr->thread != NULL) {
			wakeup(thr);
			if (thr->thread != NULL)
				pause("CTL thr shutdown", 1);
		}
		mtx_destroy(&thr->queue_lock);
	}
	while (softc->thresh_thread != NULL) {
		wakeup(softc->thresh_thread);
		if (softc->thresh_thread != NULL)
			pause("CTL thr shutdown", 1);
	}
	ctl_tpc_shutdown(softc);
	uma_zdestroy(softc->io_zone);
	mtx_destroy(&softc->ctl_lock);
	free(softc->ctl_luns, M_DEVBUF);
	free(softc->ctl_lun_mask, M_DEVBUF);
	free(softc->ctl_port_mask, M_DEVBUF);
	free(softc->ctl_ports, M_DEVBUF);
	sysctl_ctx_free(&softc->sysctl_ctx);
	free(softc, M_DEVBUF);
	control_softc = NULL;
	return (0);
}

static int
ctl_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return (ctl_init());
	case MOD_UNLOAD:
		return (ctl_shutdown());
	default:
		return (EOPNOTSUPP);
	}
}

/*
 * XXX KDM should we do some access checks here?  Bump a reference count to
 * prevent a CTL module from being unloaded while someone has it open?
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = port->ctl_softc;
	int last;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u >= maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	last = (--port->wwpn_iid[iid].in_use == 0);
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);
	if (last)
		ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON);
	ctl_isc_announce_iid(port, iid);
	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = port->ctl_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, (uintmax_t)wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time =
					    port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
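		 * If the WWPN or the name matches what is already
		 * registered at this iid, the same initiator has simply
		 * re-arrived and the existing slot can be reused as-is.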
		 */
		if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
			printf("%s: port %d iid %u WWPN %#jx arrived"
			    " again\n", __func__, port->targ_port,
			    iid, (uintmax_t)wwpn);
#endif
			goto take;
		}
		if (name != NULL && port->wwpn_iid[iid].name != NULL &&
		    strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
			printf("%s: port %d iid %u name '%s' arrived"
			    " again\n", __func__, port->targ_port,
			    iid, name);
#endif
			goto take;
		}

		/*
		 * This is an error, but what do we do about it?  The
		 * driver is telling us we have a new WWPN for this
		 * initiator ID, so we pretty much need to use it.
		 */
		printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
		    " but WWPN %#jx '%s' is still at that address\n",
		    __func__, port->targ_port, iid, (uintmax_t)wwpn, name,
		    (uintmax_t)port->wwpn_iid[iid].wwpn,
		    port->wwpn_iid[iid].name);
	}
take:
	free(port->wwpn_iid[iid].name, M_CTL);
	port->wwpn_iid[iid].name = name;
	port->wwpn_iid[iid].wwpn = wwpn;
	port->wwpn_iid[iid].in_use++;
	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_iid(port, iid);

	return (iid);
}

static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
	int len;

	switch (port->port_type) {
	case CTL_PORT_FC:
	{
		struct scsi_transportid_fcp *id =
		    (struct scsi_transportid_fcp *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_FC;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
		return (sizeof(*id));
	}
	case CTL_PORT_ISCSI:
	{
		struct scsi_transportid_iscsi_port *id =
		    (struct scsi_transportid_iscsi_port *)buf;
		if (port->wwpn_iid[iid].name == NULL)
			return (0);
		memset(id, 0, 256);
		id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
		    SCSI_PROTO_ISCSI;
		len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
		len = roundup2(min(len, 252), 4);
		scsi_ulto2b(len, id->additional_length);
		return (sizeof(*id) + len);
	}
	case CTL_PORT_SAS:
	{
		struct scsi_transportid_sas *id =
		    (struct scsi_transportid_sas *)buf;
		if (port->wwpn_iid[iid].wwpn == 0)
			return (0);
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SAS;
		scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
		return (sizeof(*id));
	}
	default:
	{
		struct scsi_transportid_spi *id =
		    (struct scsi_transportid_spi *)buf;
		memset(id, 0, sizeof(*id));
		id->format_protocol = SCSI_PROTO_SPI;
		scsi_ulto2b(iid, id->scsi_addr);
		scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
		return (sizeof(*id));
	}
	}
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
 * this controller for execution.  The logic is a little different from the
 * standard case in ctl_scsiio_precheck().  Errors in this case need to get
 * sent back to the other side, but in the success case, we execute the
 * command on this side (XFER mode) or tell the other side to execute it
 * (SER_ONLY mode).
 */
static void
ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = CTL_SOFTC(ctsio);
	struct ctl_port *port = CTL_PORT(ctsio);
	union ctl_ha_msg msg_info;
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	union ctl_io *bio;
	uint32_t targ_lun;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;

	/* Make sure that we know about this port. */
	if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) {
		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
					 /*retry_count*/ 1);
		goto badjuju;
	}

	/* Make sure that we know about this LUN.
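	 * The mapped LUN index was produced on the other node, so it is
	 * validated against our own LUN table before being dereferenced.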
	 */
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);

		/*
		 * The other node would not send this request to us unless
		 * it had received an announcement that we are the primary
		 * node for this LUN.  If this LUN does not exist now, it is
		 * probably the result of a race, so respond to the
		 * initiator in the most opaque way.
		 */
		ctl_set_busy(ctsio);
		goto badjuju;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);

	/*
	 * If the LUN is invalid, pretend that it doesn't exist.
	 * It will go away as soon as all pending I/Os have completed.
	 */
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		ctl_set_busy(ctsio);
		goto badjuju;
	}

	entry = ctl_get_cmd_entry(ctsio, NULL);
	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		mtx_unlock(&lun->lun_lock);
		goto badjuju;
	}

	CTL_LUN(ctsio) = lun;
	CTL_BACKEND_LUN(ctsio) = lun->be_lun;

	/*
	 * Every I/O goes into the OOA queue for a
	 * particular LUN, and stays there until completion.
	 */
#ifdef CTL_TIME_IO
	if (LIST_EMPTY(&lun->ooa_queue))
		lun->idle_time += getsbinuptime() - lun->last_busy;
#endif
	LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);

	bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links);
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.blocker = bio;
		TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		break;
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		if (softc->ha_mode == CTL_HA_MODE_XFER) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			mtx_unlock(&lun->lun_lock);
		} else {
			ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
			mtx_unlock(&lun->lun_lock);

			/* send msg back to other side */
			msg_info.hdr.original_sc = ctsio->io_hdr.remote_io;
			msg_info.hdr.serializing_sc = (union ctl_io *)ctsio;
			msg_info.hdr.msg_type = CTL_MSG_R2R;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info.hdr), M_WAITOK);
		}
		break;
	case CTL_ACTION_OVERLAP:
		LIST_REMOVE(&ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		goto badjuju;
	case CTL_ACTION_OVERLAP_TAG:
		LIST_REMOVE(&ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num);
		goto badjuju;
	case CTL_ACTION_ERROR:
	default:
		LIST_REMOVE(&ctsio->io_hdr, ooa_links);
		mtx_unlock(&lun->lun_lock);
		ctl_set_internal_failure(ctsio, /*sks_valid*/ 0,
					 /*retry_count*/ 0);
badjuju:
		ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info);
		msg_info.hdr.original_sc = ctsio->io_hdr.remote_io;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info.scsi), M_WAITOK);
		ctl_free_io((union ctl_io *)ctsio);
		break;
	}
}

/*
 * Fill the caller's buffer with entries from the LUN's OOA (order of
 * arrival) queue, oldest command first.
 */
static void
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	struct ctl_io_hdr *ioh;

	mtx_lock(&lun->lun_lock);
	ioh = LIST_FIRST(&lun->ooa_queue);
	if (ioh == NULL) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	while (LIST_NEXT(ioh, ooa_links) != NULL)
		ioh = LIST_NEXT(ioh, ooa_links);
	for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr,
	    ooa_links)) {
		union ctl_io *io = (union ctl_io *)ioh;
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
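		 * The surplus is reported back to the caller through
		 * dropped_num so it can retry with a bigger buffer.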
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num) {
			(*cur_fill_num)++;
			continue;
		}

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		if (io->io_hdr.blocker != NULL)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;

		if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED;

		if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT;
		(*cur_fill_num)++;
	}
	mtx_unlock(&lun->lun_lock);
}

/*
 * Escape characters that are illegal or not recommended in XML.
 */
int
ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size)
{
	char *end = str + size;
	int retval;

	retval = 0;

	for (; *str && str < end; str++) {
		switch (*str) {
		case '&':
			retval = sbuf_printf(sb, "&amp;");
			break;
		case '>':
			retval = sbuf_printf(sb, "&gt;");
			break;
		case '<':
			retval = sbuf_printf(sb, "&lt;");
			break;
		default:
			retval = sbuf_putc(sb, *str);
			break;
		}

		if (retval != 0)
			break;
	}

	return (retval);
}

static void
ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb)
{
	struct scsi_vpd_id_descriptor *desc;
	int i;

	if (id == NULL || id->len < 4)
		return;
	desc = (struct scsi_vpd_id_descriptor *)id->data;
	switch (desc->id_type & SVPD_ID_TYPE_MASK) {
	case SVPD_ID_TYPE_T10:
		sbuf_printf(sb, "t10.");
		break;
	case SVPD_ID_TYPE_EUI64:
		sbuf_printf(sb, "eui.");
		break;
	case SVPD_ID_TYPE_NAA:
		sbuf_printf(sb, "naa.");
		break;
	case SVPD_ID_TYPE_SCSI_NAME:
		break;
	}
	switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) {
	case SVPD_ID_CODESET_BINARY:
		for (i = 0; i < desc->length; i++)
			sbuf_printf(sb, "%02x", desc->identifier[i]);
		break;
	case SVPD_ID_CODESET_ASCII:
		sbuf_printf(sb, "%.*s", (int)desc->length,
		    (char *)desc->identifier);
		break;
	case SVPD_ID_CODESET_UTF8:
		sbuf_printf(sb, "%s", (char *)desc->identifier);
		break;
	}
}

static int
ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	  struct thread *td)
{
	struct ctl_softc *softc = dev->si_drv1;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	retval = 0;

	switch (cmd) {
	case CTL_IO:
		retval = ctl_ioctl_io(dev, cmd, addr, flag, td);
		break;
	case CTL_ENABLE_PORT:
	case CTL_DISABLE_PORT:
	case CTL_SET_PORT_WWNS: {
		struct ctl_port *port;
		struct ctl_port_entry *entry;

		entry = (struct ctl_port_entry *)addr;

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(port, &softc->port_list, links) {
			int action, done;

			if (port->targ_port < softc->port_min ||
			    port->targ_port >= softc->port_max)
				continue;

			action = 0;
			done = 0;
			if ((entry->port_type == CTL_PORT_NONE)
			 && (entry->targ_port == port->targ_port)) {
				/*
				 * If the user only wants to enable or
				 * disable or set WWNs on a specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & port->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
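				 * A WWNN/WWPN pair is supposed to be
				 * unique, so assigning one to more than a
				 * single port at once makes no sense.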
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action == 0)
				continue;

			/*
			 * XXX KDM we have to drop the lock here, because
			 * the online/offline operations can potentially
			 * block.  We need to reference count the frontends
			 * so they can't go away.
			 */
			if (cmd == CTL_ENABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_online(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_DISABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_offline(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_SET_PORT_WWNS) {
				ctl_port_set_wwns(port,
				    (entry->flags & CTL_PORT_WWNN_VALID) ?
				    1 : 0, entry->wwnn,
				    (entry->flags & CTL_PORT_WWPN_VALID) ?
				    1 : 0, entry->wwpn);
			}
			if (done != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	case CTL_GET_OOA: {
		struct ctl_ooa *ooa_hdr;
		struct ctl_ooa_entry *entries;
		uint32_t cur_fill_num;

		ooa_hdr = (struct ctl_ooa *)addr;

		if ((ooa_hdr->alloc_len == 0)
		 || (ooa_hdr->alloc_num == 0)) {
			printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
			       "must be non-zero\n", __func__,
			       ooa_hdr->alloc_len, ooa_hdr->alloc_num);
			retval = EINVAL;
			break;
		}

		if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
		    sizeof(struct ctl_ooa_entry))) {
			printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
			       "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
			       __func__, ooa_hdr->alloc_len,
			       ooa_hdr->alloc_num,
			       sizeof(struct ctl_ooa_entry));
			retval = EINVAL;
			break;
		}

		entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
		if (entries == NULL) {
			printf("%s: could not allocate %d bytes for OOA "
			       "dump\n", __func__, ooa_hdr->alloc_len);
			retval = ENOMEM;
			break;
		}

		mtx_lock(&softc->ctl_lock);
		if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
		    (ooa_hdr->lun_num >= ctl_max_luns ||
		     softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
			mtx_unlock(&softc->ctl_lock);
			free(entries, M_CTL);
			printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
			       __func__, (uintmax_t)ooa_hdr->lun_num);
			retval = EINVAL;
			break;
		}

		cur_fill_num = 0;

		if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
			STAILQ_FOREACH(lun, &softc->lun_list, links) {
				ctl_ioctl_fill_ooa(lun, &cur_fill_num,
				    ooa_hdr, entries);
			}
		} else {
			lun = softc->ctl_luns[ooa_hdr->lun_num];
			ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
			    entries);
		}
		mtx_unlock(&softc->ctl_lock);

		ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
		ooa_hdr->fill_len = ooa_hdr->fill_num *
			sizeof(struct ctl_ooa_entry);
		retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
		if (retval != 0) {
			printf("%s: error copying out %d bytes for OOA dump\n",
			       __func__, ooa_hdr->fill_len);
		}

		getbinuptime(&ooa_hdr->cur_bt);

		if (cur_fill_num > ooa_hdr->alloc_num) {
			ooa_hdr->dropped_num = cur_fill_num - ooa_hdr->alloc_num;
			ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
		} else {
			ooa_hdr->dropped_num = 0;
			ooa_hdr->status = CTL_OOA_OK;
		}

		free(entries, M_CTL);
		break;
	}
	case CTL_DELAY_IO: {
		struct ctl_io_delay_info *delay_info;

		delay_info = (struct ctl_io_delay_info *)addr;

#ifdef CTL_IO_DELAY
		mtx_lock(&softc->ctl_lock);
		if (delay_info->lun_id >= ctl_max_luns ||
		    (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
			mtx_unlock(&softc->ctl_lock);
			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
			break;
		}
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);

		delay_info->status = CTL_DELAY_STATUS_OK;

		switch (delay_info->delay_type) {
		case CTL_DELAY_TYPE_CONT:
		case CTL_DELAY_TYPE_ONESHOT:
			break;
		default:
			delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE;
			break;
		}

		switch (delay_info->delay_loc) {
		case CTL_DELAY_LOC_DATAMOVE:
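			/* Delay is injected before the data-movement phase. */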
lun->delay_info.datamove_type = delay_info->delay_type; lun->delay_info.datamove_delay = delay_info->delay_secs; break; case CTL_DELAY_LOC_DONE: lun->delay_info.done_type = delay_info->delay_type; lun->delay_info.done_delay = delay_info->delay_secs; break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; break; } mtx_unlock(&lun->lun_lock); #else delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; #endif /* CTL_IO_DELAY */ break; } case CTL_ERROR_INJECT: { struct ctl_error_desc *err_desc, *new_err_desc; err_desc = (struct ctl_error_desc *)addr; new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, M_WAITOK | M_ZERO); bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); mtx_lock(&softc->ctl_lock); if (err_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); free(new_err_desc, M_CTL); printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", __func__, (uintmax_t)err_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * We could do some checking here to verify the validity * of the request, but given the complexity of error * injection requests, the checking logic would be fairly * complex. * * For now, if the request is invalid, it just won't get * executed and might get deleted. */ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); /* * XXX KDM check to make sure the serial number is unique, * in case we somehow manage to wrap. That shouldn't * happen for a very long time, but it's the right thing to * do. */ new_err_desc->serial = lun->error_serial; err_desc->serial = lun->error_serial; lun->error_serial++; mtx_unlock(&lun->lun_lock); break; } case CTL_ERROR_INJECT_DELETE: { struct ctl_error_desc *delete_desc, *desc, *desc2; int delete_done; delete_desc = (struct ctl_error_desc *)addr; delete_done = 0; mtx_lock(&softc->ctl_lock); if (delete_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", __func__, (uintmax_t)delete_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { if (desc->serial != delete_desc->serial) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); delete_done = 1; } mtx_unlock(&lun->lun_lock); if (delete_done == 0) { printf("%s: CTL_ERROR_INJECT_DELETE: can't find " "error serial %ju on LUN %u\n", __func__, delete_desc->serial, delete_desc->lun_id); retval = EINVAL; break; } break; } case CTL_DUMP_STRUCTS: { int j, k; struct ctl_port *port; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); printf("CTL Persistent Reservation information start:\n"); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) != 0) { mtx_unlock(&lun->lun_lock); continue; } for (j = 0; j < ctl_max_ports; j++) { if (lun->pr_keys[j] == NULL) continue; for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ if (lun->pr_keys[j][k] == 0) continue; printf(" LUN %ju port %d iid %d key " "%#jx\n", lun->lun, j, k, (uintmax_t)lun->pr_keys[j][k]); } } mtx_unlock(&lun->lun_lock); } printf("CTL Persistent Reservation information end\n"); printf("CTL Ports:\n"); STAILQ_FOREACH(port, &softc->port_list, links) { printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " "%#jx WWPN %#jx\n", port->targ_port, port->port_name, port->frontend->name, port->port_type, port->physical_port, 
port->virtual_port, (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 && port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL) continue; printf(" iid %u use %d WWPN %#jx '%s'\n", j, port->wwpn_iid[j].in_use, (uintmax_t)port->wwpn_iid[j].wwpn, port->wwpn_iid[j].name); } } printf("CTL Port information end\n"); mtx_unlock(&softc->ctl_lock); /* * XXX KDM calling this without a lock. We'd likely want * to drop the lock before calling the frontend's dump * routine anyway. */ printf("CTL Frontends:\n"); STAILQ_FOREACH(fe, &softc->fe_list, links) { printf(" Frontend '%s'\n", fe->name); if (fe->fe_dump != NULL) fe->fe_dump(); } printf("CTL Frontend information end\n"); break; } case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; struct ctl_backend_driver *backend; void *packed; nvlist_t *tmp_args_nvl; size_t packed_len; lun_req = (struct ctl_lun_req *)addr; tmp_args_nvl = lun_req->args_nvl; backend = ctl_backend_find(lun_req->backend); if (backend == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Backend \"%s\" not found.", lun_req->backend); break; } if (lun_req->args != NULL) { packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { free(packed, M_CTL); lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyin args."); break; } lun_req->args_nvl = nvlist_unpack(packed, lun_req->args_len, 0); free(packed, M_CTL); if (lun_req->args_nvl == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot unpack args nvlist."); break; } } else lun_req->args_nvl = nvlist_create(0); retval = backend->ioctl(dev, cmd, addr, flag, td); nvlist_destroy(lun_req->args_nvl); lun_req->args_nvl = tmp_args_nvl; if (lun_req->result_nvl != NULL) { if (lun_req->result != NULL) { packed = nvlist_pack(lun_req->result_nvl, &packed_len); if (packed == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot pack result nvlist."); break; } if (packed_len > lun_req->result_len) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Result nvlist too large."); free(packed, M_NVLIST); break; } if (copyout(packed, lun_req->result, packed_len)) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyout() the result."); free(packed, M_NVLIST); break; } lun_req->result_len = packed_len; free(packed, M_NVLIST); } nvlist_destroy(lun_req->result_nvl); } break; } case CTL_LUN_LIST: { struct sbuf *sb; struct ctl_lun_list *list; const char *name, *value; void *cookie; int type; list = (struct ctl_lun_list *)addr; /* * Allocate a fixed length sbuf here, based on the length * of the user's buffer. We could allocate an auto-extending * buffer, and then tell the user how much larger our * amount of data is than his buffer, but that presents * some problems: * * 1. The sbuf(9) routines use a blocking malloc, and so * we can't hold a lock while calling them with an * auto-extending buffer. * * 2. There is not currently a LUN reference counting * mechanism, outside of outstanding transactions on * the LUN's OOA queue. So a LUN could go away on us * while we're getting the LUN number, backend-specific * information, etc. Thus, given the way things * currently work, we need to hold the CTL lock while * grabbing LUN information. 
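		 *    (This is also why the sbuf is created with
		 *    SBUF_FIXEDLEN below: appending to it never allocates,
		 *    so it is safe to fill while holding the lock.)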
		 *
		 * So, from the user's standpoint, the best thing to do is
		 * allocate what he thinks is a reasonable buffer length,
		 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error,
		 * double the buffer length and try again.  (And repeat
		 * that until he succeeds.)
		 */
		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
		if (sb == NULL) {
			list->status = CTL_LUN_LIST_ERROR;
			snprintf(list->error_str, sizeof(list->error_str),
				 "Unable to allocate %d bytes for LUN list",
				 list->alloc_len);
			break;
		}

		sbuf_printf(sb, "<ctllunlist>\n");

		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			mtx_lock(&lun->lun_lock);
			retval = sbuf_printf(sb, "<lun id=\"%ju\">\n",
					     (uintmax_t)lun->lun);

			/*
			 * Bail out as soon as we see that we've overfilled
			 * the buffer.
			 */
			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<backend_type>%s"
					     "</backend_type>\n",
					     (lun->backend == NULL) ? "none" :
					     lun->backend->name);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n",
					     lun->be_lun->lun_type);

			if (retval != 0)
				break;

			if (lun->backend == NULL) {
				retval = sbuf_printf(sb, "</lun>\n");
				if (retval != 0)
					break;
				continue;
			}

			retval = sbuf_printf(sb, "\t<size>%ju</size>\n",
					     (lun->be_lun->maxlba > 0) ?
					     lun->be_lun->maxlba + 1 : 0);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n",
					     lun->be_lun->blocksize);

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<serial_number>");

			if (retval != 0)
				break;

			retval = ctl_sbuf_printf_esc(sb,
			    lun->be_lun->serial_num,
			    sizeof(lun->be_lun->serial_num));

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</serial_number>\n");

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "\t<device_id>");

			if (retval != 0)
				break;

			retval = ctl_sbuf_printf_esc(sb,
			    lun->be_lun->device_id,
			    sizeof(lun->be_lun->device_id));

			if (retval != 0)
				break;

			retval = sbuf_printf(sb, "</device_id>\n");

			if (retval != 0)
				break;

			if (lun->backend->lun_info != NULL) {
				retval = lun->backend->lun_info(lun->be_lun, sb);
				if (retval != 0)
					break;
			}

			cookie = NULL;
			while ((name = nvlist_next(lun->be_lun->options, &type,
			    &cookie)) != NULL) {
				sbuf_printf(sb, "\t<%s>", name);

				if (type == NV_TYPE_STRING) {
					value = dnvlist_get_string(
					    lun->be_lun->options, name, NULL);
					if (value != NULL)
						sbuf_printf(sb, "%s", value);
				}

				sbuf_printf(sb, "</%s>\n", name);
			}

			retval = sbuf_printf(sb, "</lun>\n");

			if (retval != 0)
				break;
			mtx_unlock(&lun->lun_lock);
		}
		if (lun != NULL)
			mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);

		if ((retval != 0)
		 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) {
			retval = 0;
			sbuf_delete(sb);
			list->status = CTL_LUN_LIST_NEED_MORE_SPACE;
			snprintf(list->error_str, sizeof(list->error_str),
				 "Out of space, %d bytes is too small",
				 list->alloc_len);
			break;
		}

		sbuf_finish(sb);

		retval = copyout(sbuf_data(sb), list->lun_xml,
				 sbuf_len(sb) + 1);

		list->fill_len = sbuf_len(sb) + 1;
		list->status = CTL_LUN_LIST_OK;
		sbuf_delete(sb);
		break;
	}
	case CTL_ISCSI: {
		struct ctl_iscsi *ci;
		struct ctl_frontend *fe;

		ci = (struct ctl_iscsi *)addr;

		fe = ctl_frontend_find("iscsi");
		if (fe == NULL) {
			ci->status = CTL_ISCSI_ERROR;
			snprintf(ci->error_str, sizeof(ci->error_str),
				 "Frontend \"iscsi\" not found.");
			break;
		}

		retval = fe->ioctl(dev, cmd, addr, flag, td);
		break;
	}
	case CTL_PORT_REQ: {
		struct ctl_req *req;
		struct ctl_frontend *fe;
		void *packed;
		nvlist_t *tmp_args_nvl;
		size_t packed_len;

		req = (struct ctl_req *)addr;
		tmp_args_nvl = req->args_nvl;

		fe = ctl_frontend_find(req->driver);
		if (fe == NULL) {
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Frontend \"%s\" not found.", req->driver);
			break;
		}

		if (req->args != NULL) {
			packed = malloc(req->args_len, M_CTL, M_WAITOK);
			if (copyin(req->args, packed, req->args_len) != 0) {
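				/*
				 * Copyin failed; no nvlist has been
				 * unpacked yet, so just drop the staging
				 * buffer and report the error.
				 */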
				free(packed, M_CTL);
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str,
				    sizeof(req->error_str),
				    "Cannot copyin args.");
				break;
			}

			req->args_nvl = nvlist_unpack(packed,
			    req->args_len, 0);
			free(packed, M_CTL);

			if (req->args_nvl == NULL) {
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str,
				    sizeof(req->error_str),
				    "Cannot unpack args nvlist.");
				break;
			}
		} else
			req->args_nvl = nvlist_create(0);

		if (fe->ioctl)
			retval = fe->ioctl(dev, cmd, addr, flag, td);
		else
			retval = ENODEV;

		nvlist_destroy(req->args_nvl);
		req->args_nvl = tmp_args_nvl;

		if (req->result_nvl != NULL) {
			if (req->result != NULL) {
				packed = nvlist_pack(req->result_nvl,
				    &packed_len);
				if (packed == NULL) {
					req->status = CTL_LUN_ERROR;
					snprintf(req->error_str,
					    sizeof(req->error_str),
					    "Cannot pack result nvlist.");
					break;
				}

				if (packed_len > req->result_len) {
					req->status = CTL_LUN_ERROR;
					snprintf(req->error_str,
					    sizeof(req->error_str),
					    "Result nvlist too large.");
					free(packed, M_NVLIST);
					break;
				}

				if (copyout(packed, req->result, packed_len)) {
					req->status = CTL_LUN_ERROR;
					snprintf(req->error_str,
					    sizeof(req->error_str),
					    "Cannot copyout() the result.");
					free(packed, M_NVLIST);
					break;
				}

				req->result_len = packed_len;
				free(packed, M_NVLIST);
			}

			nvlist_destroy(req->result_nvl);
		}
		break;
	}
	case CTL_PORT_LIST: {
		struct sbuf *sb;
		struct ctl_port *port;
		struct ctl_lun_list *list;
		const char *name, *value;
		void *cookie;
		int j, type;
		uint32_t plun;

		list = (struct ctl_lun_list *)addr;

		sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN);
		if (sb == NULL) {
			list->status = CTL_LUN_LIST_ERROR;
			snprintf(list->error_str, sizeof(list->error_str),
				 "Unable to allocate %d bytes for LUN list",
				 list->alloc_len);
			break;
		}

		sbuf_printf(sb, "<ctlportlist>\n");
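		/*
		 * Each port comes out as an XML element along these lines
		 * (values here are illustrative only):
		 *
		 *	<targ_port id="2">
		 *		<frontend_type>iscsi</frontend_type>
		 *		<online>YES</online>
		 *		...
		 *	</targ_port>
		 */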
"YES" : "NO"); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", port->port_name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->physical_port); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->virtual_port); if (retval != 0) break; if (port->target_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->target_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->port_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_info != NULL) { retval = port->port_info(port->onoff_arg, sb); if (retval != 0) break; } cookie = NULL; while ((name = nvlist_next(port->options, &type, &cookie)) != NULL) { sbuf_printf(sb, "\t<%s>", name); if (type == NV_TYPE_STRING) { value = dnvlist_get_string(port->options, name, NULL); if (value != NULL) sbuf_printf(sb, "%s", value); } sbuf_printf(sb, "\n", name); } if (port->lun_map != NULL) { sbuf_printf(sb, "\ton\n"); for (j = 0; j < port->lun_map_size; j++) { plun = ctl_lun_map_from_port(port, j); if (plun == UINT32_MAX) continue; sbuf_printf(sb, "\t%u\n", j, plun); } } for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 || (port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL)) continue; if (port->wwpn_iid[j].name != NULL) retval = sbuf_printf(sb, "\t%s\n", j, port->wwpn_iid[j].name); else retval = sbuf_printf(sb, "\tnaa.%08jx\n", j, port->wwpn_iid[j].wwpn); if (retval != 0) break; } if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; } mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_LUN_MAP: { struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; struct ctl_port *port; mtx_lock(&softc->ctl_lock); if (lm->port < softc->port_min || lm->port >= softc->port_max || (port = softc->ctl_ports[lm->port]) == NULL) { mtx_unlock(&softc->ctl_lock); return (ENXIO); } if (port->status & CTL_PORT_STATUS_ONLINE) { STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_port(lun, lm->port, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps if (lm->plun != UINT32_MAX) { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_unset(port, lm->plun); else if (lm->lun < ctl_max_luns && softc->ctl_luns[lm->lun] != NULL) retval = ctl_lun_map_set(port, lm->plun, lm->lun); else return (ENXIO); } else { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_deinit(port); else retval = ctl_lun_map_init(port); } if (port->status & CTL_PORT_STATUS_ONLINE) ctl_isc_announce_port(port); break; } case CTL_GET_LUN_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. 
*/ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (lun->lun < stats->first_item) continue; if (stats->fill_len + sizeof(lun->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&lun->stats, &stats->stats[i++], sizeof(lun->stats)); if (retval != 0) break; stats->fill_len += sizeof(lun->stats); } stats->num_items = softc->num_luns; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } case CTL_GET_PORT_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. */ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < stats->first_item) continue; if (stats->fill_len + sizeof(port->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&port->stats, &stats->stats[i++], sizeof(port->stats)); if (retval != 0) break; stats->fill_len += sizeof(port->stats); } stats->num_items = softc->num_ports; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. */ type = _IOC_TYPE(cmd); STAILQ_FOREACH(backend, &softc->be_list, links) { if (backend->type == type) { found = 1; break; } } if (found == 0) { printf("ctl: unknown ioctl command %#lx or backend " "%d\n", cmd, type); retval = EINVAL; break; } retval = backend->ioctl(dev, cmd, addr, flag, td); #endif retval = ENOTTY; break; } } return (retval); } uint32_t ctl_get_initindex(struct ctl_nexus *nexus) { return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); } int ctl_lun_map_init(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; int size = ctl_lun_map_size; uint32_t i; if (port->lun_map == NULL || port->lun_map_size < size) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(size * sizeof(uint32_t), M_CTL, M_NOWAIT); } if (port->lun_map == NULL) return (ENOMEM); for (i = 0; i < size; i++) port->lun_map[i] = UINT32_MAX; port->lun_map_size = size; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_disable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_disable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_deinit(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; if (port->lun_map == NULL) return (0); port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_enable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_enable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) { int status; uint32_t old; if (port->lun_map == NULL) { status = ctl_lun_map_init(port); if (status != 0) return (status); } if (plun >= port->lun_map_size) return (EINVAL); old = port->lun_map[plun]; port->lun_map[plun] = glun; if ((port->status & 
	    CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) {
		if (port->lun_enable != NULL)
			port->lun_enable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL || plun >= port->lun_map_size)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) {
		if (port->lun_disable != NULL)
			port->lun_disable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

uint32_t
ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
{

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	if (lun_id >= port->lun_map_size)
		return (UINT32_MAX);
	return (port->lun_map[lun_id]);
}

uint32_t
ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
{
	uint32_t i;

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	for (i = 0; i < port->lun_map_size; i++) {
		if (port->lun_map[i] == lun_id)
			return (i);
	}
	return (UINT32_MAX);
}

/*
 * Decode an 8-byte SAM LUN address into a flat LUN number; e.g. the
 * peripheral-format address 0x0001000000000000 decodes to LUN 1.
 * Returns 0xffffffff for address formats we cannot decode.
 */
uint32_t
ctl_decode_lun(uint64_t encoded)
{
	uint8_t lun[8];
	uint32_t result = 0xffffffff;

	be64enc(lun, encoded);
	switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
	case RPL_LUNDATA_ATYP_PERIPH:
		if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
		    lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
			result = lun[1];
		break;
	case RPL_LUNDATA_ATYP_FLAT:
		if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
		    lun[6] == 0 && lun[7] == 0)
			result = ((lun[0] & 0x3f) << 8) + lun[1];
		break;
	case RPL_LUNDATA_ATYP_EXTLUN:
		switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
		case 0x02:
			switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
			case 0x00:
				result = lun[1];
				break;
			case 0x10:
				result = (lun[1] << 16) + (lun[2] << 8) +
				    lun[3];
				break;
			case 0x20:
				if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
					result = (lun[2] << 24) +
					    (lun[3] << 16) + (lun[4] << 8) +
					    lun[5];
				break;
			}
			break;
		case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
			result = 0xffffffff;
			break;
		}
		break;
	}
	return (result);
}

/*
 * Encode a flat LUN number into the smallest SAM LUN address format
 * that can hold it; e.g. 1 -> peripheral 0x0001000000000000, while
 * 256 -> flat 0x4100000000000000.
 */
uint64_t
ctl_encode_lun(uint32_t decoded)
{
	uint64_t l = decoded;

	if (l <= 0xff)
		return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
	if (l <= 0x3fff)
		return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
	if (l <= 0xffffff)
		return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) |
		    (l << 32));
	return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16));
}

int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
	int i;

	for (i = first; i < last; i++) {
		if ((mask[i / 32] & (1 << (i % 32))) == 0)
			return (i);
	}
	return (-1);
}

int
ctl_set_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) != 0)
		return (-1);
	else
		mask[chunk] |= (1 << piece);

	return (0);
}

int
ctl_clear_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (-1);
	else
		mask[chunk] &= ~(1 << piece);

	return (0);
}

int
ctl_is_set(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (0);
	else
		return (1);
}

static uint64_t
ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return (0);
	return (t[residx % CTL_MAX_INIT_PER_PORT]);
}
static void
ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return;
	t[residx % CTL_MAX_INIT_PER_PORT] = 0;
}

static void
ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *p;
	u_int i;

	i = residx/CTL_MAX_INIT_PER_PORT;
	if (lun->pr_keys[i] != NULL)
		return;
	mtx_unlock(&lun->lun_lock);
	p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
	    M_WAITOK | M_ZERO);
	mtx_lock(&lun->lun_lock);
	if (lun->pr_keys[i] == NULL)
		lun->pr_keys[i] = p;
	else
		free(p, M_CTL);
}

static void
ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	KASSERT(t != NULL, ("prkey %d is not allocated", residx));
	t[residx % CTL_MAX_INIT_PER_PORT] = key;
}

/*
 * ctl_softc, pool_name, total_ctl_io are passed in.
 * npool is passed out.
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
		uint32_t total_ctl_io, void **npool)
{
	struct ctl_io_pool *pool;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
					    M_NOWAIT | M_ZERO);
	if (pool == NULL)
		return (ENOMEM);

	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
	pool->ctl_softc = ctl_softc;
#ifdef IO_POOLS
	pool->zone = uma_zsecond_create(pool->name, NULL,
	    NULL, NULL, NULL, ctl_softc->io_zone);
	/* uma_prealloc(pool->zone, total_ctl_io); */
#else
	pool->zone = ctl_softc->io_zone;
#endif

	*npool = pool;
	return (0);
}

void
ctl_pool_free(struct ctl_io_pool *pool)
{

	if (pool == NULL)
		return;

#ifdef IO_POOLS
	uma_zdestroy(pool->zone);
#endif
	free(pool, M_CTL);
}

union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_WAITOK);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
		TAILQ_INIT(&io->io_hdr.blocked_queue);
	}
	return (io);
}

union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;
	union ctl_io *io;

	io = uma_zalloc(pool->zone, M_NOWAIT);
	if (io != NULL) {
		io->io_hdr.pool = pool_ref;
		CTL_SOFTC(io) = pool->ctl_softc;
		TAILQ_INIT(&io->io_hdr.blocked_queue);
	}
	return (io);
}

void
ctl_free_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	pool = (struct ctl_io_pool *)io->io_hdr.pool;
	uma_zfree(pool->zone, io);
}

void
ctl_zero_io(union ctl_io *io)
{
	struct ctl_io_pool *pool;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool = io->io_hdr.pool;
	memset(io, 0, sizeof(*io));
	io->io_hdr.pool = pool;
	CTL_SOFTC(io) = pool->ctl_softc;
	TAILQ_INIT(&io->io_hdr.blocked_queue);
}

/*
 * Parse a number with an optional unit suffix; suffixes scale by powers
 * of two, e.g. "4k" -> 4096 and "1m" -> 1048576.
 */
int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}

/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular LUN.
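 * Right now it just seeds each supported page from the compile-time
 * templates above and patches in the backend-specific values.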
*/ static int ctl_init_page_index(struct ctl_lun *lun) { int i, page_code; struct ctl_page_index *page_index; const char *value; uint64_t ival; memcpy(&lun->mode_pages.index, page_index_template, sizeof(page_index_template)); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; page_code = page_index->page_code & SMPH_PC_MASK; switch (page_code) { case SMS_RW_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], &rw_er_page_changeable, sizeof(rw_er_page_changeable)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], &rw_er_page_default, sizeof(rw_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rw_er_page; break; } case SMS_FORMAT_DEVICE_PAGE: { struct scsi_format_page *format_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Sectors per track are set above. Bytes per * sector need to be set here on a per-LUN basis. */ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[ CTL_PAGE_CHANGEABLE], &format_page_changeable, sizeof(format_page_changeable)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], &format_page_default, sizeof(format_page_default)); format_page = &lun->mode_pages.format_page[ CTL_PAGE_CURRENT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_DEFAULT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_SAVED]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); page_index->page_data = (uint8_t *)lun->mode_pages.format_page; break; } case SMS_RIGID_DISK_PAGE: { struct scsi_rigid_disk_page *rigid_disk_page; uint32_t sectors_per_cylinder; uint64_t cylinders; #ifndef __XSCALE__ int shift; #endif /* !__XSCALE__ */ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Rotation rate and sectors per track are set * above. We calculate the cylinders here based on * capacity. Due to the number of heads and * sectors per track we're using, smaller arrays * may turn out to have 0 cylinders. Linux and * FreeBSD don't pay attention to these mode pages * to figure out capacity, but Solaris does. It * seems to deal with 0 cylinders just fine, and * works out a fake geometry based on the capacity. 
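			 * As a worked example: if sectors_per_cylinder
			 * happened to be 16384 (2^14), the loop below would
			 * pick shift = 14 and cylinders = (maxlba + 1) >> 14.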
*/ memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT], &rigid_disk_page_default, sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, sizeof(rigid_disk_page_changeable)); sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS; /* * The divide method here will be more accurate, * probably, but results in floating point being * used in the kernel on i386 (__udivdi3()). On the * XScale, though, __udivdi3() is implemented in * software. * * The shift method for cylinder calculation is * accurate if sectors_per_cylinder is a power of * 2. Otherwise it might be slightly off -- you * might have a bit of a truncation problem. */ #ifdef __XSCALE__ cylinders = (lun->be_lun->maxlba + 1) / sectors_per_cylinder; #else for (shift = 31; shift > 0; shift--) { if (sectors_per_cylinder & (1 << shift)) break; } cylinders = (lun->be_lun->maxlba + 1) >> shift; #endif /* * We've basically got 3 bytes, or 24 bits for the * cylinder size in the mode page. If we're over, * just round down to 2^24. */ if (cylinders > 0xffffff) cylinders = 0xffffff; rigid_disk_page = &lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT]; scsi_ulto3b(cylinders, rigid_disk_page->cylinders); if ((value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) { scsi_ulto2b(strtol(value, NULL, 0), rigid_disk_page->rotation_rate); } memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rigid_disk_page; break; } case SMS_VERIFY_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], &verify_er_page_changeable, sizeof(verify_er_page_changeable)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], &verify_er_page_default, sizeof(verify_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.verify_er_page; break; } case SMS_CACHING_PAGE: { struct scsi_caching_page *caching_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], &caching_page_default, sizeof(caching_page_default)); memcpy(&lun->mode_pages.caching_page[ CTL_PAGE_CHANGEABLE], &caching_page_changeable, sizeof(caching_page_changeable)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], &caching_page_default, sizeof(caching_page_default)); caching_page = &lun->mode_pages.caching_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "writecache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 &= ~SCP_WCE; value = dnvlist_get_string(lun->be_lun->options, "readcache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 |= SCP_RCD; memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], &lun->mode_pages.caching_page[CTL_PAGE_SAVED], sizeof(caching_page_default)); page_index->page_data = (uint8_t 
*)lun->mode_pages.caching_page; break; } case SMS_CONTROL_MODE_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: { struct scsi_control_page *control_page; memcpy(&lun->mode_pages.control_page[ CTL_PAGE_DEFAULT], &control_page_default, sizeof(control_page_default)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CHANGEABLE], &control_page_changeable, sizeof(control_page_changeable)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_SAVED], &control_page_default, sizeof(control_page_default)); control_page = &lun->mode_pages.control_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "reordering", NULL); if (value != NULL && strcmp(value, "unrestricted") == 0) { control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; } memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_page[ CTL_PAGE_SAVED], sizeof(control_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_page; break; } case 0x01: memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_DEFAULT], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CHANGEABLE], &control_ext_page_changeable, sizeof(control_ext_page_changeable)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], sizeof(control_ext_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_ext_page; break; default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_INFO_EXCEPTIONS_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[ CTL_PAGE_CHANGEABLE], &ie_page_changeable, sizeof(ie_page_changeable)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], &ie_page_default, sizeof(ie_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.ie_page; break; case 0x02: { struct ctl_logical_block_provisioning_page *page; memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], &lbp_page_default, sizeof(lbp_page_default)); memcpy(&lun->mode_pages.lbp_page[ CTL_PAGE_CHANGEABLE], &lbp_page_changeable, sizeof(lbp_page_changeable)); memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], &lbp_page_default, sizeof(lbp_page_default)); page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[0].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[0].count); } value = dnvlist_get_string(lun->be_lun->options, "used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[1].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[1].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[2].flags |= SLBPPD_ENABLED | 
SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[2].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[3].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[3].count); } memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], sizeof(lbp_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.lbp_page; break; } default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_CDDVD_CAPS_PAGE:{ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[ CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, sizeof(cddvd_page_changeable)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], sizeof(cddvd_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.cddvd_page; break; } default: panic("invalid page code value %#x", page_code); } } return (CTL_RETVAL_COMPLETE); } static int ctl_init_log_page_index(struct ctl_lun *lun) { struct ctl_page_index *page_index; int i, j, k, prev; memcpy(&lun->log_pages.index, log_page_index_template, sizeof(log_page_index_template)); prev = -1; for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && lun->backend->lun_attr == NULL) continue; if (page_index->page_code != prev) { lun->log_pages.pages_page[j] = page_index->page_code; prev = page_index->page_code; j++; } lun->log_pages.subpages_page[k*2] = page_index->page_code; lun->log_pages.subpages_page[k*2+1] = page_index->subpage; k++; } lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; lun->log_pages.index[0].page_len = j; lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; lun->log_pages.index[1].page_len = k * 2; lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.stat_page; lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); return (CTL_RETVAL_COMPLETE); } static int hex2bin(const char *str, uint8_t *buf, int buf_size) { int i; u_char c; memset(buf, 0, buf_size); while (isspace(str[0])) str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; buf_size 
*= 2; for (i = 0; str[i] != 0 && i < buf_size; i++) { while (str[i] == '-') /* Skip dashes in UUIDs. */ str++; c = str[i]; if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else break; if (c >= 16) break; if ((i & 1) == 0) buf[i / 2] |= (c << 4); else buf[i / 2] |= c; } return ((i + 1) / 2); } /* * Add LUN. * * Returns 0 for success, non-zero (errno) for failure. */ int ctl_add_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *ctl_softc = control_softc; struct ctl_lun *nlun, *lun; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *uuid, *vendor, *value; int lun_number; int devidlen, idlen1, idlen2 = 0, len; /* * We support only Direct Access, CD-ROM or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: case T_PROCESSOR: case T_CDROM: break; case T_SEQUENTIAL: case T_CHANGER: default: return (EINVAL); } lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* Generate LUN ID. */ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = dnvlist_get_string(be_lun->options, "eui", NULL); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = dnvlist_get_string(be_lun->options, "naa", NULL); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); if (uuid != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 18; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 
12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } if (uuid != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_UUID; desc->identifier[0] = 0x10; hex2bin(uuid, &desc->identifier[2], 16); desc->length = 18; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (ctl_max_luns - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (ctl_max_luns - 1)) { printf("ctl: requested LUN ID %d is higher " "than ctl_max_luns - 1 (%d)\n", be_lun->req_lun_id, ctl_max_luns - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? */ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } fail: free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN, out of LUNs\n"); goto fail; } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_unlock(&ctl_softc->ctl_lock); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; if (be_lun->flags & CTL_LUN_FLAG_EJECTED) lun->flags |= CTL_LUN_EJECTED; if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) lun->flags |= CTL_LUN_NO_MEDIA; if (be_lun->flags & CTL_LUN_FLAG_STOPPED) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = dnvlist_get_string(be_lun->options, "removable", NULL); if (value != NULL) { if (strcmp(value, "on") == 0) lun->flags |= CTL_LUN_REMOVABLE; } else if (be_lun->lun_type == T_CDROM) lun->flags |= CTL_LUN_REMOVABLE; lun->ctl_softc = ctl_softc; #ifdef CTL_TIME_IO lun->last_busy = getsbinuptime(); #endif LIST_INIT(&lun->ooa_queue); STAILQ_INIT(&lun->error_list); lun->ie_reported = 1; callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); ctl_tpc_lun_init(lun); if (lun->flags & CTL_LUN_REMOVABLE) { lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, M_CTL, M_WAITOK); } /* * Initialize the mode and log page index. */ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* Setup statistics gathering */ lun->stats.item = lun_number; /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. 
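 * (CTL_UA_LUN_CHANGE surfaces to initiators as a REPORTED LUNS DATA
 * HAS CHANGED unit attention, telling them to rescan.)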
*/ mtx_lock(&ctl_softc->ctl_lock); STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; mtx_unlock(&ctl_softc->ctl_lock); /* * We successfully added the LUN, attempt to enable it. */ if (ctl_enable_lun(lun) != 0) { printf("%s: ctl_enable_lun() failed!\n", __func__); mtx_lock(&ctl_softc->ctl_lock); STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); ctl_softc->ctl_luns[lun_number] = NULL; ctl_softc->num_luns--; mtx_unlock(&ctl_softc->ctl_lock); free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (EIO); } return (0); } /* * Free LUN that has no active requests. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; struct ctl_lun *nlun; int i; KASSERT(LIST_EMPTY(&lun->ooa_queue), ("Freeing a LUN %p with outstanding I/O!\n", lun)); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; softc->num_luns--; STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } mtx_unlock(&softc->ctl_lock); /* * Tell the backend to free resources, if this LUN has a backend. */ lun->be_lun->lun_shutdown(lun->be_lun); lun->ie_reportcnt = UINT32_MAX; callout_drain(&lun->ie_callout); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < ctl_max_ports; i++) free(lun->pending_ua[i], M_CTL); free(lun->pending_ua, M_DEVBUF); for (i = 0; i < ctl_max_ports; i++) free(lun->pr_keys[i], M_CTL); free(lun->pr_keys, M_DEVBUF); free(lun->write_buffer, M_CTL); free(lun->prevent, M_CTL); free(lun, M_CTL); return (0); } static int ctl_enable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port, *nport; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, ("%s: LUN not disabled", __func__)); lun->flags &= ~CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_enable == NULL) continue; /* * Drop the lock while we call the FETD's enable routine. * This can lead to a callback into CTL (at least in the * case of the internal initiator frontend. 
*/ mtx_unlock(&softc->ctl_lock); retval = port->lun_enable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_enable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } static int ctl_disable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, ("%s: LUN not enabled", __func__)); lun->flags |= CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_disable == NULL) continue; /* * Drop the lock before we call the frontend's disable * routine, to avoid lock order reversals. * * XXX KDM what happens if the frontend list changes while * we're traversing it? It's unlikely, but should be handled. */ mtx_unlock(&softc->ctl_lock); retval = port->lun_disable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_disable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_start_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_stop_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_no_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_NO_MEDIA; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_has_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); if (lun->flags & CTL_LUN_REMOVABLE) ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); mtx_unlock(&lun->lun_lock); if ((lun->flags & CTL_LUN_REMOVABLE) && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } return (0); } int ctl_lun_ejected(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_EJECTED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_primary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_lun_secondary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); 
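	/* Push the changed LUN state out to the HA peer. */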
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Remove LUN.  If there are active requests, wait for completion.
 *
 * Returns 0 for success, non-zero (errno) for failure.
 * Completion is reported to the backend via the lun_shutdown() method.
 */
int
ctl_remove_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;

	ctl_disable_lun(lun);

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (LIST_EMPTY(&lun->ooa_queue)) {
		mtx_unlock(&lun->lun_lock);
		ctl_free_lun(lun);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		/* Send msg to other side. */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
 */
int
-ctl_config_move_done(union ctl_io *io)
+ctl_config_move_done(union ctl_io *io, bool samethr)
{
	int retval;

	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));

-	if ((io->io_hdr.port_status != 0) &&
-	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
-	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
-		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
-		    /*retry_count*/ io->io_hdr.port_status);
-	} else if (io->scsiio.kern_data_resid != 0 &&
-	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
-	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
-	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
-		ctl_set_invalid_field_ciu(&io->scsiio);
-	}
-
	if (ctl_debug & CTL_DEBUG_CDB_DATA)
		ctl_data_print(io);
	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
	    ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
		/*
		 * XXX KDM just assuming a single pointer here, and not a
		 * S/G list.  If we start using S/G lists for config data,
		 * we'll need to know how to clean them up here as well.
		 */
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		ctl_done(io);
		retval = CTL_RETVAL_COMPLETE;
	} else {
		/*
		 * XXX KDM now we need to continue data movement.  Some
		 * options:
		 * - call ctl_scsiio() again?  We don't do this for data
		 *   writes, because for those at least we know ahead of
		 *   time where the write will go and how long it is.  For
		 *   config writes, though, that information is largely
		 *   contained within the write itself, thus we need to
		 *   parse out the data again.
		 *
		 * - Call some other function once the data is in?
		 */

		/*
		 * XXX KDM call ctl_scsiio() again for now, and check flag
		 * bits to see whether we're allocated or not.
		 */
		retval = ctl_scsiio(&io->scsiio);
	}
	return (retval);
}

/*
 * This gets called by a backend driver when it is done with a
 * data_submit method.
 */
void
ctl_data_submit_done(union ctl_io *io)
{
	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	ctl_done(io);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration write.
 */
void
ctl_config_write_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}

	/*
	 * Since a configuration write can be done for commands that actually
	 * have data allocated, like write buffer, and commands that have
	 * no data, like start/stop unit, we need to check here.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
		buf = io->scsiio.kern_data_ptr;
	else
		buf = NULL;
	ctl_done(io);
	if (buf)
		free(buf, M_CTL);
}

void
ctl_config_read_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If there is some error -- we are done, skip data transfer.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			buf = io->scsiio.kern_data_ptr;
		else
			buf = NULL;
		ctl_done(io);
		if (buf)
			free(buf, M_CTL);
		return;
	}

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 */
	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
		io->scsiio.io_cont(io);
		return;
	}

	ctl_datamove(io);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.  The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
*/ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) lun->flags &= ~CTL_LUN_RESERVED; mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_scsi_reserve(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_reserve\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. */ mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { ctl_set_reservation_conflict(ctsio); goto bailout; } /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ if (lun->flags & CTL_LUN_PR_RESERVED) { ctl_set_success(ctsio); goto bailout; } lun->flags |= CTL_LUN_RESERVED; lun->res_idx = residx; ctl_set_success(ctsio); bailout: mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_start_stop(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_start_stop_unit *cdb; int retval; CTL_DEBUG_PRINT(("ctl_start_stop\n")); cdb = (struct scsi_start_stop_unit *)ctsio->cdb; if ((cdb->how & SSS_PC_MASK) == 0) { if ((lun->flags & CTL_LUN_PR_RESERVED) && (cdb->how & SSS_START) == 0) { uint32_t residx; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if (ctl_get_prkey(lun, residx) == 0 || (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } if ((cdb->how & SSS_LOEJ) && (lun->flags & CTL_LUN_REMOVABLE) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 4, /*bit_valid*/ 1, /*bit*/ 1); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && lun->prevent_count > 0) { /* "Medium removal prevented" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_prevent_allow(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_prevent *cdb; int retval; uint32_t initidx; CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); cdb = (struct scsi_prevent *)ctsio->cdb; if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&lun->lun_lock); if ((cdb->how & PR_PREVENT) && ctl_is_set(lun->prevent, initidx) == 0) { ctl_set_mask(lun->prevent, initidx); lun->prevent_count++; } else if ((cdb->how & PR_PREVENT) == 0 && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } /* * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but * we don't really do anything with the LBA and length fields if the user * passes them in. Instead we'll just flush out the cache for the entire * LUN. 
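 * SBC permits this: the command only guarantees that the addressed
 * blocks reach the medium, and flushing additional blocks is harmless.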
*/ int ctl_sync_cache(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t starting_lba; uint32_t block_count; int retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_sync_cache\n")); retval = 0; switch (ctsio->cdb[0]) { case SYNCHRONIZE_CACHE: { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)ctsio->cdb; starting_lba = scsi_4btoul(cdb->begin_lba); block_count = scsi_2btoul(cdb->lb_count); byte2 = cdb->byte2; break; } case SYNCHRONIZE_CACHE_16: { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; starting_lba = scsi_8btou64(cdb->begin_lba); block_count = scsi_4btoul(cdb->lb_count); byte2 = cdb->byte2; break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; break; /* NOTREACHED */ } /* * We check the LBA and length, but don't do anything with them. * A SYNCHRONIZE CACHE will cause the entire cache for this lun to * get flushed. This check will just help satisfy anyone who wants * to see an error for an out of range LBA. */ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { ctl_set_lba_out_of_range(ctsio, MAX(starting_lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); goto bailout; } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = starting_lba; lbalen->len = block_count; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); bailout: return (retval); } int ctl_format(struct ctl_scsiio *ctsio) { struct scsi_format *cdb; int length, defect_list_len; CTL_DEBUG_PRINT(("ctl_format\n")); cdb = (struct scsi_format *)ctsio->cdb; length = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) length = sizeof(struct scsi_format_header_long); else length = sizeof(struct scsi_format_header_short); } if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) && (length > 0)) { ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); ctsio->kern_data_len = length; ctsio->kern_total_len = length; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } defect_list_len = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) { struct scsi_format_header_long *header; header = (struct scsi_format_header_long *) ctsio->kern_data_ptr; defect_list_len = scsi_4btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } else { struct scsi_format_header_short *header; header = (struct scsi_format_header_short *) ctsio->kern_data_ptr; defect_list_len = scsi_2btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } } ctl_set_success(ctsio); bailout: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint64_t buffer_offset; uint32_t len; uint8_t byte2; static uint8_t descr[4]; static uint8_t echo_descr[4] = { 0 }; CTL_DEBUG_PRINT(("ctl_read_buffer\n")); switch (ctsio->cdb[0]) { case READ_BUFFER: { struct scsi_read_buffer *cdb; cdb = (struct scsi_read_buffer 
*)ctsio->cdb; buffer_offset = scsi_3btoul(cdb->offset); len = scsi_3btoul(cdb->length); byte2 = cdb->byte2; break; } case READ_BUFFER_16: { struct scsi_read_buffer_16 *cdb; cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; buffer_offset = scsi_8btou64(cdb->offset); len = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* This shouldn't happen. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (buffer_offset > CTL_WRITE_BUFFER_SIZE || buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { descr[0] = 0; scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); ctsio->kern_data_ptr = descr; len = min(len, sizeof(descr)); } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { ctsio->kern_data_ptr = echo_descr; len = min(len, sizeof(echo_descr)); } else { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; } ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctl_set_success(ctsio); ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_write_buffer *cdb; int buffer_offset, len; CTL_DEBUG_PRINT(("ctl_write_buffer\n")); cdb = (struct scsi_write_buffer *)ctsio->cdb; len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
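 * Like the other config writes, this handler runs twice: the first
 * pass sets up the buffer and starts the data movement; once the data
 * has arrived, ctl_config_move_done() re-dispatches the command,
 * CTL_FLAG_ALLOCATED is then set, and we fall through to report
 * success.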
*/ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_write_same_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba += lbalen->len; if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; } CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_write_same(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; const char *val; uint64_t lba, ival; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_write_same\n")); switch (ctsio->cdb[0]) { case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); byte2 = cdb->byte2; break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* ANCHOR flag can be used only together with UNMAP */ if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); if ((lun->be_lun->maxlba + 1) - lba > ival) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 
7 : 10, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_write_same_cont; num_blocks = 1 << 31; } else num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((byte2 & SWS_NDOB) == 0 && (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_unmap(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_unmap *cdb; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_header *hdr; struct scsi_unmap_desc *buf, *end, *endnz, *range; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_unmap\n")); cdb = (struct scsi_unmap *)ctsio->cdb; len = scsi_2btoul(cdb->length); byte2 = cdb->byte2; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = ctsio->kern_total_len - ctsio->kern_data_resid; hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; if (len < sizeof (*hdr) || len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); goto done; } len = scsi_2btoul(hdr->desc_length); buf = (struct scsi_unmap_desc *)(hdr + 1); end = buf + len / sizeof(*buf); endnz = buf; for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); num_blocks = scsi_4btoul(range->length); if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (num_blocks != 0) endnz = range + 1; } /* * Block backend can not handle zero last range. * Filter it out and return if there is nothing left. 
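 * (endnz points just past the last descriptor with a non-zero block
 * count, so only trailing zero-length descriptors are dropped.)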
*/ len = (uint8_t *)endnz - (uint8_t *)buf; if (len == 0) { ctl_set_success(ctsio); goto done; } mtx_lock(&lun->lun_lock); ptrlen = (struct ctl_ptr_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; ptrlen->ptr = (void *)buf; ptrlen->len = len; ptrlen->flags = byte2; ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); done: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); uint8_t *current_cp; int set_ua; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; current_cp = (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); mtx_lock(&lun->lun_lock); if (memcmp(current_cp, page_ptr, page_index->page_len)) { memcpy(current_cp, page_ptr, page_index->page_len); set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); if (set_ua) { ctl_isc_announce_mode(lun, ctl_get_initindex(&ctsio->io_hdr.nexus), page_index->page_code, page_index->subpage); } return (CTL_RETVAL_COMPLETE); } static void ctl_ie_timer(void *arg) { struct ctl_lun *lun = arg; uint64_t t; if (lun->ie_asc == 0) return; if (lun->MODE_IE.mrie == SIEP_MRIE_UA) ctl_est_ua_all(lun, -1, CTL_UA_IE); else lun->ie_reported = 0; if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(lun->MODE_IE.interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_schedule(&lun->ie_callout, t * hz / 10); } } int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_info_exceptions_page *pg; uint64_t t; (void)ctl_default_page_handler(ctsio, page_index, page_ptr); pg = (struct scsi_info_exceptions_page *)page_ptr; mtx_lock(&lun->lun_lock); if (pg->info_flags & SIEP_FLAGS_TEST) { lun->ie_asc = 0x5d; lun->ie_ascq = 0xff; if (pg->mrie == SIEP_MRIE_UA) { ctl_est_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = 1; } else { ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = -1; } lun->ie_reportcnt = 1; if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(pg->interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_reset(&lun->ie_callout, t * hz / 10, ctl_ie_timer, lun); } } else { lun->ie_asc = 0; lun->ie_ascq = 0; lun->ie_reported = 1; ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reportcnt = UINT32_MAX; callout_stop(&lun->ie_callout); } mtx_unlock(&lun->lun_lock); return (CTL_RETVAL_COMPLETE); } static int ctl_do_mode_select(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct scsi_mode_page_header *page_header; struct ctl_page_index *page_index; struct ctl_scsiio *ctsio; int page_len, page_len_offset, page_len_size; union ctl_modepage_info *modepage_info; uint16_t *len_left, *len_used; int retval, i; ctsio = &io->scsiio; page_index = NULL; page_len = 0; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; len_left = &modepage_info->header.len_left; len_used = &modepage_info->header.len_used; do_next_page: page_header = (struct scsi_mode_page_header *) (ctsio->kern_data_ptr + *len_used); 
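	/*
	 * Each trip through do_next_page consumes one mode page from the
	 * parameter list.  len_left/len_used live in the io's private
	 * data, so parsing can resume here if a page handler queues the
	 * command and completes it later.
	 */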
if (*len_left == 0) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (*len_left < sizeof(struct scsi_mode_page_header)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if ((page_header->page_code & SMPH_SPF) && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM should we do something with the block descriptor? */ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if ((page_index->page_code & SMPH_PC_MASK) != (page_header->page_code & SMPH_PC_MASK)) continue; /* * If neither page has a subpage code, then we've got a * match. */ if (((page_index->page_code & SMPH_SPF) == 0) && ((page_header->page_code & SMPH_SPF) == 0)) { page_len = page_header->page_length; break; } /* * If both pages have subpages, then the subpage numbers * have to match. */ if ((page_index->page_code & SMPH_SPF) && (page_header->page_code & SMPH_SPF)) { struct scsi_mode_page_header_sp *sph; sph = (struct scsi_mode_page_header_sp *)page_header; if (page_index->subpage == sph->subpage) { page_len = scsi_2btoul(sph->page_length); break; } } } /* * If we couldn't find the page, or if we don't have a mode select * handler for it, send back an error to the user. */ if ((i >= CTL_NUM_MODE_PAGES) || (page_index->select_handler == NULL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (page_index->page_code & SMPH_SPF) { page_len_offset = 2; page_len_size = 2; } else { page_len_size = 1; page_len_offset = 1; } /* * If the length the initiator gives us isn't the one we specify in * the mode page header, or if they didn't specify enough data in * the CDB to avoid truncating this page, kick out the request. */ if (page_len != page_index->page_len - page_len_offset - page_len_size) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + page_len_offset, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (*len_left < page_index->page_len) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Run through the mode page, checking to make sure that the bits * the user changed are actually legal for him to change. */ for (i = 0; i < page_index->page_len; i++) { uint8_t *user_byte, *change_mask, *current_byte; int bad_bit; int j; user_byte = (uint8_t *)page_header + i; change_mask = page_index->page_data + (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; current_byte = page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT) + i; /* * Check to see whether the user set any bits in this byte * that he is not allowed to set. 
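		 * The CHANGEABLE copy of the page acts as a mask here: bits
		 * set in *change_mask may be modified by the initiator, all
		 * other bits must match the CURRENT values exactly.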
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
	    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	union ctl_modepage_info *modepage_info;
	int bd_len, i, header_size, param_len, rtd;
	uint32_t initidx;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0;
		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (rtd) {
		if (param_len != 0) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 0,
			    /*command*/ 1, /*field*/ 0,
			    /*bit_valid*/ 0, /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Revert to defaults. */
		ctl_init_page_index(lun);
		mtx_lock(&lun->lun_lock);
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
		mtx_unlock(&lun->lun_lock);
		for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
			ctl_isc_announce_mode(lun, -1,
			    lun->mode_pages.index[i].page_code & SMPH_PC_MASK,
			    lun->mode_pages.index[i].subpage);
		}
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.
In theory, * we shouldn't have to sanity check the parameter list length here * because the maximum size is 64K. We should be able to malloc * that much without too many problems. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_header_6 *mh6; mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; bd_len = mh6->blk_desc_len; break; } case MODE_SELECT_10: { struct scsi_mode_header_10 *mh10; mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; bd_len = scsi_2btoul(mh10->blk_desc_len); break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } if (param_len < (header_size + bd_len)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_config_write_done(), it'll get passed back to * ctl_do_mode_select() for further processing, or completion if * we're all done. */ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_do_mode_select; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; memset(modepage_info, 0, sizeof(*modepage_info)); modepage_info->header.len_left = param_len - header_size - bd_len; modepage_info->header.len_used = header_size + bd_len; return (ctl_do_mode_select((union ctl_io *)ctsio)); } int ctl_mode_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int pc, page_code, llba, subpage; int alloc_len, page_len, header_len, bd_len, total_len; void *block_desc; struct ctl_page_index *page_index; llba = 0; CTL_DEBUG_PRINT(("ctl_mode_sense\n")); switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_6); if (cdb->byte2 & SMS_DBD) bd_len = 0; else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = cdb->length; break; } case MODE_SENSE_10: { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_10); if (cdb->byte2 & SMS_DBD) { bd_len = 0; } else if (lun->be_lun->lun_type == T_DIRECT) { if (cdb->byte2 & SMS10_LLBAA) { llba = 1; bd_len = sizeof(struct scsi_mode_block_descr_dlong); } else bd_len = sizeof(struct scsi_mode_block_descr_dshort); } else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * We have to make a first pass through to calculate the size of * the pages that match the user's query. Then we allocate enough * memory to hold it, and actually copy the data into the buffer. 
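 * For example, a request for page 0x3f (all pages) with subpage 0x00
 * returns the subpage-0 copy of every supported page back to back,
 * while subpage 0xff would include the subpages as well.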
*/ switch (page_code) { case SMS_ALL_PAGES_PAGE: { u_int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; page_len += page_index->page_len; } break; } default: { u_int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; page_len += page_index->page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } header->block_descr_len = bd_len; block_desc = &header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (llba) header->flags |= SMH_LONGLBA; scsi_ulto2b(bd_len, header->block_descr_len); block_desc = &header[1]; break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. 
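 * The long form of the descriptor is used only when the initiator
 * set LLBAA in a MODE SENSE(10) CDB; see the llba flag above.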
*/ if (bd_len > 0) { if (lun->be_lun->lun_type == T_DIRECT) { if (llba) { struct scsi_mode_block_descr_dlong *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_u64to8b(lun->be_lun->maxlba + 1, bd->num_blocks); scsi_ulto4b(lun->be_lun->blocksize, bd->block_len); } else { struct scsi_mode_block_descr_dshort *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_ulto4b(MIN(lun->be_lun->maxlba+1, UINT32_MAX), bd->num_blocks); scsi_ulto3b(lun->be_lun->blocksize, bd->block_len); } } else { struct scsi_mode_block_descr *bd = block_desc; scsi_ulto3b(0, bd->block_len); } } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_temperature *data; const char *value; data = (struct scsi_log_temperature *)page_index->page_data; scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; data++; scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; return (0); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct stat_page *data; struct bintime *t; data = (struct stat_page *)page_index->page_data; scsi_ulto2b(SLP_SAP, 
	    data->sap.hdr.param_code);
	data->sap.hdr.param_control = SLP_LBIN;
	data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) -
	    sizeof(struct scsi_log_param_header);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_READ],
	    data->sap.read_num);
	scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE],
	    data->sap.write_num);
	if (lun->be_lun->blocksize > 0) {
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] /
		    lun->be_lun->blocksize, data->sap.recvieved_lba);
		scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] /
		    lun->be_lun->blocksize, data->sap.transmitted_lba);
	}
	t = &lun->stats.time[CTL_STATS_READ];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.read_int);
	t = &lun->stats.time[CTL_STATS_WRITE];
	scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000),
	    data->sap.write_int);
	scsi_u64to8b(0, data->sap.weighted_num);
	scsi_u64to8b(0, data->sap.weighted_int);
	scsi_ulto2b(SLP_IT, data->it.hdr.param_code);
	data->it.hdr.param_control = SLP_LBIN;
	data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) -
	    sizeof(struct scsi_log_param_header);
#ifdef CTL_TIME_IO
	scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int);
#endif
	scsi_ulto2b(SLP_TI, data->ti.hdr.param_code);
	data->ti.hdr.param_control = SLP_LBIN;
	data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) -
	    sizeof(struct scsi_log_param_header);
	scsi_ulto4b(3, data->ti.exponent);
	scsi_ulto4b(1, data->ti.integer);
	return (0);
}

int
ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index,
			 int pc)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_log_informational_exceptions *data;
	const char *value;

	data = (struct scsi_log_informational_exceptions *)
	    page_index->page_data;
	scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code);
	data->hdr.param_control = SLP_LBIN;
	data->hdr.param_len =
	    sizeof(struct scsi_log_informational_exceptions) -
	    sizeof(struct scsi_log_param_header);
	data->ie_asc = lun->ie_asc;
	data->ie_ascq = lun->ie_ascq;
	if ((value = dnvlist_get_string(lun->be_lun->options, "temperature",
	    NULL)) != NULL)
		data->temperature = strtol(value, NULL, 0);
	else
		data->temperature = 0xff;
	return (0);
}

int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
	page_code = cdb->page & SLS_PAGE_CODE;
	subpage = cdb->subpage;
	alloc_len = scsi_2btoul(cdb->length);

	page_index = NULL;
	for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
		page_index = &lun->log_pages.index[i];

		/* Look for the right page code */
		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
			continue;

		/* Look for the right subpage or the subpage wildcard */
		if (page_index->subpage != subpage)
			continue;

		break;
	}
	if (i >= CTL_NUM_LOG_PAGES) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(struct scsi_log_header) + page_index->page_len;

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_data_len = min(total_len, alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
	header->page = page_index->page_code;
	if (page_index->page_code ==
	    SLS_LOGICAL_BLOCK_PROVISIONING)
		header->page |= SL_DS;
	if (page_index->subpage) {
		header->page |= SL_SPF;
		header->subpage = page_index->subpage;
	}
	scsi_ulto2b(page_index->page_len, header->datalen);

	/*
	 * Call the handler, if it exists, to update the
	 * page to the latest values.
	 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
	 */
	scsi_ulto4b(lun->be_lun->blocksize, data->length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct scsi_read_capacity_16 *cdb;
	struct scsi_read_capacity_data_long *data;
	uint64_t lba;
	uint32_t alloc_len;

	CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));

	cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;

	alloc_len = scsi_4btoul(cdb->alloc_len);
	lba = scsi_8btou64(cdb->addr);

	if ((cdb->reladr & SRC16_PMI) && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_len = min(sizeof(*data), alloc_len);
	ctsio->kern_total_len = ctsio->kern_data_len;

	scsi_u64to8b(lun->be_lun->maxlba, data->addr);
	/* XXX KDM this may not be 512 bytes...
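	 *
	 * Note (illustrative): besides the block length, READ CAPACITY(16)
	 * below also reports physical block geometry.  prot_lbppbe holds
	 * "logical blocks per physical block" as a power of two, so a
	 * 512e disk (4096-byte physical sectors) would report
	 * pblockexp == 3 and an initiator recovers the physical size as
	 *
	 *	physical = blocksize << (prot_lbppbe & SRC16_LBPPBE);
	 *
	 * while LBPME/LBPRZ advertise UNMAP support and read-zeros.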
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio, lba); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (retval); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_ident_info(struct ctl_scsiio *ctsio) { struct 
ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_ident_info *cdb; struct scsi_report_ident_info_data *rii_ptr; struct scsi_report_ident_info_descr *riid_ptr; const char *oii, *otii; int retval, alloc_len, total_len = 0, len = 0; CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); cdb = (struct scsi_report_ident_info *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_ident_info_data); switch (cdb->type) { case RII_LUII: oii = dnvlist_get_string(lun->be_lun->options, "ident_info", NULL); if (oii) len = strlen(oii); /* Approximately */ break; case RII_LUTII: otii = dnvlist_get_string(lun->be_lun->options, "text_ident_info", NULL); if (otii) len = strlen(otii) + 1; /* NULL-terminated */ break; case RII_IIS: len = 2 * sizeof(struct scsi_report_ident_info_descr); break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 11, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return(retval); } total_len += len; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; switch (cdb->type) { case RII_LUII: if (oii) { if (oii[0] == '0' && oii[1] == 'x') len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); else strncpy((uint8_t *)(rii_ptr + 1), oii, len); } break; case RII_LUTII: if (otii) strlcpy((uint8_t *)(rii_ptr + 1), otii, len); break; case RII_IIS: riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); riid_ptr->type = RII_LUII; scsi_ulto2b(0xffff, riid_ptr->length); riid_ptr++; riid_ptr->type = RII_LUTII; scsi_ulto2b(0xffff, riid_ptr->length); } scsi_ulto2b(len, rii_ptr->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; int num_ha_groups, num_target_ports, shared_group; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } num_target_ports = 0; shared_group = (softc->is_single != 0); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->status & CTL_PORT_STATUS_HA_SHARED) shared_group = 1; } mtx_unlock(&softc->ctl_lock); num_ha_groups = (softc->is_single) ? 
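	/*
	 * Note (illustrative): the target port group numbers used below
	 * are 1 for the HA-shared group (when present) and 2 + g for
	 * each HA shelf g, so a configuration can report up to
	 * 1 + NUM_HA_SHELVES descriptors.
	 */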
0 : NUM_HA_SHELVES; if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * (shared_group + num_ha_groups) + sizeof(struct scsi_target_port_descriptor) * num_target_ports; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_min / softc->port_cnt; if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { /* Some shelf is known to be primary. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) os = TPG_ASYMMETRIC_ACCESS_STANDBY; else os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; if (lun->flags & CTL_LUN_PRIMARY_SC) { ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { /* No known primary shelf. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) { ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; } } if (shared_group) { tpg_desc->pref_state = ts; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (!softc->is_single && (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } for (g = 0; g < num_ha_groups; g++) { tpg_desc->pref_state = (g == pg) ? ts : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(2 + g, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < g * softc->port_cnt || port->targ_port >= (g + 1) * softc->port_cnt) continue; if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (port->status & CTL_PORT_STATUS_HA_SHARED) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* FALLTHROUGH */ case RSO_OPTIONS_OC_ASA: total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct 
scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; case RSO_OPTIONS_OC_ASA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; if (entry->flags & CTL_CMD_FLAG_SA5) { entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } else if (service_action != 0) { one->support = 1; break; } goto fill_one; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_ext_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; if (cdb->options & RST_REPD) total_len = sizeof(struct scsi_report_supported_tmf_ext_data); else total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | RST_TRS; data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; data->length = total_len - 4; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct 
scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct scsi_per_res_in_rsrv in_data; */ uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. */ if (key_count >= lun->pr_key_count) { key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->pr_generation, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. 
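 *
 * Note (illustrative): in the reserved case the descriptor returned
 * below carries both the holder's registered key (zero for an
 * all-registrants reservation, where no single nexus holds it) and
 * the scope/type byte; with LU scope encoding as 0, scopetype is
 * simply lun->pr_res_type.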
*/ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. */ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->pr_res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags1 = SPRI_CRH; res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->pr_res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. 
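 *
 * Note (illustrative): callers are expected to use it the way the
 * SPRO_PREEMPT case below does:
 *
 *	if (ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
 *	    residx, ctsio, cdb, param) != 0)
 *		return (CTL_RETVAL_COMPLETE);
 *
 * A non-zero return means status has been set and the I/O completed,
 * so the caller must not touch ctsio again.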
*/ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int i; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices either * generate a res conflict or check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. 
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 8,
					      /*bit_valid*/ 0,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (1);
		}

		for (i = 0; i < CTL_MAX_INITIATORS; i++) {
			if (ctl_get_prkey(lun, i) != sa_res_key)
				continue;

			found = 1;
			ctl_clr_prkey(lun, i);
			lun->pr_key_count--;
			ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
		}
		if (!found) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (1);
		}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		/* send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_PREEMPT;
		persis_io.pr.pr_info.residx = lun->pr_res_idx;
		persis_io.pr.pr_info.res_type = type;
		memcpy(persis_io.pr.pr_info.sa_res_key,
		    param->serv_act_res_key,
		    sizeof(param->serv_act_res_key));
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
	} else {
		/* Reserved but not all registrants */
		/* sa_res_key is res holder */
		if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) {
			/* validate scope and type */
			if ((cdb->scope_type & SPR_SCOPE_MASK) !=
			     SPR_LU_SCOPE) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 4);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			if (type>8 || type==2 || type==4 || type==0) {
				mtx_unlock(&lun->lun_lock);
				ctl_set_invalid_field(/*ctsio*/ ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 1,
						      /*field*/ 2,
						      /*bit_valid*/ 1,
						      /*bit*/ 0);
				ctl_done((union ctl_io *)ctsio);
				return (1);
			}

			/*
			 * Do the following:
			 * if sa_res_key != res_key remove all
			 * registrants w/sa_res_key and generate UA
			 * for these registrants (Registrations
			 * Preempted), if it wasn't an exclusive
			 * reservation generate UA (Reservations
			 * Preempted) for all other registered nexuses
			 * if the type has changed. Establish the new
			 * reservation and holder. If res_key and
			 * sa_res_key are the same do the above
			 * except don't unregister the res holder.
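			 *
			 * Note (illustrative), the same logic in
			 * pseudocode:
			 *
			 *	for each registered nexus i != us:
			 *		if key(i) == sa_res_key:
			 *			unregister i;
			 *			UA(i, REG PREEMPTED)
			 *		else if type changed from a
			 *		    registrants-only type:
			 *			UA(i, RES RELEASED)
			 *	set new type and holder;
			 *	bump PRgeneration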
*/ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } } return (0); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; } 
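	/*
	 * Note (illustrative): the generation count is bumped on this
	 * side too, mirroring the initiating controller, so PERSISTENT
	 * RESERVE IN returns the same PRgeneration no matter which SC
	 * served the PERSISTENT RESERVE OUT.
	 */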
lun->pr_generation++; } int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); int retval; u_int32_t param_len; struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms* param; uint32_t residx; uint64_t res_key, sa_res_key, key; uint8_t type; union ctl_ha_msg persis_io; int i; CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); cdb = (struct scsi_per_res_out *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; /* * We only support whole-LUN scope. The scope & type are ignored for * register, register and ignore existing key and clear. * We sometimes ignore scope and type on preempts too!! * Verify reservation type here as well. */ type = cdb->scope_type & SPR_TYPE_MASK; if ((cdb->action == SPRO_RESERVE) || (cdb->action == SPRO_RELEASE)) { if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (type>8 || type==2 || type==4 || type==0) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } param_len = scsi_4btoul(cdb->length); if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); res_key = scsi_8btou64(param->res_key.key); sa_res_key = scsi_8btou64(param->serv_act_res_key); /* * Validate the reservation key here except for SPRO_REG_IGNO * This must be done for all other service actions */ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { mtx_lock(&lun->lun_lock); if ((key = ctl_get_prkey(lun, residx)) != 0) { if (res_key != key) { /* * The current key passed in doesn't match * the one the initiator previously * registered. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { /* * We are not registered */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (res_key != 0) { /* * We are not registered and trying to register but * the register key isn't zero. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } switch (cdb->action & SPRO_ACTION_MASK) { case SPRO_REGISTER: case SPRO_REG_IGNO: { /* * We don't support any of these options, as we report in * the read capabilities request (see * ctl_persistent_reserve_in(), above). 
*/ if ((param->flags & SPR_SPEC_I_PT) || (param->flags & SPR_ALL_TG_PT) || (param->flags & SPR_APTPL)) { int bit_ptr; if (param->flags & SPR_APTPL) bit_ptr = 0; else if (param->flags & SPR_ALL_TG_PT) bit_ptr = 2; else /* SPR_SPEC_I_PT */ bit_ptr = 3; free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 20, /*bit_valid*/ 1, /*bit*/ bit_ptr); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_lock(&lun->lun_lock); /* * The initiator wants to clear the * key/unregister. */ if (sa_res_key == 0) { if ((res_key == 0 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO && ctl_get_prkey(lun, residx) == 0)) { mtx_unlock(&lun->lun_lock); goto done; } ctl_clr_prkey(lun, residx); lun->pr_key_count--; if (residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++){ if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; persis_io.pr.pr_info.residx = residx; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else /* sa_res_key != 0 */ { /* * If we aren't registered currently then increment * the key count and set the registered flag. 
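 *
 * Note (illustrative): these map to the flags rejected below,
 * SPR_SPEC_I_PT (register through specified initiator ports),
 * SPR_ALL_TG_PT (register through all target ports) and SPR_APTPL
 * (persist across power loss); the SPRI_RC capabilities reported
 * above advertise none of them.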
*/ ctl_alloc_prkey(lun, residx); if (ctl_get_prkey(lun, residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, residx, sa_res_key); lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_REG_KEY; persis_io.pr.pr_info.residx = residx; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; } case SPRO_RESERVE: mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PR_RESERVED) { /* * if this isn't the reservation holder and it's * not a "all registrants" type or if the type is * different then we have a conflict */ if ((lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) || lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } else /* create a reservation */ { /* * If it's not an "all registrants" type record * reservation holder */ if (type != SPR_TYPE_WR_EX_AR && type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* Res holder */ else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = type; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RESERVE; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; case SPRO_RELEASE: mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { /* No reservation exists return good status */ mtx_unlock(&lun->lun_lock); goto done; } /* * Is this nexus a reservation holder? */ if (lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { /* * not a res holder return good status but * do nothing */ mtx_unlock(&lun->lun_lock); goto done; } if (lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_illegal_pr_release(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* okay to release */ lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. 
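 *
 * Note (illustrative): SCP_NUAR is the "No Unit Attention on Release"
 * bit from the Control mode page; when an application client sets it,
 * the RESERVATIONS RELEASED UA below is suppressed, i.e. the
 * condition reduces to
 *	notify = !exclusive_type && !NUAR,
 * which is why WR_EX/EX_AC types skip the loop unconditionally.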
		 */
		if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX &&
		    (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx ||
				    ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;
	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		    sizeof(persis_io.pr), M_WAITOK);
		break;
	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key,
		    type, residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out. All the error checking will have been done
 * so only performing the action need be done here to keep the two
 * in sync.
 */
static void
ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
{
	struct ctl_softc *softc = CTL_SOFTC(io);
	union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg;
	struct ctl_lun *lun;
	int i;
	uint32_t residx, targ_lun;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	mtx_lock(&softc->ctl_lock);
	if (targ_lun >= ctl_max_luns ||
	    (lun = softc->ctl_luns[targ_lun]) == NULL) {
		mtx_unlock(&softc->ctl_lock);
		return;
	}
	mtx_lock(&lun->lun_lock);
	mtx_unlock(&softc->ctl_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		return;
	}
	residx = ctl_get_initindex(&msg->hdr.nexus);
	switch(msg->pr.pr_info.action) {
	case CTL_PR_REG_KEY:
		ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
		if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
			lun->pr_key_count++;
		ctl_set_prkey(lun, msg->pr.pr_info.residx,
		    scsi_8btou64(msg->pr.pr_info.sa_res_key));
		lun->pr_generation++;
		break;

	case CTL_PR_UNREG_KEY:
		ctl_clr_prkey(lun, msg->pr.pr_info.residx);
		lun->pr_key_count--;

		/* XXX Need to see if the reservation has been released */
		/* if so do we need to generate UA? */
		if (msg->pr.pr_info.residx == lun->pr_res_idx) {
			lun->flags &= ~CTL_LUN_PR_RESERVED;
			lun->pr_res_idx = CTL_PR_NO_RESERVATION;

			if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
			     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
			    lun->pr_key_count) {
				/*
				 * If the reservation is a registrants
				 * only type we need to generate a UA
				 * for other registered inits.
The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. */ if (lun->pr_res_type != SPR_TYPE_EX_AC && lun->pr_res_type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ lba &= 0x1fffff; num_blocks = cdb->length; /* * This is correct according to SBC-2. 
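 *
 * Note (illustrative): the 6-byte CDB has a one-byte transfer length,
 * so 1..255 mean themselves and 0 means 256, e.g.
 *
 *	READ(6) lba=0 length=0   ->  256 blocks
 *	READ(6) lba=0 length=255 ->  255 blocks
 *
 * which is why the num_blocks == 0 special case later in this
 * function can never trigger for READ(6)/WRITE(6).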
*/ if (num_blocks == 0) num_blocks = 256; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; if (lun->be_lun->atomicblock == 0) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_2btoul(cdb->length); if (num_blocks > lun->be_lun->atomicblock) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. * Note that this cannot happen with WRITE(6) or READ(6), since 0 * translates to 256 blocks for those commands. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA and/or DPO if caches are disabled. 
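 *
 * Note (illustrative): with the read cache disabled (SCP_RCD set),
 * reads below get FUA | DPO, forcing medium access without polluting
 * the cache; with the write cache disabled (SCP_WCE clear), writes
 * get FUA so they reach the medium before completion, matching what
 * the Caching mode page already told the initiator.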
	 */
	if (isread) {
		if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0)
			flags |= CTL_LLF_FUA | CTL_LLF_DPO;
	} else {
		if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
			flags |= CTL_LLF_FUA;
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;

	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}

static int
ctl_cnw_cont(union ctl_io *io)
{
	struct ctl_lun *lun = CTL_LUN(io);
	struct ctl_scsiio *ctsio;
	struct ctl_lba_len_flags *lbalen;
	int retval;

	ctsio = &io->scsiio;
	ctsio->io_hdr.status = CTL_STATUS_NONE;
	ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->flags &= ~CTL_LLF_COMPARE;
	lbalen->flags |= CTL_LLF_WRITE;

	CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

int
ctl_cnw(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun = CTL_LUN(ctsio);
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;

	CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	switch (ctsio->cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = cdb->length;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio,
		    MAX(lba, lun->be_lun->maxlba + 1));
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA if write cache is disabled. */
	if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0)
		flags |= CTL_LLF_FUA;

	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; int num_filled, num_luns, num_port_luns, retval; uint32_t alloc_len, lun_datalen; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_report_luns *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_report_luns\n")); num_luns = 0; num_port_luns = port->lun_map ? 
port->lun_map_size : ctl_max_luns; mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) num_luns++; } mtx_unlock(&softc->ctl_lock); switch (cdb->select_report) { case RPL_REPORT_DEFAULT: case RPL_REPORT_ALL: case RPL_REPORT_NONSUBSID: break; case RPL_REPORT_WELLKNOWN: case RPL_REPORT_ADMIN: case RPL_REPORT_CONGLOM: num_luns = 0; break; default: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); break; /* NOTREACHED */ } alloc_len = scsi_4btoul(cdb->length); /* * The initiator has to allocate at least 16 bytes for this request, * so he can at least get the header and the first LUN. Otherwise * we reject the request (per SPC-3 rev 14, section 6.21). */ if (alloc_len < (sizeof(struct scsi_report_luns_data) + sizeof(struct scsi_report_luns_lundata))) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); } lun_datalen = sizeof(*lun_data) + (num_luns * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0, num_filled = 0; targ_lun_id < num_port_luns && num_filled < num_luns; targ_lun_id++) { lun_id = ctl_lun_map_from_port(port, targ_lun_id); if (lun_id == UINT32_MAX) continue; lun = softc->ctl_luns[lun_id]; if (lun == NULL) continue; be64enc(lun_data->luns[num_filled++].lundata, ctl_encode_lun(targ_lun_id)); /* * According to SPC-3, rev 14 section 6.21: * * "The execution of a REPORT LUNS command to any valid and * installed logical unit shall clear the REPORTED LUNS DATA * HAS CHANGED unit attention condition for all logical * units of that target with respect to the requesting * initiator. A valid and installed logical unit is one * having a PERIPHERAL QUALIFIER of 000b in the standard * INQUIRY data (see 6.4.2)." * * If request_lun is NULL, the LUN this report luns command * was issued to is either disabled or doesn't exist. In that * case, we shouldn't clear any pending lun change unit * attention. */ if (request_lun != NULL) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); /* * It's quite possible that we've returned fewer LUNs than we allocated * space for. Trim it. */ lun_datalen = sizeof(*lun_data) + (num_filled * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(lun_datalen, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * We set this to the actual data length, regardless of how much * space we actually have to return results. If the user looks at * this value, he'll know whether or not he allocated enough space * and reissue the command if necessary. We don't support well * known logical units, so if the user asks for that, return none. */ scsi_ulto4b(lun_datalen - 8, lun_data->length); /* * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy * this request. 
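 *
 * Illustrative note, not part of this change: the LUN LIST LENGTH field
 * counts only the 8-byte LUN entries, not the 8-byte header, so the
 * "lun_datalen - 8" above is equivalent to the following, assuming
 * sizeof(struct scsi_report_luns_data) is just the bare header:
 *
 *	scsi_ulto4b(num_filled *
 *	    sizeof(struct scsi_report_luns_lundata), lun_data->length);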
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_request_sense(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_request_sense *cdb; struct scsi_sense_data *sense_ptr, *ps; uint32_t initidx; int have_error; u_int sense_len = SSD_FULL_SIZE; scsi_sense_data_type sense_format; ctl_ua_type ua_type; uint8_t asc = 0, ascq = 0; cdb = (struct scsi_request_sense *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_request_sense\n")); /* * Determine which sense format the user wants. */ if (cdb->byte2 & SRS_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; /* * struct scsi_sense_data, which is currently set to 256 bytes, is * larger than the largest allowed value for the length field in the * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. */ ctsio->kern_data_len = cdb->length; ctsio->kern_total_len = cdb->length; /* * If we don't have a LUN, we don't have any pending sense. */ if (lun == NULL || ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_link < CTL_HA_LINK_UNKNOWN)) { /* "Logical unit not supported" */ ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x25, /*ascq*/ 0x00, SSD_ELEM_NONE); goto send; } have_error = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * Check for pending sense, and then for pending unit attentions. * Pending sense gets returned first, then pending unit attentions. */ mtx_lock(&lun->lun_lock); ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps += initidx % CTL_MAX_INIT_PER_PORT; if (ps != NULL && ps->error_code != 0) { scsi_sense_data_type stored_format; /* * Check to see which sense format was used for the stored * sense data. */ stored_format = scsi_sense_type(ps); /* * If the user requested a different sense format than the * one we stored, then we need to convert it to the other * format. If we're going from descriptor to fixed format * sense data, we may lose things in translation, depending * on what options were used. * * If the stored format is SSD_TYPE_NONE (i.e. invalid), * for some reason we'll just copy it out as-is. */ if ((stored_format == SSD_TYPE_FIXED) && (sense_format == SSD_TYPE_DESC)) ctl_sense_to_desc((struct scsi_sense_data_fixed *) ps, (struct scsi_sense_data_desc *)sense_ptr); else if ((stored_format == SSD_TYPE_DESC) && (sense_format == SSD_TYPE_FIXED)) ctl_sense_to_fixed((struct scsi_sense_data_desc *) ps, (struct scsi_sense_data_fixed *)sense_ptr); else memcpy(sense_ptr, ps, sizeof(*sense_ptr)); ps->error_code = 0; have_error = 1; } else { ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, sense_format); if (ua_type != CTL_UA_NONE) have_error = 1; } if (have_error == 0) { /* * Report informational exception if have one and allowed. */ if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { asc = lun->ie_asc; ascq = lun->ie_ascq; } ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NO_SENSE, /*asc*/ asc, /*ascq*/ ascq, SSD_ELEM_NONE); } mtx_unlock(&lun->lun_lock); send: /* * We report the SCSI status as OK, since the status of the command * itself is OK. We're reporting sense as parameter data. 
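 *
 * Illustrative summary, not part of this change: the selection above
 * amounts to a fixed priority order for what REQUEST SENSE returns:
 *
 *	1. Stored sense for this initiator, converted with
 *	   ctl_sense_to_desc()/ctl_sense_to_fixed() when the stored
 *	   format differs from the one requested via the DESC bit.
 *	2. Otherwise, a pending unit attention via ctl_build_ua().
 *	3. Otherwise, NO SENSE, optionally carrying the informational
 *	   exception ASC/ASCQ when the MRIE mode page field enables it.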
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x00, the Supported VPD Pages page. */ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_supported_pages *pages; int sup_page_size; int p; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sup_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; /* SCSI Feature Sets */ pages->page_list[p++] = SVPD_SCSI_SFS; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_unit_serial_number *sn_ptr; int data_len; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. 
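 *
 * Illustrative note, not part of this change: SPC-style ASCII fields
 * are fixed width, space padded (0x20) and not NUL terminated, which
 * is why the strncpy() below may legitimately fill the whole field
 * without terminating it. A defensive way to write the same thing
 * (hypothetical, assuming src may be shorter than the field) is:
 *
 *	memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN);
 *	memcpy(sn_ptr->serial_num, src, MIN(CTL_SN_LEN, strlen(src)));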
*/ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_extended_inquiry_data *eid_ptr; int data_len; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. */ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we report * it to that nexus once. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLR; /* * We support revert to defaults (RTD) bit in MODE SELECT. */ eid_ptr->flags5 = SVPD_EID_RTD_SUP; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_mode_page_policy *mpp_ptr; int data_len; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
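 *
 * Illustrative note, not part of this change: the single descriptor
 * built below uses the wildcard page code 0x3f with subpage 0xff,
 * which per SPC stands for "all mode pages and subpages", so one
 * descriptor is enough to declare the shared policy for everything:
 *
 *	mpp_ptr->descr[0].page_code = 0x3f;	// all mode pages
 *	mpp_ptr->descr[0].subpage_code = 0xff;	// all subpages
 *	mpp_ptr->descr[0].policy = SVPD_MPP_SHARED;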
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; int data_len, g; uint8_t proto; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port && port->port_devid) data_len += port->port_devid->len; if (port && port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port && port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port && port->port_type == CTL_PORT_SAS) proto = SCSI_PROTO_SAS << 4; else if (port && port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. 
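 *
 * Illustrative sketch, not part of this change: each identification
 * descriptor is a 4-byte header plus a variable-length identifier,
 * which is why the cursor below advances by the stored designator
 * length. A hypothetical walker (assuming buf is a uint8_t pointer to
 * total_len bytes of concatenated designators) would look like:
 *
 *	size_t off = 0;
 *	while (off + sizeof(struct scsi_vpd_id_descriptor) <= total_len) {
 *		struct scsi_vpd_id_descriptor *d =
 *		    (struct scsi_vpd_id_descriptor *)(buf + off);
 *		off += sizeof(*d) + d->length;	// header + identifier
 *	}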
*/ if (port && port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; if (softc->is_single || (port && port->status & CTL_PORT_STATUS_HA_SHARED)) g = 1; else g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; scsi_ulto2b(g, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port && port->target_devid) { memcpy(desc, port->target_devid->data, port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
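 *
 * Illustrative note, not part of this change: this page is built with
 * a two-pass pattern. The first pass over the port list (above) only
 * sizes the buffer; the second pass (below) fills it. The sizing
 * mirrors the layout exactly:
 *
 *	data_len = sizeof(struct scsi_vpd_scsi_ports) +
 *	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
 *	    sizeof(struct scsi_vpd_port_designation_cont)) +
 *	    iid_len + id_len;
 *
 * A port that goes away between the two passes just leaves zeroed
 * slack at the tail, since the buffer is allocated M_ZERO.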
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, pd->relative_port_id); if (port->init_devid) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_sfs *sfs_ptr; int sfs_page_size, n; sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sfs_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sfs_ptr->page_code = SVPD_SCSI_SFS; n = 0; /* Discovery 2016 */ scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* SBC Base 2016 */ scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); /* SBC Base 2010 */ scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { /* Basic Provisioning 2016 */ scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); } /* Drive Maintenance 2016 */ //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); } scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_limits *bl_ptr; const char *val; uint64_t ival; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. 
The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_descr", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_u64to8b(ival, bl_ptr->max_write_same_length); if (lun->be_lun->maxlba + 1 > ival) bl_ptr->flags |= SVPD_BL_WSNZ; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_device_characteristics *bdc_ptr; const char *value; u_int i; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
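 *
 * Illustrative note, not part of this change: the rotation rate and
 * form factor reported below come from optional per-LUN options, e.g.
 * (hypothetical invocation and file path):
 *
 *	ctladm create -b block -o file=/tmp/disk.img -o rpm=7200
 *
 * Without an "rpm" option the page advertises
 * CTL_DEFAULT_ROTATION_RATE; per SBC a value of 0001h would mean a
 * non-rotating (solid state) medium.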
*/ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_logical_block_prov *lbp_ptr; const char *value; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; value = dnvlist_get_string(lun->be_lun->options, "provisioning_type", NULL); if (value != NULL) { if (strcmp(value, "resource") == 0) lbp_ptr->prov_type = SVPD_LBP_RESOURCE; else if (strcmp(value, "thin") == 0) lbp_ptr->prov_type = SVPD_LBP_THIN; } else lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
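 *
 * Pages handled by the switch below (summary; the last three are only
 * served for T_DIRECT LUNs):
 *
 *	0x00 supported pages		0x80 unit serial number
 *	0x83 device identification	0x86 extended INQUIRY data
 *	0x87 mode page policy		0x88 SCSI ports
 *	0x8f third-party copy		0x92 SCSI feature sets
 *	0xb0 block limits		0xb1 block device characteristics
 *	0xb2 logical block provisioning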
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry *cdb; int alloc_len, retval; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_SCSI_SFS: retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. */ static int ctl_inquiry_std(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry_data *inq_ptr; struct scsi_inquiry *cdb; const char *val; uint32_t alloc_len, data_len; ctl_port_type port_type; port_type = port->port_type; if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) port_type = CTL_PORT_SCSI; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); /* * We malloc the full inquiry data size here and fill it * in. If the user only asks for less, we'll give him * that much. */ data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (lun != NULL) { if ((lun->flags & CTL_LUN_PRIMARY_SC) || softc->ha_link >= CTL_HA_LINK_UNKNOWN) { inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; } else { inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | lun->be_lun->lun_type; } if (lun->flags & CTL_LUN_REMOVABLE) inq_ptr->dev_qual2 |= SID_RMB; } else inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ inq_ptr->version = SCSI_REV_SPC5; /* * According to SAM-3, even if a device only supports a single * level of LUN addressing, it should still set the HISUP bit: * * 4.9.1 Logical unit numbers overview * * All logical unit number formats described in this standard are * hierarchical in structure even when only a single level in that * hierarchy is used. The HISUP bit shall be set to one in the * standard INQUIRY data (see SPC-2) when any logical unit number * format described in this standard is used. Non-hierarchical * formats are outside the scope of this standard. 
* * Therefore we set the HiSup bit here. * * The response format is 2, per SPC-3. */ inq_ptr->response_format = SID_HiSup | 2; inq_ptr->additional_length = data_len - (offsetof(struct scsi_inquiry_data, additional_length) + 1); CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; inq_ptr->spc2_flags |= SPC2_SID_MultiP; inq_ptr->flags = SID_CmdQue; if (port_type == CTL_PORT_SCSI) inq_ptr->flags |= SID_WBus16 | SID_Sync; /* * Per SPC-3, unused bytes in ASCII strings are filled with spaces. * We have 8 bytes for the vendor name, and 16 bytes for the device * name and 4 bytes for the revision. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "vendor", NULL)) == NULL) { strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); } else { memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); strncpy(inq_ptr->vendor, val, min(sizeof(inq_ptr->vendor), strlen(val))); } if (lun == NULL) { strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", NULL)) == NULL) { switch (lun->be_lun->lun_type) { case T_DIRECT: strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); break; case T_PROCESSOR: strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, sizeof(inq_ptr->product)); break; case T_CDROM: strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, sizeof(inq_ptr->product)); break; default: strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, sizeof(inq_ptr->product)); break; } } else { memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); strncpy(inq_ptr->product, val, min(sizeof(inq_ptr->product), strlen(val))); } /* * XXX make this a macro somewhere so it automatically gets * incremented when we make changes. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "revision", NULL)) == NULL) { strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); } else { memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); strncpy(inq_ptr->revision, val, min(sizeof(inq_ptr->revision), strlen(val))); } /* * For parallel SCSI, we support double transition and single * transition clocking. We also support QAS (Quick Arbitration * and Selection) and Information Unit transfers on both the * control and array devices. 
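 *
 * Illustrative note, not part of this change: the version descriptor
 * codes written below come from the SPC "Version descriptor values"
 * table; for example 0x00C0 is SAM-6 and 0x05C0 is SPC-5, both with
 * no version claimed, while the transport descriptor (version3) is
 * chosen from the port type and the device-type descriptor (version4)
 * from the LUN type.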
*/ if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; /* SAM-6 (no version claimed) */ scsi_ulto2b(0x00C0, inq_ptr->version1); /* SPC-5 (no version claimed) */ scsi_ulto2b(0x05C0, inq_ptr->version2); if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); } else if (port_type == CTL_PORT_ISCSI) { /* iSCSI (no version claimed) */ scsi_ulto2b(0x0960, inq_ptr->version3); } else if (port_type == CTL_PORT_SAS) { /* SAS (no version claimed) */ scsi_ulto2b(0x0BE0, inq_ptr->version3); } else if (port_type == CTL_PORT_UMASS) { /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ scsi_ulto2b(0x1730, inq_ptr->version3); } if (lun == NULL) { /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); break; case T_PROCESSOR: break; case T_CDROM: /* MMC-6 (no version claimed) */ scsi_ulto2b(0x04E0, inq_ptr->version4); break; default: break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_inquiry(struct ctl_scsiio *ctsio) { struct scsi_inquiry *cdb; int retval; CTL_DEBUG_PRINT(("ctl_inquiry\n")); cdb = (struct scsi_inquiry *)ctsio->cdb; if (cdb->byte2 & SI_EVPD) retval = ctl_inquiry_evpd(ctsio); else if (cdb->page_code == 0) retval = ctl_inquiry_std(ctsio); else { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } return (retval); } int ctl_get_config(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_config_header *hdr; struct scsi_get_config_feature *feature; struct scsi_get_config *cdb; uint32_t alloc_len, data_len; int rt, starting; cdb = (struct scsi_get_config *)ctsio->cdb; rt = (cdb->rt & SGC_RT_MASK); starting = scsi_2btoul(cdb->starting_feature); alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_config_header) + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; if (lun->flags & CTL_LUN_NO_MEDIA) scsi_ulto2b(0x0000, hdr->current_profile); else scsi_ulto2b(0x0010, hdr->current_profile); feature = (struct scsi_get_config_feature *)(hdr + 1); if (starting > 0x003b) goto done; if (starting > 0x003a) goto f3b; if (starting > 0x002b) goto f3a; if (starting > 0x002a) goto f2b; if (starting > 0x001f) goto f2a; if (starting > 0x001e) goto f1f; if (starting > 0x001d) goto f1e; if (starting > 0x0010) goto f1d; if (starting > 0x0003) goto f10; if (starting > 0x0002) goto f3; if 
(starting > 0x0001) goto f2; if (starting > 0x0000) goto f1; /* Profile List */ scsi_ulto2b(0x0000, feature->feature_code); feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ feature->feature_data[2] = 0x00; scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ feature->feature_data[6] = 0x01; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1: /* Core */ scsi_ulto2b(0x0001, feature->feature_code); feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(0x00000000, &feature->feature_data[0]); feature->feature_data[4] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2: /* Morphing */ scsi_ulto2b(0x0002, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x02; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3: /* Removable Medium */ scsi_ulto2b(0x0003, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x39; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) goto done; f10: /* Random Read */ scsi_ulto2b(0x0010, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); scsi_ulto2b(1, &feature->feature_data[4]); feature->feature_data[6] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1d: /* Multi-Read */ scsi_ulto2b(0x001D, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 0; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1e: /* CD Read */ scsi_ulto2b(0x001E, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1f: /* DVD Read */ scsi_ulto2b(0x001F, feature->feature_code); feature->flags = 0x08; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x01; feature->feature_data[2] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2a: /* DVD+RW */ scsi_ulto2b(0x002A, feature->feature_code); feature->flags = 0x04; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2b: /* DVD+R */ scsi_ulto2b(0x002B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3a: /* DVD+RW Dual Layer */ scsi_ulto2b(0x003A, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 
feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3b: /* DVD+R Dual Layer */ scsi_ulto2b(0x003B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; done: data_len = (uint8_t *)feature - (uint8_t *)hdr; if (rt == SGC_RT_SPECIFIC && data_len > 4) { feature = (struct scsi_get_config_feature *)(hdr + 1); if (scsi_2btoul(feature->feature_code) == starting) feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; data_len = (uint8_t *)feature - (uint8_t *)hdr; } scsi_ulto4b(data_len - 4, hdr->data_length); ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_event_status(struct ctl_scsiio *ctsio) { struct scsi_get_event_status_header *hdr; struct scsi_get_event_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_get_event_status *)ctsio->cdb; if ((cdb->byte2 & SGESN_POLLED) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_event_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; scsi_ulto2b(0, hdr->descr_length); hdr->nea_class = SGESN_NEA; hdr->supported_class = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_mechanism_status(struct ctl_scsiio *ctsio) { struct scsi_mechanism_status_header *hdr; struct scsi_mechanism_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_mechanism_status *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_mechanism_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; hdr->state1 = 0x00; hdr->state2 = 0xe0; scsi_ulto3b(0, hdr->lba); hdr->slots_num = 0; scsi_ulto2b(0, hdr->slots_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static void ctl_ultomsf(uint32_t lba, uint8_t *buf) { lba += 150; buf[0] = 0; buf[1] = bin2bcd((lba / 75) / 60); buf[2] = bin2bcd((lba / 75) % 60); buf[3] = bin2bcd(lba % 75); } int ctl_read_toc(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_toc_hdr *hdr; struct scsi_read_toc_type01_descr *descr; struct scsi_read_toc *cdb; uint32_t alloc_len, data_len; int 
format, msf; cdb = (struct scsi_read_toc *)ctsio->cdb; msf = (cdb->byte2 & CD_MSF) != 0; format = cdb->format; alloc_len = scsi_2btoul(cdb->data_len); data_len = sizeof(struct scsi_read_toc_hdr); if (format == 0) data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); else data_len += sizeof(struct scsi_read_toc_type01_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; if (format == 0) { scsi_ulto2b(0x12, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); descr++; descr->addr_ctl = 0x14; descr->track_number = 0xaa; if (msf) ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); else scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); } else { scsi_ulto2b(0x0a, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * For known CDB types, parse the LBA and length. */ static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) { KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); switch (io->scsiio.cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = cdb->length; break; } case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)io->scsiio.cdb; *lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ *lba &= 0x1fffff; *len = cdb->length; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 
*)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_verify_16 *cdb; cdb = (struct scsi_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case UNMAP: { *lba = 0; *len = UINT64_MAX; break; } case SERVICE_ACTION_IN: { /* GET LBA STATUS */ struct scsi_get_lba_status *cdb; cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = UINT32_MAX; break; } default: return (1); break; /* NOTREACHED */ } return (0); } static ctl_action ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, bool seq) { uint64_t endlba1, endlba2; endlba1 = lba1 + len1 - (seq ? 0 : 1); endlba2 = lba2 + len2 - 1; if ((endlba1 < lba2) || (endlba2 < lba1)) return (CTL_ACTION_PASS); else return (CTL_ACTION_BLOCK); } static int ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) { struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end, *range; uint64_t lba; uint32_t len; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); /* If not UNMAP -- go other way. */ if (io->scsiio.cdb[0] != UNMAP) return (CTL_ACTION_ERROR); /* If UNMAP without data -- block and wait for data. */ ptrlen = (struct ctl_ptr_len_flags *) &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || ptrlen->ptr == NULL) return (CTL_ACTION_BLOCK); /* UNMAP with data -- check for collision. 
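 *
 * Illustrative note, not part of this change: the per-range test below
 * is the standard half-open interval intersection. [lba, lba + len)
 * and [lba2, lba2 + len2) overlap exactly when
 *
 *	lba < lba2 + len2 && lba2 < lba + len
 *
 * which is what the loop checks, written as
 * (lba < lba2 + len2) && (lba + len > lba2).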
*/ buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); len = scsi_4btoul(range->length); if ((lba < lba2 + len2) && (lba + len > lba2)) return (CTL_ACTION_BLOCK); } return (CTL_ACTION_PASS); } static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); retval = ctl_extent_check_unmap(io1, lba2, len2); if (retval != CTL_ACTION_ERROR) return (retval); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) seq = FALSE; return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); } static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) { uint64_t lba1, lba2; uint64_t len1, len2; if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) return (CTL_ACTION_PASS); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); if (lba1 + len1 == lba2) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); } static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io) { const struct ctl_cmd_entry *pending_entry, *ooa_entry; const ctl_serialize_action *serialize_row; /* * Aborted commands are not going to be executed and may even * not report completion, so we don't care about their order. * Let them complete ASAP to clean the OOA queue. */ if (pending_io->io_hdr.flags & CTL_FLAG_ABORT) return (CTL_ACTION_SKIP); /* * The initiator attempted multiple untagged commands at the same * time. Can't do that. */ if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP); /* * The initiator attempted to send multiple tagged commands with * the same ID. (It's fine if different initiators have the same * tag ID.) * * Even if all of those conditions are true, we don't kill the I/O * if the command ahead of us has been aborted. We won't end up * sending it to the FETD, and it's perfectly legal to resend a * command with the same tag number as long as the previous * instance of this tag number has been aborted somehow. */ if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP_TAG); /* * If we get a head of queue tag, SAM-3 says that we should * immediately execute it. * * What happens if this command would normally block for some other * reason? e.g. a request sense with a head of queue tag * immediately after a write. Normally that would block, but this * will result in its getting executed immediately... * * We currently return "pass" instead of "skip", so we'll end up * going through the rest of the queue to check for overlapped tags. * * XXX KDM check for other types of blockage first?? 
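 *
 * Summary, not part of this change, of the tag-type rules implemented
 * below before the per-opcode serialization table is consulted:
 *
 *	HEAD OF QUEUE	pass (executed immediately, caveat above)
 *	ORDERED		block behind every earlier command
 *	SIMPLE/untagged	block only behind HEAD OF QUEUE and ORDERED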
*/ if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) return (CTL_ACTION_PASS); /* * Ordered tags have to block until all items ahead of them * have completed. If we get called with an ordered tag, we always * block, if something else is ahead of us in the queue. */ if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) return (CTL_ACTION_BLOCK); /* * Simple tags get blocked until all head of queue and ordered tags * ahead of them have completed. I'm lumping untagged commands in * with simple tags here. XXX KDM is that the right thing to do? */ if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) return (CTL_ACTION_BLOCK); pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p", __func__, pending_entry->seridx, pending_io->scsiio.cdb[0], pending_io->scsiio.cdb[1], pending_io)); ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); if (ooa_entry->seridx == CTL_SERIDX_INVLD) return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */ KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT, ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p", __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0], ooa_io->scsiio.cdb[1], ooa_io)); serialize_row = ctl_serialize_table[ooa_entry->seridx]; switch (serialize_row[pending_entry->seridx]) { case CTL_SER_BLOCK: return (CTL_ACTION_BLOCK); case CTL_SER_EXTENT: return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); case CTL_SER_EXTENTOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); return (CTL_ACTION_PASS); case CTL_SER_EXTENTSEQ: if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) return (ctl_extent_check_seq(ooa_io, pending_io)); return (CTL_ACTION_PASS); case CTL_SER_PASS: return (CTL_ACTION_PASS); case CTL_SER_BLOCKOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); case CTL_SER_SKIP: return (CTL_ACTION_SKIP); default: panic("%s: Invalid serialization value %d for %d => %d", __func__, serialize_row[pending_entry->seridx], pending_entry->seridx, ooa_entry->seridx); } return (CTL_ACTION_ERROR); } /* * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. * Assumptions: * - pending_io is generally either incoming, or on the blocked queue * - starting I/O is the I/O we want to start the check with. */ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io **starting_io) { union ctl_io *ooa_io; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run back along the OOA queue, starting with the current * blocked I/O and going through every I/O before it on the * queue. If starting_io is NULL, we'll just end up returning * CTL_ACTION_PASS. */ for (ooa_io = *starting_io; ooa_io != NULL; ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { action = ctl_check_for_blockage(lun, pending_io, ooa_io); if (action != CTL_ACTION_PASS) { *starting_io = ooa_io; return (action); } } *starting_io = NULL; return (CTL_ACTION_PASS); } /* * Try to unblock the specified I/O. * * skip parameter allows explicitly skip present blocker of the I/O, * starting from the previous one on OOA queue. 
It can be used when * we know for sure that the blocker I/O no longer counts. */ static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) { struct ctl_softc *softc = lun->ctl_softc; union ctl_io *bio, *obio; const struct ctl_cmd_entry *entry; union ctl_ha_msg msg_info; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); if (io->io_hdr.blocker == NULL) return; obio = bio = io->io_hdr.blocker; if (skip) bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); action = ctl_check_ooa(lun, io, &bio); if (action == CTL_ACTION_BLOCK) { /* Still blocked, but may be by different I/O now. */ if (bio != obio) { TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = bio; } return; } /* No longer blocked, one way or another. */ TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = NULL; switch (action) { case CTL_ACTION_OVERLAP: ctl_set_overlapped_cmd(&io->scsiio); goto error; case CTL_ACTION_OVERLAP_TAG: ctl_set_overlapped_tag(&io->scsiio, io->scsiio.tag_num & 0xff); goto error; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: /* Serializing commands from the other SC retire there. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = io; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_NOWAIT); break; } /* * Check this I/O for LUN state changes that may have happened * while this command was blocked. The LUN state may have been * changed by a command ahead of us in the queue. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(io); break; case CTL_ACTION_ERROR: default: ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 0, /*retry_count*/ 0); error: /* Serializing commands from the other SC are done here. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { ctl_try_unblock_others(lun, io, TRUE); LIST_REMOVE(&io->io_hdr, ooa_links); ctl_copy_sense_data_back(io, &msg_info); msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); ctl_free_io(io); break; } ctl_done(io); break; } } /* * Try to unblock I/Os blocked by the specified I/O. * * The skip parameter allows explicitly skipping the specified I/O as a * blocker, starting from the previous one on the OOA queue. It can be used * when we know for sure that the specified I/O no longer counts (it is * done). It still has to be on the OOA queue, though, so that we know * where to start.
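 * * For example, elsewhere in this file ctl_process_done() calls * ctl_try_unblock_others(lun, io, TRUE) for a completing I/O just before * removing it from the OOA queue, while ctl_serseq_done() calls it with * skip == FALSE for an I/O that is still in flight.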
*/ static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) { union ctl_io *io, *next_io; mtx_assert(&lun->lun_lock, MA_OWNED); for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); KASSERT(io->io_hdr.blocker != NULL, ("I/O %p on blocked list without blocker", io)); ctl_try_unblock_io(lun, io, skip); } KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), ("blocked_queue is not empty after skipping %p", bio)); } /* * This routine (with one exception) checks LUN flags that can be set by * commands ahead of us in the OOA queue. These flags have to be checked * when a command initially comes in, and when we pull a command off the * blocked queue and are preparing to execute it. The reason we have to * check these flags for commands on the blocked queue is that the LUN * state may have been changed by a command ahead of us while we're on the * blocked queue. * * Ordering is somewhat important with these checks, so please pay * careful attention to the placement of any new checks. */ static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) { struct ctl_softc *softc = lun->ctl_softc; int retval; uint32_t residx; retval = 0; mtx_assert(&lun->lun_lock, MA_OWNED); /* * If this shelf is a secondary shelf controller, we may have to * reject some commands disallowed by HA mode and link state. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { if (softc->ha_link == CTL_HA_LINK_OFFLINE && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_unavail(ctsio); retval = 1; goto bailout; } if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_transit(ctsio); retval = 1; goto bailout; } if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { ctl_set_lun_standby(ctsio); retval = 1; goto bailout; } /* The rest of the checks are only done on the executing side */ if (softc->ha_mode == CTL_HA_MODE_XFER) goto bailout; } if (entry->pattern & CTL_LUN_PAT_WRITE) { if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { ctl_set_hw_write_protected(ctsio); retval = 1; goto bailout; } if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); retval = 1; goto bailout; } } /* * Check for a reservation conflict. If this command isn't allowed * even on reserved LUNs, and if this initiator isn't the one who * reserved us, reject the command with a reservation conflict. */ residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if ((lun->flags & CTL_LUN_RESERVED) && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { if (lun->res_idx != residx) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { /* No reservation or command is allowed. */; } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && (lun->pr_res_type == SPR_TYPE_WR_EX || lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { /* The command is allowed for Write Exclusive resv. */; } else { /* * If we aren't registered, or it's a res holder type * reservation and this isn't the res holder, then set a * conflict.
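 * * (Illustrative note, assuming the standard SPC persistent reservation * type codes: the pr_res_type < 4 test below matches the Write Exclusive * and Exclusive Access types, where a single registered nexus holds the * reservation, as opposed to the registrants-only and all-registrants * variants.)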
*/ if (ctl_get_prkey(lun, residx) == 0 || (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { if (lun->flags & CTL_LUN_EJECTED) ctl_set_lun_ejected(ctsio); else if (lun->flags & CTL_LUN_NO_MEDIA) { if (lun->flags & CTL_LUN_REMOVABLE) ctl_set_lun_no_media(ctsio); else ctl_set_lun_int_reqd(ctsio); } else if (lun->flags & CTL_LUN_STOPPED) ctl_set_lun_stopped(ctsio); else goto bailout; retval = 1; goto bailout; } bailout: return (retval); } static void ctl_failover_io(union ctl_io *io, int have_lock) { ctl_set_busy(&io->scsiio); ctl_done(io); } static void ctl_failover_lun(union ctl_io *rio) { struct ctl_softc *softc = CTL_SOFTC(rio); struct ctl_lun *lun; struct ctl_io_hdr *io, *next_io; uint32_t targ_lun; targ_lun = rio->io_hdr.nexus.targ_mapped_lun; CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); /* Find and lock the LUN. */ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } if (softc->ha_mode == CTL_HA_MODE_XFER) { LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_ABORT; io->flags |= CTL_FLAG_FAILOVER; ctl_try_unblock_io(lun, (union ctl_io *)io, FALSE); } else { /* This can only be due to DATAMOVE */ io->msg_type = CTL_MSG_DATAMOVE_DONE; io->flags &= ~CTL_FLAG_DMA_INPROG; io->flags |= CTL_FLAG_IO_ACTIVE; io->port_status = 31340; ctl_enqueue_isc((union ctl_io *)io); } } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_FAILOVER; } else { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } else { /* SERIALIZE modes */ LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->blocker != NULL) { TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, io, blocked_links); io->blocker = NULL; } ctl_try_unblock_others(lun, (union ctl_io *)io, TRUE); LIST_REMOVE(io, ooa_links); ctl_free_io((union ctl_io *)io); } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } mtx_unlock(&lun->lun_lock); } static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; union ctl_io *bio; uint32_t initidx, targ_lun; lun = NULL; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; if (targ_lun < ctl_max_luns) lun = softc->ctl_luns[targ_lun]; if (lun) { /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; } } CTL_LUN(ctsio) = lun; if (lun) { CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a particular LUN, * and stays there until completion.
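 * * (Under CTL_TIME_IO, the block just below also credits lun->idle_time * with the interval the OOA queue sat empty before this insertion.)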
*/ #ifdef CTL_TIME_IO if (LIST_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); } /* Get command entry and return error if it is unsupported. */ entry = ctl_validate_command(ctsio); if (entry == NULL) { if (lun) mtx_unlock(&lun->lun_lock); return; } ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; /* * Check to see whether we can send this command to LUNs that don't * exist. This should pretty much only be the case for inquiry * and request sense. Further checks, below, really require having * a LUN, so we can't really check the command anymore. Just put * it on the rtr queue. */ if (lun == NULL) { if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); return; } ctl_set_unsupported_lun(ctsio); ctl_done((union ctl_io *)ctsio); CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); return; } else { /* * Make sure we support this particular command on this LUN. * e.g., we don't support writes to the control LUN. */ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return; } } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * If we've got a request sense, it'll clear the contingent * allegiance condition. Otherwise, if we have a CA condition for * this initiator, clear it, because it sent down a command other * than request sense. */ if (ctsio->cdb[0] != REQUEST_SENSE) { struct scsi_sense_data *ps; ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; } /* * If the command has this flag set, it handles its own unit * attention reporting, so we shouldn't do anything. Otherwise we * check for any pending unit attentions, and send them back to the * initiator. We only do this when a command initially comes in, * not when we pull it off the blocked queue. * * According to SAM-3, section 5.3.2, the order that things get * presented back to the host is basically unit attentions caused * by some sort of reset event, busy status, reservation conflicts * or task set full, and finally any other status. * * One issue here is that some of the unit attentions we report * don't fall into the "reset" category (e.g. "reported luns data * has changed"). So reporting it here, before the reservation * check, may be technically wrong. I guess the only thing to do * would be to check for and report the reset events here, and then * check for the other unit attention types after we check for a * reservation conflict. * * XXX KDM need to fix this */ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_ua_type ua_type; u_int sense_len = 0; ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, &sense_len, SSD_TYPE_NONE); if (ua_type != CTL_UA_NONE) { mtx_unlock(&lun->lun_lock); ctsio->scsi_status = SCSI_STATUS_CHECK_COND; ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; ctsio->sense_len = sense_len; ctl_done((union ctl_io *)ctsio); return; } } if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return; } /* * XXX CHD this is where we want to send IO to other side if * this LUN is secondary on this SC. We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and flag * the copy we send as FROM_OTHER.
* We also need to stuff the address of the original IO so we can * find it easily. Something similar will need to be done on the other * side so that when we are done we can find the copy. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; msg_info.scsi.priority = ctsio->priority; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); msg_info.scsi.cdb_len = ctsio->cdb_len; if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { ctl_set_busy(ctsio); ctl_done((union ctl_io *)ctsio); return; } return; } bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { case CTL_ACTION_BLOCK: ctsio->io_hdr.blocker = bio; TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_ERROR: default: mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done((union ctl_io *)ctsio); break; } } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("No length defined for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { switch (lun_type) { case T_DIRECT: if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) return (0); break; case T_PROCESSOR: if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) return (0); break; case T_CDROM: if ((entry->flags &
CTL_CMD_FLAG_OK_ON_CDROM) == 0) return (0); break; default: return (0); } return (1); } static int ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry *entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. */ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } static int ctl_target_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; uint32_t initidx; ctl_ua_type ua_type; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = io->taskio.task_action; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); if (io->taskio.task_action == CTL_TASK_TARGET_RESET) ua_type = CTL_UA_TARG_RESET; else ua_type = CTL_UA_BUS_RESET; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; ctl_do_lun_reset(lun, initidx, ua_type); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_io_hdr *xioh; int i; mtx_lock(&lun->lun_lock); /* Abort tasks. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; ctl_try_unblock_io(lun, (union ctl_io *)xioh, FALSE); } /* Clear CA. */ for (i = 0; i < ctl_max_ports; i++) { free(lun->pending_sense[i], M_CTL); lun->pending_sense[i] = NULL; } /* Clear reservation. */ lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent) { for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->prevent, i); lun->prevent_count = 0; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, -1); /* Establish UA. 
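 * Passing -1 below establishes the UA for all initiators; the #if 0 * alternative would spare the initiator that requested the reset (see * the XXX KDM note in the comment above this function).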
*/ #if 0 ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif mtx_unlock(&lun->lun_lock); } static int ctl_lun_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { union ctl_ha_msg msg_info; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } return (0); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { struct ctl_io_hdr *xioh; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((targ_port == UINT32_MAX || targ_port == xioh->nexus.targ_port) && (init_id == UINT32_MAX || init_id == xioh->nexus.initid)) { if (targ_port != xioh->nexus.targ_port || init_id != xioh->nexus.initid) xioh->flags |= CTL_FLAG_ABORT_STATUS; xioh->flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xioh->nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. 
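 * As elsewhere in this file, we take lun_lock before dropping ctl_lock, * so the LUN cannot go away between the lookup and our use of it.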
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t p, i; p = initidx / CTL_MAX_INIT_PER_PORT; i = initidx % CTL_MAX_INIT_PER_PORT; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); /* Abort tasks. */ ctl_abort_tasks_lun(lun, p, i, 1); /* Clear CA. */ ps = lun->pending_sense[p]; if (ps != NULL) ps[i].error_code = 0; /* Clear reservation. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, initidx); /* Establish UA. */ ctl_est_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); uint32_t initidx; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_abort_task(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_io_hdr *xioh; struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) || (xioh->nexus.initid != io->io_hdr.nexus.initid) || (xioh->flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. Otherwise, * we just check to see whether the tag numbers * match. 
This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. */ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) { #else /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { #endif xioh->flags |= CTL_FLAG_ABORT; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_task(union ctl_io *io, int task_set) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_io_hdr *xioh; struct ctl_lun *lun; int found = 0; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) || (xioh->nexus.initid != io->io_hdr.nexus.initid) || (xioh->flags & CTL_FLAG_ABORT)) continue; if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { found = 1; break; } } mtx_unlock(&lun->lun_lock); if (found) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_async_event(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; ctl_ua_type ua; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); mtx_unlock(&lun->lun_lock); if (ua != CTL_UA_NONE) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_run_task(union ctl_io *io) { int retval = 1; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case
CTL_TASK_LUN_RESET: retval = ctl_lun_reset(io); break; case CTL_TASK_TARGET_RESET: case CTL_TASK_BUS_RESET: retval = ctl_target_reset(io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; case CTL_TASK_QUERY_TASK: retval = ctl_query_task(io, 0); break; case CTL_TASK_QUERY_TASK_SET: retval = ctl_query_task(io, 1); break; case CTL_TASK_QUERY_ASYNC_EVENT: retval = ctl_query_async_event(io); break; default: printf("%s: got unknown task management event %d\n", __func__, io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller. */ static void ctl_handle_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_done(io); break; } mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctl_done(io); break; } if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_free_io(io); break; } mtx_lock(&lun->lun_lock); ctl_try_unblock_others(lun, io, TRUE); LIST_REMOVE(&io->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_free_io(io); break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc(io); ctl_free_io(io); break; case CTL_MSG_BAD_JUJU: ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ - io->scsiio.be_move_done(io); + ctl_datamove_done(io, false); break; case CTL_MSG_FAILOVER: ctl_failover_lun(io); ctl_free_io(io); break; default: printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); ctl_free_io(io); break; } } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. * * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. */ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. 
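 * Illustration (not part of the original comment, and assuming the same * half-open overlap test used by ctl_extent_check_unmap() above): an * injected range of {lba 100, len 8}, i.e. blocks 100-107, matches a * READ of 16 blocks at LBA 104 (blocks 104-119), but not one starting * at LBA 108.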
*/ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. */ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio, (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_OUT); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. */ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. 
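 * (As the code below shows, a descriptor without CTL_LUN_INJ_CONTINUOUS * set fires once and is then unlinked from lun->error_list and freed.)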
*/ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ +static void +ctl_datamove_done_process(union ctl_io *io) +{ +#ifdef CTL_TIME_IO + struct bintime cur_bt; +#endif + + KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, + ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); + +#ifdef CTL_TIME_IO + getbinuptime(&cur_bt); + bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); + bintime_add(&io->io_hdr.dma_bt, &cur_bt); +#endif + io->io_hdr.num_dmas++; + + if ((io->io_hdr.port_status != 0) && + ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || + (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { + ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, + /*retry_count*/ io->io_hdr.port_status); + } else if (io->scsiio.kern_data_resid != 0 && + (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && + ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || + (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { + ctl_set_invalid_field_ciu(&io->scsiio); + } else if (ctl_debug & CTL_DEBUG_CDB_DATA) + ctl_data_print(io); +} + +void +ctl_datamove_done(union ctl_io *io, bool samethr) +{ + + ctl_datamove_done_process(io); + io->scsiio.be_move_done(io, samethr); +} + void ctl_datamove(union ctl_io *io) { void (*fe_datamove)(union ctl_io *io); mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); /* No data transferred yet. Frontend must update this when done. */ io->scsiio.kern_data_resid = io->scsiio.kern_data_len; #ifdef CTL_TIME_IO - if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { - char str[256]; - char path_str[64]; - struct sbuf sb; - - ctl_scsi_path_string(io, path_str, sizeof(path_str)); - sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); - - sbuf_cat(&sb, path_str); - switch (io->io_hdr.io_type) { - case CTL_IO_SCSI: - ctl_scsi_command_string(&io->scsiio, NULL, &sb); - sbuf_printf(&sb, "\n"); - sbuf_cat(&sb, path_str); - sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", - io->scsiio.tag_num, io->scsiio.tag_type, - io->scsiio.priority); - break; - case CTL_IO_TASK: - sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", - io->taskio.task_action, - io->taskio.tag_num, io->taskio.tag_type); - break; - default: - panic("%s: Invalid CTL I/O type %d\n", - __func__, io->io_hdr.io_type); - } - sbuf_cat(&sb, path_str); - sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", - (intmax_t)time_uptime - io->io_hdr.start_time); - sbuf_finish(&sb); - printf("%s", sbuf_data(&sb)); - } + getbinuptime(&io->io_hdr.dma_start_bt); #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. 
Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; - /* - * Note that the backend, in this case, will get the - * callback in its context. In other cases it may get - * called in the frontend's interrupt thread context. - */ - io->scsiio.be_move_done(io); + ctl_datamove_done_process(io); + io->scsiio.be_move_done(io, true); return; } /* Don't confuse frontend with zero length data move. */ if (io->scsiio.kern_data_len == 0) { - io->scsiio.be_move_done(io); + ctl_datamove_done_process(io); + io->scsiio.be_move_done(io, true); return; } fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.port_status = io->io_hdr.port_status; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; } /* * The DMA to the remote side is done; now we need to tell the other side * we're done so it can continue with its data movement. */ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; uint32_t i; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The data is in local and remote memory, so now we need to send * status (good or bad) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int -ctl_datamove_remote_dm_write_cb(union ctl_io *io) +ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr) { int retval; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller.
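 * * (Mapping those steps to the code: the fe_datamove() call below covers * the first; ctl_datamove_remote_dm_write_cb(), installed here as * be_move_done, then runs ctl_datamove_remote_xfer() for the second; and * ctl_datamove_remote_write_cb() finishes up via ctl_send_datamove_done().)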
*/ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int -ctl_datamove_remote_dm_read_cb(union ctl_io *io) +ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr) { uint32_t i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The read is done, now we need to send status (good or bad) back * to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (0); } static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; void (*fe_datamove)(union ctl_io *io); io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA read failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; /* XXX KDM add checks like the ones in ctl_datamove? */ fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist; uint32_t len_to_go; int retval; int i; retval = 0; local_sglist = CTL_LSGL(io); len_to_go = io->scsiio.kern_data_len; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; len_to_go > 0; i++) { local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); local_sglist[i].addr = malloc(local_sglist[i].len, M_CTL, M_WAITOK); len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The original * number of S/G entries is available in rem_sg_entries. */ io->scsiio.kern_sg_entries = i; return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; uint32_t local_used, remote_used, total_used; int i, j, isc_ret; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. 
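 * (In the transfer loop below, each HA DT request covers * MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used) * bytes, so i and j advance independently and mismatched segment sizes on * the two sides are handled naturally; the completion callback is armed * only on the final chunk.)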
*/ ctl_send_datamove_done(io, /*have_lock*/ 0); return (1); } local_sglist = CTL_LSGL(io); remote_sglist = CTL_RSGL(io); local_used = 0; remote_used = 0; total_used = 0; /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ isc_ret = CTL_HA_STATUS_SUCCESS; for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { uint32_t cur_len; uint8_t *tmp_ptr; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. */ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); rq->size = cur_len; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; #if 0 /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ rq->local = vtophys(tmp_ptr); } else rq->local = tmp_ptr; #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); rq->local = tmp_ptr; #endif tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; isc_ret = ctl_dt_single(rq); if (isc_ret > CTL_HA_STATUS_SUCCESS) break; } if (isc_ret != CTL_HA_STATUS_WAIT) { rq->ret = isc_ret; callback(rq); } return (0); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; uint32_t i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if (retval != 0) { /* * Make sure we free memory if there was an error. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; } } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 0); return; } /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller.
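 * (The 313xx port_status values used here and in ctl_datamove() are just * distinctive nonzero markers; ctl_datamove_done_process() treats a * nonzero port_status as a transfer failure unless an error status has * already been set.)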
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) ctl_datamove_remote_write(io); else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ctl_datamove_remote_read(io); else { io->io_hdr.port_status = 31339; ctl_send_datamove_done(io, /*have_lock*/ 0); } } static void ctl_process_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun = CTL_LUN(io); void (*fe_done)(union ctl_io *io); union ctl_ha_msg msg; CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = port->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", io->scsiio.tag_num, io->scsiio.tag_type, io->scsiio.priority); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_INFO) ctl_io_error_print(io, NULL); fe_done(io); return; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any informational exception, and whether the * status of this command can be modified to report it in the form of * either RECOVERED ERROR or NO SENSE, depending on the MRIE mode page * field. */ if (lun->ie_reported == 0 && lun->ie_asc != 0 && io->io_hdr.status == CTL_SUCCESS && (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { uint8_t mrie = lun->MODE_IE.mrie; uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || (lun->MODE_VER.byte3 & SMS_VER_PER)); if (((mrie == SIEP_MRIE_REC_COND && per) || mrie == SIEP_MRIE_REC_UNCOND || mrie == SIEP_MRIE_NO_SENSE) && (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, /*asc*/ lun->ie_asc, /*ascq*/ lun->ie_ascq, SSD_ELEM_NONE); lun->ie_reported = 1; } } else if (lun->ie_reported < 0) lun->ie_reported = 0; /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. */ if (!STAILQ_EMPTY(&lun->error_list) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency?
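 * * (For example, a successfully completed READ adds kern_total_len to * lun->stats.bytes[CTL_STATS_READ] and bumps the corresponding * operations[] and dmas[] counters for both the LUN and the port below.)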
*/ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { int type; #ifdef CTL_TIME_IO struct bintime bt; getbinuptime(&bt); bintime_sub(&bt, &io->io_hdr.start_bt); #endif if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; lun->stats.bytes[type] += io->scsiio.kern_total_len; lun->stats.operations[type]++; lun->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&lun->stats.time[type], &bt); #endif mtx_lock(&port->port_lock); port->stats.bytes[type] += io->scsiio.kern_total_len; port->stats.operations[type]++; port->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&port->stats.time[type], &bt); #endif mtx_unlock(&port->port_lock); } /* * Run through the blocked queue of this I/O and see if anything * can be unblocked, now that this I/O is done and will be removed. * We need to do it before removal so that we have an OOA position * to start from. */ ctl_try_unblock_others(lun, io, TRUE); /* * Remove this from the OOA queue. */ LIST_REMOVE(&io->io_hdr, ooa_links); #ifdef CTL_TIME_IO if (LIST_EMPTY(&lun->ooa_queue)) lun->last_busy = getsbinuptime(); #endif /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. */ if ((lun->flags & CTL_LUN_INVALID) && LIST_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && (ctl_debug & CTL_DEBUG_INFO) != 0) ctl_io_error_print(io, NULL); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. */ if ((softc->ha_mode != CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), M_WAITOK); } fe_done(io); } /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. */ int ctl_queue_sense(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t initidx, p, targ_lun; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. * If we don't have a LUN for this, just toss the sense information.
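 * (pending_sense is allocated lazily below: one array of * CTL_MAX_INIT_PER_PORT sense buffers per port, indexed by * initidx % CTL_MAX_INIT_PER_PORT.)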
*/ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); goto bailout; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); p = initidx / CTL_MAX_INIT_PER_PORT; if (lun->pending_sense[p] == NULL) { lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, M_CTL, M_NOWAIT | M_ZERO); } if ((ps = lun->pending_sense[p]) != NULL) { ps += initidx % CTL_MAX_INIT_PER_PORT; memset(ps, 0, sizeof(*ps)); memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); } mtx_unlock(&lun->lun_lock); bailout: ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_enqueue_incoming(io); break; default: printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } int ctl_run(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_scsiio_precheck(&io->scsiio); break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_run_task(io); break; default: printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } #ifdef CTL_IO_DELAY static void ctl_done_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_done(io); } #endif /* CTL_IO_DELAY */ void ctl_serseq_done(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); if (lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) return; mtx_lock(&lun->lun_lock); io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; ctl_try_unblock_others(lun, io, FALSE); mtx_unlock(&lun->lun_lock); } void ctl_done(union ctl_io *io) { /* * Enable this to catch duplicate completion issues. */ #if 0 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { printf("%s: type %d msg %d cdb %x iptl: " "%u:%u:%u tag 0x%04x " "flag %#x status %x\n", __func__, io->io_hdr.io_type, io->io_hdr.msg_type, io->scsiio.cdb[0], io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, (io->io_hdr.io_type == CTL_IO_TASK) ? io->taskio.tag_num : io->scsiio.tag_num, io->io_hdr.flags, io->io_hdr.status); } else io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; #endif /* * This is an internal copy of an I/O, and should not go through * the normal done processing logic. 
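 * (Such copies are marked CTL_FLAG_INT_COPY; we simply return below * without queueing them to the done queue.)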
*/ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - incoming queue * - RtR queue * * If those queues are empty, we break out of the loop and * go to sleep. */ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(&io->scsiio); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } /* Sleep until we have something to do. 
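* mtx_sleep() with PDROP below releases queue_lock on the way to sleep and does not retake it on wakeup, which is why the loop re-acquires the lock at the top of each iteration; the matching wakeup(thr) calls live in the ctl_enqueue_*() helpers further down.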
*/ mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0); } thr->thread = NULL; kthread_exit(); } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct ctl_logical_block_provisioning_page *page; const char *attr; union ctl_ha_msg msg; uint64_t thres, val; int i, e, set; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_NO_MEDIA) || lun->backend->lun_attr == NULL) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_mode == CTL_HA_MODE_XFER) continue; if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->MODE_LBP; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr(lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e = (val >= thres); else e = (val <= thres); if (e) break; } mtx_lock(&lun->lun_lock); if (e) { scsi_u64to8b((uint8_t *)&page->descr[i] - (uint8_t *)page, lun->ua_tpt_info); if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = 1; } else set = 0; } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = -1; } mtx_unlock(&lun->lun_lock); if (set != 0 && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. 
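* In CTL_HA_MODE_XFER the peer controller keeps its own unit attention state, so the threshold event is mirrored to it below as a CTL_MSG_UA message carrying the set/clear flag, a wildcard (all-initiator) nexus for this LUN, and the 8-byte descriptor offset saved in ua_tpt_info.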
*/ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = (set > 0); msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); mtx_unlock(&softc->ctl_lock); // XXX ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); mtx_lock(&softc->ctl_lock); } } mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, PDROP, "-", CTL_LBP_PERIOD * hz); } softc->thresh_thread = NULL; kthread_exit(); } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl.h b/sys/cam/ctl/ctl.h index 56dd5313b4cb..be3e4a37b157 100644 --- a/sys/cam/ctl/ctl.h +++ b/sys/cam/ctl/ctl.h @@ -1,210 +1,211 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl.h#5 $ * $FreeBSD$ */ /* * Function definitions used both within CTL and potentially in various CTL * clients. * * Author: Ken Merry */ #ifndef _CTL_H_ #define _CTL_H_ #define CTL_RETVAL_COMPLETE 0 #define CTL_RETVAL_QUEUED 1 #define CTL_RETVAL_ALLOCATED 2 #define CTL_RETVAL_ERROR 3 typedef enum { CTL_PORT_NONE = 0x00, CTL_PORT_FC = 0x01, CTL_PORT_SCSI = 0x02, CTL_PORT_IOCTL = 0x04, CTL_PORT_INTERNAL = 0x08, CTL_PORT_ISCSI = 0x10, CTL_PORT_SAS = 0x20, CTL_PORT_UMASS = 0x40, CTL_PORT_ALL = 0xff, CTL_PORT_ISC = 0x100 // FC port for inter-shelf communication } ctl_port_type; struct ctl_port_entry { ctl_port_type port_type; char port_name[64]; int32_t targ_port; int physical_port; int virtual_port; u_int flags; #define CTL_PORT_WWNN_VALID 0x01 #define CTL_PORT_WWPN_VALID 0x02 uint64_t wwnn; uint64_t wwpn; int online; }; struct ctl_modepage_header { uint8_t page_code; uint8_t subpage; uint16_t len_used; uint16_t len_left; }; union ctl_modepage_info { struct ctl_modepage_header header; }; /* * Serial number length, for VPD page 0x80. */ #define CTL_SN_LEN 16 /* * Device ID length, for VPD page 0x83. */ #define CTL_DEVID_LEN 64 #define CTL_DEVID_MIN_LEN 16 /* * WWPN length, for VPD page 0x83. */ #define CTL_WWPN_LEN 8 #define CTL_DRIVER_NAME_LEN 32 /* * Unit attention types. ASC/ASCQ values for these should be placed in * ctl_build_ua. These are also listed in order of reporting priority. * i.e. a poweron UA is reported first, bus reset second, etc. */ typedef enum { CTL_UA_NONE = 0x0000, CTL_UA_POWERON = 0x0001, CTL_UA_BUS_RESET = 0x0002, CTL_UA_TARG_RESET = 0x0004, CTL_UA_I_T_NEXUS_LOSS = 0x0008, CTL_UA_LUN_RESET = 0x0010, CTL_UA_LUN_CHANGE = 0x0020, CTL_UA_MODE_CHANGE = 0x0040, CTL_UA_LOG_CHANGE = 0x0080, CTL_UA_INQ_CHANGE = 0x0100, CTL_UA_RES_PREEMPT = 0x0400, CTL_UA_RES_RELEASE = 0x0800, CTL_UA_REG_PREEMPT = 0x1000, CTL_UA_ASYM_ACC_CHANGE = 0x2000, CTL_UA_CAPACITY_CHANGE = 0x4000, CTL_UA_THIN_PROV_THRES = 0x8000, CTL_UA_MEDIUM_CHANGE = 0x10000, CTL_UA_IE = 0x20000 } ctl_ua_type; #ifdef _KERNEL MALLOC_DECLARE(M_CTL); struct ctl_page_index; #ifdef SYSCTL_DECL /* from sysctl.h */ SYSCTL_DECL(_kern_cam_ctl); #endif struct ctl_lun; struct ctl_port; struct ctl_softc; /* * Put a string into an sbuf, escaping characters that are illegal or not * recommended in XML. Note this doesn't escape everything, just > < and &. 
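* For example, a string like a<b&c comes out as a&lt;b&amp;c.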
*/ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size); int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last); int ctl_set_mask(uint32_t *mask, uint32_t bit); int ctl_clear_mask(uint32_t *mask, uint32_t bit); int ctl_is_set(uint32_t *mask, uint32_t bit); int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); int ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); -int ctl_config_move_done(union ctl_io *io); +int ctl_config_move_done(union ctl_io *io, bool samethr); +void ctl_datamove_done(union ctl_io *io, bool samethr); void ctl_datamove(union ctl_io *io); void ctl_serseq_done(union ctl_io *io); void ctl_done(union ctl_io *io); void ctl_data_submit_done(union ctl_io *io); void ctl_config_read_done(union ctl_io *io); void ctl_config_write_done(union ctl_io *io); void ctl_portDB_changed(int portnum); int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua); void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua); void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua); void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type); uint32_t ctl_decode_lun(uint64_t encoded); uint64_t ctl_encode_lun(uint32_t decoded); void ctl_isc_announce_lun(struct ctl_lun *lun); void ctl_isc_announce_port(struct ctl_port *port); void ctl_isc_announce_iid(struct ctl_port *port, int iid); void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage); int ctl_expand_number(const char *buf, uint64_t *num); #endif /* _KERNEL */ #endif /* _CTL_H_ */ /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h index be8ab4d1706b..05e65abe41f8 100644 --- a/sys/cam/ctl/ctl_backend.h +++ b/sys/cam/ctl/ctl_backend.h @@ -1,251 +1,250 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.h#2 $ * $FreeBSD$ */ /* * CTL backend driver definitions * * Author: Ken Merry */ #ifndef _CTL_BACKEND_H_ #define _CTL_BACKEND_H_ #include #include typedef enum { CTL_LUN_SERSEQ_OFF, CTL_LUN_SERSEQ_READ, CTL_LUN_SERSEQ_ON } ctl_lun_serseq; #ifdef _KERNEL #define CTL_BACKEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ return (ctl_backend_register( \ (struct ctl_backend_driver *)data)); \ break; \ case MOD_UNLOAD: \ return (ctl_backend_deregister( \ (struct ctl_backend_driver *)data)); \ break; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) struct ctl_be_lun; typedef void (*be_callback_t)(struct ctl_be_lun *be_lun); /* * The lun_type field is the SCSI device type of this particular LUN. In * general, this should be T_DIRECT, although backends will want to create * a processor LUN, typically at LUN 0. See scsi_all.h for the defines for * the various SCSI device types. * * The flags are described above. * * The be_lun field is the backend driver's own context that will get * passed back so that it can tell which LUN CTL is referencing. * * maxlba is the maximum accessible LBA on the LUN. Note that this is * different from the capacity of the array. capacity = maxlba + 1 * * blocksize is the size, in bytes, of each LBA on the LUN. In general * this should be 512. In theory CTL should be able to handle other block * sizes. Host application software may not deal with it very well, though. * * pblockexp is the log2() of number of LBAs on the LUN per physical sector. * * pblockoff is the lowest LBA on the LUN aligned to physical sector. * * ublockexp is the log2() of number of LBAs on the LUN per UNMAP block. * * ublockoff is the lowest LBA on the LUN aligned to UNMAP block. * * atomicblock is the number of blocks that can be written atomically. * * opttxferlen is the number of blocks that can be written in one operation. * * req_lun_id is the requested LUN ID. CTL only pays attention to this * field if the CTL_LUN_FLAG_ID_REQ flag is set. If the requested LUN ID is * not available, the LUN addition will fail. If a particular LUN ID isn't * requested, the first available LUN ID will be allocated. * * serial_num is the device serial number returned in the SCSI INQUIRY VPD * page 0x80. This should be a unique, per-shelf value. The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded out with ASCII spaces.
This field should NOT be NULL * terminated. * * device_id is the T10 device identifier returned in the SCSI INQUIRY VPD * page 0x83. This should be a unique, per-LUN value. The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded with ASCII spaces. This field should NOT be NULL * terminated. * * The lun_shutdown() method is the callback for the ctl_remove_lun() * call. It is called when all outstanding I/O for that LUN has been * completed and CTL has deleted the resources for that LUN. When the CTL * backend gets this call, it can safely free its per-LUN resources. * * The be field is a pointer to the ctl_backend_driver structure, which * contains the backend methods to be called by CTL. * * The ctl_lun field is for CTL internal use only, and should not be used * by the backend. * * The links field is for CTL internal use only, and should not be used by * the backend. */ struct ctl_be_lun { uint8_t lun_type; /* passed to CTL */ ctl_backend_lun_flags flags; /* passed to CTL */ ctl_lun_serseq serseq; /* passed to CTL */ uint64_t maxlba; /* passed to CTL */ uint32_t blocksize; /* passed to CTL */ uint16_t pblockexp; /* passed to CTL */ uint16_t pblockoff; /* passed to CTL */ uint16_t ublockexp; /* passed to CTL */ uint16_t ublockoff; /* passed to CTL */ uint32_t atomicblock; /* passed to CTL */ uint32_t opttxferlen; /* passed to CTL */ uint32_t req_lun_id; /* passed to CTL */ uint32_t lun_id; /* returned from CTL */ uint8_t serial_num[CTL_SN_LEN]; /* passed to CTL */ uint8_t device_id[CTL_DEVID_LEN];/* passed to CTL */ be_callback_t lun_shutdown; /* passed to CTL */ struct ctl_backend_driver *be; /* passed to CTL */ void *ctl_lun; /* used by CTL */ nvlist_t *options; /* passed to CTL */ STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */ }; typedef enum { CTL_BE_FLAG_NONE = 0x00, /* no flags */ CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */ } ctl_backend_flags; typedef int (*be_init_t)(void); typedef int (*be_shutdown_t)(void); typedef int (*be_func_t)(union ctl_io *io); typedef void (*be_vfunc_t)(union ctl_io *io); typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); typedef int (*be_luninfo_t)(struct ctl_be_lun *be_lun, struct sbuf *sb); typedef uint64_t (*be_lunattr_t)(struct ctl_be_lun *be_lun, const char *attrname); struct ctl_backend_driver { char name[CTL_BE_NAME_LEN]; /* passed to CTL */ ctl_backend_flags flags; /* passed to CTL */ be_init_t init; /* passed to CTL */ be_shutdown_t shutdown; /* passed to CTL */ be_func_t data_submit; /* passed to CTL */ - be_func_t data_move_done; /* passed to CTL */ be_func_t config_read; /* passed to CTL */ be_func_t config_write; /* passed to CTL */ be_ioctl_t ioctl; /* passed to CTL */ be_luninfo_t lun_info; /* passed to CTL */ be_lunattr_t lun_attr; /* passed to CTL */ #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED be_func_t config_move_done; /* passed to backend */ #endif #if 0 be_vfunc_t config_write_done; /* passed to backend */ #endif STAILQ_ENTRY(ctl_backend_driver) links; /* used by CTL */ }; int ctl_backend_register(struct ctl_backend_driver *be); int ctl_backend_deregister(struct ctl_backend_driver *be); struct ctl_backend_driver *ctl_backend_find(char *backend_name); /* * To add a LUN, call ctl_add_lun(). */ int ctl_add_lun(struct ctl_be_lun *be_lun); /* * To remove a LUN, first call ctl_remove_lun(). * You will get the lun_shutdown() callback when all * I/O to the LUN has completed and the LUN has been deleted. 
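* A minimal teardown sketch (the per-LUN state and malloc type names here are hypothetical): call ctl_remove_lun(be_lun) and return; only after the lun_shutdown(be_lun) callback fires is it safe to do something like free(softc->lun_state, M_MYBACKEND).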
*/ int ctl_remove_lun(struct ctl_be_lun *be_lun); /* * To start a LUN (transition from powered off to powered on state) call * ctl_start_lun(). To stop a LUN (transition from powered on to powered * off state) call ctl_stop_lun(). */ int ctl_start_lun(struct ctl_be_lun *be_lun); int ctl_stop_lun(struct ctl_be_lun *be_lun); /* * Methods to notify about media and tray status changes. */ int ctl_lun_no_media(struct ctl_be_lun *be_lun); int ctl_lun_has_media(struct ctl_be_lun *be_lun); int ctl_lun_ejected(struct ctl_be_lun *be_lun); /* * Called on LUN HA role change. */ int ctl_lun_primary(struct ctl_be_lun *be_lun); int ctl_lun_secondary(struct ctl_be_lun *be_lun); /* * Let the backend notify the initiators about changes. */ void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun); #endif /* _KERNEL */ #endif /* _CTL_BACKEND_H_ */ /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c index 0fbe0949f893..17a336ebe872 100644 --- a/sys/cam/ctl/ctl_backend_block.c +++ b/sys/cam/ctl/ctl_backend_block.c @@ -1,2836 +1,2804 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2009-2011 Spectra Logic Corporation * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ */ /* * CAM Target Layer driver backend for block devices. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The idea here is that we'll allocate enough S/G space to hold a 1MB * I/O. 
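* With the defaults below (a half-I/O of 512kB and a minimum segment of 128kB) that works out to at most CTLBLK_MAX_SEGS = 8 scatter/gather entries, each allocated from one of two UMA zones depending on whether it exceeds CTLBLK_MIN_SEG; the full 1MB is CTLBLK_MAX_IO_SIZE.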
If we get an I/O larger than that, we'll split it. */ #define CTLBLK_HALF_IO_SIZE (512 * 1024) #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) #define CTLBLK_MIN_SEG (128 * 1024) #define CTLBLK_MAX_SEG MIN(CTLBLK_HALF_IO_SIZE, maxphys) #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1) #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) #define CTLBLK_NUM_SEGS (CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG) #ifdef CTLBLK_DEBUG #define DPRINTF(fmt, args...) \ printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while(0) #endif #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) SDT_PROVIDER_DEFINE(cbb); typedef enum { CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, CTL_BE_BLOCK_LUN_WAITING = 0x04, } ctl_be_block_lun_flags; typedef enum { CTL_BE_BLOCK_NONE, CTL_BE_BLOCK_DEV, CTL_BE_BLOCK_FILE } ctl_be_block_type; struct ctl_be_block_filedata { struct ucred *cred; }; union ctl_be_block_bedata { struct ctl_be_block_filedata file; }; struct ctl_be_block_io; struct ctl_be_block_lun; typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun, const char *attrname); /* * Backend LUN structure. There is a 1:1 mapping between a block device * and a backend block LUN, and between a backend block LUN and a CTL LUN. */ struct ctl_be_block_lun { struct ctl_be_lun cbe_lun; /* Must be first element. */ struct ctl_lun_create_params params; char *dev_path; ctl_be_block_type dev_type; struct vnode *vn; union ctl_be_block_bedata backend; cbb_dispatch_t dispatch; cbb_dispatch_t lun_flush; cbb_dispatch_t unmap; cbb_dispatch_t get_lba_status; cbb_getattr_t getattr; uint64_t size_blocks; uint64_t size_bytes; struct ctl_be_block_softc *softc; struct devstat *disk_stats; ctl_be_block_lun_flags flags; SLIST_ENTRY(ctl_be_block_lun) links; struct taskqueue *io_taskqueue; struct task io_task; int num_threads; STAILQ_HEAD(, ctl_io_hdr) input_queue; STAILQ_HEAD(, ctl_io_hdr) config_read_queue; STAILQ_HEAD(, ctl_io_hdr) config_write_queue; STAILQ_HEAD(, ctl_io_hdr) datamove_queue; struct mtx_padalign io_lock; struct mtx_padalign queue_lock; }; /* * Overall softc structure for the block backend module. */ struct ctl_be_block_softc { struct sx modify_lock; struct mtx lock; int num_luns; SLIST_HEAD(, ctl_be_block_lun) lun_list; uma_zone_t beio_zone; uma_zone_t bufmin_zone; uma_zone_t bufmax_zone; }; static struct ctl_be_block_softc backend_block_softc; /* * Per-I/O information. 
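* Each beio below bundles everything one backend operation needs: the S/G segments and the parallel iovec array, a reference count released via ctl_refcnt_beio(), per-bio completion accounting (num_bios_sent/num_bios_done/send_complete) for the device path, and devstat timing state.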
*/ struct ctl_be_block_io { union ctl_io *io; struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; struct iovec xiovecs[CTLBLK_MAX_SEGS]; int refcnt; int bio_cmd; int two_sglists; int num_segs; int num_bios_sent; int num_bios_done; int send_complete; int first_error; uint64_t first_error_offset; struct bintime ds_t0; devstat_tag_type ds_tag_type; devstat_trans_flags ds_trans_type; uint64_t io_len; uint64_t io_offset; int io_arg; struct ctl_be_block_softc *softc; struct ctl_be_block_lun *lun; void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ }; extern struct ctl_softc *control_softc; static int cbb_num_threads = 32; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer Block Backend"); SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, &cbb_num_threads, 0, "Number of threads per backing file"); static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); static void ctl_free_beio(struct ctl_be_block_io *beio); static void ctl_complete_beio(struct ctl_be_block_io *beio); -static int ctl_be_block_move_done(union ctl_io *io); +static int ctl_be_block_move_done(union ctl_io *io, bool samethr); static void ctl_be_block_biodone(struct bio *bio); static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_worker(void *context, int pending); static int ctl_be_block_submit(union ctl_io *io); static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun); static int ctl_be_block_config_write(union ctl_io *io); static int ctl_be_block_config_read(union ctl_io *io); static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb); static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname); static int ctl_be_block_init(void); static int 
ctl_be_block_shutdown(void); static struct ctl_backend_driver ctl_be_block_driver = { .name = "block", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_be_block_init, .shutdown = ctl_be_block_shutdown, .data_submit = ctl_be_block_submit, - .data_move_done = ctl_be_block_move_done, .config_read = ctl_be_block_config_read, .config_write = ctl_be_block_config_write, .ioctl = ctl_be_block_ioctl, .lun_info = ctl_be_block_lun_info, .lun_attr = ctl_be_block_lun_attr }; MALLOC_DEFINE(M_CTLBLK, "ctlblock", "Memory used for CTL block backend"); CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); static void ctl_alloc_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg, size_t len) { if (len <= CTLBLK_MIN_SEG) { sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK); } else { KASSERT(len <= CTLBLK_MAX_SEG, ("Too large alloc %zu > %lu", len, CTLBLK_MAX_SEG)); sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK); } sg->len = len; } static void ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg) { if (sg->len <= CTLBLK_MIN_SEG) { uma_zfree(softc->bufmin_zone, sg->addr); } else { KASSERT(sg->len <= CTLBLK_MAX_SEG, ("Too large free %zu > %lu", sg->len, CTLBLK_MAX_SEG)); uma_zfree(softc->bufmax_zone, sg->addr); } } static struct ctl_be_block_io * ctl_alloc_beio(struct ctl_be_block_softc *softc) { struct ctl_be_block_io *beio; beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO); beio->softc = softc; beio->refcnt = 1; return (beio); } static void ctl_real_free_beio(struct ctl_be_block_io *beio) { struct ctl_be_block_softc *softc = beio->softc; int i; for (i = 0; i < beio->num_segs; i++) { ctl_free_seg(softc, &beio->sg_segs[i]); /* For compare we had two equal S/G lists. */ if (beio->two_sglists) { ctl_free_seg(softc, &beio->sg_segs[i + CTLBLK_HALF_SEGS]); } } uma_zfree(softc->beio_zone, beio); } static void ctl_refcnt_beio(void *arg, int diff) { struct ctl_be_block_io *beio = arg; if (atomic_fetchadd_int(&beio->refcnt, diff) + diff == 0) ctl_real_free_beio(beio); } static void ctl_free_beio(struct ctl_be_block_io *beio) { ctl_refcnt_beio(beio, -1); } static void ctl_complete_beio(struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; if (beio->beio_cont != NULL) { beio->beio_cont(beio); } else { ctl_free_beio(beio); ctl_data_submit_done(io); } } static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static void ctl_be_block_compare(union ctl_io *io) { struct ctl_be_block_io *beio; uint64_t off, res; int i; uint8_t info[8]; beio = (struct ctl_be_block_io *)PRIV(io)->ptr; off = 0; for (i = 0; i < beio->num_segs; i++) { res = cmp(beio->sg_segs[i].addr, beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, beio->sg_segs[i].len); off += res; if (res < beio->sg_segs[i].len) break; } if (i < beio->num_segs) { scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); } else ctl_set_success(&io->scsiio); } static int -ctl_be_block_move_done(union ctl_io *io) +ctl_be_block_move_done(union ctl_io *io, bool samethr) { struct ctl_be_block_io *beio; struct ctl_be_block_lun *be_lun; struct ctl_lba_len_flags *lbalen; -#ifdef CTL_TIME_IO - struct bintime cur_bt; -#endif beio = (struct ctl_be_block_io *)PRIV(io)->ptr; be_lun = beio->lun; DPRINTF("entered\n"); - -#ifdef CTL_TIME_IO - getbinuptime(&cur_bt); - bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); - 
bintime_add(&io->io_hdr.dma_bt, &cur_bt); -#endif - io->io_hdr.num_dmas++; io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; /* - * We set status at this point for read commands, and write - * commands with errors. + * We set status at this point for read and compare commands. */ - if (io->io_hdr.flags & CTL_FLAG_ABORT) { - ; - } else if ((io->io_hdr.port_status != 0) && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || - (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { - ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, - /*retry_count*/ io->io_hdr.port_status); - } else if (io->scsiio.kern_data_resid != 0 && - (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || - (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { - ctl_set_invalid_field_ciu(&io->scsiio); - } else if ((io->io_hdr.port_status == 0) && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { + if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && + (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) { lbalen = ARGS(beio->io); if (lbalen->flags & CTL_LLF_READ) { ctl_set_success(&io->scsiio); } else if (lbalen->flags & CTL_LLF_COMPARE) { /* We have two data blocks ready for comparison. */ ctl_be_block_compare(io); } } /* * If this is a read, or a write with errors, it is done. */ if ((beio->bio_cmd == BIO_READ) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { ctl_complete_beio(beio); return (0); } /* - * At this point, we have a write and the DMA completed - * successfully. We now have to queue it to the task queue to + * At this point, we have a write and the DMA completed successfully. + * If we were called synchronously in the original thread then just + * dispatch, otherwise we now have to queue it to the task queue to * execute the backend I/O. That is because we do blocking * memory allocations, and in the file backing case, blocking I/O. * This move done routine is generally called in the SIM's * interrupt context, and therefore we cannot block. */ - mtx_lock(&be_lun->queue_lock); - STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); - mtx_unlock(&be_lun->queue_lock); - taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); - + if (samethr) { + be_lun->dispatch(be_lun, beio); + } else { + mtx_lock(&be_lun->queue_lock); + STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); + mtx_unlock(&be_lun->queue_lock); + taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); + } return (0); } static void ctl_be_block_biodone(struct bio *bio) { struct ctl_be_block_io *beio; struct ctl_be_block_lun *be_lun; union ctl_io *io; int error; beio = bio->bio_caller1; be_lun = beio->lun; io = beio->io; DPRINTF("entered\n"); error = bio->bio_error; mtx_lock(&be_lun->io_lock); if (error != 0 && (beio->first_error == 0 || bio->bio_offset < beio->first_error_offset)) { beio->first_error = error; beio->first_error_offset = bio->bio_offset; } beio->num_bios_done++; /* * XXX KDM will this cause WITNESS to complain? Holding a lock * during the free might cause it to complain. */ g_destroy_bio(bio); /* * If the send complete bit isn't set, or we aren't the last I/O to * complete, then we're done. */ if ((beio->send_complete == 0) || (beio->num_bios_done < beio->num_bios_sent)) { mtx_unlock(&be_lun->io_lock); return; } /* * At this point, we've verified that we are the last I/O to * complete, so it's safe to drop the lock. 
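* (The first_error/first_error_offset bookkeeping above deliberately keeps the failure with the lowest offset, so when several bios fail the error acted on below corresponds to the earliest part of the transfer rather than to whichever bio happened to finish last.)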
*/ devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If there are any errors from the backing device, we fail the * entire I/O with a medium error. */ error = beio->first_error; if (error != 0) { if (error == EOPNOTSUPP) { ctl_set_invalid_opcode(&io->scsiio); } else if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else if (beio->bio_cmd == BIO_FLUSH) { /* XXX KDM is there a better error here? */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad2); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write, a flush, a delete or verify, we're all done. * If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (beio->bio_cmd == BIO_FLUSH) || (beio->bio_cmd == BIO_DELETE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } } static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct mount *mountpoint; int error, lock_flags; DPRINTF("entered\n"); binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT, curthread); VOP_UNLOCK(be_lun->vn); vn_finished_write(mountpoint); mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); if (error == 0) ctl_set_success(&io->scsiio); else { /* XXX KDM is there a better error here?
*/ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad1); } ctl_complete_beio(beio); } SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, file_done, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t"); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { struct ctl_be_block_filedata *file_data; union ctl_io *io; struct uio xuio; struct iovec *xiovec; size_t s; int error, flags, i; DPRINTF("entered\n"); file_data = &be_lun->backend.file; io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); if (beio->bio_cmd == BIO_READ) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); /* * UFS pays attention to IO_DIRECT for reads. If the * DIRECTIO option is configured into the kernel, it calls * ffs_rawread(). But that only works for single-segment * uios with user space addresses. In our case, with a * kernel uio, it still reads into the buffer cache, but it * will just try to release the buffer from the cache later * on in ffs_read(). * * ZFS does not pay attention to IO_DIRECT for reads. * * UFS does not pay attention to IO_SYNC for reads. * * ZFS pays attention to IO_SYNC (which translates into the * Solaris define FRSYNC for zfs_read()) for reads. It * attempts to sync the file before reading. */ error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn); SDT_PROBE0(cbb, , read, file_done); if (error == 0 && xuio.uio_resid > 0) { /* * If we read less than requested (EOF), then * we should zero out the rest of the buffer. */ s = beio->io_len - xuio.uio_resid; for (i = 0; i < beio->num_segs; i++) { if (s >= beio->sg_segs[i].len) { s -= beio->sg_segs[i].len; continue; } bzero((uint8_t *)beio->sg_segs[i].addr + s, beio->sg_segs[i].len - s); s = 0; } } } else { struct mount *mountpoint; int lock_flags; (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); /* * UFS pays attention to IO_DIRECT for writes. The write * is done asynchronously. (Normally the write would just * get put into the cache.) * * UFS pays attention to IO_SYNC for writes. It will * attempt to write the buffer out synchronously if that * flag is set. * * ZFS does not pay attention to IO_DIRECT for writes. * * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) * for writes. It will flush the transaction from the * cache before returning.
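* (Both flags are derived from the CDB at the top of this function: CTL_LLF_DPO selects IO_DIRECT and, on writes, CTL_LLF_FUA selects IO_SYNC, so the filesystem behaviors described above are exactly what an initiator setting DPO/FUA gets.)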
*/ error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn); vn_finished_write(mountpoint); SDT_PROBE0(cbb, , write, file_done); } mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } } static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, status; DPRINTF("entered\n"); off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } VOP_UNLOCK(be_lun->vn); data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) { struct vattr vattr; struct statfs statfs; uint64_t val; int error; val = UINT64_MAX; if (be_lun->vn == NULL) return (val); vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); if (strcmp(attrname, "blocksused") == 0) { error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error == 0) val = vattr.va_bytes / be_lun->cbe_lun.blocksize; } if (strcmp(attrname, "blocksavail") == 0 && !VN_IS_DOOMED(be_lun->vn)) { error = VFS_STATFS(be_lun->vn->v_mount, &statfs); if (error == 0) val = statfs.f_bavail * statfs.f_bsize / be_lun->cbe_lun.blocksize; } VOP_UNLOCK(be_lun->vn); return (val); } static void ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io; struct cdevsw *csw; struct cdev *dev; struct uio xuio; struct iovec *xiovec; int error, flags, i, ref; DPRINTF("entered\n"); io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); 
xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { if (beio->bio_cmd == BIO_READ) error = csw->d_read(dev, &xuio, flags); else error = csw->d_write(dev, &xuio, flags); dev_relthread(dev, ref); } else error = ENXIO; if (beio->bio_cmd == BIO_READ) SDT_PROBE0(cbb, , read, file_done); else SDT_PROBE0(cbb, , write, file_done); mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } } static void ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct cdevsw *csw; struct cdev *dev; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, ref, status; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; goto done; } off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } dev_relthread(dev, ref); done: data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { struct bio *bio; struct cdevsw *csw; struct cdev *dev; int ref; DPRINTF("entered\n"); /* This can't fail, it's a blocking allocation. 
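* (g_alloc_bio() allocates with M_WAITOK and sleeps until memory is available; g_new_bio() is the variant that may return NULL.)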
*/ bio = g_alloc_bio(); bio->bio_cmd = BIO_FLUSH; bio->bio_offset = 0; bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = 0; /* * We don't need to acquire the LUN lock here, because we are only * sending one bio, and so there is no other context to synchronize * with. */ beio->num_bios_sent = 1; beio->send_complete = 1; binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { bio->bio_dev = dev; csw->d_strategy(bio); dev_relthread(dev, ref); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } static void ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio, uint64_t off, uint64_t len, int last) { struct bio *bio; uint64_t maxlen; struct cdevsw *csw; struct cdev *dev; int ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize); while (len > 0) { bio = g_alloc_bio(); bio->bio_cmd = BIO_DELETE; bio->bio_dev = dev; bio->bio_offset = off; bio->bio_length = MIN(len, maxlen); bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = off / be_lun->cbe_lun.blocksize; off += bio->bio_length; len -= bio->bio_length; mtx_lock(&be_lun->io_lock); beio->num_bios_sent++; if (last && len == 0) beio->send_complete = 1; mtx_unlock(&be_lun->io_lock); if (csw) { csw->d_strategy(bio); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end; uint64_t len; io = beio->io; DPRINTF("entered\n"); binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); if (beio->io_offset == -1) { beio->io_len = 0; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { len = (uint64_t)scsi_4btoul(buf->length) * be_lun->cbe_lun.blocksize; beio->io_len += len; ctl_be_block_unmap_dev_range(be_lun, beio, scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize, len, (end - buf < 2) ? TRUE : FALSE); } } else ctl_be_block_unmap_dev_range(be_lun, beio, beio->io_offset, beio->io_len, TRUE); } static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); struct bio *bio; struct cdevsw *csw; struct cdev *dev; off_t cur_offset; int i, max_iosize, ref; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); /* * We have to limit our I/O size to the maximum supported by the * backend device. */ if (csw) { max_iosize = dev->si_iosize_max; if (max_iosize < PAGE_SIZE) max_iosize = DFLTPHYS; } else max_iosize = DFLTPHYS; cur_offset = beio->io_offset; for (i = 0; i < beio->num_segs; i++) { size_t cur_size; uint8_t *cur_ptr; cur_size = beio->sg_segs[i].len; cur_ptr = beio->sg_segs[i].addr; while (cur_size > 0) { /* This can't fail, it's a blocking allocation. 
*/ bio = g_alloc_bio(); KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); bio->bio_cmd = beio->bio_cmd; bio->bio_dev = dev; bio->bio_caller1 = beio; bio->bio_length = min(cur_size, max_iosize); bio->bio_offset = cur_offset; bio->bio_data = cur_ptr; bio->bio_done = ctl_be_block_biodone; bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize; cur_offset += bio->bio_length; cur_ptr += bio->bio_length; cur_size -= bio->bio_length; TAILQ_INSERT_TAIL(&queue, bio, bio_queue); beio->num_bios_sent++; } } beio->send_complete = 1; binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); /* * Fire off all allocated requests! */ while ((bio = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bio, bio_queue); if (csw) csw->d_strategy(bio); else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname) { struct diocgattr_arg arg; struct cdevsw *csw; struct cdev *dev; int error, ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (UINT64_MAX); strlcpy(arg.name, attrname, sizeof(arg.name)); arg.len = sizeof(arg.value.off); if (csw->d_ioctl) { error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); } else error = ENODEV; dev_relthread(dev, ref); if (error != 0) return (UINT64_MAX); return (arg.value.off); } static void ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; beio->io_len = lbalen->len * cbe_lun->blocksize; beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_arg = (lbalen->flags & SSC_IMMED) != 0; beio->bio_cmd = BIO_FLUSH; beio->ds_trans_type = DEVSTAT_NO_DATA; DPRINTF("SYNC\n"); be_lun->lun_flush(be_lun, beio); } static void ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_config_write_done(io); return; } ctl_be_block_config_write(io); } static void ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_softc *softc = be_lun->softc; struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; uint64_t len_left, lba; uint32_t pb, pbo, adj; int i, seglen; uint8_t *buf, *end; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = ARGS(beio->io); if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; be_lun->unmap(be_lun, beio); return; } beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; DPRINTF("WRITE SAME at LBA %jx len %u\n", (uintmax_t)lbalen->lba, lbalen->len); pb = 
cbe_lun->blocksize << be_lun->cbe_lun.pblockexp; if (be_lun->cbe_lun.pblockoff > 0) pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff; else pbo = 0; len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) { /* * Setup the S/G entry for this chunk. */ seglen = MIN(CTLBLK_MAX_SEG, len_left); if (pb > cbe_lun->blocksize) { adj = ((lbalen->lba + lba) * cbe_lun->blocksize + seglen - pbo) % pb; if (seglen > adj) seglen -= adj; else seglen -= seglen % cbe_lun->blocksize; } else seglen -= seglen % cbe_lun->blocksize; ctl_alloc_seg(softc, &beio->sg_segs[i], seglen); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); beio->num_segs++; len_left -= seglen; buf = beio->sg_segs[i].addr; end = buf + seglen; for (; buf < end; buf += cbe_lun->blocksize) { if (lbalen->flags & SWS_NDOB) { memset(buf, 0, cbe_lun->blocksize); } else { memcpy(buf, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lbalen->lba + lba, buf); lba++; } } beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = lba * cbe_lun->blocksize; /* We can not do all in one run. Correct and schedule rerun. */ if (len_left > 0) { lbalen->lba += lba; lbalen->len -= lba; beio->beio_cont = ctl_be_block_cw_done_ws; } be_lun->dispatch(be_lun, beio); } static void ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_ptr_len_flags *ptrlen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } beio->io_len = 0; beio->io_offset = -1; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; DPRINTF("UNMAP\n"); be_lun->unmap(be_lun, beio); } static void ctl_be_block_cr_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_read_done(io); } static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cr_done; PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: /* GET LBA STATUS */ beio->bio_cmd = -1; beio->ds_trans_type = DEVSTAT_NO_DATA; beio->ds_tag_type = DEVSTAT_TAG_ORDERED; beio->io_len = 0; if (be_lun->get_lba_status) be_lun->get_lba_status(be_lun, beio); else ctl_be_block_cr_done(beio); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } static void ctl_be_block_cw_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_write_done(io); } static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cw_done; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case 
CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: ctl_be_block_cw_dispatch_sync(be_lun, io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_be_block_cw_dispatch_ws(be_lun, io); break; case UNMAP: ctl_be_block_cw_dispatch_unmap(be_lun, io); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t"); static void ctl_be_block_next(struct ctl_be_block_io *beio) { struct ctl_be_block_lun *be_lun; union ctl_io *io; io = beio->io; be_lun = beio->lun; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_data_submit_done(io); return; } io->io_hdr.status &= ~CTL_STATUS_MASK; io->io_hdr.status |= CTL_STATUS_NONE; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); } static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; struct ctl_lba_len_flags *lbalen; struct ctl_ptr_len_flags *bptrlen; uint64_t len_left, lbas; int i; softc = be_lun->softc; DPRINTF("entered\n"); lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_WRITE) { SDT_PROBE0(cbb, , write, start); } else { SDT_PROBE0(cbb, , read, start); } beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; bptrlen = PRIV(io); bptrlen->ptr = (void *)beio; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } if (lbalen->flags & CTL_LLF_WRITE) { beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; } else { beio->bio_cmd = BIO_READ; beio->ds_trans_type = DEVSTAT_READ; } DPRINTF("%s at LBA %jx len %u @%ju\n", (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); if (lbalen->flags & CTL_LLF_COMPARE) { beio->two_sglists = 1; lbas = CTLBLK_HALF_IO_SIZE; } else { lbas = CTLBLK_MAX_IO_SIZE; } lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize); beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize; beio->io_len = lbas * cbe_lun->blocksize; bptrlen->len += lbas; for (i = 0, len_left = beio->io_len; len_left > 0; i++) { KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", i, CTLBLK_MAX_SEGS)); /* * Setup the S/G entry for this chunk. */ ctl_alloc_seg(softc, &beio->sg_segs[i], MIN(CTLBLK_MAX_SEG, len_left)); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); /* Set up second segment for compare operation. 
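* For COMPARE the first half of sg_segs is filled by the read from
* the backing store, while datamove delivers the initiator's data
* into the mirrored entries at CTLBLK_HALF_SEGS, so that the two
* buffers can be compared once both transfers have completed.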
*/ if (beio->two_sglists) { ctl_alloc_seg(softc, &beio->sg_segs[i + CTLBLK_HALF_SEGS], beio->sg_segs[i].len); } beio->num_segs++; len_left -= beio->sg_segs[i].len; } if (bptrlen->len < lbalen->len) beio->beio_cont = ctl_be_block_next; io->scsiio.be_move_done = ctl_be_block_move_done; /* For compare we have separate S/G lists for read and datamove. */ if (beio->two_sglists) io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; else io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; io->scsiio.kern_data_len = beio->io_len; io->scsiio.kern_sg_entries = beio->num_segs; io->scsiio.kern_data_ref = ctl_refcnt_beio; io->scsiio.kern_data_arg = beio; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; /* * For the read case, we need to read the data into our buffers and * then we can send it back to the user. For the write case, we * need to get the data from the user first. */ if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, alloc_done); be_lun->dispatch(be_lun, beio); } else { SDT_PROBE0(cbb, , write, alloc_done); -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } } static void ctl_be_block_worker(void *context, int pending) { struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context; struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; union ctl_io *io; struct ctl_be_block_io *beio; DPRINTF("entered\n"); /* * Fetch and process I/Os from all queues. If we detect the * CTL_LUN_FLAG_NO_MEDIA flag on the LUN here, it is the result of a * race, so make the response maximally opaque so as not to confuse * the initiator. */ for (;;) { mtx_lock(&be_lun->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); if (io != NULL) { DPRINTF("datamove queue\n"); STAILQ_REMOVE_HEAD(&be_lun->datamove_queue, links); mtx_unlock(&be_lun->queue_lock); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_complete_beio(beio); continue; } be_lun->dispatch(be_lun, beio); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); if (io != NULL) { DPRINTF("config write queue\n"); STAILQ_REMOVE_HEAD(&be_lun->config_write_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_write_done(io); continue; } ctl_be_block_cw_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue); if (io != NULL) { DPRINTF("config read queue\n"); STAILQ_REMOVE_HEAD(&be_lun->config_read_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_read_done(io); continue; } ctl_be_block_cr_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); if (io != NULL) { DPRINTF("input queue\n"); STAILQ_REMOVE_HEAD(&be_lun->input_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_data_submit_done(io); continue; } ctl_be_block_dispatch(be_lun, io); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. */ mtx_unlock(&be_lun->queue_lock); break; } } /* * Entry point from CTL to the backend for I/O. We queue everything to a * work thread, so this just puts the I/O on a queue and wakes up the * thread.
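* The worker threads may sleep in the vnode or GEOM layers, which
* the submitting context may not be able to do.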
*/ static int ctl_be_block_submit(union ctl_io *io) { struct ctl_be_block_lun *be_lun; DPRINTF("entered\n"); be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); PRIV(io)->len = 0; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (CTL_RETVAL_COMPLETE); } static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_block_softc *softc = &backend_block_softc; int error; error = 0; switch (cmd) { case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: error = ctl_be_block_create(softc, lun_req); break; case CTL_LUNREQ_RM: error = ctl_be_block_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: error = ctl_be_block_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "invalid LUN request type %d", lun_req->reqtype); break; } break; } default: error = ENOTTY; break; } return (error); } static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_filedata *file_data; struct ctl_lun_create_params *params; const char *value; struct vattr vattr; off_t ps, pss, po, pos, us, uss, uo, uos; int error; cbe_lun = &be_lun->cbe_lun; file_data = &be_lun->backend.file; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_FILE; be_lun->dispatch = ctl_be_block_dispatch_file; be_lun->lun_flush = ctl_be_block_flush_file; be_lun->get_lba_status = ctl_be_block_gls_file; be_lun->getattr = ctl_be_block_getattr_file; be_lun->unmap = NULL; cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error != 0) { snprintf(req->error_str, sizeof(req->error_str), "error calling VOP_GETATTR() for file %s", be_lun->dev_path); return (error); } file_data->cred = crhold(curthread->td_ucred); if (params->lun_size_bytes != 0) be_lun->size_bytes = params->lun_size_bytes; else be_lun->size_bytes = vattr.va_size; /* * For files we can use any logical block size. Prefer 512 bytes * for compatibility reasons. If the file's vattr.va_blocksize * (preferred I/O block size) is larger than and a multiple of the * chosen logical block size, report it as the physical block size. */ if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
0 : (be_lun->size_blocks - 1); us = ps = vattr.va_blocksize; uo = po = 0; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) ctl_expand_number(value, &ps); value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); if (value != NULL) ctl_expand_number(value, &us); value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } /* * Sanity check. The media size has to be at least one * sector long. */ if (be_lun->size_bytes < cbe_lun->blocksize) { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "file %s size %ju < block size %u", be_lun->dev_path, (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize); } cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize; return (error); } static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_lun_create_params *params; struct cdevsw *csw; struct cdev *dev; const char *value; int error, atomic, maxio, ref, unmap, tmp; off_t ps, pss, po, pos, us, uss, uo, uos, otmp; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_DEV; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (ENXIO); if (strcmp(csw->d_name, "zvol") == 0) { be_lun->dispatch = ctl_be_block_dispatch_zvol; be_lun->get_lba_status = ctl_be_block_gls_zvol; atomic = maxio = CTLBLK_MAX_IO_SIZE; } else { be_lun->dispatch = ctl_be_block_dispatch_dev; be_lun->get_lba_status = NULL; atomic = 0; maxio = dev->si_iosize_max; if (maxio <= 0) maxio = DFLTPHYS; if (maxio > CTLBLK_MAX_SEG) maxio = CTLBLK_MAX_SEG; } be_lun->lun_flush = ctl_be_block_flush_dev; be_lun->getattr = ctl_be_block_getattr_dev; be_lun->unmap = ctl_be_block_unmap_dev; if (!csw->d_ioctl) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "no d_ioctl for device %s!", be_lun->dev_path); return (ENODEV); } error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGSECTORSIZE ioctl " "on %s!", error, be_lun->dev_path); return (error); } /* * If the user has asked for a blocksize that is greater than the * backing device's blocksize, we can do it only if the blocksize * the user is asking for is an even multiple of the underlying * device's blocksize. 
*/ if ((params->blocksize_bytes != 0) && (params->blocksize_bytes >= tmp)) { if (params->blocksize_bytes % tmp == 0) { cbe_lun->blocksize = params->blocksize_bytes; } else { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u is not an even " "multiple of backing device blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } } else if (params->blocksize_bytes != 0) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u < backing device " "blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = MAX(tmp, 2048); else cbe_lun->blocksize = tmp; error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGMEDIASIZE " " ioctl on %s!", error, be_lun->dev_path); return (error); } if (params->lun_size_bytes != 0) { if (params->lun_size_bytes > otmp) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested LUN size %ju > backing device " "size %ju", (uintmax_t)params->lun_size_bytes, (uintmax_t)otmp); return (EINVAL); } be_lun->size_bytes = params->lun_size_bytes; } else be_lun->size_bytes = otmp; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD, curthread); if (error) ps = po = 0; else { error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po, FREAD, curthread); if (error) po = 0; } us = ps; uo = po; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) ctl_expand_number(value, &ps); value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); if (value != NULL) ctl_expand_number(value, &us); value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } cbe_lun->atomicblock = atomic / cbe_lun->blocksize; cbe_lun->opttxferlen = maxio / cbe_lun->blocksize; if (be_lun->dispatch == ctl_be_block_dispatch_zvol) { unmap = 1; } else { struct diocgattr_arg arg; strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name)); arg.len = sizeof(arg.value.i); error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); unmap = (error == 0) ? 
arg.value.i : 0; } value = dnvlist_get_string(cbe_lun->options, "unmap", NULL); if (value != NULL) unmap = (strcmp(value, "on") == 0); if (unmap) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; else cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; dev_relthread(dev, ref); return (0); } static int ctl_be_block_close(struct ctl_be_block_lun *be_lun) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; int flags; if (be_lun->vn) { flags = FREAD; if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0) flags |= FWRITE; (void)vn_close(be_lun->vn, flags, NOCRED, curthread); be_lun->vn = NULL; switch (be_lun->dev_type) { case CTL_BE_BLOCK_DEV: break; case CTL_BE_BLOCK_FILE: if (be_lun->backend.file.cred != NULL) { crfree(be_lun->backend.file.cred); be_lun->backend.file.cred = NULL; } break; case CTL_BE_BLOCK_NONE: break; default: panic("Unexpected backend type %d", be_lun->dev_type); break; } be_lun->dev_type = CTL_BE_BLOCK_NONE; } return (0); } static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct nameidata nd; const char *value; int error, flags; error = 0; if (rootvnode == NULL) { snprintf(req->error_str, sizeof(req->error_str), "Root filesystem is not mounted"); return (1); } pwd_ensure_dirs(); value = dnvlist_get_string(cbe_lun->options, "file", NULL); if (value == NULL) { snprintf(req->error_str, sizeof(req->error_str), "no file argument specified"); return (1); } free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = strdup(value, M_CTLBLK); flags = FREAD; value = dnvlist_get_string(cbe_lun->options, "readonly", NULL); if (value != NULL) { if (strcmp(value, "on") != 0) flags |= FWRITE; } else if (cbe_lun->lun_type == T_DIRECT) flags |= FWRITE; again: NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); error = vn_open(&nd, &flags, 0, NULL); if ((error == EROFS || error == EACCES) && (flags & FWRITE)) { flags &= ~FWRITE; goto again; } if (error) { /* * Prepending /dev/ is the only reasonable guess we can make * here if the user doesn't give us a fully qualified path. * If they want to specify a file, they need to specify the * full path. */ if (be_lun->dev_path[0] != '/') { char *dev_name; asprintf(&dev_name, M_CTLBLK, "/dev/%s", be_lun->dev_path); free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = dev_name; goto again; } snprintf(req->error_str, sizeof(req->error_str), "error opening %s: %d", be_lun->dev_path, error); return (error); } if (flags & FWRITE) cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY; else cbe_lun->flags |= CTL_LUN_FLAG_READONLY; NDFREE(&nd, NDF_ONLY_PNBUF); be_lun->vn = nd.ni_vp; /* We only support disks and files.
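* Anything else (FIFOs, sockets, character devices that are not
* disks) is rejected below with EINVAL.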
*/ if (vn_isdisk_error(be_lun->vn, &error)) { error = ctl_be_block_open_dev(be_lun, req); } else if (be_lun->vn->v_type == VREG) { error = ctl_be_block_open_file(be_lun, req); } else { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "%s is not a disk or plain file", be_lun->dev_path); } VOP_UNLOCK(be_lun->vn); if (error != 0) ctl_be_block_close(be_lun); cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (be_lun->dispatch != ctl_be_block_dispatch_dev) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; value = dnvlist_get_string(cbe_lun->options, "serseq", NULL); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; return (0); } static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_lun *be_lun; struct ctl_lun_create_params *params; char tmpstr[32]; const char *value; int retval, num_threads; int tmp_num_threads; params = &req->reqdata.create; retval = 0; req->status = CTL_LUN_OK; be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; be_lun->params = req->reqdata.create; be_lun->softc = softc; STAILQ_INIT(&be_lun->input_queue); STAILQ_INIT(&be_lun->config_read_queue); STAILQ_INIT(&be_lun->config_write_queue); STAILQ_INIT(&be_lun->datamove_queue); mtx_init(&be_lun->io_lock, "ctlblock io", NULL, MTX_DEF); mtx_init(&be_lun->queue_lock, "ctlblock queue", NULL, MTX_DEF); cbe_lun->options = nvlist_clone(req->args_nvl); if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = 0; cbe_lun->flags = 0; value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { be_lun->size_bytes = params->lun_size_bytes; if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { retval = ctl_be_block_open(be_lun, req); if (retval != 0) { retval = 0; req->status = CTL_LUN_WARNING; } } num_threads = cbb_num_threads; } else { num_threads = 1; } value = dnvlist_get_string(cbe_lun->options, "num_threads", NULL); if (value != NULL) { tmp_num_threads = strtol(value, NULL, 0); /* * We don't let the user specify less than one * thread, but hope he's clueful enough not to * specify 1000 threads.
*/ if (tmp_num_threads < 1) { snprintf(req->error_str, sizeof(req->error_str), "invalid number of threads %s", value); goto bailout_error; } num_threads = tmp_num_threads; } if (be_lun->vn == NULL) cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; /* Tell the user the blocksize we ended up using */ params->lun_size_bytes = be_lun->size_bytes; params->blocksize_bytes = cbe_lun->blocksize; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown; cbe_lun->be = &ctl_be_block_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); be_lun->io_taskqueue = taskqueue_create("ctlblocktq", M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "unable to create taskqueue"); goto bailout_error; } /* * Note that we start the same number of threads by default for * both the file case and the block device case. For the file * case, we need multiple threads to allow concurrency, because the * vnode interface is designed to be a blocking interface. For the * block device case, ZFS zvols at least will block the caller's * context in many instances, and so we need multiple threads to * overcome that problem. Other block devices don't need as many * threads, but they shouldn't cause too many problems. * * If the user wants to just have a single thread for a block * device, he can specify that when the LUN is created, or change * the tunable/sysctl to alter the default number of threads.
*/ retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue, /*num threads*/num_threads, /*priority*/PUSER, /*proc*/control_softc->ctl_proc, /*thread name*/"block"); if (retval != 0) goto bailout_error; be_lun->num_threads = num_threads; retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "ctl_add_lun() returned error %d, see dmesg for " "details", retval); retval = 0; goto bailout_error; } be_lun->disk_stats = devstat_new_entry("cbb", cbe_lun->lun_id, cbe_lun->blocksize, DEVSTAT_ALL_SUPPORTED, cbe_lun->lun_type | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); mtx_lock(&softc->lock); softc->num_luns++; SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); params->req_lun_id = cbe_lun->lun_id; return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); ctl_be_block_close(be_lun); if (be_lun->dev_path != NULL) free(be_lun->dev_path, M_CTLBLK); nvlist_destroy(cbe_lun->options); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); free(be_lun, M_CTLBLK); return (retval); } static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_rm_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; params = &req->reqdata.rm; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) { SLIST_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); softc->num_luns--; break; } } mtx_unlock(&softc->lock); sx_xunlock(&softc->modify_lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); ctl_be_block_close(be_lun); } mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_remove_lun(cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "error %d returned from ctl_remove_lun() for " "LUN %d", retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblockrm", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; if (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { mtx_unlock(&softc->lock); free(be_lun, M_CTLBLK); } else { mtx_unlock(&softc->lock); return (EINTR); } req->status = CTL_LUN_OK; return (0); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_modify_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; const char *value; uint64_t oldsize; int error, wasprim; params = &req->reqdata.modify; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; 
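/*
* The modify_lock taken above remains held across the reopen and
* resize below, serializing this request against concurrent remove
* and modify requests.
*/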
if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; if (req->args_nvl != NULL) { nvlist_destroy(cbe_lun->options); cbe_lun->options = nvlist_clone(req->args_nvl); } wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } oldsize = be_lun->size_blocks; if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { if (be_lun->vn == NULL) error = ctl_be_block_open(be_lun, req); else if (vn_isdisk_error(be_lun->vn, &error)) error = ctl_be_block_open_dev(be_lun, req); else if (be_lun->vn->v_type == VREG) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = ctl_be_block_open_file(be_lun, req); VOP_UNLOCK(be_lun->vn); } else error = EINVAL; if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) && be_lun->vn != NULL) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 && be_lun->vn == NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; } else { if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); error = ctl_be_block_close(be_lun); } else error = 0; } if (be_lun->size_blocks != oldsize) ctl_lun_capacity_changed(cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; sx_xunlock(&softc->modify_lock); req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; return (0); bailout_error: sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_ERROR; return (0); } static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun) { struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)cbe_lun; struct ctl_be_block_softc *softc = be_lun->softc; taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); if (be_lun->disk_stats != NULL) devstat_remove_entry(be_lun->disk_stats); nvlist_destroy(be_lun->cbe_lun.options); free(be_lun->dev_path, M_CTLBLK); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING) wakeup(be_lun); else free(be_lun, M_CTLBLK); mtx_unlock(&softc->lock); } static int ctl_be_block_config_write(union ctl_io *io) { struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; DPRINTF("entered\n"); cbe_lun = CTL_BACKEND_LUN(io); be_lun = (struct ctl_be_block_lun *)cbe_lun; retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: case WRITE_SAME_10: case WRITE_SAME_16: case UNMAP: /* * The upper level CTL code will filter out any CDBs with * the immediate bit set and return the proper error. * * We don't really need to worry about what LBA range the * user asked to be synced out. When they issue a sync * cache command, we'll sync out the whole thing. 
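* These requests are queued to the worker thread below, since the
* underlying flush, write same or unmap may sleep.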
*/ mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; struct ctl_lun_req req; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) { retval = ctl_be_block_open(be_lun, &req); cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; if (retval == 0) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } } ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; cbe_lun->flags |= CTL_LUN_FLAG_EJECTED; ctl_lun_ejected(cbe_lun); if (be_lun->vn != NULL) ctl_be_block_close(be_lun); } } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_config_read(union ctl_io *io) { struct ctl_be_block_lun *be_lun; int retval = 0; DPRINTF("entered\n"); be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_read_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); retval = CTL_RETVAL_QUEUED; break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb) { struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; int retval; retval = sbuf_printf(sb, "\t"); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "%d", lun->num_threads); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "\n"); bailout: return (retval); } static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname) { struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; if (lun->getattr == NULL) return (UINT64_MAX); return (lun->getattr(lun, attrname)); } static int ctl_be_block_init(void) { struct ctl_be_block_softc *softc = &backend_block_softc; sx_init(&softc->modify_lock, "ctlblock modify"); mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); SLIST_INIT(&softc->lun_list); return (0); } static int ctl_be_block_shutdown(void) { struct ctl_be_block_softc *softc = &backend_block_softc; struct ctl_be_block_lun *lun; mtx_lock(&softc->lock); while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) { 
SLIST_REMOVE_HEAD(&softc->lun_list, links); softc->num_luns--; /* * Drop our lock here. Since ctl_remove_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. */ mtx_unlock(&softc->lock); ctl_remove_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); uma_zdestroy(softc->bufmin_zone); if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) uma_zdestroy(softc->bufmax_zone); uma_zdestroy(softc->beio_zone); mtx_destroy(&softc->lock); sx_destroy(&softc->modify_lock); return (0); } diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c index 2595aa0be00e..e67d699bda70 100644 --- a/sys/cam/ctl/ctl_backend_ramdisk.c +++ b/sys/cam/ctl/ctl_backend_ramdisk.c @@ -1,1267 +1,1239 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003, 2008 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $ */ /* * CAM Target Layer black hole and RAM disk backend. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) #define PPP (PAGE_SIZE / sizeof(uint8_t **)) #ifdef __LP64__ #define PPPS (PAGE_SHIFT - 3) #else #define PPPS (PAGE_SHIFT - 2) #endif #define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry)) #define P_UNMAPPED NULL /* Page is unmapped. */ #define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */ typedef enum { GP_READ, /* Return data page or zero page. 
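* (a read of a never-written block returns the shared zero page
* without allocating any backing memory)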
*/ GP_WRITE, /* Return data page, try allocate if none. */ GP_ANCHOR, /* Return data page, try anchor if none. */ GP_OTHER, /* Return what present, do not allocate/anchor. */ } getpage_op_t; typedef enum { CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, CTL_BE_RAMDISK_LUN_WAITING = 0x04 } ctl_be_ramdisk_lun_flags; struct ctl_be_ramdisk_lun { struct ctl_be_lun cbe_lun; /* Must be first element. */ struct ctl_lun_create_params params; int indir; uint8_t **pages; uint8_t *zero_page; struct sx page_lock; u_int pblocksize; u_int pblockmul; uint64_t size_bytes; uint64_t size_blocks; uint64_t cap_bytes; uint64_t cap_used; struct ctl_be_ramdisk_softc *softc; ctl_be_ramdisk_lun_flags flags; SLIST_ENTRY(ctl_be_ramdisk_lun) links; struct taskqueue *io_taskqueue; struct task io_task; STAILQ_HEAD(, ctl_io_hdr) cont_queue; struct mtx_padalign queue_lock; }; struct ctl_be_ramdisk_softc { struct sx modify_lock; struct mtx lock; int num_luns; SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list; }; static struct ctl_be_ramdisk_softc rd_softc; extern struct ctl_softc *control_softc; static int ctl_backend_ramdisk_init(void); static int ctl_backend_ramdisk_shutdown(void); -static int ctl_backend_ramdisk_move_done(union ctl_io *io); +static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr); static void ctl_backend_ramdisk_compare(union ctl_io *io); static void ctl_backend_ramdisk_rw(union ctl_io *io); static int ctl_backend_ramdisk_submit(union ctl_io *io); static void ctl_backend_ramdisk_worker(void *context, int pending); static int ctl_backend_ramdisk_config_read(union ctl_io *io); static int ctl_backend_ramdisk_config_write(union ctl_io *io); static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname); static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun); static struct ctl_backend_driver ctl_be_ramdisk_driver = { .name = "ramdisk", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_backend_ramdisk_init, .shutdown = ctl_backend_ramdisk_shutdown, .data_submit = ctl_backend_ramdisk_submit, - .data_move_done = ctl_backend_ramdisk_move_done, .config_read = ctl_backend_ramdisk_config_read, .config_write = ctl_backend_ramdisk_config_write, .ioctl = ctl_backend_ramdisk_ioctl, .lun_attr = ctl_backend_ramdisk_lun_attr, }; MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk"); CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver); static int ctl_backend_ramdisk_init(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; memset(softc, 0, sizeof(*softc)); sx_init(&softc->modify_lock, "ctlrammod"); mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF); SLIST_INIT(&softc->lun_list); return (0); } static int ctl_backend_ramdisk_shutdown(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_be_ramdisk_lun *lun; mtx_lock(&softc->lock); while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) { SLIST_REMOVE_HEAD(&softc->lun_list, links); softc->num_luns--; /* * Drop our lock here. Since ctl_remove_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. 
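* The block backend's shutdown path above uses the same pattern.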
*/ mtx_unlock(&softc->lock); ctl_remove_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); sx_destroy(&softc->modify_lock); return (0); } static uint8_t * ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn, getpage_op_t op) { uint8_t **p, ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) { switch (op) { case GP_READ: return (be_lun->zero_page); case GP_WRITE: return ((uint8_t *)be_lun->pages); case GP_ANCHOR: return (P_ANCHORED); default: return (P_UNMAPPED); } } if (op == GP_WRITE || op == GP_ANCHOR) { sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) { *pp = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK|M_ZERO); } i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { if (op == GP_WRITE) { *pp = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); } else *pp = P_ANCHORED; be_lun->cap_used += be_lun->pblocksize; } else if (*pp == P_ANCHORED && op == GP_WRITE) { *pp = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); } sx_xunlock(&be_lun->page_lock); return ((uint8_t *)*pp); } else { sx_slock(&be_lun->page_lock); p = be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (p == NULL) break; i = pn >> s; p = (uint8_t **)p[i]; pn -= i << s; } sx_sunlock(&be_lun->page_lock); if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ) return (be_lun->zero_page); return ((uint8_t *)p); } }; static void ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) { uint8_t ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) return; sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) goto noindir; i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_ANCHORED) { be_lun->cap_used -= be_lun->pblocksize; *pp = P_UNMAPPED; } else if (*pp != P_UNMAPPED) { free(*pp, M_RAMDISK); be_lun->cap_used -= be_lun->pblocksize; *pp = P_UNMAPPED; } noindir: sx_xunlock(&be_lun->page_lock); }; static void ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) { uint8_t ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) return; sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) goto noindir; i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { be_lun->cap_used += be_lun->pblocksize; *pp = P_ANCHORED; } else if (*pp != P_ANCHORED) { free(*pp, M_RAMDISK); *pp = P_ANCHORED; } noindir: sx_xunlock(&be_lun->page_lock); }; static void ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir) { int i; if (p == NULL) return; if (indir == 0) { free(p, M_RAMDISK); return; } for (i = 0; i < PPP; i++) { if (p[i] == NULL) continue; ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1); } free(p, M_RAMDISK); }; static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static int ctl_backend_ramdisk_cmp(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint8_t *page; uint8_t info[8]; uint64_t lba; u_int lbaoff, lbas, res, off; lbas = io->scsiio.kern_data_len / cbe_lun->blocksize; lba = ARGS(io)->lba + PRIV(io)->len - lbas; off = 0; for (; lbas > 0; lbas--, 
lba++) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_READ); lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; res = cmp(io->scsiio.kern_data_ptr + off, page, cbe_lun->blocksize); off += res; if (res < cbe_lun->blocksize) break; } if (lbas > 0) { off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len; scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); return (1); } return (0); } static int -ctl_backend_ramdisk_move_done(union ctl_io *io) +ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io); -#ifdef CTL_TIME_IO - struct bintime cur_bt; -#endif CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n")); -#ifdef CTL_TIME_IO - getbinuptime(&cur_bt); - bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); - bintime_add(&io->io_hdr.dma_bt, &cur_bt); -#endif - io->io_hdr.num_dmas++; if (io->scsiio.kern_sg_entries > 0) free(io->scsiio.kern_data_ptr, M_RAMDISK); io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; - if (io->io_hdr.flags & CTL_FLAG_ABORT) { - ; - } else if (io->io_hdr.port_status != 0 && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || - (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { - ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, - /*retry_count*/ io->io_hdr.port_status); - } else if (io->scsiio.kern_data_resid != 0 && - (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || - (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { - ctl_set_invalid_field_ciu(&io->scsiio); - } else if ((io->io_hdr.port_status == 0) && - ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { + if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && + (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) { if (ARGS(io)->flags & CTL_LLF_COMPARE) { /* We have data block ready for comparison. 
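* If ctl_backend_ramdisk_cmp() finds a mismatch, it sets MISCOMPARE
* sense with the offset of the first difference in the INFORMATION
* field and the command is completed below.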
*/ if (ctl_backend_ramdisk_cmp(io)) goto done; } if (ARGS(io)->len > PRIV(io)->len) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->cont_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (0); } ctl_set_success(&io->scsiio); } done: ctl_data_submit_done(io); return(0); } static void ctl_backend_ramdisk_compare(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); u_int lbas, len; lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, 131072 / cbe_lun->blocksize); len = lbas * cbe_lun->blocksize; io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK); io->scsiio.kern_data_len = len; io->scsiio.kern_sg_entries = 0; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } static void ctl_backend_ramdisk_rw(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_sg_entry *sg_entries; uint8_t *page; uint64_t lba; u_int i, len, lbaoff, lbas, sgs, off; getpage_op_t op; lba = ARGS(io)->lba + PRIV(io)->len; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff); sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp; off = lbaoff * cbe_lun->blocksize; op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ; if (sgs > 1) { io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) * sgs, M_RAMDISK, M_WAITOK); sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; len = lbas * cbe_lun->blocksize; for (i = 0; i < sgs; i++) { page = ctl_backend_ramdisk_getpage(be_lun, (lba >> cbe_lun->pblockexp) + i, op); if (page == P_UNMAPPED || page == P_ANCHORED) { free(io->scsiio.kern_data_ptr, M_RAMDISK); nospc: ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } sg_entries[i].addr = page + off; sg_entries[i].len = MIN(len, be_lun->pblocksize - off); len -= sg_entries[i].len; off = 0; } } else { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, op); if (page == P_UNMAPPED || page == P_ANCHORED) goto nospc; sgs = 0; io->scsiio.kern_data_ptr = page + off; } io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_len = lbas * cbe_lun->blocksize; io->scsiio.kern_sg_entries = sgs; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; if ((ARGS(io)->flags & CTL_LLF_READ) && ARGS(io)->len <= PRIV(io)->len) { ctl_set_success(&io->scsiio); ctl_serseq_done(io); } -#ifdef CTL_TIME_IO - getbinuptime(&io->io_hdr.dma_start_bt); -#endif ctl_datamove(io); } static int ctl_backend_ramdisk_submit(union ctl_io *io) { struct ctl_lba_len_flags *lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_VERIFY) { ctl_set_success(&io->scsiio); ctl_data_submit_done(io); return (CTL_RETVAL_COMPLETE); } PRIV(io)->len = 0; if (lbalen->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); return (CTL_RETVAL_COMPLETE); } static void ctl_backend_ramdisk_worker(void *context, int pending) { struct ctl_be_ramdisk_lun *be_lun; union ctl_io *io; be_lun = (struct ctl_be_ramdisk_lun *)context; mtx_lock(&be_lun->queue_lock); for (;;) { io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links); 
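/*
* Drop the queue lock while the continuation runs; it is retaken
* before the next iteration examines the queue.
*/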
mtx_unlock(&be_lun->queue_lock); if (ARGS(io)->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); mtx_lock(&be_lun->queue_lock); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. */ break; } mtx_unlock(&be_lun->queue_lock); } static int ctl_backend_ramdisk_gls(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct scsi_get_lba_status_data *data; uint8_t *page; u_int lbaoff; data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr); lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp); scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length); page = ctl_backend_ramdisk_getpage(be_lun, ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER); if (page == P_UNMAPPED) data->descr[0].status = 1; else if (page == P_ANCHORED) data->descr[0].status = 2; else data->descr[0].status = 0; ctl_config_read_done(io); return (CTL_RETVAL_COMPLETE); } static int ctl_backend_ramdisk_config_read(union ctl_io *io) { int retval = 0; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { retval = ctl_backend_ramdisk_gls(io); break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static void ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len, int anchor) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint8_t *page; uint64_t p, lp; u_int lbaoff; getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER; /* Partially zero first partial page. */ p = lba >> cbe_lun->pblockexp; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); if (lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, p, op); if (page != P_UNMAPPED && page != P_ANCHORED) { memset(page + lbaoff * cbe_lun->blocksize, 0, min(len, be_lun->pblockmul - lbaoff) * cbe_lun->blocksize); } p++; } /* Partially zero last partial page. */ lp = (lba + len) >> cbe_lun->pblockexp; lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp); if (p <= lp && lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, lp, op); if (page != P_UNMAPPED && page != P_ANCHORED) memset(page, 0, lbaoff * cbe_lun->blocksize); } /* Delete remaining full pages. 
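* Anchored pages keep their capacity reservation but hold no data,
* while unmapped pages are freed back to the allocator.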
*/ if (anchor) { for (; p < lp; p++) ctl_backend_ramdisk_anchorpage(be_lun, p); } else { for (; p < lp; p++) ctl_backend_ramdisk_unmappage(be_lun, p); } } static void ctl_backend_ramdisk_ws(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_lba_len_flags *lbalen = ARGS(io); uint8_t *page; uint64_t lba; u_int lbaoff, lbas; if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & SWS_UNMAP) { ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len, (lbalen->flags & SWS_ANCHOR) != 0); ctl_set_success(&io->scsiio); ctl_config_write_done(io); return; } for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_WRITE); if (page == P_UNMAPPED || page == P_ANCHORED) { ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; if (lbalen->flags & SWS_NDOB) { memset(page, 0, cbe_lun->blocksize); } else { memcpy(page, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lba, page); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static void ctl_backend_ramdisk_unmap(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io); struct scsi_unmap_desc *buf, *end; if ((ptrlen->flags & ~SU_ANCHOR) != 0) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { ctl_backend_ramdisk_delete(cbe_lun, scsi_8btou64(buf->lba), scsi_4btoul(buf->length), (ptrlen->flags & SU_ANCHOR) != 0); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static int ctl_backend_ramdisk_config_write(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); int retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: /* We have no cache to flush. 
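* SYNCHRONIZE CACHE is therefore acknowledged as an immediate
* success.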
*/ ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if (cdb->how & SSS_LOEJ) ctl_lun_has_media(cbe_lun); ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) ctl_lun_ejected(cbe_lun); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_backend_ramdisk_ws(io); break; case UNMAP: ctl_backend_ramdisk_unmap(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint64_t val; val = UINT64_MAX; if (be_lun->cap_bytes == 0) return (val); sx_slock(&be_lun->page_lock); if (strcmp(attrname, "blocksused") == 0) { val = be_lun->cap_used / be_lun->cbe_lun.blocksize; } else if (strcmp(attrname, "blocksavail") == 0) { val = (be_lun->cap_bytes - be_lun->cap_used) / be_lun->cbe_lun.blocksize; } sx_sunlock(&be_lun->page_lock); return (val); } static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_lun_req *lun_req; int retval; retval = 0; switch (cmd) { case CTL_LUN_REQ: lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: retval = ctl_backend_ramdisk_create(softc, lun_req); break; case CTL_LUNREQ_RM: retval = ctl_backend_ramdisk_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: retval = ctl_backend_ramdisk_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "%s: invalid LUN request type %d", __func__, lun_req->reqtype); break; } break; default: retval = ENOTTY; break; } return (retval); } static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_lun_rm_params *params; int retval; params = &req->reqdata.rm; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) { SLIST_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; break; } } mtx_unlock(&softc->lock); sx_xunlock(&softc->modify_lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } /* * Set the waiting flag before we invalidate the LUN. Our shutdown * routine can be called any time after we invalidate the LUN, * and can be called from our context. * * This tells the shutdown routine that we're waiting, or we're * going to wait for the shutdown to happen. 
*/ mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_remove_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_remove_lun() for " "LUN %d", __func__, retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) { mtx_unlock(&softc->lock); free(be_lun, M_RAMDISK); } else { mtx_unlock(&softc->lock); return (EINTR); } req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_create_params *params; const char *value; char tmpstr[32]; uint64_t t; int retval; retval = 0; params = &req->reqdata.create; be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; cbe_lun->options = nvlist_clone(req->args_nvl); be_lun->params = req->reqdata.create; be_lun->softc = softc; if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = 0; cbe_lun->flags = 0; value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; be_lun->pblocksize = PAGE_SIZE; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) { ctl_expand_number(value, &t); be_lun->pblocksize = t; } if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) { snprintf(req->error_str, sizeof(req->error_str), "%s: unsupported pblocksize %u", __func__, be_lun->pblocksize); goto bailout_error; } if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize; if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) { snprintf(req->error_str, sizeof(req->error_str), "%s: pblocksize %u not exp2 of blocksize %u", __func__, be_lun->pblocksize, cbe_lun->blocksize); goto bailout_error; } if (params->lun_size_bytes < cbe_lun->blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, params->lun_size_bytes, cbe_lun->blocksize); goto bailout_error; } be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize; be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize; be_lun->indir = 0; t = be_lun->size_bytes / be_lun->pblocksize; while (t > 1) { t /= PPP; be_lun->indir++; } cbe_lun->maxlba = be_lun->size_blocks - 1; cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1; cbe_lun->pblockoff = 0; cbe_lun->ublockexp = cbe_lun->pblockexp; cbe_lun->ublockoff = 0; cbe_lun->atomicblock = be_lun->pblocksize; cbe_lun->opttxferlen = SGPP * be_lun->pblocksize; value = dnvlist_get_string(cbe_lun->options, "capacity", NULL); if (value != NULL) 
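/* A "capacity" option makes the LUN sparse: backing pages are allocated on demand, up to this many bytes. */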
ctl_expand_number(value, &be_lun->cap_bytes); } else { be_lun->pblockmul = 1; cbe_lun->pblockexp = 0; } /* Tell the user the blocksize we ended up using */ params->blocksize_bytes = cbe_lun->blocksize; params->lun_size_bytes = be_lun->size_bytes; value = dnvlist_get_string(cbe_lun->options, "unmap", NULL); if (value == NULL || strcmp(value, "off") != 0) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; value = dnvlist_get_string(cbe_lun->options, "readonly", NULL); if (value != NULL) { if (strcmp(value, "on") == 0) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; } else if (cbe_lun->lun_type != T_DIRECT) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; value = dnvlist_get_string(cbe_lun->options, "serseq", NULL); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown; cbe_lun->be = &ctl_be_ramdisk_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } STAILQ_INIT(&be_lun->cont_queue); sx_init(&be_lun->page_lock, "ctlram page"); if (be_lun->cap_bytes == 0) { be_lun->indir = 0; be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK); } be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF); TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker, be_lun); be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: Unable to create taskqueue", __func__); goto bailout_error; } retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue, /*num threads*/1, /*priority*/PUSER, /*proc*/control_softc->ctl_proc, /*thread name*/"ramdisk"); if (retval != 0) goto bailout_error; retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: ctl_add_lun() returned error %d, see dmesg for " "details", __func__, retval); retval = 0; goto bailout_error; } mtx_lock(&softc->lock); softc->num_luns++; SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); params->req_lun_id = cbe_lun->lun_id; req->status = CTL_LUN_OK; return (retval); bailout_error: 
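/* Creation failed part way through; release whatever we managed to allocate. */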
req->status = CTL_LUN_ERROR; if (be_lun != NULL) { if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); nvlist_destroy(cbe_lun->options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } return (retval); } static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_modify_params *params; const char *value; uint32_t blocksize; int wasprim; params = &req->reqdata.modify; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; if (req->args_nvl != NULL) { nvlist_destroy(cbe_lun->options); cbe_lun->options = nvlist_clone(req->args_nvl); } wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } blocksize = be_lun->cbe_lun.blocksize; if (be_lun->params.lun_size_bytes < blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, be_lun->params.lun_size_bytes, blocksize); goto bailout_error; } be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize; be_lun->size_bytes = be_lun->size_blocks * blocksize; be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1; ctl_lun_capacity_changed(&be_lun->cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_OK; return (0); bailout_error: sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_ERROR; return (0); } static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_be_ramdisk_softc *softc = be_lun->softc; taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); nvlist_destroy(be_lun->cbe_lun.options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED; if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING) wakeup(be_lun); else free(be_lun, M_RAMDISK); mtx_unlock(&softc->lock); } diff --git a/sys/cam/ctl/ctl_frontend_cam_sim.c b/sys/cam/ctl/ctl_frontend_cam_sim.c index fdcccee2f569..0e61a80e452c 100644 --- a/sys/cam/ctl/ctl_frontend_cam_sim.c +++ b/sys/cam/ctl/ctl_frontend_cam_sim.c @@ -1,791 +1,791 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Silicon Graphics International Corp. 
* All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend_cam_sim.c#4 $ */ /* * CTL frontend to CAM SIM interface. This allows access to CTL LUNs via * the da(4) and pass(4) drivers from inside the system. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define io_ptr spriv_ptr1 struct cfcs_io { union ccb *ccb; }; struct cfcs_softc { struct ctl_port port; char port_name[32]; struct cam_sim *sim; struct cam_devq *devq; struct cam_path *path; uint64_t wwnn; uint64_t wwpn; uint32_t cur_tag_num; int online; }; /* * We can't handle CCBs with these flags. For the most part, we just don't * handle physical addresses yet. That would require mapping things in * order to do the copy. */ #define CFCS_BAD_CCB_FLAGS (CAM_DATA_ISPHYS | CAM_CDB_PHYS | CAM_SENSE_PTR | \ CAM_SENSE_PHYS) static int cfcs_init(void); static int cfcs_shutdown(void); static void cfcs_poll(struct cam_sim *sim); static void cfcs_online(void *arg); static void cfcs_offline(void *arg); static void cfcs_datamove(union ctl_io *io); static void cfcs_done(union ctl_io *io); void cfcs_action(struct cam_sim *sim, union ccb *ccb); struct cfcs_softc cfcs_softc; /* * This is primarily intended to allow for error injection to test the CAM * sense data and sense residual handling code. This sets the maximum * amount of SCSI sense data that we will report to CAM. 
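* Lowering it below the actual sense length forces a nonzero sense residual in cfcs_done().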
*/ static int cfcs_max_sense = sizeof(struct scsi_sense_data); SYSCTL_NODE(_kern_cam, OID_AUTO, ctl2cam, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer SIM frontend"); SYSCTL_INT(_kern_cam_ctl2cam, OID_AUTO, max_sense, CTLFLAG_RW, &cfcs_max_sense, 0, "Maximum sense data size"); static struct ctl_frontend cfcs_frontend = { .name = "camsim", .init = cfcs_init, .shutdown = cfcs_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfcs, cfcs_frontend); static int cfcs_init(void) { struct cfcs_softc *softc; struct ctl_port *port; int retval; softc = &cfcs_softc; bzero(softc, sizeof(*softc)); port = &softc->port; port->frontend = &cfcs_frontend; port->port_type = CTL_PORT_INTERNAL; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; snprintf(softc->port_name, sizeof(softc->port_name), "camsim"); port->port_name = softc->port_name; port->port_online = cfcs_online; port->port_offline = cfcs_offline; port->onoff_arg = softc; port->fe_datamove = cfcs_datamove; port->fe_done = cfcs_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with error %d!\n", __func__, retval); return (retval); } /* * If the CTL frontend didn't tell us what our WWNN/WWPN is, go * ahead and set something random. */ if (port->wwnn == 0) { uint64_t random_bits; arc4rand(&random_bits, sizeof(random_bits), 0); softc->wwnn = (random_bits & 0x0000000fffffff00ULL) | /* Company ID */ 0x5000000000000000ULL | /* NL-Port */ 0x0300; softc->wwpn = softc->wwnn + port->targ_port + 1; ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn); } else { softc->wwnn = port->wwnn; softc->wwpn = port->wwpn; } softc->devq = cam_simq_alloc(port->num_requested_ctl_io); if (softc->devq == NULL) { printf("%s: error allocating devq\n", __func__); retval = ENOMEM; goto bailout; } softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name, softc, /*unit*/ 0, NULL, 1, port->num_requested_ctl_io, softc->devq); if (softc->sim == NULL) { printf("%s: error allocating SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) { printf("%s: error registering SIM\n", __func__); retval = ENOMEM; goto bailout; } if (xpt_create_path(&softc->path, /*periph*/NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: error creating path\n", __func__); xpt_bus_deregister(cam_sim_path(softc->sim)); retval = EINVAL; goto bailout; } return (retval); bailout: if (softc->sim) cam_sim_free(softc->sim, /*free_devq*/ TRUE); else if (softc->devq) cam_simq_free(softc->devq); return (retval); } static int cfcs_shutdown(void) { struct cfcs_softc *softc = &cfcs_softc; struct ctl_port *port = &softc->port; int error; ctl_port_offline(port); xpt_free_path(softc->path); xpt_bus_deregister(cam_sim_path(softc->sim)); cam_sim_free(softc->sim, /*free_devq*/ TRUE); if ((error = ctl_port_deregister(port)) != 0) printf("%s: cam_sim port deregistration failed\n", __func__); return (error); } static void cfcs_poll(struct cam_sim *sim) { } static void cfcs_onoffline(void *arg, int online) { struct cfcs_softc *softc = (struct cfcs_softc *)arg; union ccb *ccb; softc->online = online; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { printf("%s: unable to allocate CCB for rescan\n", __func__); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softc->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { printf("%s: can't allocate path for rescan\n", __func__); xpt_free_ccb(ccb); 
return; } xpt_rescan(ccb); } static void cfcs_online(void *arg) { cfcs_onoffline(arg, /*online*/ 1); } static void cfcs_offline(void *arg) { cfcs_onoffline(arg, /*online*/ 0); } /* * This function is very similar to ctl_ioctl_do_datamove(). Is there a * way to combine the functionality? * * XXX KDM may need to move this into a thread. We're doing a bcopy in the * caller's context, which will usually be the backend. That may not be a * good thing. */ static void cfcs_datamove(union ctl_io *io) { union ccb *ccb; bus_dma_segment_t cam_sg_entry, *cam_sglist; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; int cam_sg_count, ctl_sg_count, cam_sg_start; int cam_sg_offset; int len_to_copy; int ctl_watermark, cam_watermark; int i, j; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; /* * Note that we have a check in cfcs_action() to make sure that any * CCBs with "bad" flags are returned with CAM_REQ_INVALID. This * is just to make sure no one removes that check without updating * this code to provide the additional functionality necessary to * support those modes of operation. */ KASSERT(((ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) == 0), ("invalid " "CAM flags %#x", (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS))); /* * Simplify things on both sides by putting single buffers into a * single entry S/G list. */ switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { case CAM_DATA_SG: { int len_seen; cam_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr; cam_sg_count = ccb->csio.sglist_cnt; cam_sg_start = cam_sg_count; cam_sg_offset = 0; for (i = 0, len_seen = 0; i < cam_sg_count; i++) { if ((len_seen + cam_sglist[i].ds_len) >= io->scsiio.kern_rel_offset) { cam_sg_start = i; cam_sg_offset = io->scsiio.kern_rel_offset - len_seen; break; } len_seen += cam_sglist[i].ds_len; } break; } case CAM_DATA_VADDR: cam_sglist = &cam_sg_entry; cam_sglist[0].ds_len = ccb->csio.dxfer_len; cam_sglist[0].ds_addr = (bus_addr_t)(uintptr_t)ccb->csio.data_ptr; cam_sg_count = 1; cam_sg_start = 0; cam_sg_offset = io->scsiio.kern_rel_offset; break; default: panic("Invalid CAM flags %#x", ccb->ccb_h.flags); } if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } ctl_watermark = 0; cam_watermark = cam_sg_offset; for (i = cam_sg_start, j = 0; i < cam_sg_count && j < ctl_sg_count;) { uint8_t *cam_ptr, *ctl_ptr; len_to_copy = MIN(cam_sglist[i].ds_len - cam_watermark, ctl_sglist[j].len - ctl_watermark); cam_ptr = (uint8_t *)(uintptr_t)cam_sglist[i].ds_addr; cam_ptr = cam_ptr + cam_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! 
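* Bus (physical) addresses would have to be mapped into KVA before the bcopy() below can touch them.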
*/ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else ctl_ptr = (uint8_t *)ctl_sglist[j].addr; ctl_ptr = ctl_ptr + ctl_watermark; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", ctl_ptr, __func__, cam_ptr)); bcopy(ctl_ptr, cam_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from CAM\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", cam_ptr, __func__, ctl_ptr)); bcopy(cam_ptr, ctl_ptr, len_to_copy); } io->scsiio.ext_data_filled += len_to_copy; io->scsiio.kern_data_resid -= len_to_copy; cam_watermark += len_to_copy; if (cam_sglist[i].ds_len == cam_watermark) { i++; cam_watermark = 0; } ctl_watermark += len_to_copy; if (ctl_sglist[j].len == ctl_watermark) { j++; ctl_watermark = 0; } } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = NULL; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_CMP; xpt_done(ccb); } - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); } static void cfcs_done(union ctl_io *io) { union ccb *ccb; ccb = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; if (ccb == NULL) { ctl_free_io(io); return; } /* * At this point we should have status. If we don't, that's a bug. */ KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); /* * Translate CTL status to CAM status. */ if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->csio.resid = ccb->csio.dxfer_len - io->scsiio.ext_data_filled; } ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (io->io_hdr.status & CTL_STATUS_MASK) { case CTL_SUCCESS: ccb->ccb_h.status |= CAM_REQ_CMP; break; case CTL_SCSI_ERROR: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; ccb->csio.scsi_status = io->scsiio.scsi_status; bcopy(&io->scsiio.sense_data, &ccb->csio.sense_data, min(io->scsiio.sense_len, ccb->csio.sense_len)); if (ccb->csio.sense_len > io->scsiio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - io->scsiio.sense_len; else ccb->csio.sense_resid = 0; if ((ccb->csio.sense_len - ccb->csio.sense_resid) > cfcs_max_sense) { ccb->csio.sense_resid = ccb->csio.sense_len - cfcs_max_sense; } break; case CTL_CMD_ABORTED: ccb->ccb_h.status |= CAM_REQ_ABORTED; break; case CTL_ERROR: default: ccb->ccb_h.status |= CAM_REQ_CMP_ERR; break; } ctl_free_io(io); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } xpt_done(ccb); } void cfcs_action(struct cam_sim *sim, union ccb *ccb) { struct cfcs_softc *softc; int err; softc = (struct cfcs_softc *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: { union ctl_io *io; struct ccb_scsiio *csio; csio = &ccb->csio; /* * Catch CCB flags, like physical address flags, that * indicate situations we currently can't handle. */ if (ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS) { ccb->ccb_h.status = CAM_REQ_INVALID; printf("%s: bad CCB flags %#x (all flags %#x)\n", __func__, ccb->ccb_h.flags & CFCS_BAD_CCB_FLAGS, ccb->ccb_h.flags); xpt_done(ccb); return; } /* * If we aren't online, there are no devices to see. 
*/ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { printf("%s: can't allocate ctl_io\n", __func__); ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; /* * Only SCSI I/O comes down this path, resets, etc. come * down via the XPT_RESET_BUS/LUN CCBs below. */ io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); io->scsiio.priority = csio->priority; /* * This tag scheme isn't the best, since we could in theory * have a very long-lived I/O and tag collision, especially * in a high I/O environment. But it should work well * enough for now. Since we're using unsigned ints, * they'll just wrap around. */ io->scsiio.tag_num = atomic_fetchadd_32(&softc->cur_tag_num, 1); csio->tag_id = io->scsiio.tag_num; switch (csio->tag_action) { case CAM_TAG_ACTION_NONE: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, csio->tag_action); break; } if (csio->cdb_len > sizeof(io->scsiio.cdb)) { printf("%s: WARNING: CDB len %d > ctl_io space %zd\n", __func__, csio->cdb_len, sizeof(io->scsiio.cdb)); } io->scsiio.cdb_len = min(csio->cdb_len, sizeof(io->scsiio.cdb)); bcopy(scsiio_cdb_ptr(csio), io->scsiio.cdb, io->scsiio.cdb_len); ccb->ccb_h.status |= CAM_SIM_QUEUED; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s: func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } break; } case XPT_ABORT: { union ctl_io *io; union ccb *abort_ccb; abort_ccb = ccb->cab.abort_ccb; if (abort_ccb->ccb_h.func_code != XPT_SCSI_IO) { ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return; } /* * If we aren't online, there are no devices to talk to.
*/ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = abort_ccb->csio.tag_id; switch (abort_ccb->csio.tag_action) { case CAM_TAG_ACTION_NONE: io->taskio.tag_type = CTL_TAG_UNTAGGED; break; case MSG_SIMPLE_TASK: io->taskio.tag_type = CTL_TAG_SIMPLE; break; case MSG_HEAD_OF_QUEUE_TASK: io->taskio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case MSG_ORDERED_TASK: io->taskio.tag_type = CTL_TAG_ORDERED; break; case MSG_ACA_TASK: io->taskio.tag_type = CTL_TAG_ACA; break; default: io->taskio.tag_type = CTL_TAG_UNTAGGED; printf("%s: unhandled tag type %#x!!\n", __func__, abort_ccb->csio.tag_action); break; } err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_fc *fc; cts = &ccb->cts; scsi = &cts->proto_specific.scsi; fc = &cts->xport_specific.fc; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_FC; cts->transport_version = 0; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 800000; fc->wwnn = softc->wwnn; fc->wwpn = softc->wwpn; fc->port = softc->port.targ_port; fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SET_TRAN_SETTINGS: /* XXX KDM should we actually do something here? */ ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_RESET_BUS: case XPT_RESET_DEV: { union ctl_io *io; /* * If we aren't online, there are no devices to talk to. 
*/ if (softc->online == 0) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; xpt_done(ccb); return; } io = ctl_alloc_io_nowait(softc->port.ctl_pool_ref); if (io == NULL) { ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); xpt_done(ccb); return; } ctl_zero_io(io); /* Save pointers on both sides */ if (ccb->ccb_h.func_code == XPT_RESET_DEV) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ccb; ccb->ccb_h.io_ptr = io; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = 1; io->io_hdr.nexus.targ_port = softc->port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); if (ccb->ccb_h.func_code == XPT_RESET_BUS) io->taskio.task_action = CTL_TASK_BUS_RESET; else io->taskio.task_action = CTL_TASK_LUN_RESET; err = ctl_queue(io); if (err != CTL_RETVAL_COMPLETE) { printf("%s func %d: error %d returned by " "ctl_queue()!\n", __func__, ccb->ccb_h.func_code, err); ctl_free_io(io); } break; } case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); xpt_done(ccb); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi; cpi = &ccb->cpi; cpi->version_num = 0; cpi->hba_inquiry = PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_EXTLUNS; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 1024; /* Do we really have a limit? */ cpi->maxio = 1024 * 1024; cpi->async_flags = 0; cpi->hpath_id = 0; cpi->initiator_id = 1; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "FreeBSD", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = 0; cpi->bus_id = 0; cpi->base_transfer_speed = 800000; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC2; /* * Pretend to be Fibre Channel. */ cpi->transport = XPORT_FC; cpi->transport_version = 0; cpi->xport_specific.fc.wwnn = softc->wwnn; cpi->xport_specific.fc.wwpn = softc->wwpn; cpi->xport_specific.fc.port = softc->port.targ_port; cpi->xport_specific.fc.bitrate = 8 * 1000 * 1000; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_PROVIDE_FAIL; printf("%s: unsupported CCB type %#x\n", __func__, ccb->ccb_h.func_code); xpt_done(ccb); break; } } diff --git a/sys/cam/ctl/ctl_frontend_ioctl.c b/sys/cam/ctl/ctl_frontend_ioctl.c index ef5e2bd22a86..f326100cb013 100644 --- a/sys/cam/ctl/ctl_frontend_ioctl.c +++ b/sys/cam/ctl/ctl_frontend_ioctl.c @@ -1,645 +1,645 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2015 Alexander Motin * Copyright (c) 2017 Jakub Wojciech Klama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { CTL_IOCTL_INPROG, CTL_IOCTL_DATAMOVE, CTL_IOCTL_DONE } ctl_fe_ioctl_state; struct ctl_fe_ioctl_params { struct cv sem; struct mtx ioctl_mtx; ctl_fe_ioctl_state state; }; struct cfi_port { TAILQ_ENTRY(cfi_port) link; uint32_t cur_tag_num; struct cdev * dev; struct ctl_port port; }; struct cfi_softc { TAILQ_HEAD(, cfi_port) ports; }; static struct cfi_softc cfi_softc; static int cfi_init(void); static int cfi_shutdown(void); static void cfi_datamove(union ctl_io *io); static void cfi_done(union ctl_io *io); static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfi_ioctl_port_create(struct ctl_req *req); static void cfi_ioctl_port_remove(struct ctl_req *req); static struct cdevsw cfi_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_ioctl = ctl_ioctl_io }; static struct ctl_frontend cfi_frontend = { .name = "ioctl", .init = cfi_init, .ioctl = cfi_ioctl, .shutdown = cfi_shutdown, }; CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend); static int cfi_init(void) { struct cfi_softc *isoftc = &cfi_softc; struct cfi_port *cfi; struct ctl_port *port; int error = 0; memset(isoftc, 0, sizeof(*isoftc)); TAILQ_INIT(&isoftc->ports); cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO); port = &cfi->port; port->frontend = &cfi_frontend; port->port_type = CTL_PORT_IOCTL; port->num_requested_ctl_io = 100; port->port_name = "ioctl"; port->fe_datamove = cfi_datamove; port->fe_done = cfi_done; port->physical_port = 0; port->targ_port = -1; if ((error = ctl_port_register(port)) != 0) { printf("%s: ioctl port registration failed\n", __func__); return (error); } ctl_port_online(port); TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link); return (0); } static int cfi_shutdown(void) { struct cfi_softc *isoftc = &cfi_softc; struct cfi_port *cfi, *temp; struct ctl_port *port; int error; TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) { port = &cfi->port; ctl_port_offline(port); error = ctl_port_deregister(port); if (error != 0) { printf("%s: ctl_frontend_deregister() failed\n", __func__); return (error); } TAILQ_REMOVE(&isoftc->ports, cfi, link); free(cfi, M_CTL); } return (0); } static void cfi_ioctl_port_create(struct ctl_req *req) { struct cfi_softc *isoftc = &cfi_softc; struct cfi_port *cfi; struct ctl_port *port; struct make_dev_args args; const char *val; int retval; int pp = -1, vp = 0; val = dnvlist_get_string(req->args_nvl, "pp", NULL); if (val != NULL) pp = strtol(val, NULL, 10); val = dnvlist_get_string(req->args_nvl, "vp", NULL); if (val != NULL) vp = strtol(val, NULL, 10); if (pp != -1) { /* Check for duplicates */ TAILQ_FOREACH(cfi, &isoftc->ports, link) { if (pp == cfi->port.physical_port && vp == cfi->port.virtual_port) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, 
sizeof(req->error_str), "port %d already exists", pp); return; } } } else { /* Find free port number */ TAILQ_FOREACH(cfi, &isoftc->ports, link) { pp = MAX(pp, cfi->port.physical_port); } pp++; } cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO); port = &cfi->port; port->frontend = &cfi_frontend; port->port_type = CTL_PORT_IOCTL; port->num_requested_ctl_io = 100; port->port_name = "ioctl"; port->fe_datamove = cfi_datamove; port->fe_done = cfi_done; port->physical_port = pp; port->virtual_port = vp; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "ctl_port_register() failed with error %d", retval); free(cfi, M_CTL); return; } req->result_nvl = nvlist_create(0); nvlist_add_number(req->result_nvl, "port_id", port->targ_port); ctl_port_online(port); make_dev_args_init(&args); args.mda_devsw = &cfi_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = NULL; args.mda_si_drv2 = cfi; retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp); if (retval != 0) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "make_dev_s() failed with error %d", retval); ctl_port_offline(port); ctl_port_deregister(port); free(cfi, M_CTL); return; } req->status = CTL_LUN_OK; TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link); } static void cfi_ioctl_port_remove(struct ctl_req *req) { struct cfi_softc *isoftc = &cfi_softc; struct cfi_port *cfi = NULL; const char *val; int port_id = -1; val = dnvlist_get_string(req->args_nvl, "port_id", NULL); if (val != NULL) port_id = strtol(val, NULL, 10); if (port_id == -1) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "port_id not provided"); return; } TAILQ_FOREACH(cfi, &isoftc->ports, link) { if (cfi->port.targ_port == port_id) break; } if (cfi == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "cannot find port %d", port_id); return; } if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "cannot destroy default ioctl port"); return; } ctl_port_offline(&cfi->port); ctl_port_deregister(&cfi->port); TAILQ_REMOVE(&isoftc->ports, cfi, link); destroy_dev(cfi->dev); free(cfi, M_CTL); req->status = CTL_LUN_OK; } static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } return (ENOTTY); } /* * Data movement routine for the CTL ioctl frontend port. */ static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sglen, ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy; int kern_watermark, ext_watermark; int ext_sglist_malloced; int i, j; CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); /* * If this flag is set, fake the data transfer. 
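* Only the transfer accounting is updated; no data is actually copied.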
*/ if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { ext_sglist_malloced = 0; ctsio->ext_data_filled += ctsio->kern_data_len; ctsio->kern_data_resid = 0; goto bailout; } /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. */ if (ctsio->ext_sg_entries > 0) { int len_seen; ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, M_WAITOK); ext_sglist_malloced = 1; if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) { ctsio->io_hdr.port_status = 31343; goto bailout; } ext_sg_entries = ctsio->ext_sg_entries; ext_sg_start = ext_sg_entries; ext_offset = 0; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist_malloced = 0; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = MIN(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! 
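* Bus addresses would have to be mapped before copyin()/copyout() can use them.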
*/ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes to user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", kern_ptr, ext_ptr)); if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { ctsio->io_hdr.port_status = 31344; goto bailout; } } else { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes from user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", ext_ptr, kern_ptr)); if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ ctsio->io_hdr.port_status = 31345; goto bailout; } } ctsio->ext_data_filled += len_to_copy; ctsio->kern_data_resid -= len_to_copy; ext_watermark += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } kern_watermark += len_to_copy; if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " "kern_sg_entries: %d\n", ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " "kern_data_len = %d\n", ctsio->ext_data_len, ctsio->kern_data_len)); bailout: if (ext_sglist_malloced != 0) free(ext_sglist, M_CTL); return (CTL_RETVAL_COMPLETE); } static void cfi_datamove(union ctl_io *io) { struct ctl_fe_ioctl_params *params; params = (struct ctl_fe_ioctl_params *) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; mtx_lock(¶ms->ioctl_mtx); params->state = CTL_IOCTL_DATAMOVE; cv_broadcast(¶ms->sem); mtx_unlock(¶ms->ioctl_mtx); } static void cfi_done(union ctl_io *io) { struct ctl_fe_ioctl_params *params; params = (struct ctl_fe_ioctl_params *) io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; mtx_lock(¶ms->ioctl_mtx); params->state = CTL_IOCTL_DONE; cv_broadcast(¶ms->sem); mtx_unlock(¶ms->ioctl_mtx); } static int cfi_submit_wait(union ctl_io *io) { struct ctl_fe_ioctl_params params; ctl_fe_ioctl_state last_state; int done, retval; bzero(¶ms, sizeof(params)); mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); cv_init(¶ms.sem, "ctlioccv"); params.state = CTL_IOCTL_INPROG; last_state = params.state; io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms; CTL_DEBUG_PRINT(("cfi_submit_wait\n")); /* This shouldn't happen */ if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE) return (retval); done = 0; do { mtx_lock(¶ms.ioctl_mtx); /* * Check the state here, and don't sleep if the state has * already changed (i.e. wakeup has already occurred, but we * weren't waiting yet). */ if (params.state == last_state) { /* XXX KDM cv_wait_sig instead? */ cv_wait(¶ms.sem, ¶ms.ioctl_mtx); } last_state = params.state; switch (params.state) { case CTL_IOCTL_INPROG: /* Why did we wake up? */ /* XXX KDM error here? */ mtx_unlock(¶ms.ioctl_mtx); break; case CTL_IOCTL_DATAMOVE: CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n")); /* * change last_state back to INPROG to avoid * deadlock on subsequent data moves. */ params.state = last_state = CTL_IOCTL_INPROG; mtx_unlock(¶ms.ioctl_mtx); ctl_ioctl_do_datamove(&io->scsiio); /* * Note that in some cases, most notably writes, * this will queue the I/O and call us back later. * In other cases, generally reads, this routine * will immediately call back and wake us up, * probably using our own context. 
*/ - io->scsiio.be_move_done(io); + ctl_datamove_done(io, false); break; case CTL_IOCTL_DONE: mtx_unlock(&params.ioctl_mtx); CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n")); done = 1; break; default: mtx_unlock(&params.ioctl_mtx); /* XXX KDM error here? */ break; } } while (done == 0); mtx_destroy(&params.ioctl_mtx); cv_destroy(&params.sem); return (CTL_RETVAL_COMPLETE); } int ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cfi_port *cfi; union ctl_io *io; void *pool_tmp, *sc_tmp; int retval = 0; if (cmd != CTL_IO) return (ENOTTY); cfi = dev->si_drv2 == NULL ? TAILQ_FIRST(&cfi_softc.ports) : dev->si_drv2; /* * If we haven't been "enabled", don't allow any SCSI I/O * to this FETD. */ if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0) return (EPERM); io = ctl_alloc_io(cfi->port.ctl_pool_ref); /* * Need to save the pool reference so it doesn't get * spammed by the user's ctl_io. */ pool_tmp = io->io_hdr.pool; sc_tmp = CTL_SOFTC(io); memcpy(io, (void *)addr, sizeof(*io)); io->io_hdr.pool = pool_tmp; CTL_SOFTC(io) = sc_tmp; TAILQ_INIT(&io->io_hdr.blocked_queue); /* * No status yet, so make sure the status is set properly. */ io->io_hdr.status = CTL_STATUS_NONE; /* * The user sets the initiator ID, target and LUN IDs. */ io->io_hdr.nexus.targ_port = cfi->port.targ_port; io->io_hdr.flags |= CTL_FLAG_USER_REQ; if ((io->io_hdr.io_type == CTL_IO_SCSI) && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) io->scsiio.tag_num = cfi->cur_tag_num++; retval = cfi_submit_wait(io); if (retval == 0) memcpy((void *)addr, io, sizeof(*io)); ctl_free_io(io); return (retval); } diff --git a/sys/cam/ctl/ctl_frontend_iscsi.c b/sys/cam/ctl/ctl_frontend_iscsi.c index 73483fb155cc..fdbc06150f93 100644 --- a/sys/cam/ctl/ctl_frontend_iscsi.c +++ b/sys/cam/ctl/ctl_frontend_iscsi.c @@ -1,3044 +1,3045 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 The FreeBSD Foundation * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * CTL frontend for the iSCSI protocol.
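* It terminates iSCSI PDUs and submits the embedded SCSI commands and task management requests to CTL.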
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY"); #endif static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend"); static uma_zone_t cfiscsi_data_wait_zone; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer iSCSI Frontend"); static int debug = 1; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int ping_timeout = 5; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); static int login_timeout = 60; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); static int maxtags = 256; SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxtags, CTLFLAG_RWTUN, &maxtags, 0, "Max number of requests queued by initiator"); #define CFISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) { \ printf("%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_LOCK(X) mtx_lock(&X->cs_lock) #define CFISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->cs_lock) #define CFISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->cs_lock, MA_OWNED) #define CONN_SESSION(X) ((struct cfiscsi_session *)(X)->ic_prv0) #define PDU_SESSION(X) CONN_SESSION((X)->ip_conn) struct cfiscsi_priv { void *request; uint32_t expdatasn; uint32_t r2tsn; }; #define PRIV(io) \ ((struct cfiscsi_priv *)&(io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND]) #define PRIV_REQUEST(io) PRIV(io)->request #define PRIV_EXPDATASN(io) PRIV(io)->expdatasn #define PRIV_R2TSN(io) PRIV(io)->r2tsn static int cfiscsi_init(void); static int cfiscsi_shutdown(void); static void cfiscsi_online(void *arg); static void cfiscsi_offline(void *arg); static int cfiscsi_info(void *arg, struct sbuf *sb); static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfiscsi_datamove(union ctl_io *io); static void cfiscsi_datamove_in(union ctl_io *io); static void cfiscsi_datamove_out(union ctl_io *io); static void cfiscsi_done(union ctl_io *io); static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request); static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request); static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request); static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request); static void cfiscsi_session_terminate(struct cfiscsi_session *cs); static struct cfiscsi_data_wait *cfiscsi_data_wait_new( struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp); static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw); static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag); static struct cfiscsi_target *cfiscsi_target_find_or_create( struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag); static void cfiscsi_target_release(struct cfiscsi_target *ct); static void cfiscsi_session_delete(struct cfiscsi_session *cs); static struct cfiscsi_softc cfiscsi_softc; static struct ctl_frontend cfiscsi_frontend = { .name = "iscsi", .init = cfiscsi_init, .ioctl = cfiscsi_ioctl, .shutdown = cfiscsi_shutdown, }; CTL_FRONTEND_DECLARE(cfiscsi, cfiscsi_frontend); MODULE_DEPEND(cfiscsi, icl, 1, 1, 1); static struct icl_pdu * cfiscsi_pdu_new_response(struct icl_pdu *request, int flags) { return (icl_pdu_new(request->ip_conn, flags)); } static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request) { const struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; uint32_t cmdsn, curcmdsn; cs = PDU_SESSION(request); /* * Every incoming PDU - not just NOP-Out - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. */ cs->cs_timeout = 0; /* * Immediate commands carry cmdsn, but it is neither incremented nor * verified. */ if (request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) return (false); /* * Data-Out PDUs don't contain CmdSN. 
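* There is nothing to verify or advance for them here.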
*/ if (request->ip_bhs->bhs_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) return (false); /* * We're only using fields common for all the request * (initiator -> target) PDUs. */ bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; curcmdsn = cmdsn = ntohl(bhssc->bhssc_cmdsn); /* * Increment session cmdsn and exit if we received the expected value. */ do { if (atomic_fcmpset_32(&cs->cs_cmdsn, &curcmdsn, cmdsn + 1)) return (false); } while (curcmdsn == cmdsn); /* * The target MUST silently ignore any non-immediate command outside * of this range. */ if (ISCSI_SNLT(cmdsn, curcmdsn) || ISCSI_SNGT(cmdsn, curcmdsn - 1 + maxtags)) { CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u", cmdsn, curcmdsn); return (true); } /* * We don't support multiple connections now, so any discontinuity in * CmdSN means lost PDUs. Since we don't support PDU retransmission -- * terminate the connection. */ CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u; dropping connection", cmdsn, curcmdsn); cfiscsi_session_terminate(cs); return (true); } static void cfiscsi_pdu_handle(struct icl_pdu *request) { struct cfiscsi_session *cs; bool ignore; cs = PDU_SESSION(request); ignore = cfiscsi_pdu_update_cmdsn(request); if (ignore) { icl_pdu_free(request); return; } /* * Handle the PDU; this includes e.g. receiving the remaining * part of PDU and submitting the SCSI command to CTL * or queueing a reply. The handling routine is responsible * for freeing the PDU when it's no longer needed. */ switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_NOP_OUT: cfiscsi_pdu_handle_nop_out(request); break; case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_pdu_handle_scsi_command(request); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_pdu_handle_task_request(request); break; case ISCSI_BHS_OPCODE_SCSI_DATA_OUT: cfiscsi_pdu_handle_data_out(request); break; case ISCSI_BHS_OPCODE_LOGOUT_REQUEST: cfiscsi_pdu_handle_logout_request(request); break; default: CFISCSI_SESSION_WARN(cs, "received PDU with unsupported " "opcode 0x%x; dropping connection", request->ip_bhs->bhs_opcode); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_receive_callback(struct icl_pdu *request) { #ifdef ICL_KERNEL_PROXY struct cfiscsi_session *cs; cs = PDU_SESSION(request); if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (cs->cs_login_pdu == NULL) cs->cs_login_pdu = request; else icl_pdu_free(request); cv_signal(&cs->cs_login_cv); return; } #endif cfiscsi_pdu_handle(request); } static void cfiscsi_error_callback(struct icl_conn *ic) { struct cfiscsi_session *cs; cs = CONN_SESSION(ic); CFISCSI_SESSION_WARN(cs, "connection error; dropping connection"); cfiscsi_session_terminate(cs); } static int cfiscsi_pdu_prepare(struct icl_pdu *response) { struct cfiscsi_session *cs; struct iscsi_bhs_scsi_response *bhssr; bool advance_statsn = true; uint32_t cmdsn; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK_ASSERT(cs); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; /* * 10.8.3: "The StatSN for this connection is not advanced * after this PDU is sent." */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T) advance_statsn = false; /* * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff, * StatSN for the connection is not advanced after this PDU is sent." 
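* That is the case for unsolicited, target-initiated NOP-In pings.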
*/ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && bhssr->bhssr_initiator_task_tag == 0xffffffff) advance_statsn = false; /* * See the comment below - StatSN is not meaningful and must * not be advanced. */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN && (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0) advance_statsn = false; /* * 10.7.3: "The fields StatSN, Status, and Residual Count * only have meaningful content if the S bit is set to 1." */ if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhssr->bhssr_flags & BHSDI_FLAGS_S)) bhssr->bhssr_statsn = htonl(cs->cs_statsn); cmdsn = cs->cs_cmdsn; bhssr->bhssr_expcmdsn = htonl(cmdsn); bhssr->bhssr_maxcmdsn = htonl(cmdsn - 1 + imax(0, maxtags - cs->cs_outstanding_ctl_pdus)); if (advance_statsn) cs->cs_statsn++; return (0); } static void cfiscsi_pdu_queue(struct icl_pdu *response) { struct cfiscsi_session *cs; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue(response); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_queue_cb(struct icl_pdu *response, icl_pdu_cb cb) { struct cfiscsi_session *cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue_cb(response, cb); CFISCSI_SESSION_UNLOCK(cs); } static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request) { struct cfiscsi_session *cs; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *response; void *data = NULL; size_t datasize; int error; cs = PDU_SESSION(request); bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; if (bhsno->bhsno_initiator_task_tag == 0xffffffff) { /* * Nothing to do, cfiscsi_pdu_update_cmdsn() already * zeroed the timeout. */ icl_pdu_free(request); return; } datasize = icl_pdu_data_segment_length(request); if (datasize > 0) { data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } icl_pdu_get_data(request, 0, data, datasize); } response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag; bhsni->bhsni_target_transfer_tag = 0xffffffff; if (datasize > 0) { error = icl_pdu_append_data(response, data, datasize, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); icl_pdu_free(response); cfiscsi_session_terminate(cs); return; } free(data, M_CFISCSI); } icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request) { struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); if (request->ip_data_len > 0 && cs->cs_immediate_data == false) { CFISCSI_SESSION_WARN(cs, "unsolicited data with " "ImmediateData=No; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } io =
ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = request; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhssc->bhssc_lun)); io->scsiio.priority = (bhssc->bhssc_pri & BHSSC_PRI_MASK) >> BHSSC_PRI_SHIFT; io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag; switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) { case BHSSC_FLAGS_ATTR_UNTAGGED: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case BHSSC_FLAGS_ATTR_SIMPLE: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case BHSSC_FLAGS_ATTR_ORDERED: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case BHSSC_FLAGS_ATTR_HOQ: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case BHSSC_FLAGS_ATTR_ACA: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; CFISCSI_SESSION_WARN(cs, "unhandled tag type %d", bhssc->bhssc_flags & BHSSC_FLAGS_ATTR); break; } io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. */ memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb)); refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_run(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_run() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request) { struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct icl_pdu *response; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = request; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(be64toh(bhstmr->bhstmr_lun)); io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ switch (bhstmr->bhstmr_function & ~0x80) { case BHSTMR_FUNCTION_ABORT_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_ABORT_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case BHSTMR_FUNCTION_CLEAR_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_CLEAR_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET"); #endif io->taskio.task_action = CTL_TASK_LUN_RESET; break; case BHSTMR_FUNCTION_TARGET_WARM_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_TARGET_COLD_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_COLD_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case BHSTMR_FUNCTION_QUERY_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_QUERY_TASK_SET: #if 0 
CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case BHSTMR_FUNCTION_I_T_NEXUS_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_I_T_NEXUS_RESET"); #endif io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; break; case BHSTMR_FUNCTION_QUERY_ASYNC_EVENT: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_QUERY_ASYNC_EVENT"); #endif io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; default: CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x", bhstmr->bhstmr_function & ~0x80); ctl_free_io(io); response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); return; } refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_run(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_run() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static bool cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t copy_len, len, off, buffer_offset; int ctl_sg_count; union ctl_io *io; cs = PDU_SESSION(request); KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT || (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bad opcode 0x%x", request->ip_bhs->bhs_opcode)); /* * We're only using fields common for Data-Out and SCSI Command PDUs. */ bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); #if 0 CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d", request->ip_data_len, io->scsiio.kern_total_len); #endif if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset); else buffer_offset = 0; len = icl_pdu_data_segment_length(request); /* * Make sure the offset, as sent by the initiator, matches the offset * we're supposed to be at in the scatter-gather list. 
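* In other words, the PDU's data segment must begin at or before our current position (kern_rel_offset + ext_data_filled) and extend past it; anything else indicates lost or reordered Data-Out PDUs.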
*/ if (buffer_offset > io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled || buffer_offset + len <= io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) { CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, " "expected %zd; dropping connection", buffer_offset, (size_t)io->scsiio.kern_rel_offset + (size_t)io->scsiio.ext_data_filled); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } /* * This is the offset within the PDU data segment, as opposed * to buffer_offset, which is the offset within the task (SCSI * command). */ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled - buffer_offset; /* * Iterate over the scatter/gather segments, filling them with data * from the PDU data segment. Note that this can get called multiple * times for one SCSI command; the cdw structure holds state for the * scatter/gather list. */ for (;;) { KASSERT(cdw->cdw_sg_index < ctl_sg_count, ("cdw->cdw_sg_index >= ctl_sg_count")); if (cdw->cdw_sg_len == 0) { cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; } KASSERT(off <= len, ("off > len")); copy_len = len - off; if (copy_len > cdw->cdw_sg_len) copy_len = cdw->cdw_sg_len; icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len); cdw->cdw_sg_addr += copy_len; cdw->cdw_sg_len -= copy_len; off += copy_len; io->scsiio.ext_data_filled += copy_len; io->scsiio.kern_data_resid -= copy_len; if (cdw->cdw_sg_len == 0) { /* * End of current segment. */ if (cdw->cdw_sg_index == ctl_sg_count - 1) { /* * Last segment in scatter/gather list. */ break; } cdw->cdw_sg_index++; } if (off == len) { /* * End of PDU payload. */ break; } } if (len > off) { /* * In case of unsolicited data, it's possible that the buffer * provided by CTL is smaller than negotiated FirstBurstLength. * Just ignore the superfluous data; we will ask for it with R2T * on the next call to cfiscsi_datamove(). * * This obviously can only happen with SCSI Command PDU. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND) return (true); CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, " "expected %zd; dropping connection", icl_pdu_data_segment_length(request), off); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) { CFISCSI_SESSION_WARN(cs, "got the final packet without " "the F flag; flags = 0x%x; dropping connection", bhsdo->bhsdo_flags); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) { if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { CFISCSI_SESSION_WARN(cs, "got the final packet, but the " "transmitted size was %zd bytes instead of %d; " "dropping connection", (size_t)io->scsiio.ext_data_filled, cdw->cdw_r2t_end); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } else { /* * For SCSI Command PDU, this just means we need to * solicit more data by sending R2T. 
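* Returning false tells cfiscsi_datamove_out() that the transfer is not complete yet, so it will go on to queue an R2T for the remainder.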
*/ return (false); } } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) { #if 0 CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target " "transfer tag 0x%x", cdw->cdw_target_transfer_tag); #endif return (true); } return (false); } static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct cfiscsi_data_wait *cdw = NULL; union ctl_io *io; bool done; cs = PDU_SESSION(request); bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) { #if 0 CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for " "ttt 0x%x, itt 0x%x", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag, cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag); #endif if (bhsdo->bhsdo_target_transfer_tag == cdw->cdw_target_transfer_tag) break; } CFISCSI_SESSION_UNLOCK(cs); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "target transfer tag 0x%x, initiator task tag " "0x%x, not found; dropping connection", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) { CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with " "DataSN %u, while expected %u; dropping connection", ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } cdw->cdw_datasn++; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); done = cfiscsi_handle_data_segment(request, cdw); if (done) { CFISCSI_SESSION_LOCK(cs); TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end || io->scsiio.ext_data_filled == io->scsiio.kern_data_len); cfiscsi_data_wait_free(cs, cdw); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; if (done) - io->scsiio.be_move_done(io); + ctl_datamove_done(io, false); else cfiscsi_datamove_out(io); } icl_pdu_free(request); } static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request) { struct iscsi_bhs_logout_request *bhslr; struct iscsi_bhs_logout_response *bhslr2; struct icl_pdu *response; struct cfiscsi_session *cs; cs = PDU_SESSION(request); bhslr = (struct iscsi_bhs_logout_request *)request->ip_bhs; switch (bhslr->bhslr_reason & 0x7f) { case BHSLR_REASON_CLOSE_SESSION: case BHSLR_REASON_CLOSE_CONNECTION: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); cfiscsi_session_terminate(cs); break; case BHSLR_REASON_REMOVE_FOR_RECOVERY: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED; 
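/* * We only operate at ErrorRecoveryLevel 0, so removing a connection for recovery is not something we can honor. */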
bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); break; default: CFISCSI_SESSION_WARN(cs, "invalid reason 0x%x; dropping connection", bhslr->bhslr_reason); icl_pdu_free(request); cfiscsi_session_terminate(cs); break; } } static void cfiscsi_callout(void *context) { struct icl_pdu *cp; struct iscsi_bhs_nop_in *bhsni; struct cfiscsi_session *cs; cs = context; if (cs->cs_terminating) return; callout_schedule(&cs->cs_callout, 1 * hz); atomic_add_int(&cs->cs_timeout, 1); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (login_timeout > 0 && cs->cs_timeout > login_timeout) { CFISCSI_SESSION_WARN(cs, "login timed out after " "%d seconds; dropping connection", cs->cs_timeout); cfiscsi_session_terminate(cs); } return; } #endif if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-In in this case; * user might have disabled pings to work around problems * with certain initiators that can't properly handle * NOP-In, such as iPXE. Reset the timeout, to avoid * triggering reconnection, should the user decide to * reenable them. */ cs->cs_timeout = 0; return; } if (cs->cs_timeout >= ping_timeout) { CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; " "dropping connection", ping_timeout); cfiscsi_session_terminate(cs); return; } /* * If the timeout was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-Out. * * (It's 2 - one for one second, and one for incrementing cs_timeout * earlier in this routine.) */ if (cs->cs_timeout < 2) return; cp = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (cp == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory"); return; } bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = 0xffffffff; cfiscsi_pdu_queue(cp); } static struct cfiscsi_data_wait * cfiscsi_data_wait_new(struct cfiscsi_session *cs, union ctl_io *io, uint32_t initiator_task_tag, uint32_t *target_transfer_tagp) { struct cfiscsi_data_wait *cdw; int error; cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate %zd bytes", sizeof(*cdw)); return (NULL); } error = icl_conn_transfer_setup(cs->cs_conn, io, target_transfer_tagp, &cdw->cdw_icl_prv); if (error != 0) { CFISCSI_SESSION_WARN(cs, "icl_conn_transfer_setup() failed with error %d", error); uma_zfree(cfiscsi_data_wait_zone, cdw); return (NULL); } cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = *target_transfer_tagp; cdw->cdw_initiator_task_tag = initiator_task_tag; return (cdw); } static void cfiscsi_data_wait_free(struct cfiscsi_session *cs, struct cfiscsi_data_wait *cdw) { icl_conn_transfer_done(cs->cs_conn, cdw->cdw_icl_prv); uma_zfree(cfiscsi_data_wait_zone, cdw); } static void cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs) { struct cfiscsi_data_wait *cdw; union ctl_io *io; int error, last, wait; if (cs->cs_target == NULL) return; /* No target yet, so nothing to do. 
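* Otherwise, abort everything this session has in flight by queueing an internal I_T NEXUS RESET task to CTL, then fail any Data-Out transfers still waiting for the initiator.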
*/ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); PRIV_REQUEST(io) = cs; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_lun = 0; io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; wait = cs->cs_outstanding_ctl_pdus; refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_run(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_run() failed; error %d", error); refcount_release(&cs->cs_outstanding_ctl_pdus); ctl_free_io(io); } CFISCSI_SESSION_LOCK(cs); while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) { TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * Set nonzero port status; this prevents backends from * assuming that the data transfer actually succeeded * and writing uninitialized data to disk. */ cdw->cdw_ctl_io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42; - cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); + ctl_datamove_done(cdw->cdw_ctl_io, false); cfiscsi_data_wait_free(cs, cdw); CFISCSI_SESSION_LOCK(cs); } CFISCSI_SESSION_UNLOCK(cs); /* * Wait for CTL to terminate all the tasks. */ if (wait > 0) CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate %d tasks", wait); for (;;) { refcount_acquire(&cs->cs_outstanding_ctl_pdus); last = refcount_release(&cs->cs_outstanding_ctl_pdus); if (last != 0) break; tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus), 0, "cfiscsi_terminate", hz / 100); } if (wait > 0) CFISCSI_SESSION_WARN(cs, "tasks terminated"); } static void cfiscsi_maintenance_thread(void *arg) { struct cfiscsi_session *cs; cs = arg; for (;;) { CFISCSI_SESSION_LOCK(cs); if (cs->cs_terminating == false || cs->cs_handoff_in_progress) cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock); CFISCSI_SESSION_UNLOCK(cs); if (cs->cs_terminating && cs->cs_handoff_in_progress == false) { /* * We used to wait up to 30 seconds to deliver queued * PDUs to the initiator. We also tried hard to deliver * SCSI Responses for the aborted PDUs. We don't do * that anymore. We might need to revisit that. */ callout_drain(&cs->cs_callout); icl_conn_close(cs->cs_conn); /* * At this point ICL receive thread is no longer * running; no new tasks can be queued. 
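* It is now safe to abort the outstanding tasks and tear the session down.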
*/ cfiscsi_session_terminate_tasks(cs); cfiscsi_session_delete(cs); kthread_exit(); return; } CFISCSI_SESSION_DEBUG(cs, "nothing to do"); } } static void cfiscsi_session_terminate(struct cfiscsi_session *cs) { cs->cs_terminating = true; cv_signal(&cs->cs_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_signal(&cs->cs_login_cv); #endif } static int cfiscsi_session_register_initiator(struct cfiscsi_session *cs) { struct cfiscsi_target *ct; char *name; int i; KASSERT(cs->cs_ctl_initid == -1, ("already registered")); ct = cs->cs_target; name = strdup(cs->cs_initiator_id, M_CTL); i = ctl_add_initiator(&ct->ct_port, -1, 0, name); if (i < 0) { CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", i); cs->cs_ctl_initid = -1; return (1); } cs->cs_ctl_initid = i; #if 0 CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i); #endif return (0); } static void cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs) { int error; if (cs->cs_ctl_initid == -1) return; error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid); if (error != 0) { CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d", error); } cs->cs_ctl_initid = -1; } static struct cfiscsi_session * cfiscsi_session_new(struct cfiscsi_softc *softc, const char *offload) { struct cfiscsi_session *cs; int error; cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO); if (cs == NULL) { CFISCSI_WARN("malloc failed"); return (NULL); } cs->cs_ctl_initid = -1; refcount_init(&cs->cs_outstanding_ctl_pdus, 0); TAILQ_INIT(&cs->cs_waiting_for_data_out); mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF); cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&cs->cs_login_cv, "cfiscsi_login"); #endif /* * The purpose of this is to avoid racing with session shutdown. * Otherwise we could have the maintenance thread call icl_conn_close() * before we call icl_conn_handoff(). */ cs->cs_handoff_in_progress = true; cs->cs_conn = icl_new_conn(offload, false, "cfiscsi", &cs->cs_lock); if (cs->cs_conn == NULL) { free(cs, M_CFISCSI); return (NULL); } cs->cs_conn->ic_receive = cfiscsi_receive_callback; cs->cs_conn->ic_error = cfiscsi_error_callback; cs->cs_conn->ic_prv0 = cs; error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt"); if (error != 0) { CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error); free(cs, M_CFISCSI); return (NULL); } mtx_lock(&softc->lock); cs->cs_id = ++softc->last_session_id; TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); /* * Start pinging the initiator. 
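* The callout fires every second; cfiscsi_callout() counts seconds of inactivity, sends a NOP-In ping once the session goes quiet, and terminates the session after ping_timeout seconds without any PDU from the initiator.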
*/ callout_init(&cs->cs_callout, 1); callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs); return (cs); } static void cfiscsi_session_delete(struct cfiscsi_session *cs) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; KASSERT(cs->cs_outstanding_ctl_pdus == 0, ("destroying session with outstanding CTL pdus")); KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out), ("destroying session with non-empty queue")); mtx_lock(&softc->lock); TAILQ_REMOVE(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); cfiscsi_session_unregister_initiator(cs); if (cs->cs_target != NULL) cfiscsi_target_release(cs->cs_target); icl_conn_close(cs->cs_conn); icl_conn_free(cs->cs_conn); free(cs, M_CFISCSI); cv_signal(&softc->sessions_cv); } static int cfiscsi_init(void) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF); cv_init(&softc->sessions_cv, "cfiscsi_sessions"); #ifdef ICL_KERNEL_PROXY cv_init(&softc->accept_cv, "cfiscsi_accept"); #endif TAILQ_INIT(&softc->sessions); TAILQ_INIT(&softc->targets); cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait", sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } static int cfiscsi_shutdown(void) { struct cfiscsi_softc *softc = &cfiscsi_softc; if (!TAILQ_EMPTY(&softc->sessions) || !TAILQ_EMPTY(&softc->targets)) return (EBUSY); uma_zdestroy(cfiscsi_data_wait_zone); #ifdef ICL_KERNEL_PROXY cv_destroy(&softc->accept_cv); #endif cv_destroy(&softc->sessions_cv); mtx_destroy(&softc->lock); return (0); } #ifdef ICL_KERNEL_PROXY static void cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id) { struct cfiscsi_session *cs; cs = cfiscsi_session_new(&cfiscsi_softc, NULL); if (cs == NULL) { CFISCSI_WARN("failed to create session"); return; } icl_conn_handoff_sock(cs->cs_conn, so); cs->cs_initiator_sa = sa; cs->cs_portal_id = portal_id; cs->cs_handoff_in_progress = false; cs->cs_waiting_for_ctld = true; cv_signal(&cfiscsi_softc.accept_cv); CFISCSI_SESSION_LOCK(cs); /* * Wake up the maintenance thread if we got scheduled for termination * somewhere between cfiscsi_session_new() and icl_conn_handoff_sock(). 
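* Calling cfiscsi_session_terminate() again here is harmless; it only sets cs_terminating and re-signals the maintenance thread.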
*/ if (cs->cs_terminating) cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); } #endif static void cfiscsi_online(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 1; online = softc->online++; mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY if (softc->listener != NULL) icl_listen_free(softc->listener); softc->listener = icl_listen_new(cfiscsi_accept); #endif } static void cfiscsi_offline(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; struct cfiscsi_session *cs; int error, online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (!ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 0; online = --softc->online; do { TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) cfiscsi_session_terminate(cs); } TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) break; } if (cs != NULL) { error = cv_wait_sig(&softc->sessions_cv, &softc->lock); if (error != 0) { CFISCSI_SESSION_DEBUG(cs, "cv_wait failed with error %d\n", error); break; } } } while (cs != NULL && ct->ct_online == 0); mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY icl_listen_free(softc->listener); softc->listener = NULL; #endif } static int cfiscsi_info(void *arg, struct sbuf *sb) { struct cfiscsi_target *ct = (struct cfiscsi_target *)arg; int retval; retval = sbuf_printf(sb, "\t%d\n", ct->ct_state); return (retval); } static void cfiscsi_ioctl_handoff(struct ctl_iscsi *ci) { struct cfiscsi_softc *softc; struct cfiscsi_session *cs, *cs2; struct cfiscsi_target *ct; struct ctl_iscsi_handoff_params *cihp; int error; cihp = (struct ctl_iscsi_handoff_params *)&(ci->data); softc = &cfiscsi_softc; CFISCSI_DEBUG("new connection from %s (%s) to %s", cihp->initiator_name, cihp->initiator_addr, cihp->target_name); ct = cfiscsi_target_find(softc, cihp->target_name, cihp->portal_group_tag); if (ct == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: target not found", __func__); return; } #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0 && cihp->connection_id > 0) { snprintf(ci->error_str, sizeof(ci->error_str), "both socket and connection_id set"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } if (cihp->socket == 0) { mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cihp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } mtx_unlock(&cfiscsi_softc.lock); } else { #endif cs = cfiscsi_session_new(softc, cihp->offload); if (cs == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: cfiscsi_session_new failed", __func__); cfiscsi_target_release(ct); return; } #ifdef ICL_KERNEL_PROXY } #endif /* * First PDU of Full Feature phase has the same CmdSN as the last * PDU from the Login Phase received from the initiator. Thus, * the -1 below. 
*/ cs->cs_cmdsn = cihp->cmdsn; cs->cs_statsn = cihp->statsn; cs->cs_max_recv_data_segment_length = cihp->max_recv_data_segment_length; cs->cs_max_send_data_segment_length = cihp->max_send_data_segment_length; cs->cs_max_burst_length = cihp->max_burst_length; cs->cs_first_burst_length = cihp->first_burst_length; cs->cs_immediate_data = !!cihp->immediate_data; if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_header_crc32c = true; if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_data_crc32c = true; strlcpy(cs->cs_initiator_name, cihp->initiator_name, sizeof(cs->cs_initiator_name)); strlcpy(cs->cs_initiator_addr, cihp->initiator_addr, sizeof(cs->cs_initiator_addr)); strlcpy(cs->cs_initiator_alias, cihp->initiator_alias, sizeof(cs->cs_initiator_alias)); memcpy(cs->cs_initiator_isid, cihp->initiator_isid, sizeof(cs->cs_initiator_isid)); snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id), "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name, cihp->initiator_isid[0], cihp->initiator_isid[1], cihp->initiator_isid[2], cihp->initiator_isid[3], cihp->initiator_isid[4], cihp->initiator_isid[5]); mtx_lock(&softc->lock); if (ct->ct_online == 0) { mtx_unlock(&softc->lock); CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); cfiscsi_target_release(ct); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: port offline", __func__); return; } cs->cs_target = ct; mtx_unlock(&softc->lock); restart: if (!cs->cs_terminating) { mtx_lock(&softc->lock); TAILQ_FOREACH(cs2, &softc->sessions, cs_next) { if (cs2 != cs && cs2->cs_tasks_aborted == false && cs->cs_target == cs2->cs_target && strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) { if (strcmp(cs->cs_initiator_addr, cs2->cs_initiator_addr) != 0) { CFISCSI_SESSION_WARN(cs2, "session reinstatement from " "different address %s", cs->cs_initiator_addr); } else { CFISCSI_SESSION_DEBUG(cs2, "session reinstatement"); } cfiscsi_session_terminate(cs2); mtx_unlock(&softc->lock); pause("cfiscsi_reinstate", 1); goto restart; } } mtx_unlock(&softc->lock); } /* * Register initiator with CTL. */ cfiscsi_session_register_initiator(cs); #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0) { #endif error = icl_conn_handoff(cs->cs_conn, cihp->socket); if (error != 0) { CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_conn_handoff failed with error %d", __func__, error); return; } #ifdef ICL_KERNEL_PROXY } #endif #ifdef ICL_KERNEL_PROXY cs->cs_login_phase = false; /* * First PDU of the Full Feature phase has likely already arrived. * We have to pick it up and execute properly. */ if (cs->cs_login_pdu != NULL) { CFISCSI_SESSION_DEBUG(cs, "picking up first PDU"); cfiscsi_pdu_handle(cs->cs_login_pdu); cs->cs_login_pdu = NULL; } #endif CFISCSI_SESSION_LOCK(cs); cs->cs_handoff_in_progress = false; /* * Wake up the maintenance thread if we got scheduled for termination. 
*/ if (cs->cs_terminating) cfiscsi_session_terminate(cs); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_list(struct ctl_iscsi *ci) { struct ctl_iscsi_list_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; struct sbuf *sb; int error; cilp = (struct ctl_iscsi_list_params *)&(ci->data); softc = &cfiscsi_softc; sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate %d bytes for iSCSI session list", cilp->alloc_len); return; } sbuf_printf(sb, "<ctlislist>\n"); mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == NULL) continue; error = sbuf_printf(sb, "<connection id=\"%d\">" "<initiator>%s</initiator>" "<initiator_addr>%s</initiator_addr>" "<initiator_alias>%s</initiator_alias>" "<target>%s</target>" "<target_alias>%s</target_alias>" "<target_portal_group_tag>%u</target_portal_group_tag>" "<header_digest>%s</header_digest>" "<data_digest>%s</data_digest>" "<max_recv_data_segment_length>%d</max_recv_data_segment_length>" "<max_send_data_segment_length>%d</max_send_data_segment_length>" "<max_burst_length>%d</max_burst_length>" "<first_burst_length>%d</first_burst_length>" "<immediate_data>%d</immediate_data>" "<iser>%d</iser>" "<offload>%s</offload>" "</connection>\n", cs->cs_id, cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias, cs->cs_target->ct_name, cs->cs_target->ct_alias, cs->cs_target->ct_tag, cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None", cs->cs_conn->ic_data_crc32c ? "CRC32C" : "None", cs->cs_max_recv_data_segment_length, cs->cs_max_send_data_segment_length, cs->cs_max_burst_length, cs->cs_first_burst_length, cs->cs_immediate_data, cs->cs_conn->ic_iser, cs->cs_conn->ic_offload); if (error != 0) break; } mtx_unlock(&softc->lock); error = sbuf_printf(sb, "</ctlislist>\n"); if (error != 0) { sbuf_delete(sb); ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE; snprintf(ci->error_str, sizeof(ci->error_str), "Out of space, %d bytes is too small", cilp->alloc_len); return; } sbuf_finish(sb); error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1); if (error != 0) { sbuf_delete(sb); snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } cilp->fill_len = sbuf_len(sb) + 1; ci->status = CTL_ISCSI_OK; sbuf_delete(sb); } static void cfiscsi_ioctl_logout(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_logout_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; cilp = (struct ctl_iscsi_logout_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cilp->all == 0 && cs->cs_id != cilp->connection_id && strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate memory"); mtx_unlock(&softc->lock); return; } bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT; bhsam->bhsam_parameter3 = htons(10); cfiscsi_pdu_queue(response); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_terminate(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_terminate_params *citp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; citp = (struct ctl_iscsi_terminate_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, 
&softc->sessions, cs_next) { if (citp->all == 0 && cs->cs_id != citp->connection_id && strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { /* * Oh well. Just terminate the connection. */ } else { bhsam = (struct iscsi_bhs_asynchronous_message *) response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_0xffffffff = 0xffffffff; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_TERMINATES_SESSION; cfiscsi_pdu_queue(response); } cfiscsi_session_terminate(cs); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_limits(struct ctl_iscsi *ci) { struct ctl_iscsi_limits_params *cilp; struct icl_drv_limits idl; int error; cilp = (struct ctl_iscsi_limits_params *)&(ci->data); error = icl_limits(cilp->offload, false, &idl); if (error != 0) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_limits failed with error %d", __func__, error); return; } cilp->max_recv_data_segment_length = idl.idl_max_recv_data_segment_length; cilp->max_send_data_segment_length = idl.idl_max_send_data_segment_length; cilp->max_burst_length = idl.idl_max_burst_length; cilp->first_burst_length = idl.idl_first_burst_length; ci->status = CTL_ISCSI_OK; } #ifdef ICL_KERNEL_PROXY static void cfiscsi_ioctl_listen(struct ctl_iscsi *ci) { struct ctl_iscsi_listen_params *cilp; struct sockaddr *sa; int error; cilp = (struct ctl_iscsi_listen_params *)&(ci->data); if (cfiscsi_softc.listener == NULL) { CFISCSI_DEBUG("no listener"); snprintf(ci->error_str, sizeof(ci->error_str), "no listener"); ci->status = CTL_ISCSI_ERROR; return; } error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen); if (error != 0) { CFISCSI_DEBUG("getsockaddr, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed"); ci->status = CTL_ISCSI_ERROR; return; } error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain, cilp->socktype, cilp->protocol, sa, cilp->portal_id); if (error != 0) { free(sa, M_SONAME); CFISCSI_DEBUG("icl_listen_add, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "icl_listen_add failed, error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_accept(struct ctl_iscsi *ci) { struct ctl_iscsi_accept_params *ciap; struct cfiscsi_session *cs; int error; ciap = (struct ctl_iscsi_accept_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); for (;;) { TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_waiting_for_ctld) break; } if (cs != NULL) break; error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock); if (error != 0) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted"); ci->status = CTL_ISCSI_ERROR; return; } } mtx_unlock(&cfiscsi_softc.lock); cs->cs_waiting_for_ctld = false; cs->cs_login_phase = true; ciap->connection_id = cs->cs_id; ciap->portal_id = cs->cs_portal_id; ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len; error = copyout(cs->cs_initiator_sa, ciap->initiator_addr, cs->cs_initiator_sa->sa_len); if (error != 0) { snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; 
return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_send(struct ctl_iscsi *ci) { struct ctl_iscsi_send_params *cisp; struct cfiscsi_session *cs; struct icl_pdu *ip; size_t datalen; void *data; int error; cisp = (struct ctl_iscsi_send_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cisp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (cs->cs_login_phase == false) return (EBUSY); #endif if (cs->cs_terminating) { snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating"); ci->status = CTL_ISCSI_ERROR; return; } datalen = cisp->data_segment_len; /* * XXX */ //if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) { if (datalen > 65535) { snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } if (datalen > 0) { data = malloc(datalen, M_CFISCSI, M_WAITOK); error = copyin(cisp->data_segment, data, datalen); if (error != 0) { free(data, M_CFISCSI); snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error); ci->status = CTL_ISCSI_ERROR; return; } } ip = icl_pdu_new(cs->cs_conn, M_WAITOK); memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { icl_pdu_append_data(ip, data, datalen, M_WAITOK); free(data, M_CFISCSI); } CFISCSI_SESSION_LOCK(cs); icl_pdu_queue(ip); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_receive(struct ctl_iscsi *ci) { struct ctl_iscsi_receive_params *cirp; struct cfiscsi_session *cs; struct icl_pdu *ip; void *data; int error; cirp = (struct ctl_iscsi_receive_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cirp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (is->is_login_phase == false) return (EBUSY); #endif CFISCSI_SESSION_LOCK(cs); while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) { error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock); if (error != 0) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted by signal"); ci->status = CTL_ISCSI_ERROR; return; } } if (cs->cs_terminating) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "connection terminating"); ci->status = CTL_ISCSI_ERROR; return; } ip = cs->cs_login_pdu; cs->cs_login_pdu = NULL; CFISCSI_SESSION_UNLOCK(cs); if (ip->ip_data_len > cirp->data_segment_len) { icl_pdu_free(ip); snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, cirp->data_segment, ip->ip_data_len); free(data, M_CFISCSI); } icl_pdu_free(ip); ci->status = CTL_ISCSI_OK; } #endif /* !ICL_KERNEL_PROXY */ static void cfiscsi_ioctl_port_create(struct ctl_req *req) { struct cfiscsi_target *ct; struct ctl_port *port; const char *target, *alias, *val; struct scsi_vpd_id_descriptor *desc; int retval, len, idlen; uint16_t tag; target = dnvlist_get_string(req->args_nvl, "cfiscsi_target", 
NULL); alias = dnvlist_get_string(req->args_nvl, "cfiscsi_target_alias", NULL); val = dnvlist_get_string(req->args_nvl, "cfiscsi_portal_group_tag", NULL); if (target == NULL || val == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtoul(val, NULL, 0); ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "failed to create target \"%s\"", target); return; } if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" for portal group tag %u already exists", target, tag); cfiscsi_target_release(ct); return; } port = &ct->ct_port; // WAT if (ct->ct_state == CFISCSI_TARGET_STATE_DYING) goto done; port->frontend = &cfiscsi_frontend; port->port_type = CTL_PORT_ISCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; port->port_name = "iscsi"; port->physical_port = (int)tag; port->virtual_port = ct->ct_target_id; port->port_online = cfiscsi_online; port->port_offline = cfiscsi_offline; port->port_info = cfiscsi_info; port->onoff_arg = ct; port->fe_datamove = cfiscsi_datamove; port->fe_done = cfiscsi_done; port->targ_port = -1; port->options = nvlist_clone(req->args_nvl); /* Generate Port ID. */ idlen = strlen(target) + strlen(",t,0x0001") + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, tag); /* Generate Target ID. 
*/ idlen = strlen(target) + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; strlcpy(desc->identifier, target, idlen); retval = ctl_port_register(port); if (retval != 0) { free(port->port_devid, M_CTL); free(port->target_devid, M_CTL); cfiscsi_target_release(ct); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "ctl_port_register() failed with error %d", retval); return; } done: ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE; req->status = CTL_LUN_OK; req->result_nvl = nvlist_create(0); nvlist_add_number(req->result_nvl, "port_id", port->targ_port); } static void cfiscsi_ioctl_port_remove(struct ctl_req *req) { struct cfiscsi_target *ct; const char *target, *val; uint16_t tag; target = dnvlist_get_string(req->args_nvl, "cfiscsi_target", NULL); val = dnvlist_get_string(req->args_nvl, "cfiscsi_portal_group_tag", NULL); if (target == NULL || val == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } tag = strtoul(val, NULL, 0); ct = cfiscsi_target_find(&cfiscsi_softc, target, tag); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "can't find target \"%s\"", target); return; } ct->ct_state = CFISCSI_TARGET_STATE_DYING; ctl_port_offline(&ct->ct_port); cfiscsi_target_release(ct); cfiscsi_target_release(ct); req->status = CTL_LUN_OK; } static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_iscsi *ci; struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfiscsi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfiscsi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } if (cmd != CTL_ISCSI) return (ENOTTY); ci = (struct ctl_iscsi *)addr; switch (ci->type) { case CTL_ISCSI_HANDOFF: cfiscsi_ioctl_handoff(ci); break; case CTL_ISCSI_LIST: cfiscsi_ioctl_list(ci); break; case CTL_ISCSI_LOGOUT: cfiscsi_ioctl_logout(ci); break; case CTL_ISCSI_TERMINATE: cfiscsi_ioctl_terminate(ci); break; case CTL_ISCSI_LIMITS: cfiscsi_ioctl_limits(ci); break; #ifdef ICL_KERNEL_PROXY case CTL_ISCSI_LISTEN: cfiscsi_ioctl_listen(ci); break; case CTL_ISCSI_ACCEPT: cfiscsi_ioctl_accept(ci); break; case CTL_ISCSI_SEND: cfiscsi_ioctl_send(ci); break; case CTL_ISCSI_RECEIVE: cfiscsi_ioctl_receive(ci); break; #else case CTL_ISCSI_LISTEN: case CTL_ISCSI_ACCEPT: case CTL_ISCSI_SEND: case CTL_ISCSI_RECEIVE: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: CTL compiled without ICL_KERNEL_PROXY", __func__); break; #endif /* !ICL_KERNEL_PROXY */ default: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: invalid iSCSI request type %d", __func__, ci->type); break; } return (0); } static void cfiscsi_target_hold(struct cfiscsi_target *ct) { refcount_acquire(&ct->ct_refcount); } static void cfiscsi_target_release(struct cfiscsi_target *ct) { struct cfiscsi_softc *softc; softc = ct->ct_softc; 
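/* * Drop a reference; the final release removes the target from the softc list, deregisters its CTL port and frees it. */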
mtx_lock(&softc->lock); if (refcount_release(&ct->ct_refcount)) { TAILQ_REMOVE(&softc->targets, ct, ct_next); mtx_unlock(&softc->lock); if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) { ct->ct_state = CFISCSI_TARGET_STATE_INVALID; if (ctl_port_deregister(&ct->ct_port) != 0) printf("%s: ctl_port_deregister() failed\n", __func__); } free(ct, M_CFISCSI); return; } mtx_unlock(&softc->lock); } static struct cfiscsi_target * cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name, uint16_t tag) { struct cfiscsi_target *ct; mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); return (ct); } mtx_unlock(&softc->lock); return (NULL); } static struct cfiscsi_target * cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name, const char *alias, uint16_t tag) { struct cfiscsi_target *ct, *newct; if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN) return (NULL); newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO); mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (ct->ct_tag != tag || strcmp(name, ct->ct_name) != 0 || ct->ct_state == CFISCSI_TARGET_STATE_INVALID) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); free(newct, M_CFISCSI); return (ct); } strlcpy(newct->ct_name, name, sizeof(newct->ct_name)); if (alias != NULL) strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias)); newct->ct_tag = tag; refcount_init(&newct->ct_refcount, 1); newct->ct_softc = softc; if (TAILQ_EMPTY(&softc->targets)) softc->last_target_id = 0; newct->ct_target_id = ++softc->last_target_id; TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next); mtx_unlock(&softc->lock); return (newct); } static void cfiscsi_pdu_done(struct icl_pdu *ip, int error) { if (error != 0) ; // XXX: Do something on error? ((ctl_ref)ip->ip_prv0)(ip->ip_prv1, -1); } static void cfiscsi_datamove_in(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_data_in *bhsdi; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t len, expected_len, sg_len, buffer_offset; const char *sg_addr; icl_pdu_cb cb; int ctl_sg_count, error, i; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } /* * This is the offset within the current SCSI command; for the first * call to cfiscsi_datamove() it will be 0, and for subsequent ones * it will be the sum of lengths of previous ones. */ buffer_offset = io->scsiio.kern_rel_offset; /* * This is the transfer length expected by the initiator. It can be * different from the amount of data from the SCSI point of view. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); /* * If the transfer is outside of expected length -- we are done. 
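* This happens on reads where CTL wants to move more data than the initiator's ExpectedDataTransferLength allows; the excess cannot be sent, and the overflow is instead reported as a residual in the SCSI Response.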
*/ if (buffer_offset >= expected_len) { #if 0 CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, " "already sent the expected len", buffer_offset); #endif - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); return; } if (io->scsiio.kern_data_ref != NULL) cb = cfiscsi_pdu_done; else cb = NULL; i = 0; sg_addr = NULL; sg_len = 0; response = NULL; bhsdi = NULL; for (;;) { if (response == NULL) { response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); cfiscsi_session_terminate(cs); return; } bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN; bhsdi->bhsdi_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsdi->bhsdi_target_transfer_tag = 0xffffffff; bhsdi->bhsdi_datasn = htonl(PRIV_EXPDATASN(io)++); bhsdi->bhsdi_buffer_offset = htonl(buffer_offset); } KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count")); if (sg_len == 0) { sg_addr = ctl_sglist[i].addr; sg_len = ctl_sglist[i].len; KASSERT(sg_len > 0, ("sg_len <= 0")); } len = sg_len; /* * Truncate to maximum data segment length. */ KASSERT(response->ip_data_len < cs->cs_max_send_data_segment_length, ("ip_data_len %zd >= max_send_data_segment_length %d", response->ip_data_len, cs->cs_max_send_data_segment_length)); if (response->ip_data_len + len > cs->cs_max_send_data_segment_length) { len = cs->cs_max_send_data_segment_length - response->ip_data_len; KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } /* * Truncate to expected data transfer length. */ KASSERT(buffer_offset + response->ip_data_len < expected_len, ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len + len > expected_len) { CFISCSI_SESSION_DEBUG(cs, "truncating from %zd " "to expected data transfer length %zd", buffer_offset + response->ip_data_len + len, expected_len); len = expected_len - (buffer_offset + response->ip_data_len); KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT | (cb ? ICL_NOCOPY : 0)); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); icl_pdu_free(response); ctl_set_busy(&io->scsiio); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); cfiscsi_session_terminate(cs); return; } sg_addr += len; sg_len -= len; io->scsiio.kern_data_resid -= len; KASSERT(buffer_offset + response->ip_data_len <= expected_len, ("buffer_offset %zd + ip_data_len %zd > expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len == expected_len) { /* * Already have the amount of data the initiator wanted. */ break; } if (sg_len == 0) { /* * End of scatter-gather segment; * proceed to the next one... */ if (i == ctl_sg_count - 1) { /* * ... unless this was the last one. */ break; } i++; } if (response->ip_data_len == cs->cs_max_send_data_segment_length) { /* * Can't stuff more data into the current PDU; * queue it. Note that's not enough to check * for kern_data_resid == 0 instead; there * may be several Data-In PDUs for the final * call to cfiscsi_datamove(), and we want * to set the F flag only on the last of them. 
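* For example, a 128 kB datamove with an 8 kB send limit produces sixteen Data-In PDUs; the first fifteen are queued here, and only the last one, queued below, may carry the F flag (and possibly S with the piggybacked status).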
*/ buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { buffer_offset -= response->ip_data_len; break; } if (cb != NULL) { response->ip_prv0 = io->scsiio.kern_data_ref; response->ip_prv1 = io->scsiio.kern_data_arg; io->scsiio.kern_data_ref(io->scsiio.kern_data_arg, 1); } cfiscsi_pdu_queue_cb(response, cb); response = NULL; bhsdi = NULL; } } if (response != NULL) { buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_F; if (io->io_hdr.status == CTL_SUCCESS) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_S; if (io->scsiio.kern_total_len < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhsdi->bhsdi_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - io->scsiio.kern_total_len); } else if (io->scsiio.kern_total_len > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhsdi->bhsdi_residual_count = htonl(io->scsiio.kern_total_len - ntohl(bhssc->bhssc_expected_data_transfer_length)); } bhsdi->bhsdi_status = io->scsiio.scsi_status; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } } KASSERT(response->ip_data_len > 0, ("sending empty Data-In")); if (cb != NULL) { response->ip_prv0 = io->scsiio.kern_data_ref; response->ip_prv1 = io->scsiio.kern_data_arg; io->scsiio.kern_data_ref(io->scsiio.kern_data_arg, 1); } cfiscsi_pdu_queue_cb(response, cb); } - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); } static void cfiscsi_datamove_out(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_r2t *bhsr2t; struct cfiscsi_data_wait *cdw; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; uint32_t expected_len, datamove_len, r2t_off, r2t_len; uint32_t target_transfer_tag; bool done; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); /* * Complete write underflow. Not a single byte to read. Return. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); if (io->scsiio.kern_rel_offset >= expected_len) { - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); return; } + datamove_len = MIN(io->scsiio.kern_data_len, expected_len - io->scsiio.kern_rel_offset); target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); if (target_transfer_tag == 0xffffffff) { target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); } cdw = cfiscsi_data_wait_new(cs, io, bhssc->bhssc_initiator_task_tag, &target_transfer_tag); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); cfiscsi_session_terminate(cs); return; } #if 0 CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator " "task tag 0x%x, target transfer tag 0x%x", bhssc->bhssc_initiator_task_tag, target_transfer_tag); #endif cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = target_transfer_tag; cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag; cdw->cdw_r2t_end = datamove_len; cdw->cdw_datasn = 0; /* Set initial data pointer for the CDW respecting ext_data_filled. 
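* A previously completed Data-Out burst may already have filled the first ext_data_filled bytes (cfiscsi_pdu_handle_data_out() re-enters this function to solicit the next burst), so walk that far into the S/G list before arming the wait entry.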
*/ if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = datamove_len; } cdw->cdw_sg_index = 0; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; r2t_off = io->scsiio.ext_data_filled; while (r2t_off > 0) { if (r2t_off >= cdw->cdw_sg_len) { r2t_off -= cdw->cdw_sg_len; cdw->cdw_sg_index++; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; continue; } cdw->cdw_sg_addr += r2t_off; cdw->cdw_sg_len -= r2t_off; r2t_off = 0; } if (cs->cs_immediate_data && io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled < icl_pdu_data_segment_length(request)) { done = cfiscsi_handle_data_segment(request, cdw); if (done) { cfiscsi_data_wait_free(cs, cdw); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); return; } } r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled; r2t_len = MIN(datamove_len - io->scsiio.ext_data_filled, cs->cs_max_burst_length); cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len; CFISCSI_SESSION_LOCK(cs); TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * XXX: We should limit the number of outstanding R2T PDUs * per task to MaxOutstandingR2T. */ response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); cfiscsi_session_terminate(cs); return; } io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T; bhsr2t->bhsr2t_flags = 0x80; bhsr2t->bhsr2t_lun = bhssc->bhssc_lun; bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag; /* * XXX: Here we assume that cfiscsi_datamove() won't ever * be running concurrently on several CPUs for a given * command. */ bhsr2t->bhsr2t_r2tsn = htonl(PRIV_R2TSN(io)++); /* * This is the offset within the current SCSI command; * i.e. for the first call of datamove(), it will be 0, * and for subsequent ones it will be the sum of lengths * of previous ones. * * The ext_data_filled is to account for unsolicited * (immediate) data that might have already arrived. */ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off); /* * This is the total length (sum of S/G lengths) this call * to cfiscsi_datamove() is supposed to handle, limited by * MaxBurstLength. */ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len); cfiscsi_pdu_queue(response); } static void cfiscsi_datamove(union ctl_io *io) { if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) cfiscsi_datamove_in(io); else { /* We hadn't received anything during this datamove yet. 
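* ext_data_filled only counts bytes received within the current datamove, so it must start from zero before a new Data-Out sequence is solicited.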
*/ io->scsiio.ext_data_filled = 0; cfiscsi_datamove_out(io); } } static void cfiscsi_scsi_command_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_scsi_response *bhssr; #ifdef DIAGNOSTIC struct cfiscsi_data_wait *cdw; #endif struct cfiscsi_session *cs; uint16_t sense_length; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode)); //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); #ifdef DIAGNOSTIC CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) KASSERT(bhssc->bhssc_initiator_task_tag != cdw->cdw_initiator_task_tag, ("dangling cdw")); CFISCSI_SESSION_UNLOCK(cs); #endif /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); icl_pdu_free(request); return; } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE; bhssr->bhssr_flags = 0x80; /* * XXX: We don't deal with bidirectional under/overflows; * does anything actually support those? */ if (io->scsiio.kern_total_len < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhssr->bhssr_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - io->scsiio.kern_total_len); //CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } else if (io->scsiio.kern_total_len > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhssr->bhssr_residual_count = htonl(io->scsiio.kern_total_len - ntohl(bhssc->bhssc_expected_data_transfer_length)); //CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED; bhssr->bhssr_status = io->scsiio.scsi_status; bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhssr->bhssr_expdatasn = htonl(PRIV_EXPDATASN(io)); if (io->scsiio.sense_len > 0) { #if 0 CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data", io->scsiio.sense_len); #endif sense_length = htons(io->scsiio.sense_len); icl_pdu_append_data(response, &sense_length, sizeof(sense_length), M_WAITOK); icl_pdu_append_data(response, &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK); } ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_task_management_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct cfiscsi_data_wait *cdw, *tmpcdw; struct cfiscsi_session *cs, *tcs; struct cfiscsi_softc *softc; int cold_reset = 0; request = PRIV_REQUEST(io); cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_TASK_REQUEST, ("replying to wrong opcode 0x%x", bhstmr->bhstmr_opcode)); #if 0 CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x; referenced task 
tag 0x%x", bhstmr->bhstmr_initiator_task_tag, bhstmr->bhstmr_referenced_task_tag); #endif if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_ABORT_TASK) { /* * Make sure we no longer wait for Data-Out for this command. */ CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH_SAFE(cdw, &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) { if (bhstmr->bhstmr_referenced_task_tag != cdw->cdw_initiator_task_tag) continue; #if 0 CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task " "tag 0x%x", bhstmr->bhstmr_initiator_task_tag); #endif TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 43; - cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); + ctl_datamove_done(cdw->cdw_ctl_io, false); cfiscsi_data_wait_free(cs, cdw); } CFISCSI_SESSION_UNLOCK(cs); } if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_TARGET_COLD_RESET && io->io_hdr.status == CTL_SUCCESS) cold_reset = 1; response = cfiscsi_pdu_new_response(request, M_WAITOK); bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_SUCCEEDED; break; case CTL_TASK_LUN_DOES_NOT_EXIST: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_LUN_DOES_NOT_EXIST; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: default: bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; break; } memcpy(bhstmr2->bhstmr_additional_reponse_information, io->taskio.task_resp, sizeof(io->taskio.task_resp)); bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); if (cold_reset) { softc = cs->cs_target->ct_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(tcs, &softc->sessions, cs_next) { if (tcs->cs_target == cs->cs_target) cfiscsi_session_terminate(tcs); } mtx_unlock(&softc->lock); } } static void cfiscsi_done(union ctl_io *io) { struct icl_pdu *request; struct cfiscsi_session *cs; KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. */ cs = PRIV_REQUEST(io); cs->cs_tasks_aborted = true; refcount_release(&cs->cs_outstanding_ctl_pdus); wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus)); ctl_free_io(io); return; } request = PRIV_REQUEST(io); cs = PDU_SESSION(request); switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_scsi_command_done(io); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_task_management_done(io); break; default: panic("cfiscsi_done called with wrong opcode 0x%x", request->ip_bhs->bhs_opcode); } refcount_release(&cs->cs_outstanding_ctl_pdus); } diff --git a/sys/cam/ctl/ctl_io.h b/sys/cam/ctl/ctl_io.h index 52ba98f3a9bd..60f8aef82d02 100644 --- a/sys/cam/ctl/ctl_io.h +++ b/sys/cam/ctl/ctl_io.h @@ -1,602 +1,602 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_io.h#5 $ * $FreeBSD$ */ /* * CAM Target Layer data movement structures/interface. * * Author: Ken Merry */ #ifndef _CTL_IO_H_ #define _CTL_IO_H_ #define CTL_MAX_CDBLEN 32 /* * Uncomment this next line to enable printing out times for I/Os * that take longer than CTL_TIME_IO_SECS seconds to get to the datamove * and/or done stage. */ #define CTL_TIME_IO #ifdef CTL_TIME_IO #define CTL_TIME_IO_DEFAULT_SECS 90 #endif /* * Uncomment this next line to enable the CTL I/O delay feature. You * can delay I/O at two different points -- datamove and done. This is * useful for diagnosing abort conditions (for hosts that send an abort on a * timeout), and for determining how long a host's timeout is. */ //#define CTL_IO_DELAY typedef enum { CTL_STATUS_NONE, /* No status */ CTL_SUCCESS, /* Transaction completed successfully */ CTL_CMD_TIMEOUT, /* Command timed out, shouldn't happen here */ CTL_SEL_TIMEOUT, /* Selection timeout, shouldn't happen here */ CTL_ERROR, /* General CTL error XXX expand on this? */ CTL_SCSI_ERROR, /* SCSI error, look at status byte/sense data */ CTL_CMD_ABORTED, /* Command aborted, don't return status */ CTL_STATUS_MASK = 0xfff,/* Mask off any status flags */ CTL_AUTOSENSE = 0x1000 /* Autosense performed */ } ctl_io_status; /* * WARNING: Keep the data in/out/none flags where they are. They're used * in conjunction with ctl_cmd_flags. See comment above ctl_cmd_flags * definition in ctl_private.h. 
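 * In particular, CTL_FLAG_DATA_IN/OUT/NONE occupy the low two bits, so a
 * direction check is simply (flags & CTL_FLAG_DATA_MASK).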
*/ typedef enum { CTL_FLAG_NONE = 0x00000000, /* no flags */ CTL_FLAG_DATA_IN = 0x00000001, /* DATA IN */ CTL_FLAG_DATA_OUT = 0x00000002, /* DATA OUT */ CTL_FLAG_DATA_NONE = 0x00000003, /* no data */ CTL_FLAG_DATA_MASK = 0x00000003, CTL_FLAG_DO_AUTOSENSE = 0x00000020, /* grab sense info */ CTL_FLAG_USER_REQ = 0x00000040, /* request came from userland */ CTL_FLAG_ALLOCATED = 0x00000100, /* data space allocated */ CTL_FLAG_ABORT_STATUS = 0x00000400, /* return TASK ABORTED status */ CTL_FLAG_ABORT = 0x00000800, /* this I/O should be aborted */ CTL_FLAG_DMA_INPROG = 0x00001000, /* DMA in progress */ CTL_FLAG_DELAY_DONE = 0x00004000, /* delay injection done */ CTL_FLAG_INT_COPY = 0x00008000, /* internal copy, no done call*/ CTL_FLAG_SENT_2OTHER_SC = 0x00010000, CTL_FLAG_FROM_OTHER_SC = 0x00020000, CTL_FLAG_IS_WAS_ON_RTR = 0x00040000, /* Don't rerun cmd on failover*/ CTL_FLAG_BUS_ADDR = 0x00080000, /* ctl_sglist contains BUS addresses, not virtual ones*/ CTL_FLAG_IO_CONT = 0x00100000, /* Continue I/O instead of completing */ #if 0 CTL_FLAG_ALREADY_DONE = 0x00200000 /* I/O already completed */ #endif CTL_FLAG_NO_DATAMOVE = 0x00400000, CTL_FLAG_DMA_QUEUED = 0x00800000, /* DMA queued but not started*/ CTL_FLAG_STATUS_QUEUED = 0x01000000, /* Status queued but not sent*/ CTL_FLAG_FAILOVER = 0x04000000, /* Killed by a failover */ CTL_FLAG_IO_ACTIVE = 0x08000000, /* I/O active on this SC */ CTL_FLAG_STATUS_SENT = 0x10000000, /* Status sent by datamove */ CTL_FLAG_SERSEQ_DONE = 0x20000000 /* All storage I/O started */ } ctl_io_flags; struct ctl_lba_len { uint64_t lba; uint32_t len; }; struct ctl_lba_len_flags { uint64_t lba; uint32_t len; uint32_t flags; #define CTL_LLF_FUA 0x04000000 #define CTL_LLF_DPO 0x08000000 #define CTL_LLF_READ 0x10000000 #define CTL_LLF_WRITE 0x20000000 #define CTL_LLF_VERIFY 0x40000000 #define CTL_LLF_COMPARE 0x80000000 }; struct ctl_ptr_len_flags { uint8_t *ptr; uint32_t len; uint32_t flags; }; union ctl_priv { uint8_t bytes[sizeof(uint64_t) * 2]; uint64_t integer; uint64_t integers[2]; void *ptr; void *ptrs[2]; }; /* * Number of CTL private areas. */ #define CTL_NUM_PRIV 6 /* * Which private area are we using for a particular piece of data? */ #define CTL_PRIV_LUN 0 /* CTL LUN pointer goes here */ #define CTL_PRIV_LBA_LEN 1 /* Decoded LBA/len for read/write*/ #define CTL_PRIV_MODEPAGE 1 /* Modepage info for config write */ #define CTL_PRIV_BACKEND 2 /* Reserved for block, RAIDCore */ #define CTL_PRIV_BACKEND_LUN 3 /* Backend LUN pointer */ #define CTL_PRIV_FRONTEND 4 /* Frontend storage */ #define CTL_PRIV_FRONTEND2 5 /* Another frontend storage */ #define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0]) #define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1]) #define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0]) #define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \ ctl_ports[(io)->io_hdr.nexus.targ_port]) /* * These are used only on Originating SC in XFER mode, where requests don't * ever reach backends, so we can reuse backend's private storage. 
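 * CTL_RSGL()/CTL_LSGL() below keep the remote and local S/G lists in the
 * two CTL_PRIV_BACKEND pointer slots of the I/O's private area.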
*/ #define CTL_RSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[0]) #define CTL_LSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[1]) #define CTL_RSGLT(io) ((struct ctl_sg_entry *)CTL_RSGL(io)) #define CTL_LSGLT(io) ((struct ctl_sg_entry *)CTL_LSGL(io)) #define CTL_INVALID_PORTNAME 0xFF #define CTL_UNMAPPED_IID 0xFF struct ctl_sg_entry { void *addr; size_t len; }; typedef enum { CTL_IO_NONE, CTL_IO_SCSI, CTL_IO_TASK, } ctl_io_type; struct ctl_nexus { uint32_t initid; /* Initiator ID */ uint32_t targ_port; /* Target port, filled in by PORT */ uint32_t targ_lun; /* Destination lun */ uint32_t targ_mapped_lun; /* Destination lun CTL-wide */ }; typedef enum { CTL_MSG_SERIALIZE, CTL_MSG_R2R, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, CTL_MSG_MANAGE_TASKS, CTL_MSG_PERS_ACTION, CTL_MSG_DATAMOVE, CTL_MSG_DATAMOVE_DONE, CTL_MSG_UA, /* Set/clear UA on secondary. */ CTL_MSG_PORT_SYNC, /* Information about port. */ CTL_MSG_LUN_SYNC, /* Information about LUN. */ CTL_MSG_IID_SYNC, /* Information about initiator. */ CTL_MSG_LOGIN, /* Information about HA peer. */ CTL_MSG_MODE_SYNC, /* Mode page current content. */ CTL_MSG_FAILOVER /* Fake, never sent though the wire */ } ctl_msg_type; struct ctl_scsiio; struct ctl_io_hdr { uint32_t version; /* interface version XXX */ ctl_io_type io_type; /* task I/O, SCSI I/O, etc. */ ctl_msg_type msg_type; struct ctl_nexus nexus; /* Initiator, port, target, lun */ uint32_t iid_indx; /* the index into the iid mapping */ uint32_t flags; /* transaction flags */ uint32_t status; /* transaction status */ uint32_t port_status; /* trans status, set by PORT, 0 = good*/ uint32_t timeout; /* timeout in ms */ uint32_t retries; /* retry count */ #ifdef CTL_IO_DELAY struct callout delay_callout; #endif /* CTL_IO_DELAY */ #ifdef CTL_TIME_IO time_t start_time; /* I/O start time */ struct bintime start_bt; /* Timer start ticks */ struct bintime dma_start_bt; /* DMA start ticks */ struct bintime dma_bt; /* DMA total ticks */ #endif /* CTL_TIME_IO */ uint32_t num_dmas; /* Number of DMAs */ union ctl_io *remote_io; /* I/O counterpart on remote HA side */ union ctl_io *blocker; /* I/O blocking this one */ void *pool; /* I/O pool */ union ctl_priv ctl_private[CTL_NUM_PRIV];/* CTL private area */ TAILQ_HEAD(, ctl_io_hdr) blocked_queue; /* I/Os blocked by this one */ STAILQ_ENTRY(ctl_io_hdr) links; /* linked list pointer */ LIST_ENTRY(ctl_io_hdr) ooa_links; /* ooa_queue links */ TAILQ_ENTRY(ctl_io_hdr) blocked_links; /* blocked_queue links */ }; typedef enum { CTL_TAG_UNTAGGED, CTL_TAG_SIMPLE, CTL_TAG_ORDERED, CTL_TAG_HEAD_OF_QUEUE, CTL_TAG_ACA } ctl_tag_type; union ctl_io; typedef void (*ctl_ref)(void *arg, int diff); /* * SCSI passthrough I/O structure for the CAM Target Layer. Note * that some of these fields are here for completeness, but they aren't * used in the CTL implementation. e.g., timeout and retries won't be * used. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_scsiio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ /* * The ext_* fields are generally intended for frontend use; CTL itself * doesn't modify or use them. */ uint32_t ext_sg_entries; /* 0 = no S/G list, > 0 = num entries */ uint8_t *ext_data_ptr; /* data buffer or S/G list */ uint32_t ext_data_len; /* Data transfer length */ uint32_t ext_data_filled; /* Amount of data filled so far */ /* * The number of scatter/gather entries in the list pointed to * by kern_data_ptr. 0 means there is no list, just a data pointer. 
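 * In other words: with kern_sg_entries == 0, kern_data_ptr addresses one
 * contiguous buffer of kern_data_len bytes; otherwise it points to an
 * array of struct ctl_sg_entry.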
*/ uint32_t kern_sg_entries; uint32_t rem_sg_entries; /* Unused. */ /* * The data pointer or a pointer to the scatter/gather list. */ uint8_t *kern_data_ptr; /* * Length of the data buffer or scatter/gather list. It's also * the length of this particular piece of the data transfer, * ie. number of bytes expected to be transferred by the current * invocation of frontend's datamove() callback. It's always * less than or equal to kern_total_len. */ uint32_t kern_data_len; /* * Total length of data to be transferred during this particular * SCSI command, as decoded from SCSI CDB. */ uint32_t kern_total_len; /* * Amount of data left after the current data transfer. */ uint32_t kern_data_resid; /* * Byte offset of this transfer, equal to the amount of data * already transferred for this SCSI command during previous * datamove() invocations. */ uint32_t kern_rel_offset; struct scsi_sense_data sense_data; /* sense data */ uint8_t sense_len; /* Returned sense length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_residual; /* Unused. */ uint8_t priority; /* Command priority */ uint32_t residual; /* Unused */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, head of queue,etc.*/ uint8_t cdb_len; /* CDB length */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ - int (*be_move_done)(union ctl_io *io); /* called by fe */ + int (*be_move_done)(union ctl_io *io, bool samethr); /* called by fe */ int (*io_cont)(union ctl_io *io); /* to continue processing */ ctl_ref kern_data_ref; /* Method to reference/release data */ void *kern_data_arg; /* Opaque argument for kern_data_ref() */ }; typedef enum { CTL_TASK_ABORT_TASK, CTL_TASK_ABORT_TASK_SET, CTL_TASK_CLEAR_ACA, CTL_TASK_CLEAR_TASK_SET, CTL_TASK_I_T_NEXUS_RESET, CTL_TASK_LUN_RESET, CTL_TASK_TARGET_RESET, CTL_TASK_BUS_RESET, CTL_TASK_PORT_LOGIN, CTL_TASK_PORT_LOGOUT, CTL_TASK_QUERY_TASK, CTL_TASK_QUERY_TASK_SET, CTL_TASK_QUERY_ASYNC_EVENT } ctl_task_type; typedef enum { CTL_TASK_FUNCTION_COMPLETE, CTL_TASK_FUNCTION_SUCCEEDED, CTL_TASK_FUNCTION_REJECTED, CTL_TASK_LUN_DOES_NOT_EXIST, CTL_TASK_FUNCTION_NOT_SUPPORTED } ctl_task_status; /* * Task management I/O structure. Aborts, bus resets, etc., are sent using * this structure. * * Note: Make sure the io_hdr is *always* the first element in this * structure. */ struct ctl_taskio { struct ctl_io_hdr io_hdr; /* common to all I/O types */ ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t task_status; /* Complete, Succeeded, etc. */ uint8_t task_resp[3];/* Response information */ }; /* * HA link messages. */ #define CTL_HA_VERSION 3 /* * Used for CTL_MSG_LOGIN. */ struct ctl_ha_msg_login { ctl_msg_type msg_type; int version; int ha_mode; int ha_id; int max_luns; int max_ports; int max_init_per_port; }; typedef enum { CTL_PR_REG_KEY, CTL_PR_UNREG_KEY, CTL_PR_PREEMPT, CTL_PR_CLEAR, CTL_PR_RESERVE, CTL_PR_RELEASE } ctl_pr_action; /* * The PR info is specifically for sending Persistent Reserve actions * to the other SC which it must also act on. * * Note: Make sure the io_hdr is *always* the first element in this * structure. 
*/ struct ctl_pr_info { ctl_pr_action action; uint8_t sa_res_key[8]; uint8_t res_type; uint32_t residx; }; struct ctl_ha_msg_hdr { ctl_msg_type msg_type; uint32_t status; /* transaction status */ union ctl_io *original_sc; union ctl_io *serializing_sc; struct ctl_nexus nexus; /* Initiator, port, target, lun */ }; #define CTL_HA_MAX_SG_ENTRIES 16 #define CTL_HA_DATAMOVE_SEGMENT 131072 /* * Used for CTL_MSG_PERS_ACTION. */ struct ctl_ha_msg_pr { struct ctl_ha_msg_hdr hdr; struct ctl_pr_info pr_info; }; /* * Used for CTL_MSG_UA. */ struct ctl_ha_msg_ua { struct ctl_ha_msg_hdr hdr; int ua_all; int ua_set; int ua_type; uint8_t ua_info[8]; }; /* * The S/G handling here is a little different than the standard ctl_scsiio * structure, because we can't pass data by reference in between controllers. * The S/G list in the ctl_scsiio struct is normally passed in the * kern_data_ptr field. So kern_sg_entries here will always be non-zero, * even if there is only one entry. * * Used for CTL_MSG_DATAMOVE. */ struct ctl_ha_msg_dt { struct ctl_ha_msg_hdr hdr; ctl_io_flags flags; /* Only I/O flags are used here */ uint32_t sg_sequence; /* S/G portion number */ uint8_t sg_last; /* last S/G batch = 1 */ uint32_t sent_sg_entries; /* previous S/G count */ uint32_t cur_sg_entries; /* current S/G entries */ uint32_t kern_sg_entries; /* total S/G entries */ uint32_t kern_data_len; /* Length of this S/G list */ uint32_t kern_total_len; /* Total length of this transaction */ uint32_t kern_data_resid; /* Length left to transfer after this*/ uint32_t kern_rel_offset; /* Byte Offset of this transfer */ struct ctl_sg_entry sg_list[CTL_HA_MAX_SG_ENTRIES]; }; /* * Used for CTL_MSG_SERIALIZE, CTL_MSG_FINISH_IO, CTL_MSG_BAD_JUJU, * and CTL_MSG_DATAMOVE_DONE. */ struct ctl_ha_msg_scsi { struct ctl_ha_msg_hdr hdr; uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ uint8_t cdb[CTL_MAX_CDBLEN]; /* CDB */ uint8_t cdb_len; /* CDB length */ uint8_t scsi_status; /* SCSI status byte */ uint8_t sense_len; /* Returned sense length */ uint8_t priority; /* Command priority */ uint32_t port_status; /* trans status, set by FETD, 0 = good*/ uint32_t kern_data_resid; /* for DATAMOVE_DONE */ struct scsi_sense_data sense_data; /* sense data */ }; /* * Used for CTL_MSG_MANAGE_TASKS. */ struct ctl_ha_msg_task { struct ctl_ha_msg_hdr hdr; ctl_task_type task_action; /* Target Reset, Abort, etc. */ uint32_t tag_num; /* tag number */ ctl_tag_type tag_type; /* simple, ordered, etc. */ }; /* * Used for CTL_MSG_PORT_SYNC. */ struct ctl_ha_msg_port { struct ctl_ha_msg_hdr hdr; int port_type; int physical_port; int virtual_port; int status; int name_len; int lun_map_len; int port_devid_len; int target_devid_len; int init_devid_len; uint8_t data[]; }; /* * Used for CTL_MSG_LUN_SYNC. */ struct ctl_ha_msg_lun { struct ctl_ha_msg_hdr hdr; int flags; unsigned int pr_generation; uint32_t pr_res_idx; uint8_t pr_res_type; int lun_devid_len; int pr_key_count; uint8_t data[]; }; struct ctl_ha_msg_lun_pr_key { uint32_t pr_iid; uint64_t pr_key; }; /* * Used for CTL_MSG_IID_SYNC. */ struct ctl_ha_msg_iid { struct ctl_ha_msg_hdr hdr; int in_use; int name_len; uint64_t wwpn; uint8_t data[]; }; /* * Used for CTL_MSG_MODE_SYNC. 
*/ struct ctl_ha_msg_mode { struct ctl_ha_msg_hdr hdr; uint8_t page_code; uint8_t subpage; uint16_t page_len; uint8_t data[]; }; union ctl_ha_msg { struct ctl_ha_msg_hdr hdr; struct ctl_ha_msg_task task; struct ctl_ha_msg_scsi scsi; struct ctl_ha_msg_dt dt; struct ctl_ha_msg_pr pr; struct ctl_ha_msg_ua ua; struct ctl_ha_msg_port port; struct ctl_ha_msg_lun lun; struct ctl_ha_msg_iid iid; struct ctl_ha_msg_login login; struct ctl_ha_msg_mode mode; }; struct ctl_prio { struct ctl_io_hdr io_hdr; struct ctl_ha_msg_pr pr_msg; }; union ctl_io { struct ctl_io_hdr io_hdr; /* common to all I/O types */ struct ctl_scsiio scsiio; /* Normal SCSI commands */ struct ctl_taskio taskio; /* SCSI task management/reset */ struct ctl_prio presio; /* update per. res info on other SC */ }; #ifdef _KERNEL union ctl_io *ctl_alloc_io(void *pool_ref); union ctl_io *ctl_alloc_io_nowait(void *pool_ref); void ctl_free_io(union ctl_io *io); void ctl_zero_io(union ctl_io *io); #endif /* _KERNEL */ #endif /* _CTL_IO_H_ */ /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl_tpc_local.c b/sys/cam/ctl/ctl_tpc_local.c index c2d628033037..ba6deada86a3 100644 --- a/sys/cam/ctl/ctl_tpc_local.c +++ b/sys/cam/ctl/ctl_tpc_local.c @@ -1,331 +1,331 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2014 Alexander Motin * Copyright (c) 2004, 2005 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct tpcl_softc { struct ctl_port port; int cur_tag_num; }; static struct tpcl_softc tpcl_softc; static int tpcl_init(void); static int tpcl_shutdown(void); static void tpcl_datamove(union ctl_io *io); static void tpcl_done(union ctl_io *io); static struct ctl_frontend tpcl_frontend = { .name = "tpc", .init = tpcl_init, .shutdown = tpcl_shutdown, }; CTL_FRONTEND_DECLARE(ctltpc, tpcl_frontend); static int tpcl_init(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port; struct scsi_transportid_spi *tid; int error, len; memset(tsoftc, 0, sizeof(*tsoftc)); port = &tsoftc->port; port->frontend = &tpcl_frontend; port->port_type = CTL_PORT_INTERNAL; port->num_requested_ctl_io = 100; port->port_name = "tpc"; port->fe_datamove = tpcl_datamove; port->fe_done = tpcl_done; port->targ_port = -1; port->max_initiators = 1; if ((error = ctl_port_register(port)) != 0) { printf("%s: tpc port registration failed\n", __func__); return (error); } len = sizeof(struct scsi_transportid_spi); port->init_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->init_devid->len = len; tid = (struct scsi_transportid_spi *)port->init_devid->data; tid->format_protocol = SCSI_TRN_SPI_FORMAT_DEFAULT | SCSI_PROTO_SPI; scsi_ulto2b(0, tid->scsi_addr); scsi_ulto2b(port->targ_port, tid->rel_trgt_port_id); ctl_port_online(port); return (0); } static int tpcl_shutdown(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port = &tsoftc->port; int error; ctl_port_offline(port); if ((error = ctl_port_deregister(port)) != 0) printf("%s: tpc port deregistration failed\n", __func__); return (error); } static void tpcl_datamove(union ctl_io *io) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy; int kern_watermark, ext_watermark; struct ctl_scsiio *ctsio; int i, j; CTL_DEBUG_PRINT(("%s\n", __func__)); ctsio = &io->scsiio; /* * If this is the case, we're probably doing a BBR read and don't * actually need to transfer the data. This will effectively * bit-bucket the data. */ if (ctsio->ext_data_ptr == NULL) goto bailout; /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. 
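 * The kernel-side buffer gets the same treatment below, so the copy loop
 * only ever has to walk two S/G lists.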
*/ if (ctsio->ext_sg_entries > 0) { int len_seen; ext_sglist = (struct ctl_sg_entry *)ctsio->ext_data_ptr; ext_sg_entries = ctsio->ext_sg_entries; ext_sg_start = 0; ext_offset = 0; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = min(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, kern_ptr, ext_ptr)); memcpy(ext_ptr, kern_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, ext_ptr, kern_ptr)); memcpy(kern_ptr, ext_ptr, len_to_copy); } ctsio->ext_data_filled += len_to_copy; ctsio->kern_data_resid -= len_to_copy; ext_watermark += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } kern_watermark += len_to_copy; if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n", __func__, ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n", __func__, ctsio->ext_data_len, ctsio->kern_data_len)); bailout: - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); } static void tpcl_done(union ctl_io *io) { tpc_done(io); } uint64_t tpcl_resolve(struct ctl_softc *softc, int init_port, struct scsi_ec_cscd *cscd, uint32_t *ss, uint32_t *ps, uint32_t *pso) { struct scsi_ec_cscd_id *cscdid; struct ctl_port *port; struct ctl_lun *lun; uint64_t lunid = UINT64_MAX; if (cscd->type_code != EC_CSCD_ID || (cscd->luidt_pdt & EC_LUIDT_MASK) != EC_LUIDT_LUN || (cscd->luidt_pdt & EC_NUL) != 0) return (lunid); cscdid = (struct scsi_ec_cscd_id *)cscd; mtx_lock(&softc->ctl_lock); if (init_port >= 0) port = softc->ctl_ports[init_port]; else port = NULL; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; if (lun->lun_devid == NULL) continue; if (scsi_devid_match(lun->lun_devid->data, lun->lun_devid->len, &cscdid->codeset, cscdid->length + 4) == 0) { lunid = lun->lun; if (ss) *ss = lun->be_lun->blocksize; if (ps) *ps = lun->be_lun->blocksize << lun->be_lun->pblockexp; if (pso) *pso = lun->be_lun->blocksize * lun->be_lun->pblockoff; break; } } 
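	/* If no LUN matched the CSCD descriptor, lunid is still UINT64_MAX. */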
mtx_unlock(&softc->ctl_lock); return (lunid); }; union ctl_io * tpcl_alloc_io(void) { struct tpcl_softc *tsoftc = &tpcl_softc; return (ctl_alloc_io(tsoftc->port.ctl_pool_ref)); }; int tpcl_queue(union ctl_io *io, uint64_t lun) { struct tpcl_softc *tsoftc = &tpcl_softc; io->io_hdr.nexus.initid = 0; io->io_hdr.nexus.targ_port = tsoftc->port.targ_port; io->io_hdr.nexus.targ_lun = lun; io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1); io->scsiio.ext_data_filled = 0; return (ctl_queue(io)); } diff --git a/sys/cam/ctl/scsi_ctl.c b/sys/cam/ctl/scsi_ctl.c index 646b3fe07053..d3023f9a6c8c 100644 --- a/sys/cam/ctl/scsi_ctl.c +++ b/sys/cam/ctl/scsi_ctl.c @@ -1,1995 +1,1994 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, 2009 Silicon Graphics International Corp. * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/scsi_ctl.c#4 $ */ /* * Peripheral driver interface between CAM and CTL (CAM Target Layer). 
* * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctlfe_softc { struct ctl_port port; path_id_t path_id; target_id_t target_id; uint32_t hba_misc; u_int maxio; struct cam_sim *sim; char port_name[DEV_IDLEN]; struct mtx lun_softc_mtx; STAILQ_HEAD(, ctlfe_lun_softc) lun_softc_list; STAILQ_ENTRY(ctlfe_softc) links; }; STAILQ_HEAD(, ctlfe_softc) ctlfe_softc_list; struct mtx ctlfe_list_mtx; static char ctlfe_mtx_desc[] = "ctlfelist"; typedef enum { CTLFE_LUN_NONE = 0x00, CTLFE_LUN_WILDCARD = 0x01 } ctlfe_lun_flags; struct ctlfe_lun_softc { struct ctlfe_softc *parent_softc; struct cam_periph *periph; ctlfe_lun_flags flags; int ctios_sent; /* Number of active CTIOs */ int refcount; /* Number of active xpt_action() */ int atios_alloced; /* Number of ATIOs not freed */ int inots_alloced; /* Number of INOTs not freed */ struct task refdrain_task; STAILQ_HEAD(, ccb_hdr) work_queue; LIST_HEAD(, ccb_hdr) atio_list; /* List of ATIOs queued to SIM. */ LIST_HEAD(, ccb_hdr) inot_list; /* List of INOTs queued to SIM. */ STAILQ_ENTRY(ctlfe_lun_softc) links; }; typedef enum { CTLFE_CMD_NONE = 0x00, CTLFE_CMD_PIECEWISE = 0x01 } ctlfe_cmd_flags; struct ctlfe_cmd_info { int cur_transfer_index; size_t cur_transfer_off; ctlfe_cmd_flags flags; /* * XXX KDM struct bus_dma_segment is 8 bytes on i386, and 16 * bytes on amd64. So with 32 elements, this is 256 bytes on * i386 and 512 bytes on amd64. */ #define CTLFE_MAX_SEGS 32 bus_dma_segment_t cam_sglist[CTLFE_MAX_SEGS]; }; /* * When we register the adapter/bus, request that this many ctl_ios be * allocated. This should be the maximum supported by the adapter, but we * currently don't have a way to get that back from the path inquiry. * XXX KDM add that to the path inquiry. */ #define CTLFE_REQ_CTL_IO 4096 /* * Number of Accept Target I/O CCBs to allocate and queue down to the * adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_ATIO_PER_LUN 1024 /* * Number of Immediate Notify CCBs (used for aborts, resets, etc.) to * allocate and queue down to the adapter per LUN. * XXX KDM should this be controlled by CTL? */ #define CTLFE_IN_PER_LUN 1024 /* * Timeout (in seconds) on CTIO CCB doing DMA or sending status */ #define CTLFE_TIMEOUT 5 /* * Turn this on to enable extra debugging prints. 
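 * Note that some prints in this file are guarded by CTLFE_DEBUG and
 * others by CTLFEDEBUG; both macros must be defined to get all of them.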
*/ #if 0 #define CTLFE_DEBUG #endif MALLOC_DEFINE(M_CTLFE, "CAM CTL FE", "CAM CTL FE interface"); #define io_ptr ppriv_ptr0 /* This is only used in the CTIO */ #define ccb_atio ppriv_ptr1 #define PRIV_CCB(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[0]) #define PRIV_INFO(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptrs[1]) static int ctlfeinitialize(void); static int ctlfeshutdown(void); static periph_init_t ctlfeperiphinit; static periph_deinit_t ctlfeperiphdeinit; static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static periph_ctor_t ctlferegister; static periph_oninv_t ctlfeoninvalidate; static periph_dtor_t ctlfecleanup; static periph_start_t ctlfestart; static void ctlfedone(struct cam_periph *periph, union ccb *done_ccb); static void ctlfe_onoffline(void *arg, int online); static void ctlfe_online(void *arg); static void ctlfe_offline(void *arg); static int ctlfe_lun_enable(void *arg, int lun_id); static int ctlfe_lun_disable(void *arg, int lun_id); static void ctlfe_dump_sim(struct cam_sim *sim); static void ctlfe_dump_queue(struct ctlfe_lun_softc *softc); static void ctlfe_datamove(union ctl_io *io); static void ctlfe_done(union ctl_io *io); static void ctlfe_dump(void); static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb); static void ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock); static struct periph_driver ctlfe_driver = { ctlfeperiphinit, "ctl", TAILQ_HEAD_INITIALIZER(ctlfe_driver.units), /*generation*/ 0, CAM_PERIPH_DRV_EARLY, ctlfeperiphdeinit }; static struct ctl_frontend ctlfe_frontend = { .name = "camtgt", .init = ctlfeinitialize, .fe_dump = ctlfe_dump, .shutdown = ctlfeshutdown, }; CTL_FRONTEND_DECLARE(ctlfe, ctlfe_frontend); static int ctlfeinitialize(void) { STAILQ_INIT(&ctlfe_softc_list); mtx_init(&ctlfe_list_mtx, ctlfe_mtx_desc, NULL, MTX_DEF); periphdriver_register(&ctlfe_driver); return (0); } static int ctlfeshutdown(void) { int error; error = periphdriver_unregister(&ctlfe_driver); if (error != 0) return (error); mtx_destroy(&ctlfe_list_mtx); return (0); } static void ctlfeperiphinit(void) { cam_status status; status = xpt_register_async(AC_PATH_REGISTERED | AC_PATH_DEREGISTERED | AC_CONTRACT, ctlfeasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ctl: Failed to attach async callback due to CAM " "status 0x%x!\n", status); } } static int ctlfeperiphdeinit(void) { /* XXX: It would be good to tear down active ports here. */ if (!TAILQ_EMPTY(&ctlfe_driver.units)) return (EBUSY); xpt_register_async(0, ctlfeasync, NULL, NULL); return (0); } static void ctlfeasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct ctlfe_softc *softc; #ifdef CTLFEDEBUG printf("%s: entered\n", __func__); #endif mtx_lock(&ctlfe_list_mtx); STAILQ_FOREACH(softc, &ctlfe_softc_list, links) { if (softc->path_id == xpt_path_path_id(path)) break; } mtx_unlock(&ctlfe_list_mtx); /* * When a new path gets registered, and it is capable of target * mode, go ahead and attach. Later on, we may need to be more * selective, but for now this will be sufficient. 
*/ switch (code) { case AC_PATH_REGISTERED: { struct ctl_port *port; struct ccb_pathinq *cpi; int retval; cpi = (struct ccb_pathinq *)arg; /* Don't attach if it doesn't support target mode */ if ((cpi->target_sprt & PIT_PROCESSOR) == 0) { #ifdef CTLFEDEBUG printf("%s: SIM %s%d doesn't support target mode\n", __func__, cpi->dev_name, cpi->unit_number); #endif break; } if (softc != NULL) { #ifdef CTLFEDEBUG printf("%s: CTL port for CAM path %u already exists\n", __func__, xpt_path_path_id(path)); #endif break; } /* * We're in an interrupt context here, so we have to * use M_NOWAIT. Of course this means trouble if we * can't allocate memory. */ softc = malloc(sizeof(*softc), M_CTLFE, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("%s: unable to malloc %zd bytes for softc\n", __func__, sizeof(*softc)); return; } softc->path_id = cpi->ccb_h.path_id; softc->target_id = cpi->initiator_id; softc->sim = xpt_path_sim(path); softc->hba_misc = cpi->hba_misc; if (cpi->maxio != 0) softc->maxio = cpi->maxio; else softc->maxio = DFLTPHYS; mtx_init(&softc->lun_softc_mtx, "LUN softc mtx", NULL, MTX_DEF); STAILQ_INIT(&softc->lun_softc_list); port = &softc->port; port->frontend = &ctlfe_frontend; /* * XXX KDM should we be more accurate here ? */ if (cpi->transport == XPORT_FC) port->port_type = CTL_PORT_FC; else if (cpi->transport == XPORT_SAS) port->port_type = CTL_PORT_SAS; else port->port_type = CTL_PORT_SCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = CTLFE_REQ_CTL_IO; snprintf(softc->port_name, sizeof(softc->port_name), "%s%d", cpi->dev_name, cpi->unit_number); /* * XXX KDM it would be nice to allocate storage in the * frontend structure itself. */ port->port_name = softc->port_name; port->physical_port = cpi->bus_id; port->virtual_port = 0; port->port_online = ctlfe_online; port->port_offline = ctlfe_offline; port->onoff_arg = softc; port->lun_enable = ctlfe_lun_enable; port->lun_disable = ctlfe_lun_disable; port->targ_lun_arg = softc; port->fe_datamove = ctlfe_datamove; port->fe_done = ctlfe_done; port->targ_port = -1; retval = ctl_port_register(port); if (retval != 0) { printf("%s: ctl_port_register() failed with " "error %d!\n", __func__, retval); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); break; } else { mtx_lock(&ctlfe_list_mtx); STAILQ_INSERT_TAIL(&ctlfe_softc_list, softc, links); mtx_unlock(&ctlfe_list_mtx); } break; } case AC_PATH_DEREGISTERED: { if (softc != NULL) { /* * XXX KDM are we certain at this point that there * are no outstanding commands for this frontend? */ mtx_lock(&ctlfe_list_mtx); STAILQ_REMOVE(&ctlfe_softc_list, softc, ctlfe_softc, links); mtx_unlock(&ctlfe_list_mtx); ctl_port_deregister(&softc->port); mtx_destroy(&softc->lun_softc_mtx); free(softc, M_CTLFE); } break; } case AC_CONTRACT: { struct ac_contract *ac; ac = (struct ac_contract *)arg; switch (ac->contract_number) { case AC_CONTRACT_DEV_CHG: { struct ac_device_changed *dev_chg; int retval; dev_chg = (struct ac_device_changed *)ac->contract_data; printf("%s: WWPN %#jx port 0x%06x path %u target %u %s\n", __func__, dev_chg->wwpn, dev_chg->port, xpt_path_path_id(path), dev_chg->target, (dev_chg->arrived == 0) ? 
"left" : "arrived"); if (softc == NULL) { printf("%s: CTL port for CAM path %u not " "found!\n", __func__, xpt_path_path_id(path)); break; } if (dev_chg->arrived != 0) { retval = ctl_add_initiator(&softc->port, dev_chg->target, dev_chg->wwpn, NULL); } else { retval = ctl_remove_initiator(&softc->port, dev_chg->target); } if (retval < 0) { printf("%s: could not %s port %d iid %u " "WWPN %#jx!\n", __func__, (dev_chg->arrived != 0) ? "add" : "remove", softc->port.targ_port, dev_chg->target, (uintmax_t)dev_chg->wwpn); } break; } default: printf("%s: unsupported contract number %ju\n", __func__, (uintmax_t)ac->contract_number); break; } break; } default: break; } } static cam_status ctlferegister(struct cam_periph *periph, void *arg) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; union ccb ccb; cam_status status; int i, acstatus; softc = (struct ctlfe_lun_softc *)arg; bus_softc = softc->parent_softc; STAILQ_INIT(&softc->work_queue); LIST_INIT(&softc->atio_list); LIST_INIT(&softc->inot_list); softc->periph = periph; periph->softc = softc; /* Increase device openings to maximum for the SIM. */ if (bus_softc->sim->max_tagged_dev_openings > bus_softc->sim->max_dev_openings) { cam_release_devq(periph->path, /*relsim_flags*/RELSIM_ADJUST_OPENINGS, /*openings*/bus_softc->sim->max_tagged_dev_openings, /*timeout*/0, /*getcount_only*/1); } xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 1; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Enable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); return (status); } status = CAM_REQ_CMP; for (i = 0; i < CTLFE_ATIO_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; struct ctlfe_cmd_info *cmd_info; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } cmd_info = malloc(sizeof(*cmd_info), M_CTLFE, M_ZERO | M_NOWAIT); if (cmd_info == NULL) { ctl_free_io(new_io); free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } PRIV_INFO(new_io) = cmd_info; softc->atios_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->atio_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_ACCEPT_TARGET_IO; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { free(cmd_info, M_CTLFE); ctl_free_io(new_io); free(new_ccb, M_CTLFE); break; } } acstatus = cam_periph_acquire(periph); if (acstatus != 0) { xpt_print(periph->path, "%s: could not acquire reference " "count, status = %#x\n", __func__, acstatus); return (CAM_REQ_CMP_ERR); } if (i == 0) { xpt_print(periph->path, "%s: could not allocate ATIO CCBs, " "status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } for (i = 0; i < CTLFE_IN_PER_LUN; i++) { union ccb *new_ccb; union ctl_io *new_io; new_ccb = (union ccb *)malloc(sizeof(*new_ccb), M_CTLFE, M_ZERO|M_NOWAIT); if (new_ccb == NULL) { status = CAM_RESRC_UNAVAIL; break; } new_io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref); if (new_io == NULL) { free(new_ccb, M_CTLFE); status = CAM_RESRC_UNAVAIL; break; } 
softc->inots_alloced++; new_ccb->ccb_h.io_ptr = new_io; LIST_INSERT_HEAD(&softc->inot_list, &new_ccb->ccb_h, periph_links.le); xpt_setup_ccb(&new_ccb->ccb_h, periph->path, CAM_PRIORITY_NONE); new_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; new_ccb->ccb_h.cbfcnp = ctlfedone; new_ccb->ccb_h.flags |= CAM_UNLOCKED; xpt_action(new_ccb); status = new_ccb->ccb_h.status; if ((status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { /* * Note that we don't free the CCB here. If the * status is not CAM_REQ_INPROG, then we're * probably talking to a SIM that says it is * target-capable but doesn't support the * XPT_IMMEDIATE_NOTIFY CCB. i.e. it supports the * older API. In that case, it'll call xpt_done() * on the CCB, and we need to free it in our done * routine as a result. */ break; } } if ((i == 0) || (status != CAM_REQ_INPROG)) { xpt_print(periph->path, "%s: could not allocate immediate " "notify CCBs, status 0x%x\n", __func__, status); return (CAM_REQ_CMP_ERR); } mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_INSERT_TAIL(&bus_softc->lun_softc_list, softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); return (CAM_REQ_CMP); } static void ctlfeoninvalidate(struct cam_periph *periph) { struct ctlfe_lun_softc *softc = (struct ctlfe_lun_softc *)periph->softc; struct ctlfe_softc *bus_softc; union ccb ccb; struct ccb_hdr *hdr; cam_status status; /* Abort all ATIOs and INOTs queued to SIM. */ xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NONE); ccb.ccb_h.func_code = XPT_ABORT; LIST_FOREACH(hdr, &softc->atio_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } LIST_FOREACH(hdr, &softc->inot_list, periph_links.le) { ccb.cab.abort_ccb = (union ccb *)hdr; xpt_action(&ccb); } /* Disable the LUN in SIM. */ ccb.ccb_h.func_code = XPT_EN_LUN; ccb.cel.grp6_len = 0; ccb.cel.grp7_len = 0; ccb.cel.enable = 0; xpt_action(&ccb); status = (ccb.ccb_h.status & CAM_STATUS_MASK); if (status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: Disable LUN failed, status 0x%x\n", __func__, ccb.ccb_h.status); /* * XXX KDM what do we do now? */ } bus_softc = softc->parent_softc; mtx_lock(&bus_softc->lun_softc_mtx); STAILQ_REMOVE(&bus_softc->lun_softc_list, softc, ctlfe_lun_softc, links); mtx_unlock(&bus_softc->lun_softc_mtx); } static void ctlfecleanup(struct cam_periph *periph) { struct ctlfe_lun_softc *softc; softc = (struct ctlfe_lun_softc *)periph->softc; KASSERT(softc->ctios_sent == 0, ("%s: ctios_sent %d != 0", __func__, softc->ctios_sent)); KASSERT(softc->refcount == 0, ("%s: refcount %d != 0", __func__, softc->refcount)); KASSERT(softc->atios_alloced == 0, ("%s: atios_alloced %d != 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced == 0, ("%s: inots_alloced %d != 0", __func__, softc->inots_alloced)); free(softc, M_CTLFE); } static void ctlfedata(struct ctlfe_lun_softc *softc, union ctl_io *io, ccb_flags *flags, uint8_t **data_ptr, uint32_t *dxfer_len, u_int16_t *sglist_cnt) { struct ctlfe_softc *bus_softc; struct ctlfe_cmd_info *cmd_info; struct ctl_sg_entry *ctl_sglist; bus_dma_segment_t *cam_sglist; size_t off; int i, idx; cmd_info = PRIV_INFO(io); bus_softc = softc->parent_softc; /* * Set the direction, relative to the initiator. */ *flags &= ~CAM_DIR_MASK; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) *flags |= CAM_DIR_IN; else *flags |= CAM_DIR_OUT; *flags &= ~CAM_DATA_MASK; idx = cmd_info->cur_transfer_index; off = cmd_info->cur_transfer_off; cmd_info->flags &= ~CTLFE_CMD_PIECEWISE; if (io->scsiio.kern_sg_entries == 0) { /* No S/G list. 
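 * The transfer is one contiguous buffer; it may still be split into
 * maxio-sized pieces below (CTLFE_CMD_PIECEWISE).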
*/ /* One time shift for SRR offset. */ off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; *data_ptr = io->scsiio.kern_data_ptr + off; if (io->scsiio.kern_data_len - off <= bus_softc->maxio) { *dxfer_len = io->scsiio.kern_data_len - off; } else { *dxfer_len = bus_softc->maxio; cmd_info->cur_transfer_off += bus_softc->maxio; cmd_info->flags |= CTLFE_CMD_PIECEWISE; } *sglist_cnt = 0; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_PADDR; else *flags |= CAM_DATA_VADDR; } else { /* S/G list with physical or virtual pointers. */ ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; /* One time shift for SRR offset. */ while (io->scsiio.ext_data_filled >= ctl_sglist[idx].len - off) { io->scsiio.ext_data_filled -= ctl_sglist[idx].len - off; idx++; off = 0; } off += io->scsiio.ext_data_filled; io->scsiio.ext_data_filled = 0; cam_sglist = cmd_info->cam_sglist; *dxfer_len = 0; for (i = 0; i < io->scsiio.kern_sg_entries - idx; i++) { cam_sglist[i].ds_addr = (bus_addr_t)(uintptr_t)ctl_sglist[i + idx].addr + off; if (ctl_sglist[i + idx].len - off <= bus_softc->maxio - *dxfer_len) { cam_sglist[i].ds_len = ctl_sglist[idx + i].len - off; *dxfer_len += cam_sglist[i].ds_len; } else { cam_sglist[i].ds_len = bus_softc->maxio - *dxfer_len; cmd_info->cur_transfer_index = idx + i; cmd_info->cur_transfer_off = cam_sglist[i].ds_len + off; cmd_info->flags |= CTLFE_CMD_PIECEWISE; *dxfer_len += cam_sglist[i].ds_len; if (ctl_sglist[i].len != 0) i++; break; } if (i == (CTLFE_MAX_SEGS - 1) && idx + i < (io->scsiio.kern_sg_entries - 1)) { cmd_info->cur_transfer_index = idx + i + 1; cmd_info->cur_transfer_off = 0; cmd_info->flags |= CTLFE_CMD_PIECEWISE; i++; break; } off = 0; } *sglist_cnt = i; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) *flags |= CAM_DATA_SG_PADDR; else *flags |= CAM_DATA_SG; *data_ptr = (uint8_t *)cam_sglist; } } static void ctlfestart(struct cam_periph *periph, union ccb *start_ccb) { struct ctlfe_lun_softc *softc; struct ctlfe_cmd_info *cmd_info; struct ccb_hdr *ccb_h; struct ccb_accept_tio *atio; struct ccb_scsiio *csio; uint8_t *data_ptr; uint32_t dxfer_len; ccb_flags flags; union ctl_io *io; uint8_t scsi_status; softc = (struct ctlfe_lun_softc *)periph->softc; next: /* Take the ATIO off the work queue */ ccb_h = STAILQ_FIRST(&softc->work_queue); if (ccb_h == NULL) { xpt_release_ccb(start_ccb); return; } STAILQ_REMOVE_HEAD(&softc->work_queue, periph_links.stqe); atio = (struct ccb_accept_tio *)ccb_h; io = (union ctl_io *)ccb_h->io_ptr; csio = &start_ccb->csio; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT|CAM_TAG_ACTION_VALID|CAM_DIR_MASK); cmd_info = PRIV_INFO(io); cmd_info->cur_transfer_index = 0; cmd_info->cur_transfer_off = 0; cmd_info->flags = 0; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) { /* * Datamove call, we need to setup the S/G list. */ ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); } else { /* * We're done, send status back. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) { io->io_hdr.flags &= ~CTL_FLAG_STATUS_QUEUED; /* Tell the SIM that we've aborted this ATIO */ #ifdef CTLFEDEBUG printf("%s: tag %04x abort\n", __func__, atio->tag_id); #endif KASSERT(atio->ccb_h.func_code == XPT_ACCEPT_TARGET_IO, ("func_code %#x is not ATIO", atio->ccb_h.func_code)); start_ccb->ccb_h.func_code = XPT_ABORT; start_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(start_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */0); /* XPT_ABORT is not queued, so we can take next I/O. 
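 * The aborted ATIO itself has already been handed back to the SIM by
 * ctlfe_requeue_ccb() above.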
*/ goto next; } data_ptr = NULL; dxfer_len = 0; csio->sglist_cnt = 0; } scsi_status = 0; if ((io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) && (cmd_info->flags & CTLFE_CMD_PIECEWISE) == 0 && ((io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) == 0 || io->io_hdr.status == CTL_SUCCESS)) { flags |= CAM_SEND_STATUS; scsi_status = io->scsiio.scsi_status; csio->sense_len = io->scsiio.sense_len; #ifdef CTLFEDEBUG printf("%s: tag %04x status %x\n", __func__, atio->tag_id, io->io_hdr.status); #endif if (csio->sense_len != 0) { csio->sense_data = io->scsiio.sense_data; flags |= CAM_SEND_SENSE; } } #ifdef CTLFEDEBUG printf("%s: %s: tag %04x flags %x ptr %p len %u\n", __func__, (flags & CAM_SEND_STATUS) ? "done" : "datamove", atio->tag_id, flags, data_ptr, dxfer_len); #endif /* * Valid combinations: * - CAM_SEND_STATUS, CAM_DATA_SG = 0, dxfer_len = 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG = 0, dxfer_len != 0, * sglist_cnt = 0 * - CAM_SEND_STATUS = 0, CAM_DATA_SG, dxfer_len != 0, * sglist_cnt != 0 */ #ifdef CTLFEDEBUG if (((flags & CAM_SEND_STATUS) && (((flags & CAM_DATA_SG) != 0) || (dxfer_len != 0) || (csio->sglist_cnt != 0))) || (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) || ((flags & CAM_DATA_SG) && (csio->sglist_cnt == 0)) || (((flags & CAM_DATA_SG) == 0) && (csio->sglist_cnt != 0))) { printf("%s: tag %04x cdb %02x flags %#x dxfer_len " "%d sg %u\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0], flags, dxfer_len, csio->sglist_cnt); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } #endif cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, scsi_status, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ CTLFE_TIMEOUT * 1000); start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.ccb_atio = atio; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; io->io_hdr.flags &= ~(CTL_FLAG_DMA_QUEUED | CTL_FLAG_STATUS_QUEUED); softc->ctios_sent++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* * If we still have work to do, ask for another CCB. */ if (!STAILQ_EMPTY(&softc->work_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); } static void ctlfe_drain(void *context, int pending) { struct cam_periph *periph = context; struct ctlfe_lun_softc *softc = periph->softc; cam_periph_lock(periph); while (softc->refcount != 0) { cam_periph_sleep(periph, &softc->refcount, PRIBIO, "ctlfe_drain", 1); } cam_periph_unlock(periph); cam_periph_release(periph); } static void ctlfe_free_ccb(struct cam_periph *periph, union ccb *ccb) { struct ctlfe_lun_softc *softc; union ctl_io *io; struct ctlfe_cmd_info *cmd_info; softc = (struct ctlfe_lun_softc *)periph->softc; io = ccb->ccb_h.io_ptr; switch (ccb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: softc->atios_alloced--; cmd_info = PRIV_INFO(io); free(cmd_info, M_CTLFE); break; case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: softc->inots_alloced--; break; default: break; } ctl_free_io(io); free(ccb, M_CTLFE); KASSERT(softc->atios_alloced >= 0, ("%s: atios_alloced %d < 0", __func__, softc->atios_alloced)); KASSERT(softc->inots_alloced >= 0, ("%s: inots_alloced %d < 0", __func__, softc->inots_alloced)); /* * If we have received all of our CCBs, we can release our * reference on the peripheral driver. It will probably go away * now. 
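 * If xpt_action() calls are still in flight (refcount != 0), defer the
 * release to ctlfe_drain() on the taskqueue instead of dropping the last
 * reference from this context.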
 */
	if (softc->atios_alloced == 0 && softc->inots_alloced == 0) {
		if (softc->refcount == 0) {
			cam_periph_release_locked(periph);
		} else {
			TASK_INIT(&softc->refdrain_task, 0, ctlfe_drain, periph);
			taskqueue_enqueue(taskqueue_thread,
			    &softc->refdrain_task);
		}
	}
}

/*
 * Send the ATIO/INOT back to the SIM, or free it if periph was invalidated.
 */
static void
ctlfe_requeue_ccb(struct cam_periph *periph, union ccb *ccb, int unlock)
{
	struct ctlfe_lun_softc *softc;
	struct mtx *mtx;

	if (periph->flags & CAM_PERIPH_INVALID) {
		mtx = cam_periph_mtx(periph);
		ctlfe_free_ccb(periph, ccb);
		if (unlock)
			mtx_unlock(mtx);
		return;
	}
	softc = (struct ctlfe_lun_softc *)periph->softc;
	if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
		LIST_INSERT_HEAD(&softc->atio_list, &ccb->ccb_h,
		    periph_links.le);
	else
		LIST_INSERT_HEAD(&softc->inot_list, &ccb->ccb_h,
		    periph_links.le);
	if (unlock)
		cam_periph_unlock(periph);

	/*
	 * For a wildcard attachment, commands can come in with a specific
	 * target/lun.  Reset the target and LUN fields back to the wildcard
	 * values before we send them back down to the SIM.
	 */
	xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, CAM_PRIORITY_NONE,
	    ccb->ccb_h.flags);

	xpt_action(ccb);
}

static int
ctlfe_adjust_cdb(struct ccb_accept_tio *atio, uint32_t offset)
{
	uint64_t lba;
	uint32_t num_blocks, nbc;
	uint8_t *cmdbyt = atio_cdb_ptr(atio);

	nbc = offset >> 9;	/* ASSUMING 512 BYTE BLOCKS */

	switch (cmdbyt[0]) {
	case READ_6:
	case WRITE_6:
	{
		struct scsi_rw_6 *cdb = (struct scsi_rw_6 *)cmdbyt;
		lba = scsi_3btoul(cdb->addr);
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		if (num_blocks == 0)
			num_blocks = 256;
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto3b(lba, cdb->addr);
		cdb->length = num_blocks;
		break;
	}
	case READ_10:
	case WRITE_10:
	{
		struct scsi_rw_10 *cdb = (struct scsi_rw_10 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto2b(num_blocks, cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12:
	{
		struct scsi_rw_12 *cdb = (struct scsi_rw_12 *)cmdbyt;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_ulto4b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	{
		struct scsi_rw_16 *cdb = (struct scsi_rw_16 *)cmdbyt;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		lba += nbc;
		num_blocks -= nbc;
		scsi_u64to8b(lba, cdb->addr);
		scsi_ulto4b(num_blocks, cdb->length);
		break;
	}
	default:
		return -1;
	}
	return (0);
}

static void
ctlfedone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ctlfe_lun_softc *softc;
	struct ctlfe_softc *bus_softc;
	struct ctlfe_cmd_info *cmd_info;
	struct ccb_accept_tio *atio = NULL;
	union ctl_io *io = NULL;
	struct mtx *mtx;
	cam_status status;

	KASSERT((done_ccb->ccb_h.flags & CAM_UNLOCKED) != 0,
	    ("CCB in ctlfedone() without CAM_UNLOCKED flag"));
#ifdef CTLFE_DEBUG
	printf("%s: entered, func_code = %#x\n", __func__,
	    done_ccb->ccb_h.func_code);
#endif

	/*
	 * At this point CTL has no known use case for device queue freezes.
	 * In case some SIM thinks differently -- drop its freeze right here.
	 */
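
/*
 * Reviewer sketch (not part of the patch): the CDB rewrite performed by
 * ctlfe_adjust_cdb() above, reduced to the READ(10)/WRITE(10) case in
 * plain, standalone C.  The SRR byte offset is converted to blocks (the
 * code above assumes 512-byte blocks), then added to the LBA while the
 * transfer length shrinks by the same amount.  get_be32()/put_be32() are
 * hypothetical stand-ins for scsi_4btoul()/scsi_ulto4b().
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	    (uint32_t)p[2] << 8 | p[3]);
}

static void
put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

/* cdb[2..5] = LBA, cdb[7..8] = transfer length, per SBC READ(10). */
static void
adjust_rw10(uint8_t cdb[10], uint32_t byte_off)
{
	uint32_t nbc = byte_off >> 9;		/* 512-byte blocks */
	uint32_t lba = get_be32(&cdb[2]);
	uint16_t len = (uint16_t)(cdb[7] << 8 | cdb[8]);

	put_be32(&cdb[2], lba + nbc);
	len -= nbc;
	cdb[7] = len >> 8;
	cdb[8] = len & 0xff;
}

int
main(void)
{
	uint8_t cdb[10] = { 0x28, 0, 0, 0, 0, 100, 0, 0, 16, 0 };

	adjust_rw10(cdb, 4096);	/* resume 8 blocks into the transfer */
	printf("new LBA %u, new length %u\n", get_be32(&cdb[2]),
	    (unsigned)(cdb[7] << 8 | cdb[8]));
	return (0);
}
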
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(periph->path, /*relsim_flags*/0,
		    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	softc = (struct ctlfe_lun_softc *)periph->softc;
	bus_softc = softc->parent_softc;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	switch (done_ccb->ccb_h.func_code) {
	case XPT_ACCEPT_TARGET_IO: {
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		atio = &done_ccb->atio;
		status = atio->ccb_h.status & CAM_STATUS_MASK;
		if (status != CAM_CDB_RECVD) {
			ctlfe_free_ccb(periph, done_ccb);
			goto out;
		}

resubmit:
		/*
		 * Allocate a ctl_io, pass it to CTL, and wait for the
		 * datamove or done.
		 */
		mtx_unlock(mtx);
		io = done_ccb->ccb_h.io_ptr;
		cmd_info = PRIV_INFO(io);
		ctl_zero_io(io);

		/* Save pointers on both sides */
		PRIV_CCB(io) = done_ccb;
		PRIV_INFO(io) = cmd_info;
		done_ccb->ccb_h.io_ptr = io;

		/*
		 * Only SCSI I/O comes down this path, resets, etc. come
		 * down the immediate notify path below.
		 */
		io->io_hdr.io_type = CTL_IO_SCSI;
		io->io_hdr.nexus.initid = atio->init_id;
		io->io_hdr.nexus.targ_port = bus_softc->port.targ_port;
		if (bus_softc->hba_misc & PIM_EXTLUNS) {
			io->io_hdr.nexus.targ_lun = ctl_decode_lun(
			    CAM_EXTLUN_BYTE_SWIZZLE(atio->ccb_h.target_lun));
		} else {
			io->io_hdr.nexus.targ_lun = atio->ccb_h.target_lun;
		}
		io->scsiio.priority = atio->priority;
		io->scsiio.tag_num = atio->tag_id;
		switch (atio->tag_action) {
		case CAM_TAG_ACTION_NONE:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			break;
		case MSG_SIMPLE_TASK:
			io->scsiio.tag_type = CTL_TAG_SIMPLE;
			break;
		case MSG_HEAD_OF_QUEUE_TASK:
			io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
			break;
		case MSG_ORDERED_TASK:
			io->scsiio.tag_type = CTL_TAG_ORDERED;
			break;
		case MSG_ACA_TASK:
			io->scsiio.tag_type = CTL_TAG_ACA;
			break;
		default:
			io->scsiio.tag_type = CTL_TAG_UNTAGGED;
			printf("%s: unhandled tag type %#x!!\n", __func__,
			    atio->tag_action);
			break;
		}
		if (atio->cdb_len > sizeof(io->scsiio.cdb)) {
			printf("%s: WARNING: CDB len %d > ctl_io space %zd\n",
			    __func__, atio->cdb_len, sizeof(io->scsiio.cdb));
		}
		io->scsiio.cdb_len = min(atio->cdb_len, sizeof(io->scsiio.cdb));
		bcopy(atio_cdb_ptr(atio), io->scsiio.cdb, io->scsiio.cdb_len);

#ifdef CTLFEDEBUG
		printf("%s: %u:%u:%u: tag %04x CDB %02x\n", __func__,
		    io->io_hdr.nexus.initid,
		    io->io_hdr.nexus.targ_port,
		    io->io_hdr.nexus.targ_lun,
		    io->scsiio.tag_num, io->scsiio.cdb[0]);
#endif

		ctl_queue(io);
		return;
	}
	case XPT_CONT_TARGET_IO: {
		int srr = 0;
		uint32_t srr_off = 0;

		atio = (struct ccb_accept_tio *)done_ccb->ccb_h.ccb_atio;
		io = (union ctl_io *)atio->ccb_h.io_ptr;

		softc->ctios_sent--;
#ifdef CTLFEDEBUG
		printf("%s: got XPT_CONT_TARGET_IO tag %#x flags %#x\n",
		    __func__, atio->tag_id, done_ccb->ccb_h.flags);
#endif
		/*
		 * Handle the SRR case, where the initiator pushes the data
		 * pointer back.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_MESSAGE_RECV
		    && done_ccb->csio.msg_ptr != NULL
		    && done_ccb->csio.msg_ptr[0] == MSG_EXTENDED
		    && done_ccb->csio.msg_ptr[1] == 5
		    && done_ccb->csio.msg_ptr[2] == 0) {
			srr = 1;
			srr_off =
			    (done_ccb->csio.msg_ptr[3] << 24)
			    | (done_ccb->csio.msg_ptr[4] << 16)
			    | (done_ccb->csio.msg_ptr[5] << 8)
			    | (done_ccb->csio.msg_ptr[6]);
		}

		/*
		 * If we have an SRR and we're still sending data, we
		 * should be able to adjust offsets and cycle again.
		 * This is possible only if the offset falls within this
		 * datamove.
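
/*
 * Reviewer sketch (not part of the patch): the window test used above to
 * decide whether an SRR can be satisfied by rewinding the current
 * datamove.  The offset must fall inside [rel_off, rel_off + len); the
 * new residue is then whatever lies beyond it.  Standalone C with
 * hypothetical names mirroring kern_rel_offset/kern_data_len.
 */
#include <stdbool.h>
#include <stdint.h>

struct dm_window {
	uint32_t rel_off;	/* kern_rel_offset analogue */
	uint32_t len;		/* kern_data_len analogue */
};

static bool
srr_in_window(const struct dm_window *w, uint32_t srr_off,
    uint32_t *new_resid)
{
	if (srr_off < w->rel_off || srr_off >= w->rel_off + w->len)
		return (false);	/* must restart the command instead */
	*new_resid = w->rel_off + w->len - srr_off;
	return (true);
}
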
*/ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) && srr_off >= io->scsiio.kern_rel_offset && srr_off < io->scsiio.kern_rel_offset + io->scsiio.kern_data_len) { io->scsiio.kern_data_resid = io->scsiio.kern_rel_offset + io->scsiio.kern_data_len - srr_off; io->scsiio.ext_data_filled = srr_off; io->scsiio.io_hdr.status = CTL_STATUS_NONE; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; xpt_release_ccb(done_ccb); STAILQ_INSERT_HEAD(&softc->work_queue, &atio->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } /* * If status was being sent, the back end data is now history. * Hack it up and resubmit a new command with the CDB adjusted. * If the SIM does the right thing, all of the resid math * should work. */ if (srr && (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { xpt_release_ccb(done_ccb); if (ctlfe_adjust_cdb(atio, srr_off) == 0) { done_ccb = (union ccb *)atio; goto resubmit; } /* * Fall through to doom.... */ } if ((done_ccb->ccb_h.flags & CAM_SEND_STATUS) && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; /* * If we were sending status back to the initiator, free up * resources. If we were doing a datamove, call the * datamove done routine. */ if ((io->io_hdr.flags & CTL_FLAG_DMA_INPROG) == 0) { /* * If we asked to send sense data but it wasn't sent, * queue the I/O back to CTL for later REQUEST SENSE. */ if ((done_ccb->ccb_h.flags & CAM_SEND_SENSE) != 0 && (done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (done_ccb->ccb_h.status & CAM_SENT_SENSE) == 0 && (io = ctl_alloc_io_nowait(bus_softc->port.ctl_pool_ref)) != NULL) { PRIV_INFO(io) = PRIV_INFO( (union ctl_io *)atio->ccb_h.io_ptr); ctl_queue_sense(atio->ccb_h.io_ptr); atio->ccb_h.io_ptr = io; } /* Abort ATIO if CTIO sending status has failed. */ if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { done_ccb->ccb_h.func_code = XPT_ABORT; done_ccb->cab.abort_ccb = (union ccb *)atio; xpt_action(done_ccb); } xpt_release_ccb(done_ccb); ctlfe_requeue_ccb(periph, (union ccb *)atio, /* unlock */1); return; } else { struct ctlfe_cmd_info *cmd_info; struct ccb_scsiio *csio; csio = &done_ccb->csio; cmd_info = PRIV_INFO(io); io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; /* * Translate CAM status to CTL status. Success * does not change the overall, ctl_io status. In * that case we just set port_status to 0. If we * have a failure, though, set a data phase error * for the overall ctl_io. */ switch (done_ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_REQ_CMP: io->scsiio.kern_data_resid -= csio->dxfer_len - csio->resid; io->io_hdr.port_status = 0; break; default: /* * XXX KDM we probably need to figure out a * standard set of errors that the SIM * drivers should return in the event of a * data transfer failure. A data phase * error will at least point the user to a * data transfer error of some sort. * Hopefully the SIM printed out some * additional information to give the user * a clue what happened. */ io->io_hdr.port_status = 0xbad1; ctl_set_data_phase_error(&io->scsiio); /* * XXX KDM figure out residual. */ break; } /* * If we had to break this S/G list into multiple * pieces, figure out where we are in the list, and * continue sending pieces if necessary. 
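
/*
 * Reviewer sketch (not part of the patch): the residual bookkeeping done
 * above when a data CTIO completes with CAM_REQ_CMP.  The SIM reports how
 * much of dxfer_len it did NOT move in csio->resid, so the CTL-side
 * residue shrinks only by the amount actually transferred.
 */
#include <stdint.h>

static void
account_ctio(uint32_t *kern_data_resid, uint32_t dxfer_len, uint32_t resid)
{
	/* Bytes actually moved by the SIM for this CTIO. */
	uint32_t moved = dxfer_len - resid;

	*kern_data_resid -= moved;
}
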
*/ if ((cmd_info->flags & CTLFE_CMD_PIECEWISE) && io->io_hdr.port_status == 0 && csio->resid == 0) { ccb_flags flags; uint8_t *data_ptr; uint32_t dxfer_len; flags = atio->ccb_h.flags & (CAM_DIS_DISCONNECT| CAM_TAG_ACTION_VALID); ctlfedata(softc, io, &flags, &data_ptr, &dxfer_len, &csio->sglist_cnt); if (((flags & CAM_SEND_STATUS) == 0) && (dxfer_len == 0)) { printf("%s: tag %04x no status or " "len cdb = %02x\n", __func__, atio->tag_id, atio_cdb_ptr(atio)[0]); printf("%s: tag %04x io status %#x\n", __func__, atio->tag_id, io->io_hdr.status); } cam_fill_ctio(csio, /*retries*/ 2, ctlfedone, flags, (flags & CAM_TAG_ACTION_VALID) ? MSG_SIMPLE_Q_TAG : 0, atio->tag_id, atio->init_id, 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, CTLFE_TIMEOUT * 1000); csio->ccb_h.flags |= CAM_UNLOCKED; csio->resid = 0; csio->ccb_h.ccb_atio = atio; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; softc->ctios_sent++; mtx_unlock(mtx); xpt_action((union ccb *)csio); } else { /* * Release the CTIO. The ATIO will be sent back * down to the SIM once we send status. */ xpt_release_ccb(done_ccb); mtx_unlock(mtx); - /* Call the backend move done callback */ - io->scsiio.be_move_done(io); + ctl_datamove_done(io, false); } return; } break; } case XPT_IMMEDIATE_NOTIFY: { union ctl_io *io; struct ccb_immediate_notify *inot; int send_ctl_io; LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); inot = &done_ccb->cin1; io = done_ccb->ccb_h.io_ptr; ctl_zero_io(io); send_ctl_io = 1; io->io_hdr.io_type = CTL_IO_TASK; PRIV_CCB(io) = done_ccb; inot->ccb_h.io_ptr = io; io->io_hdr.nexus.initid = inot->initiator_id; io->io_hdr.nexus.targ_port = bus_softc->port.targ_port; if (bus_softc->hba_misc & PIM_EXTLUNS) { io->io_hdr.nexus.targ_lun = ctl_decode_lun( CAM_EXTLUN_BYTE_SWIZZLE(inot->ccb_h.target_lun)); } else { io->io_hdr.nexus.targ_lun = inot->ccb_h.target_lun; } /* XXX KDM should this be the tag_id? 
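
/*
 * Reviewer sketch (not part of the patch): the CTLFE_CMD_PIECEWISE idea
 * above in miniature.  When a transfer exceeds the SIM's maxio, it goes
 * out as a sequence of CTIOs; each completion either queues the next
 * piece or, once everything has moved, reports the datamove as done.
 * Standalone C with hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>

static void
send_piecewise(uint32_t total, uint32_t maxio)
{
	uint32_t off = 0;

	while (off < total) {
		uint32_t chunk = (total - off < maxio) ? total - off : maxio;

		printf("CTIO: offset %u, length %u\n", off, chunk);
		off += chunk;	/* next piece continues where this ended */
	}
	printf("datamove done: %u bytes\n", off);
}
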
*/ io->taskio.tag_num = inot->seq_id; status = inot->ccb_h.status & CAM_STATUS_MASK; switch (status) { case CAM_SCSI_BUS_RESET: io->taskio.task_action = CTL_TASK_BUS_RESET; break; case CAM_BDR_SENT: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case CAM_MESSAGE_RECV: switch (inot->arg) { case MSG_ABORT_TASK_SET: io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case MSG_TARGET_RESET: io->taskio.task_action = CTL_TASK_TARGET_RESET; break; case MSG_ABORT_TASK: io->taskio.task_action = CTL_TASK_ABORT_TASK; break; case MSG_LOGICAL_UNIT_RESET: io->taskio.task_action = CTL_TASK_LUN_RESET; break; case MSG_CLEAR_TASK_SET: io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET; break; case MSG_CLEAR_ACA: io->taskio.task_action = CTL_TASK_CLEAR_ACA; break; case MSG_QUERY_TASK: io->taskio.task_action = CTL_TASK_QUERY_TASK; break; case MSG_QUERY_TASK_SET: io->taskio.task_action = CTL_TASK_QUERY_TASK_SET; break; case MSG_QUERY_ASYNC_EVENT: io->taskio.task_action = CTL_TASK_QUERY_ASYNC_EVENT; break; case MSG_NOOP: send_ctl_io = 0; break; default: xpt_print(periph->path, "%s: unsupported INOT message 0x%x\n", __func__, inot->arg); send_ctl_io = 0; break; } break; default: xpt_print(periph->path, "%s: unsupported INOT status 0x%x\n", __func__, status); /* FALLTHROUGH */ case CAM_REQ_ABORTED: case CAM_REQ_INVALID: case CAM_DEV_NOT_THERE: case CAM_PROVIDE_FAIL: ctlfe_free_ccb(periph, done_ccb); goto out; } mtx_unlock(mtx); if (send_ctl_io != 0) { ctl_queue(io); } else { done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; xpt_action(done_ccb); } return; } case XPT_NOTIFY_ACKNOWLEDGE: /* Queue this back down to the SIM as an immediate notify. */ done_ccb->ccb_h.status = CAM_REQ_INPROG; done_ccb->ccb_h.func_code = XPT_IMMEDIATE_NOTIFY; ctlfe_requeue_ccb(periph, done_ccb, /* unlock */1); return; case XPT_SET_SIM_KNOB: case XPT_GET_SIM_KNOB: case XPT_GET_SIM_KNOB_OLD: break; default: panic("%s: unexpected CCB type %#x", __func__, done_ccb->ccb_h.func_code); break; } out: mtx_unlock(mtx); } static void ctlfe_onoffline(void *arg, int online) { struct ctlfe_softc *bus_softc = arg; union ccb *ccb; cam_status status; struct cam_path *path; int set_wwnn = 0; status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path!\n", __func__); return; } ccb = xpt_alloc_ccb(); xpt_setup_ccb(&ccb->ccb_h, path, CAM_PRIORITY_NONE); ccb->ccb_h.func_code = XPT_GET_SIM_KNOB; xpt_action(ccb); /* Check whether we should change WWNs. */ if (online != 0) { if ((ccb->knob.xport_specific.valid & KNOB_VALID_ADDRESS) != 0){ printf("%s: %s current WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s current WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); /* * If the user has specified a WWNN/WWPN, send them * down to the SIM. Otherwise, record what the SIM * has reported. 
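
/*
 * Reviewer sketch (not part of the patch): the INOT-to-task-action
 * mapping from the switch above, as a plain lookup.  The enumerators are
 * stand-ins for the CAM message codes and CTL task actions; unknown
 * messages map to a sentinel so the caller can NACK them without
 * queueing anything to CTL.
 */
enum sketch_msg {
	S_ABORT_TASK, S_ABORT_TASK_SET, S_LUN_RESET, S_TARGET_RESET,
	S_UNKNOWN
};
enum sketch_action {
	A_ABORT_TASK, A_ABORT_TASK_SET, A_LUN_RESET, A_TARGET_RESET,
	A_NONE
};

static enum sketch_action
inot_to_action(enum sketch_msg m)
{
	switch (m) {
	case S_ABORT_TASK:	return (A_ABORT_TASK);
	case S_ABORT_TASK_SET:	return (A_ABORT_TASK_SET);
	case S_LUN_RESET:	return (A_LUN_RESET);
	case S_TARGET_RESET:	return (A_TARGET_RESET);
	default:		return (A_NONE);	/* NACK, don't queue */
	}
}
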
*/ if (bus_softc->port.wwnn != 0 && bus_softc->port.wwnn != ccb->knob.xport_specific.fc.wwnn) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, true, ccb->knob.xport_specific.fc.wwnn, false, 0); } if (bus_softc->port.wwpn != 0 && bus_softc->port.wwpn != ccb->knob.xport_specific.fc.wwpn) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } else { ctl_port_set_wwns(&bus_softc->port, false, 0, true, ccb->knob.xport_specific.fc.wwpn); } } else { printf("%s: %s has no valid WWNN/WWPN\n", __func__, bus_softc->port_name); if (bus_softc->port.wwnn != 0) { ccb->knob.xport_specific.fc.wwnn = bus_softc->port.wwnn; set_wwnn = 1; } if (bus_softc->port.wwpn != 0) { ccb->knob.xport_specific.fc.wwpn = bus_softc->port.wwpn; set_wwnn = 1; } } } if (set_wwnn) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ADDRESS; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed set WWNs: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, ccb->ccb_h.status); } else { printf("%s: %s new WWNN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwnn); printf("%s: %s new WWPN %#jx\n", __func__, bus_softc->port_name, ccb->knob.xport_specific.fc.wwpn); } } /* Check whether we should change role. */ if ((ccb->knob.xport_specific.valid & KNOB_VALID_ROLE) == 0 || ((online != 0) ^ ((ccb->knob.xport_specific.fc.role & KNOB_ROLE_TARGET) != 0)) != 0) { ccb->ccb_h.func_code = XPT_SET_SIM_KNOB; ccb->knob.xport_specific.valid = KNOB_VALID_ROLE; if (online) ccb->knob.xport_specific.fc.role |= KNOB_ROLE_TARGET; else ccb->knob.xport_specific.fc.role &= ~KNOB_ROLE_TARGET; xpt_action(ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { printf("%s: %s (path id %d) failed %s target role: %#x\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable", ccb->ccb_h.status); } else { printf("%s: %s (path id %d) target role %s succeeded\n", __func__, bus_softc->port_name, bus_softc->path_id, online ? "enable" : "disable"); } } xpt_free_path(path); xpt_free_ccb(ccb); } static void ctlfe_online(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct ctlfe_lun_softc *lun_softc; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; /* * Create the wildcard LUN before bringing the port online. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } lun_softc = malloc(sizeof(*lun_softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(lun_softc, M_CTLFE); return; } lun_softc->parent_softc = bus_softc; lun_softc->flags |= CTLFE_LUN_WILDCARD; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, lun_softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? 
entry->status_text : "Unknown", status); free(lun_softc, M_CTLFE); } xpt_path_unlock(path); ctlfe_onoffline(arg, /*online*/ 1); xpt_free_path(path); } static void ctlfe_offline(void *arg) { struct ctlfe_softc *bus_softc; struct cam_path *path; cam_status status; struct cam_periph *periph; bus_softc = (struct ctlfe_softc *)arg; ctlfe_onoffline(arg, /*online*/ 0); /* * Disable the wildcard LUN for this port now that we have taken * the port offline. */ status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { printf("%s: unable to create path for wildcard periph\n", __func__); return; } xpt_path_lock(path); if ((periph = cam_periph_find(path, "ctl")) != NULL) cam_periph_invalidate(periph); xpt_path_unlock(path); xpt_free_path(path); } /* * This will get called to enable a LUN on every bus that is attached to * CTL. So we only need to create a path/periph for this particular bus. */ static int ctlfe_lun_enable(void *arg, int lun_id) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *softc; struct cam_path *path; struct cam_periph *periph; cam_status status; bus_softc = (struct ctlfe_softc *)arg; if (bus_softc->hba_misc & PIM_EXTLUNS) lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id)); status = xpt_create_path(&path, /*periph*/ NULL, bus_softc->path_id, bus_softc->target_id, lun_id); /* XXX KDM need some way to return status to CTL here? */ if (status != CAM_REQ_CMP) { printf("%s: could not create path, status %#x\n", __func__, status); return (1); } softc = malloc(sizeof(*softc), M_CTLFE, M_WAITOK | M_ZERO); xpt_path_lock(path); periph = cam_periph_find(path, "ctl"); if (periph != NULL) { /* We've already got a periph, no need to alloc a new one. */ xpt_path_unlock(path); xpt_free_path(path); free(softc, M_CTLFE); return (0); } softc->parent_softc = bus_softc; status = cam_periph_alloc(ctlferegister, ctlfeoninvalidate, ctlfecleanup, ctlfestart, "ctl", CAM_PERIPH_BIO, path, ctlfeasync, 0, softc); if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("%s: CAM error %s (%#x) returned from " "cam_periph_alloc()\n", __func__, (entry != NULL) ? entry->status_text : "Unknown", status); free(softc, M_CTLFE); } xpt_path_unlock(path); xpt_free_path(path); return (0); } /* * This will get called when the user removes a LUN to disable that LUN * on every bus that is attached to CTL. 
 */
static int
ctlfe_lun_disable(void *arg, int lun_id)
{
	struct ctlfe_softc *softc;
	struct ctlfe_lun_softc *lun_softc;

	softc = (struct ctlfe_softc *)arg;
	if (softc->hba_misc & PIM_EXTLUNS)
		lun_id = CAM_EXTLUN_BYTE_SWIZZLE(ctl_encode_lun(lun_id));

	mtx_lock(&softc->lun_softc_mtx);
	STAILQ_FOREACH(lun_softc, &softc->lun_softc_list, links) {
		struct cam_path *path;

		path = lun_softc->periph->path;

		if ((xpt_path_target_id(path) == softc->target_id)
		 && (xpt_path_lun_id(path) == lun_id)) {
			break;
		}
	}
	if (lun_softc == NULL) {
		mtx_unlock(&softc->lun_softc_mtx);
		printf("%s: can't find lun %d\n", __func__, lun_id);
		return (1);
	}
	cam_periph_acquire(lun_softc->periph);
	mtx_unlock(&softc->lun_softc_mtx);

	cam_periph_lock(lun_softc->periph);
	cam_periph_invalidate(lun_softc->periph);
	cam_periph_unlock(lun_softc->periph);
	cam_periph_release(lun_softc->periph);
	return (0);
}

static void
ctlfe_dump_sim(struct cam_sim *sim)
{

	printf("%s%d: max dev openings: %d, max tagged dev openings: %d\n",
	    sim->sim_name, sim->unit_number, sim->max_dev_openings,
	    sim->max_tagged_dev_openings);
}

/*
 * Assumes that the SIM lock is held.
 */
static void
ctlfe_dump_queue(struct ctlfe_lun_softc *softc)
{
	struct cam_periph *periph = softc->periph;
	struct ccb_hdr *hdr;
	struct ccb_getdevstats cgds;
	int num_items;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	if ((cgds.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		xpt_print(periph->path, "devq: openings %d, active %d, "
		    "allocated %d, queued %d, held %d\n",
		    cgds.dev_openings, cgds.dev_active, cgds.allocated,
		    cgds.queued, cgds.held);
	}

	num_items = 0;

	STAILQ_FOREACH(hdr, &softc->work_queue, periph_links.stqe) {
		union ctl_io *io = hdr->io_ptr;

		num_items++;

		/*
		 * Only regular SCSI I/O is put on the work
		 * queue, so we can print sense here.  There may be no
		 * sense if it's on the queue for a DMA, but this serves to
		 * print out the CCB as well.
		 *
		 * XXX KDM switch this over to scsi_sense_print() when
		 * CTL is merged in with CAM.
		 */
		ctl_io_error_print(io, NULL);

		/*
		 * Print DMA status if we are DMA_QUEUED.
		 */
		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) {
			xpt_print(periph->path,
			    "Total %u, Current %u, Resid %u\n",
			    io->scsiio.kern_total_len,
			    io->scsiio.kern_data_len,
			    io->scsiio.kern_data_resid);
		}
	}

	xpt_print(periph->path, "%d requests waiting for CCBs\n", num_items);
	xpt_print(periph->path, "%d CTIOs outstanding\n", softc->ctios_sent);
}

/*
 * Datamove/done routine called by CTL.  Put ourselves on the queue to
 * receive a CCB from CAM so we can queue the continue I/O request down
 * to the adapter.
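
/*
 * Reviewer sketch (not part of the patch): the producer/consumer pattern
 * used by ctlfe_datamove() and ctlfestart().  The datamove side only
 * appends work to a queue and schedules the consumer; the start routine
 * later pops entries and builds CTIOs.  Standalone C using <sys/queue.h>;
 * names are hypothetical.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	int id;
	STAILQ_ENTRY(work) links;
};
static STAILQ_HEAD(, work) work_queue = STAILQ_HEAD_INITIALIZER(work_queue);

static void
enqueue(int id)	/* the ctlfe_datamove() side */
{
	struct work *w = malloc(sizeof(*w));

	if (w == NULL)
		return;
	w->id = id;
	STAILQ_INSERT_TAIL(&work_queue, w, links);
	/* An xpt_schedule() analogue would poke the consumer here. */
}

static void
drain(void)	/* the ctlfestart() side */
{
	struct work *w;

	while ((w = STAILQ_FIRST(&work_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&work_queue, links);
		printf("building CTIO for work %d\n", w->id);
		free(w);
	}
}
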
*/ static void ctlfe_datamove(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); io->scsiio.ext_data_filled = 0; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; io->io_hdr.flags |= CTL_FLAG_DMA_QUEUED; if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); } static void ctlfe_done(union ctl_io *io) { union ccb *ccb; struct cam_periph *periph; struct ctlfe_lun_softc *softc; ccb = PRIV_CCB(io); periph = xpt_path_periph(ccb->ccb_h.path); cam_periph_lock(periph); softc = (struct ctlfe_lun_softc *)periph->softc; if (io->io_hdr.io_type == CTL_IO_TASK) { /* * Send the notify acknowledge down to the SIM, to let it * know we processed the task management command. */ ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.func_code = XPT_NOTIFY_ACKNOWLEDGE; switch (io->taskio.task_status) { case CTL_TASK_FUNCTION_COMPLETE: ccb->cna2.arg = CAM_RSP_TMF_COMPLETE; break; case CTL_TASK_FUNCTION_SUCCEEDED: ccb->cna2.arg = CAM_RSP_TMF_SUCCEEDED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_REJECTED: ccb->cna2.arg = CAM_RSP_TMF_REJECTED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_LUN_DOES_NOT_EXIST: ccb->cna2.arg = CAM_RSP_TMF_INCORRECT_LUN; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; case CTL_TASK_FUNCTION_NOT_SUPPORTED: ccb->cna2.arg = CAM_RSP_TMF_FAILED; ccb->ccb_h.flags |= CAM_SEND_STATUS; break; } ccb->cna2.arg |= scsi_3btoul(io->taskio.task_resp) << 8; xpt_action(ccb); } else if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) { ctlfe_requeue_ccb(periph, ccb, /* unlock */1); return; } else { io->io_hdr.flags |= CTL_FLAG_STATUS_QUEUED; STAILQ_INSERT_TAIL(&softc->work_queue, &ccb->ccb_h, periph_links.stqe); xpt_schedule(periph, CAM_PRIORITY_NORMAL); } cam_periph_unlock(periph); } static void ctlfe_dump(void) { struct ctlfe_softc *bus_softc; struct ctlfe_lun_softc *lun_softc; STAILQ_FOREACH(bus_softc, &ctlfe_softc_list, links) { ctlfe_dump_sim(bus_softc->sim); STAILQ_FOREACH(lun_softc, &bus_softc->lun_softc_list, links) ctlfe_dump_queue(lun_softc); } } diff --git a/sys/dev/usb/storage/cfumass.c b/sys/dev/usb/storage/cfumass.c index 59d744bd62d0..88b5a6156704 100644 --- a/sys/dev/usb/storage/cfumass.c +++ b/sys/dev/usb/storage/cfumass.c @@ -1,997 +1,997 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2016 The FreeBSD Foundation * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * USB Mass Storage Class Bulk-Only (BBB) Transport target. * * http://www.usb.org/developers/docs/devclass_docs/usbmassbulk_10.pdf * * This code implements the USB Mass Storage frontend driver for the CAM * Target Layer (ctl(4)) subsystem. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include "usb_if.h" #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw_usb, OID_AUTO, cfumass, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "CAM Target Layer USB Mass Storage Frontend"); static int debug = 1; SYSCTL_INT(_hw_usb_cfumass, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int max_lun = 0; SYSCTL_INT(_hw_usb_cfumass, OID_AUTO, max_lun, CTLFLAG_RWTUN, &max_lun, 1, "Maximum advertised LUN number"); static int ignore_stop = 1; SYSCTL_INT(_hw_usb_cfumass, OID_AUTO, ignore_stop, CTLFLAG_RWTUN, &ignore_stop, 1, "Ignore START STOP UNIT with START and LOEJ bits cleared"); /* * The driver uses a single, global CTL port. It could create its ports * in cfumass_attach() instead, but that would make it impossible to specify * "port cfumass0" in ctl.conf(5), as the port generally wouldn't exist * at the time ctld(8) gets run. */ struct ctl_port cfumass_port; bool cfumass_port_online; volatile u_int cfumass_refcount; #ifndef CFUMASS_BULK_SIZE #define CFUMASS_BULK_SIZE (1U << 17) /* bytes */ #endif /* * USB transfer definitions. */ #define CFUMASS_T_COMMAND 0 #define CFUMASS_T_DATA_OUT 1 #define CFUMASS_T_DATA_IN 2 #define CFUMASS_T_STATUS 3 #define CFUMASS_T_MAX 4 /* * USB interface specific control requests. */ #define UR_RESET 0xff /* Bulk-Only Mass Storage Reset */ #define UR_GET_MAX_LUN 0xfe /* Get Max LUN */ /* * Command Block Wrapper. */ struct cfumass_cbw_t { uDWord dCBWSignature; #define CBWSIGNATURE 0x43425355 /* "USBC" */ uDWord dCBWTag; uDWord dCBWDataTransferLength; uByte bCBWFlags; #define CBWFLAGS_OUT 0x00 #define CBWFLAGS_IN 0x80 uByte bCBWLUN; uByte bCDBLength; #define CBWCBLENGTH 16 uByte CBWCB[CBWCBLENGTH]; } __packed; #define CFUMASS_CBW_SIZE 31 CTASSERT(sizeof(struct cfumass_cbw_t) == CFUMASS_CBW_SIZE); /* * Command Status Wrapper. 
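
/*
 * Reviewer sketch (not part of the patch): decoding a raw 31-byte CBW as
 * struct cfumass_cbw_t above lays it out.  All multi-byte fields are
 * little-endian per the BBB spec; the signature is "USBC" (0x43425355
 * when read LE).  Standalone C with hypothetical names.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
get_le32(const uint8_t *p)
{
	return (p[0] | (uint32_t)p[1] << 8 | (uint32_t)p[2] << 16 |
	    (uint32_t)p[3] << 24);
}

static int
parse_cbw(const uint8_t buf[31])
{
	if (get_le32(&buf[0]) != 0x43425355)	/* dCBWSignature */
		return (-1);
	printf("tag %#x, xfer len %u, %s, LUN %u, CDB len %u\n",
	    get_le32(&buf[4]), get_le32(&buf[8]),
	    (buf[12] & 0x80) ? "IN" : "OUT",	/* bCBWFlags */
	    buf[13] & 0x0f, buf[14] & 0x1f);
	return (0);
}
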
*/ struct cfumass_csw_t { uDWord dCSWSignature; #define CSWSIGNATURE 0x53425355 /* "USBS" */ uDWord dCSWTag; uDWord dCSWDataResidue; uByte bCSWStatus; #define CSWSTATUS_GOOD 0x0 #define CSWSTATUS_FAILED 0x1 #define CSWSTATUS_PHASE 0x2 } __packed; #define CFUMASS_CSW_SIZE 13 CTASSERT(sizeof(struct cfumass_csw_t) == CFUMASS_CSW_SIZE); struct cfumass_softc { device_t sc_dev; struct usb_device *sc_udev; struct usb_xfer *sc_xfer[CFUMASS_T_MAX]; struct cfumass_cbw_t *sc_cbw; struct cfumass_csw_t *sc_csw; struct mtx sc_mtx; int sc_online; int sc_ctl_initid; /* * This is used to communicate between CTL callbacks * and USB callbacks; basically, it holds the state * for the current command ("the" command, since there * is no queueing in USB Mass Storage). */ bool sc_current_stalled; /* * The following are set upon receiving a SCSI command. */ int sc_current_tag; int sc_current_transfer_length; int sc_current_flags; /* * The following are set in ctl_datamove(). */ int sc_current_residue; union ctl_io *sc_ctl_io; /* * The following is set in cfumass_done(). */ int sc_current_status; /* * Number of requests queued to CTL. */ volatile u_int sc_queued; }; /* * USB interface. */ static device_probe_t cfumass_probe; static device_attach_t cfumass_attach; static device_detach_t cfumass_detach; static device_suspend_t cfumass_suspend; static device_resume_t cfumass_resume; static usb_handle_request_t cfumass_handle_request; static usb_callback_t cfumass_t_command_callback; static usb_callback_t cfumass_t_data_callback; static usb_callback_t cfumass_t_status_callback; static device_method_t cfumass_methods[] = { /* USB interface. */ DEVMETHOD(usb_handle_request, cfumass_handle_request), /* Device interface. */ DEVMETHOD(device_probe, cfumass_probe), DEVMETHOD(device_attach, cfumass_attach), DEVMETHOD(device_detach, cfumass_detach), DEVMETHOD(device_suspend, cfumass_suspend), DEVMETHOD(device_resume, cfumass_resume), DEVMETHOD_END }; static driver_t cfumass_driver = { .name = "cfumass", .methods = cfumass_methods, .size = sizeof(struct cfumass_softc), }; static devclass_t cfumass_devclass; DRIVER_MODULE(cfumass, uhub, cfumass_driver, cfumass_devclass, NULL, 0); MODULE_VERSION(cfumass, 0); MODULE_DEPEND(cfumass, usb, 1, 1, 1); MODULE_DEPEND(cfumass, usb_template, 1, 1, 1); static struct usb_config cfumass_config[CFUMASS_T_MAX] = { [CFUMASS_T_COMMAND] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = sizeof(struct cfumass_cbw_t), .callback = &cfumass_t_command_callback, .usb_mode = USB_MODE_DEVICE, }, [CFUMASS_T_DATA_OUT] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = CFUMASS_BULK_SIZE, .flags = {.proxy_buffer = 1, .short_xfer_ok = 1, .ext_buffer = 1}, .callback = &cfumass_t_data_callback, .usb_mode = USB_MODE_DEVICE, }, [CFUMASS_T_DATA_IN] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = CFUMASS_BULK_SIZE, .flags = {.proxy_buffer = 1, .short_xfer_ok = 1, .ext_buffer = 1}, .callback = &cfumass_t_data_callback, .usb_mode = USB_MODE_DEVICE, }, [CFUMASS_T_STATUS] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = sizeof(struct cfumass_csw_t), .flags = {.short_xfer_ok = 1}, .callback = &cfumass_t_status_callback, .usb_mode = USB_MODE_DEVICE, }, }; /* * CTL frontend interface. 
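
/*
 * Reviewer sketch (not part of the patch): producing the 13-byte CSW
 * defined in struct cfumass_csw_t above.  The tag echoes the CBW tag;
 * the residue is how many of the requested bytes were not transferred;
 * status is 0/1/2 as in the CSWSTATUS_* macros.
 */
#include <stdint.h>

static void
build_csw(uint8_t buf[13], uint32_t tag, uint32_t residue, uint8_t status)
{
	const uint32_t sig = 0x53425355;	/* dCSWSignature, "USBS" */

	buf[0] = (uint8_t)sig;
	buf[1] = (uint8_t)(sig >> 8);
	buf[2] = (uint8_t)(sig >> 16);
	buf[3] = (uint8_t)(sig >> 24);
	buf[4] = (uint8_t)tag;
	buf[5] = (uint8_t)(tag >> 8);
	buf[6] = (uint8_t)(tag >> 16);
	buf[7] = (uint8_t)(tag >> 24);
	buf[8] = (uint8_t)residue;
	buf[9] = (uint8_t)(residue >> 8);
	buf[10] = (uint8_t)(residue >> 16);
	buf[11] = (uint8_t)(residue >> 24);
	buf[12] = status;	/* bCSWStatus */
}
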
*/ static int cfumass_init(void); static int cfumass_shutdown(void); static void cfumass_online(void *arg); static void cfumass_offline(void *arg); static void cfumass_datamove(union ctl_io *io); static void cfumass_done(union ctl_io *io); static struct ctl_frontend cfumass_frontend = { .name = "umass", .init = cfumass_init, .shutdown = cfumass_shutdown, }; CTL_FRONTEND_DECLARE(ctlcfumass, cfumass_frontend); #define CFUMASS_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ device_printf(S->sc_dev, "%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFUMASS_WARN(S, X, ...) \ do { \ if (debug > 0) { \ device_printf(S->sc_dev, "WARNING: %s: " X "\n",\ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFUMASS_LOCK(X) mtx_lock(&X->sc_mtx) #define CFUMASS_UNLOCK(X) mtx_unlock(&X->sc_mtx) static void cfumass_transfer_start(struct cfumass_softc *sc, uint8_t xfer_index); static void cfumass_terminate(struct cfumass_softc *sc); static int cfumass_probe(device_t dev) { struct usb_attach_arg *uaa; struct usb_interface_descriptor *id; uaa = device_get_ivars(dev); if (uaa->usb_mode != USB_MODE_DEVICE) return (ENXIO); /* * Check for a compliant device. */ id = usbd_get_interface_descriptor(uaa->iface); if ((id == NULL) || (id->bInterfaceClass != UICLASS_MASS) || (id->bInterfaceSubClass != UISUBCLASS_SCSI) || (id->bInterfaceProtocol != UIPROTO_MASS_BBB)) { return (ENXIO); } return (BUS_PROBE_GENERIC); } static int cfumass_attach(device_t dev) { struct cfumass_softc *sc; struct usb_attach_arg *uaa; int error; sc = device_get_softc(dev); uaa = device_get_ivars(dev); sc->sc_dev = dev; sc->sc_udev = uaa->device; CFUMASS_DEBUG(sc, "go"); usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE); device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, "cfumass", NULL, MTX_DEF); refcount_acquire(&cfumass_refcount); error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex, sc->sc_xfer, cfumass_config, CFUMASS_T_MAX, sc, &sc->sc_mtx); if (error != 0) { CFUMASS_WARN(sc, "usbd_transfer_setup() failed: %s", usbd_errstr(error)); refcount_release(&cfumass_refcount); return (ENXIO); } sc->sc_cbw = usbd_xfer_get_frame_buffer(sc->sc_xfer[CFUMASS_T_COMMAND], 0); sc->sc_csw = usbd_xfer_get_frame_buffer(sc->sc_xfer[CFUMASS_T_STATUS], 0); sc->sc_ctl_initid = ctl_add_initiator(&cfumass_port, -1, 0, NULL); if (sc->sc_ctl_initid < 0) { CFUMASS_WARN(sc, "ctl_add_initiator() failed with error %d", sc->sc_ctl_initid); usbd_transfer_unsetup(sc->sc_xfer, CFUMASS_T_MAX); refcount_release(&cfumass_refcount); return (ENXIO); } refcount_init(&sc->sc_queued, 0); CFUMASS_LOCK(sc); cfumass_transfer_start(sc, CFUMASS_T_COMMAND); CFUMASS_UNLOCK(sc); return (0); } static int cfumass_detach(device_t dev) { struct cfumass_softc *sc; int error; sc = device_get_softc(dev); CFUMASS_DEBUG(sc, "go"); CFUMASS_LOCK(sc); cfumass_terminate(sc); CFUMASS_UNLOCK(sc); usbd_transfer_unsetup(sc->sc_xfer, CFUMASS_T_MAX); if (sc->sc_ctl_initid != -1) { error = ctl_remove_initiator(&cfumass_port, sc->sc_ctl_initid); if (error != 0) { CFUMASS_WARN(sc, "ctl_remove_initiator() failed " "with error %d", error); } sc->sc_ctl_initid = -1; } mtx_destroy(&sc->sc_mtx); refcount_release(&cfumass_refcount); return (0); } static int cfumass_suspend(device_t dev) { struct cfumass_softc *sc; sc = device_get_softc(dev); CFUMASS_DEBUG(sc, "go"); return (0); } static int cfumass_resume(device_t dev) { struct cfumass_softc *sc; sc = device_get_softc(dev); CFUMASS_DEBUG(sc, "go"); return (0); } static void cfumass_transfer_start(struct cfumass_softc *sc, uint8_t 
    xfer_index)
{

	usbd_transfer_start(sc->sc_xfer[xfer_index]);
}

static void
cfumass_transfer_stop_and_drain(struct cfumass_softc *sc,
    uint8_t xfer_index)
{

	usbd_transfer_stop(sc->sc_xfer[xfer_index]);
	CFUMASS_UNLOCK(sc);
	usbd_transfer_drain(sc->sc_xfer[xfer_index]);
	CFUMASS_LOCK(sc);
}

static void
cfumass_terminate(struct cfumass_softc *sc)
{
	int last;

	for (;;) {
		cfumass_transfer_stop_and_drain(sc, CFUMASS_T_COMMAND);
		cfumass_transfer_stop_and_drain(sc, CFUMASS_T_DATA_IN);
		cfumass_transfer_stop_and_drain(sc, CFUMASS_T_DATA_OUT);

		if (sc->sc_ctl_io != NULL) {
			CFUMASS_DEBUG(sc, "terminating CTL transfer");
			ctl_set_data_phase_error(&sc->sc_ctl_io->scsiio);
-			sc->sc_ctl_io->scsiio.be_move_done(sc->sc_ctl_io);
+			ctl_datamove_done(sc->sc_ctl_io, false);
			sc->sc_ctl_io = NULL;
		}

		cfumass_transfer_stop_and_drain(sc, CFUMASS_T_STATUS);

		refcount_acquire(&sc->sc_queued);
		last = refcount_release(&sc->sc_queued);
		if (last != 0)
			break;

		CFUMASS_DEBUG(sc, "%d CTL tasks pending", sc->sc_queued);
		msleep(__DEVOLATILE(void *, &sc->sc_queued), &sc->sc_mtx,
		    0, "cfumass_reset", hz / 100);
	}
}

static int
cfumass_handle_request(device_t dev,
    const void *preq, void **pptr, uint16_t *plen,
    uint16_t offset, uint8_t *pstate)
{
	static uint8_t max_lun_tmp;
	struct cfumass_softc *sc;
	const struct usb_device_request *req;
	uint8_t is_complete;

	sc = device_get_softc(dev);
	req = preq;
	is_complete = *pstate;

	CFUMASS_DEBUG(sc, "go");

	if (is_complete)
		return (ENXIO);

	if ((req->bmRequestType == UT_WRITE_CLASS_INTERFACE) &&
	    (req->bRequest == UR_RESET)) {
		CFUMASS_WARN(sc, "received Bulk-Only Mass Storage Reset");
		*plen = 0;

		CFUMASS_LOCK(sc);
		cfumass_terminate(sc);
		cfumass_transfer_start(sc, CFUMASS_T_COMMAND);
		CFUMASS_UNLOCK(sc);

		CFUMASS_DEBUG(sc, "Bulk-Only Mass Storage Reset done");
		return (0);
	}

	if ((req->bmRequestType == UT_READ_CLASS_INTERFACE) &&
	    (req->bRequest == UR_GET_MAX_LUN)) {
		CFUMASS_DEBUG(sc, "received Get Max LUN");

		if (offset == 0) {
			*plen = 1;
			/*
			 * The protocol doesn't support LUN numbers higher
			 * than 15.  Also, some initiators (namely Windows XP
			 * SP3 Version 2002) can't properly query the number
			 * of LUNs, resulting in inaccessible "fake" ones - thus
			 * the default limit of one LUN.
			 */
			if (max_lun < 0 || max_lun > 15) {
				CFUMASS_WARN(sc,
				    "invalid hw.usb.cfumass.max_lun, must be "
				    "between 0 and 15; defaulting to 0");
				max_lun_tmp = 0;
			} else {
				max_lun_tmp = max_lun;
			}
			*pptr = &max_lun_tmp;
		} else {
			*plen = 0;
		}

		return (0);
	}

	return (ENXIO);
}

static int
cfumass_quirk(struct cfumass_softc *sc, unsigned char *cdb, int cdb_len)
{
	struct scsi_start_stop_unit *sssu;

	switch (cdb[0]) {
	case START_STOP_UNIT:
		/*
		 * Some initiators - e.g. OS X, Darwin Kernel Version 15.6.0,
		 * root:xnu-3248.60.11~2/RELEASE_X86_64 - attempt to stop
		 * the unit on eject, but fail to start it when it's plugged
		 * back in.  Just ignore the command.
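
/*
 * Reviewer sketch (not part of the patch): the quirk test above as a
 * predicate over the raw CDB.  A START STOP UNIT is a candidate for
 * ignoring only when the power-condition field is zero and both START
 * and LOEJ are clear, i.e. a plain "stop".  Opcode and bit positions
 * follow SBC and are assumptions of this sketch.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
is_plain_stop(const uint8_t *cdb, int cdb_len)
{
	if (cdb_len < 6 || cdb[0] != 0x1b)	/* START STOP UNIT */
		return (false);
	if ((cdb[4] & 0xf0) != 0)	/* power condition must be 0 */
		return (false);
	return ((cdb[4] & 0x03) == 0);	/* START and LOEJ both clear */
}
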
*/ if (cdb_len < sizeof(*sssu)) { CFUMASS_DEBUG(sc, "received START STOP UNIT with " "bCDBLength %d, should be %zd", cdb_len, sizeof(*sssu)); break; } sssu = (struct scsi_start_stop_unit *)cdb; if ((sssu->how & SSS_PC_MASK) != 0) break; if ((sssu->how & SSS_START) != 0) break; if ((sssu->how & SSS_LOEJ) != 0) break; if (ignore_stop == 0) { break; } else if (ignore_stop == 1) { CFUMASS_WARN(sc, "ignoring START STOP UNIT request"); } else { CFUMASS_DEBUG(sc, "ignoring START STOP UNIT request"); } sc->sc_current_status = 0; cfumass_transfer_start(sc, CFUMASS_T_STATUS); return (1); default: break; } return (0); } static void cfumass_t_command_callback(struct usb_xfer *xfer, usb_error_t usb_error) { struct cfumass_softc *sc; uint32_t signature; union ctl_io *io; int error = 0; sc = usbd_xfer_softc(xfer); KASSERT(sc->sc_ctl_io == NULL, ("sc_ctl_io is %p, should be NULL", sc->sc_ctl_io)); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: CFUMASS_DEBUG(sc, "USB_ST_TRANSFERRED"); signature = UGETDW(sc->sc_cbw->dCBWSignature); if (signature != CBWSIGNATURE) { CFUMASS_WARN(sc, "wrong dCBWSignature 0x%08x, " "should be 0x%08x", signature, CBWSIGNATURE); break; } if (sc->sc_cbw->bCDBLength <= 0 || sc->sc_cbw->bCDBLength > sizeof(sc->sc_cbw->CBWCB)) { CFUMASS_WARN(sc, "invalid bCDBLength %d, should be <= %zd", sc->sc_cbw->bCDBLength, sizeof(sc->sc_cbw->CBWCB)); break; } sc->sc_current_stalled = false; sc->sc_current_status = 0; sc->sc_current_tag = UGETDW(sc->sc_cbw->dCBWTag); sc->sc_current_transfer_length = UGETDW(sc->sc_cbw->dCBWDataTransferLength); sc->sc_current_flags = sc->sc_cbw->bCBWFlags; /* * Make sure to report proper residue if the datamove wasn't * required, or wasn't called due to SCSI error. */ sc->sc_current_residue = sc->sc_current_transfer_length; if (cfumass_quirk(sc, sc->sc_cbw->CBWCB, sc->sc_cbw->bCDBLength) != 0) break; if (!cfumass_port_online) { CFUMASS_DEBUG(sc, "cfumass port is offline; stalling"); usbd_xfer_set_stall(xfer); break; } /* * Those CTL functions cannot be called with mutex held. 
*/ CFUMASS_UNLOCK(sc); io = ctl_alloc_io(cfumass_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = sc; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid = sc->sc_ctl_initid; io->io_hdr.nexus.targ_port = cfumass_port.targ_port; io->io_hdr.nexus.targ_lun = ctl_decode_lun(sc->sc_cbw->bCBWLUN); io->scsiio.tag_num = UGETDW(sc->sc_cbw->dCBWTag); io->scsiio.tag_type = CTL_TAG_UNTAGGED; io->scsiio.cdb_len = sc->sc_cbw->bCDBLength; memcpy(io->scsiio.cdb, sc->sc_cbw->CBWCB, sc->sc_cbw->bCDBLength); refcount_acquire(&sc->sc_queued); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFUMASS_WARN(sc, "ctl_queue() failed; error %d; stalling", error); ctl_free_io(io); refcount_release(&sc->sc_queued); CFUMASS_LOCK(sc); usbd_xfer_set_stall(xfer); break; } CFUMASS_LOCK(sc); break; case USB_ST_SETUP: tr_setup: CFUMASS_DEBUG(sc, "USB_ST_SETUP"); usbd_xfer_set_frame_len(xfer, 0, sizeof(*sc->sc_cbw)); usbd_transfer_submit(xfer); break; default: if (usb_error == USB_ERR_CANCELLED) { CFUMASS_DEBUG(sc, "USB_ERR_CANCELLED"); break; } CFUMASS_DEBUG(sc, "USB_ST_ERROR: %s", usbd_errstr(usb_error)); goto tr_setup; } } static void cfumass_t_data_callback(struct usb_xfer *xfer, usb_error_t usb_error) { struct cfumass_softc *sc = usbd_xfer_softc(xfer); union ctl_io *io = sc->sc_ctl_io; uint32_t max_bulk; struct ctl_sg_entry sg_entry, *sglist; int actlen, sumlen, sg_count; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: CFUMASS_DEBUG(sc, "USB_ST_TRANSFERRED"); usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); sc->sc_current_residue -= actlen; io->scsiio.ext_data_filled += actlen; io->scsiio.kern_data_resid -= actlen; if (actlen < sumlen || sc->sc_current_residue == 0 || io->scsiio.kern_data_resid == 0) { sc->sc_ctl_io = NULL; - io->scsiio.be_move_done(io); + ctl_datamove_done(io, false); break; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: CFUMASS_DEBUG(sc, "USB_ST_SETUP"); if (io->scsiio.kern_sg_entries > 0) { sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; sg_count = io->scsiio.kern_sg_entries; } else { sglist = &sg_entry; sglist->addr = io->scsiio.kern_data_ptr; sglist->len = io->scsiio.kern_data_len; sg_count = 1; } sumlen = io->scsiio.ext_data_filled - io->scsiio.kern_rel_offset; while (sumlen >= sglist->len && sg_count > 0) { sumlen -= sglist->len; sglist++; sg_count--; } KASSERT(sg_count > 0, ("Run out of S/G list entries")); max_bulk = usbd_xfer_max_len(xfer); actlen = min(sglist->len - sumlen, max_bulk); actlen = min(actlen, sc->sc_current_transfer_length - io->scsiio.ext_data_filled); CFUMASS_DEBUG(sc, "requested %d, done %d, max_bulk %d, " "segment %zd => transfer %d", sc->sc_current_transfer_length, io->scsiio.ext_data_filled, max_bulk, sglist->len - sumlen, actlen); usbd_xfer_set_frame_data(xfer, 0, (uint8_t *)sglist->addr + sumlen, actlen); usbd_transfer_submit(xfer); break; default: if (usb_error == USB_ERR_CANCELLED) { CFUMASS_DEBUG(sc, "USB_ERR_CANCELLED"); break; } CFUMASS_DEBUG(sc, "USB_ST_ERROR: %s", usbd_errstr(usb_error)); goto tr_setup; } } static void cfumass_t_status_callback(struct usb_xfer *xfer, usb_error_t usb_error) { struct cfumass_softc *sc; sc = usbd_xfer_softc(xfer); KASSERT(sc->sc_ctl_io == NULL, ("sc_ctl_io is %p, should be NULL", sc->sc_ctl_io)); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: CFUMASS_DEBUG(sc, "USB_ST_TRANSFERRED"); cfumass_transfer_start(sc, CFUMASS_T_COMMAND); break; case USB_ST_SETUP: tr_setup: CFUMASS_DEBUG(sc, "USB_ST_SETUP"); if (sc->sc_current_residue > 0 && 
!sc->sc_current_stalled) { CFUMASS_DEBUG(sc, "non-zero residue, stalling"); usbd_xfer_set_stall(xfer); sc->sc_current_stalled = true; } USETDW(sc->sc_csw->dCSWSignature, CSWSIGNATURE); USETDW(sc->sc_csw->dCSWTag, sc->sc_current_tag); USETDW(sc->sc_csw->dCSWDataResidue, sc->sc_current_residue); sc->sc_csw->bCSWStatus = sc->sc_current_status; usbd_xfer_set_frame_len(xfer, 0, sizeof(*sc->sc_csw)); usbd_transfer_submit(xfer); break; default: if (usb_error == USB_ERR_CANCELLED) { CFUMASS_DEBUG(sc, "USB_ERR_CANCELLED"); break; } CFUMASS_DEBUG(sc, "USB_ST_ERROR: %s", usbd_errstr(usb_error)); goto tr_setup; } } static void cfumass_online(void *arg __unused) { cfumass_port_online = true; } static void cfumass_offline(void *arg __unused) { cfumass_port_online = false; } static void cfumass_datamove(union ctl_io *io) { struct cfumass_softc *sc; sc = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; CFUMASS_DEBUG(sc, "go"); CFUMASS_LOCK(sc); KASSERT(sc->sc_ctl_io == NULL, ("sc_ctl_io is %p, should be NULL", sc->sc_ctl_io)); sc->sc_ctl_io = io; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { /* * Verify that CTL wants us to send the data in the direction * expected by the initiator. */ if (sc->sc_current_flags != CBWFLAGS_IN) { CFUMASS_WARN(sc, "wrong bCBWFlags 0x%x, should be 0x%x", sc->sc_current_flags, CBWFLAGS_IN); goto fail; } cfumass_transfer_start(sc, CFUMASS_T_DATA_IN); } else { if (sc->sc_current_flags != CBWFLAGS_OUT) { CFUMASS_WARN(sc, "wrong bCBWFlags 0x%x, should be 0x%x", sc->sc_current_flags, CBWFLAGS_OUT); goto fail; } cfumass_transfer_start(sc, CFUMASS_T_DATA_OUT); } CFUMASS_UNLOCK(sc); return; fail: ctl_set_data_phase_error(&io->scsiio); - io->scsiio.be_move_done(io); + ctl_datamove_done(io, true); sc->sc_ctl_io = NULL; } static void cfumass_done(union ctl_io *io) { struct cfumass_softc *sc; sc = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; CFUMASS_DEBUG(sc, "go"); KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); KASSERT(sc->sc_ctl_io == NULL, ("sc_ctl_io is %p, should be NULL", sc->sc_ctl_io)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. */ ctl_free_io(io); return; } /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); return; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) sc->sc_current_status = 0; else sc->sc_current_status = 1; /* XXX: How should we report BUSY, RESERVATION CONFLICT, etc? 
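
/*
 * Reviewer sketch (not part of the patch): the status collapse performed
 * in cfumass_done() above.  BBB only distinguishes "good" and "failed"
 * command status, so any non-success CTL status becomes CSWSTATUS_FAILED;
 * finer-grained results travel to the initiator via autosense instead.
 */
#include <stdint.h>

static uint8_t
ctl_to_csw_status(int ctl_status_is_success)
{
	/* CSWSTATUS_GOOD / CSWSTATUS_FAILED from the macros above. */
	return (ctl_status_is_success ? 0x0 : 0x1);
}
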
*/ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR && io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND) ctl_queue_sense(io); else ctl_free_io(io); CFUMASS_LOCK(sc); cfumass_transfer_start(sc, CFUMASS_T_STATUS); CFUMASS_UNLOCK(sc); refcount_release(&sc->sc_queued); } int cfumass_init(void) { int error; cfumass_port.frontend = &cfumass_frontend; cfumass_port.port_type = CTL_PORT_UMASS; cfumass_port.num_requested_ctl_io = 1; cfumass_port.port_name = "cfumass"; cfumass_port.physical_port = 0; cfumass_port.virtual_port = 0; cfumass_port.port_online = cfumass_online; cfumass_port.port_offline = cfumass_offline; cfumass_port.onoff_arg = NULL; cfumass_port.fe_datamove = cfumass_datamove; cfumass_port.fe_done = cfumass_done; cfumass_port.targ_port = -1; error = ctl_port_register(&cfumass_port); if (error != 0) { printf("%s: ctl_port_register() failed " "with error %d", __func__, error); } cfumass_port_online = true; refcount_init(&cfumass_refcount, 0); return (error); } int cfumass_shutdown(void) { int error; if (cfumass_refcount > 0) { if (debug > 1) { printf("%s: still have %u attachments; " "returning EBUSY\n", __func__, cfumass_refcount); } return (EBUSY); } error = ctl_port_deregister(&cfumass_port); if (error != 0) { printf("%s: ctl_port_deregister() failed " "with error %d\n", __func__, error); } return (error); }