diff --git a/sys/cam/ctl/ctl.c b/sys/cam/ctl/ctl.c index cae6dd4aa101..575c8eea5198 100644 --- a/sys/cam/ctl/ctl.c +++ b/sys/cam/ctl/ctl.c @@ -1,13578 +1,13575 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * Copyright (c) 2017 Jakub Wojciech Klama * Copyright (c) 2018 Marcelo Araujo * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id$ */ /* * CAM Target Layer, a SCSI device emulation subsystem. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctl_softc *control_softc = NULL; /* * Template mode pages. */ /* * Note that these are default values only. The actual values will be * filled in when the user does a mode sense. 
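 * (Each page is kept in two flavors below: a "default" template with the
 * power-on values and a "changeable" template whose set bits mark which
 * fields an initiator may alter via MODE SELECT; MODE SENSE can then report
 * the current, changeable or default variant of a page.)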
*/ const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_PER, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_da_verify_recovery_page verify_er_page_default = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_verify_recovery_page verify_er_page_changeable = { /*page_code*/SMS_VERIFY_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_verify_recovery_page) - 2, /*byte3*/SMS_VER_PER, /*read_retry_count*/0, /*reserved*/{ 0, 0, 0, 0, 0, 0 }, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, 
/*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK | SCP_NUAR, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) const static struct scsi_control_ext_page control_ext_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_control_ext_page control_ext_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0xff }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC, /*mrie*/SIEP_MRIE_NO, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 1} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_EWASC | SIEP_FLAGS_DEXCPT | SIEP_FLAGS_TEST | SIEP_FLAGS_LOGERR, /*mrie*/0x0f, /*interval_timer*/{0xff, 0xff, 0xff, 0xff}, /*report_count*/{0xff, 0xff, 0xff, 0xff} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/SLBPP_SITUA, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, 
/*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct scsi_cddvd_capabilities_page cddvd_page_default = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0x3f, /*caps2*/0x00, /*caps3*/0xf0, /*caps4*/0x00, /*caps5*/0x29, /*caps6*/0x00, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{8, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0, /*caps2*/0, /*caps3*/0, /*caps4*/0, /*caps5*/0, /*caps6*/0, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{0, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); static int worker_threads = -1; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, &worker_threads, 1, "Number of worker threads"); static int ctl_debug = CTL_DEBUG_NONE; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, &ctl_debug, 0, "Enabled debug flags"); static int ctl_lun_map_size = 1024; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, lun_map_size, CTLFLAG_RWTUN, &ctl_lun_map_size, 0, "Size of per-port LUN map (max LUN + 1)"); #ifdef CTL_TIME_IO static int ctl_time_io_secs = CTL_TIME_IO_DEFAULT_SECS; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN, &ctl_time_io_secs, 0, "Log requests taking more seconds"); #endif /* * Maximum number of LUNs we support. MUST be a power of 2. */ #define CTL_DEFAULT_MAX_LUNS 1024 static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS; TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN, &ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs"); /* * Maximum number of ports registered at one time. */ #define CTL_DEFAULT_MAX_PORTS 256 static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS; TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports); SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN, &ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports"); /* * Maximum number of initiators we support. 
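 * (That is CTL_MAX_INIT_PER_PORT initiators on each of up to ctl_max_ports
 * ports, as the macro below computes.)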
*/ #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports) /* * Supported pages (0x00), Serial number (0x80), Device ID (0x83), * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), * SCSI Ports (0x88), Third-party Copy (0x8F), SCSI Feature Sets (0x92), * Block limits (0xB0), Block Device Characteristics (0xB1) and * Logical Block Provisioning (0xB2) */ #define SCSI_EVPD_NUM_SUPPORTED_PAGES 11 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, int param); static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); static int ctl_init(void); static int ctl_shutdown(void); static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries); static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_enable_lun(struct ctl_lun *lun); static int ctl_disable_lun(struct ctl_lun *lun); static int ctl_free_lun(struct ctl_lun *lun); static int ctl_do_mode_select(union ctl_io *io); static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param); static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg); static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io); static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); static int ctl_inquiry_std(struct ctl_scsiio *ctsio); static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq); static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2); static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, const uint8_t *serialize_row, union ctl_io *ooa_io); static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io **starting_io); static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip); static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *io, bool skip); static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio); static void ctl_failover_lun(union ctl_io *io); static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio); static int ctl_scsiio(struct ctl_scsiio *ctsio); static int 
ctl_target_reset(union ctl_io *io); static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type); static int ctl_lun_reset(union ctl_io *io); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_query_task(union ctl_io *io, int task_set); static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type); static int ctl_i_t_nexus_reset(union ctl_io *io); static int ctl_query_async_event(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr); static void ctl_datamove_remote_write(union ctl_io *io); static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static void ctl_process_done(union ctl_io *io); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); static int ctl_ha_init(void); static int ctl_ha_shutdown(void); static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ha_frontend = { .name = "ha", .init = ctl_ha_init, .shutdown = ctl_ha_shutdown, }; static int ctl_ha_init(void) { struct ctl_softc *softc = control_softc; if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &softc->othersc_pool) != 0) return (ENOMEM); if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { ctl_pool_free(softc->othersc_pool); return (EIO); } if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { ctl_ha_msg_destroy(softc); ctl_pool_free(softc->othersc_pool); return (EIO); } return (0); }; static int ctl_ha_shutdown(void) { struct ctl_softc *softc = control_softc; struct ctl_port *port; ctl_ha_msg_shutdown(softc); if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) return (EIO); if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) return (EIO); ctl_pool_free(softc->othersc_pool); while ((port = STAILQ_FIRST(&ha_frontend.port_list)) != NULL) { ctl_port_deregister(port); free(port->port_name, M_CTL); free(port, M_CTL); } return (0); }; static void ctl_ha_datamove(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_sg_entry *sgl; union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy, i, j; memset(&msg.dt, 0, sizeof(msg.dt)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[0].addr = (void *)vtophys(io->scsiio.kern_data_ptr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; #endif msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; do_sg_copy = 1; } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before processing. 
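 * For illustration (numbers are made up): if the on-wire sg_list array
 * holds 16 entries and kern_sg_entries is 40, the loop sends three
 * CTL_MSG_DATAMOVE messages carrying 16, 16 and 8 entries respectively,
 * and only the last one has sg_last set.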
*/ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; for (i = sg_entries_sent, j = 0; i < msg.dt.cur_sg_entries; i++, j++) { #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[j].addr = sgl[i].addr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[j].addr = (void *)vtophys(sgl[i].addr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[j].addr = sgl[i].addr; #endif msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.dt) - sizeof(msg.dt.sg_list) + sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, M_WAITOK) > CTL_HA_STATUS_SUCCESS) { io->io_hdr.port_status = 31341; ctl_datamove_done(io, true); return; } msg.dt.sent_sg_entries = sg_entries_sent; } /* * Officially handover the request from us to peer. * If failover has just happened, then we must return error. * If failover happen just after, then it is not our problem. */ if (lun) mtx_lock(&lun->lun_lock); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { if (lun) mtx_unlock(&lun->lun_lock); io->io_hdr.port_status = 31342; ctl_datamove_done(io, true); return; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; if (lun) mtx_unlock(&lun->lun_lock); } static void ctl_ha_done(union ctl_io *io) { union ctl_ha_msg msg; if (io->io_hdr.io_type == CTL_IO_SCSI) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); } ctl_free_io(io); } static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, msg_info->scsi.sense_len); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? 
*/ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } void ctl_isc_announce_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg *msg; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&lun->lun_lock); i = sizeof(msg->lun); if (lun->lun_devid) i += lun->lun_devid->len; i += sizeof(pr_key) * lun->pr_key_count; alloc: mtx_unlock(&lun->lun_lock); msg = malloc(i, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); k = sizeof(msg->lun); if (lun->lun_devid) k += lun->lun_devid->len; k += sizeof(pr_key) * lun->pr_key_count; if (i < k) { free(msg, M_CTL); i = k; goto alloc; } bzero(&msg->lun, sizeof(msg->lun)); msg->hdr.msg_type = CTL_MSG_LUN_SYNC; msg->hdr.nexus.targ_lun = lun->lun; msg->hdr.nexus.targ_mapped_lun = lun->lun; msg->lun.flags = lun->flags; msg->lun.pr_generation = lun->pr_generation; msg->lun.pr_res_idx = lun->pr_res_idx; msg->lun.pr_res_type = lun->pr_res_type; msg->lun.pr_key_count = lun->pr_key_count; i = 0; if (lun->lun_devid) { msg->lun.lun_devid_len = lun->lun_devid->len; memcpy(&msg->lun.data[i], lun->lun_devid->data, msg->lun.lun_devid_len); i += msg->lun.lun_devid_len; } for (k = 0; k < CTL_MAX_INITIATORS; k++) { if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) continue; pr_key.pr_iid = k; memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); i += sizeof(pr_key); } mtx_unlock(&lun->lun_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); if (lun->flags & CTL_LUN_PRIMARY_SC) { for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } } } void ctl_isc_announce_port(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; i = sizeof(msg->port) + strlen(port->port_name) + 1; if (port->lun_map) i += port->lun_map_size * sizeof(uint32_t); if (port->port_devid) i += port->port_devid->len; if (port->target_devid) i += port->target_devid->len; if (port->init_devid) i += port->init_devid->len; msg = malloc(i, M_CTL, M_WAITOK); bzero(&msg->port, sizeof(msg->port)); msg->hdr.msg_type = CTL_MSG_PORT_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->port.port_type = port->port_type; msg->port.physical_port = port->physical_port; msg->port.virtual_port = port->virtual_port; msg->port.status = port->status; i = 0; msg->port.name_len = sprintf(&msg->port.data[i], "%d:%s", softc->ha_id, port->port_name) + 1; i += msg->port.name_len; if (port->lun_map) { msg->port.lun_map_len = port->lun_map_size * sizeof(uint32_t); memcpy(&msg->port.data[i], port->lun_map, msg->port.lun_map_len); i += msg->port.lun_map_len; } if (port->port_devid) { msg->port.port_devid_len = port->port_devid->len; memcpy(&msg->port.data[i], port->port_devid->data, msg->port.port_devid_len); i += msg->port.port_devid_len; } if (port->target_devid) { msg->port.target_devid_len = port->target_devid->len; memcpy(&msg->port.data[i], port->target_devid->data, msg->port.target_devid_len); i += msg->port.target_devid_len; } if (port->init_devid) { msg->port.init_devid_len = port->init_devid->len; memcpy(&msg->port.data[i], port->init_devid->data, msg->port.init_devid_len); i += msg->port.init_devid_len; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, 
&msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); } void ctl_isc_announce_iid(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i, l; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&softc->ctl_lock); i = sizeof(msg->iid); l = 0; if (port->wwpn_iid[iid].name) l = strlen(port->wwpn_iid[iid].name) + 1; i += l; msg = malloc(i, M_CTL, M_NOWAIT); if (msg == NULL) { mtx_unlock(&softc->ctl_lock); return; } bzero(&msg->iid, sizeof(msg->iid)); msg->hdr.msg_type = CTL_MSG_IID_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->hdr.nexus.initid = iid; msg->iid.in_use = port->wwpn_iid[iid].in_use; msg->iid.name_len = l; msg->iid.wwpn = port->wwpn_iid[iid].wwpn; if (port->wwpn_iid[iid].name) strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); mtx_unlock(&softc->ctl_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); free(msg, M_CTL); } void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg msg; u_int i; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == page && lun->mode_pages.index[i].subpage == subpage) break; } if (i == CTL_NUM_MODE_PAGES) return; /* Don't try to replicate pages not present on this device. */ if (lun->mode_pages.index[i].page_data == NULL) return; bzero(&msg.mode, sizeof(msg.mode)); msg.hdr.msg_type = CTL_MSG_MODE_SYNC; msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.mode.page_code = page; msg.mode.subpage = subpage; msg.mode.page_len = lun->mode_pages.index[i].page_len; memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, msg.mode.page_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), M_WAITOK); } static void ctl_isc_ha_link_up(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_ha_msg msg; int i; /* Announce this node parameters to peer for validation. 
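 * (The peer's ctl_isc_login() will abort the HA link if the CTL HA
 * version, ha_mode, ha_id or the max_luns/max_ports/max_init_per_port
 * limits do not match its own.)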
*/ msg.login.msg_type = CTL_MSG_LOGIN; msg.login.version = CTL_HA_VERSION; msg.login.ha_mode = softc->ha_mode; msg.login.ha_id = softc->ha_id; msg.login.max_luns = ctl_max_luns; msg.login.max_ports = ctl_max_ports; msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), M_WAITOK); STAILQ_FOREACH(port, &softc->port_list, links) { ctl_isc_announce_port(port); for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use) ctl_isc_announce_iid(port, i); } } STAILQ_FOREACH(lun, &softc->lun_list, links) ctl_isc_announce_lun(lun); } static void ctl_isc_ha_link_down(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_io *io; int i; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); io = ctl_alloc_io(softc->othersc_pool); mtx_lock(&softc->ctl_lock); ctl_zero_io(io); io->io_hdr.msg_type = CTL_MSG_FAILOVER; io->io_hdr.nexus.targ_mapped_lun = lun->lun; ctl_enqueue_isc(io); } STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port >= softc->port_min && port->targ_port < softc->port_max) continue; port->status &= ~CTL_PORT_STATUS_ONLINE; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { port->wwpn_iid[i].in_use = 0; free(port->wwpn_iid[i].name, M_CTL); port->wwpn_iid[i].name = NULL; } } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); mtx_lock(&softc->ctl_lock); if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns || (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); if (msg->ua.ua_all) { if (msg->ua.ua_set) ctl_est_ua_all(lun, iid, msg->ua.ua_type); else ctl_clr_ua_all(lun, iid, msg->ua.ua_type); } else { if (msg->ua.ua_set) ctl_est_ua(lun, iid, msg->ua.ua_type); else ctl_clr_ua(lun, iid, msg->ua.ua_type); } mtx_unlock(&lun->lun_lock); } static void ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; ctl_lun_flags oflags; uint32_t targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; if (msg->lun.lun_devid_len != i || (i > 0 && memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { mtx_unlock(&lun->lun_lock); printf("%s: Received conflicting HA LUN %d\n", __func__, targ_lun); return; } else { /* Record whether peer is primary. 
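 * (If that changed, an asymmetric-access-change UA is posted; and when the
 * peer is primary while this node is not, the persistent reservation state
 * carried in the same message is adopted locally.)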
*/ oflags = lun->flags; if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_DISABLED) == 0) lun->flags |= CTL_LUN_PEER_SC_PRIMARY; else lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; if (oflags != lun->flags) ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); /* If peer is primary and we are not -- use data */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { lun->pr_generation = msg->lun.pr_generation; lun->pr_res_idx = msg->lun.pr_res_idx; lun->pr_res_type = msg->lun.pr_res_type; lun->pr_key_count = msg->lun.pr_key_count; for (k = 0; k < CTL_MAX_INITIATORS; k++) ctl_clr_prkey(lun, k); for (k = 0; k < msg->lun.pr_key_count; k++) { memcpy(&pr_key, &msg->lun.data[i], sizeof(pr_key)); ctl_alloc_prkey(lun, pr_key.pr_iid); ctl_set_prkey(lun, pr_key.pr_iid, pr_key.pr_key); i += sizeof(pr_key); } } mtx_unlock(&lun->lun_lock); CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", __func__, targ_lun, (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? "primary" : "secondary")); /* If we are primary but peer doesn't know -- notify */ if ((lun->flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) ctl_isc_announce_lun(lun); } } static void ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; struct ctl_lun *lun; int i, new; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 1; port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); port->frontend = &ha_frontend; port->targ_port = msg->hdr.nexus.targ_port; port->fe_datamove = ctl_ha_datamove; port->fe_done = ctl_ha_done; } else if (port->frontend == &ha_frontend) { CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 0; } else { printf("%s: Received conflicting HA port %d\n", __func__, msg->hdr.nexus.targ_port); return; } port->port_type = msg->port.port_type; port->physical_port = msg->port.physical_port; port->virtual_port = msg->port.virtual_port; port->status = msg->port.status; i = 0; free(port->port_name, M_CTL); port->port_name = strndup(&msg->port.data[i], msg->port.name_len, M_CTL); i += msg->port.name_len; if (msg->port.lun_map_len != 0) { if (port->lun_map == NULL || port->lun_map_size * sizeof(uint32_t) < msg->port.lun_map_len) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(msg->port.lun_map_len, M_CTL, M_WAITOK); } memcpy(port->lun_map, &msg->port.data[i], msg->port.lun_map_len); port->lun_map_size = msg->port.lun_map_len / sizeof(uint32_t); i += msg->port.lun_map_len; } else { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; } if (msg->port.port_devid_len != 0) { if (port->port_devid == NULL || port->port_devid->len < msg->port.port_devid_len) { free(port->port_devid, M_CTL); port->port_devid = malloc(sizeof(struct ctl_devid) + msg->port.port_devid_len, M_CTL, M_WAITOK); } memcpy(port->port_devid->data, &msg->port.data[i], msg->port.port_devid_len); port->port_devid->len = msg->port.port_devid_len; i += msg->port.port_devid_len; } else { free(port->port_devid, M_CTL); port->port_devid = NULL; } if (msg->port.target_devid_len != 0) { if (port->target_devid == NULL || port->target_devid->len < msg->port.target_devid_len) { free(port->target_devid, M_CTL); port->target_devid = malloc(sizeof(struct ctl_devid) + msg->port.target_devid_len, M_CTL, M_WAITOK); } memcpy(port->target_devid->data, &msg->port.data[i], msg->port.target_devid_len); 
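	/*
	 * (As with the name, LUN map and port device ID above, "i" is the
	 * running offset into msg->port.data; the blobs are unpacked in the
	 * same order ctl_isc_announce_port() packed them.)
	 */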
port->target_devid->len = msg->port.target_devid_len; i += msg->port.target_devid_len; } else { free(port->target_devid, M_CTL); port->target_devid = NULL; } if (msg->port.init_devid_len != 0) { if (port->init_devid == NULL || port->init_devid->len < msg->port.init_devid_len) { free(port->init_devid, M_CTL); port->init_devid = malloc(sizeof(struct ctl_devid) + msg->port.init_devid_len, M_CTL, M_WAITOK); } memcpy(port->init_devid->data, &msg->port.data[i], msg->port.init_devid_len); port->init_devid->len = msg->port.init_devid_len; i += msg->port.init_devid_len; } else { free(port->init_devid, M_CTL); port->init_devid = NULL; } if (new) { if (ctl_port_register(port) != 0) { printf("%s: ctl_port_register() failed with error\n", __func__); } } mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; int iid; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { printf("%s: Received IID for unknown port %d\n", __func__, msg->hdr.nexus.targ_port); return; } iid = msg->hdr.nexus.initid; if (port->wwpn_iid[iid].in_use != 0 && msg->iid.in_use == 0) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); port->wwpn_iid[iid].in_use = msg->iid.in_use; port->wwpn_iid[iid].wwpn = msg->iid.wwpn; free(port->wwpn_iid[iid].name, M_CTL); if (msg->iid.name_len) { port->wwpn_iid[iid].name = strndup(&msg->iid.data[0], msg->iid.name_len, M_CTL); } else port->wwpn_iid[iid].name = NULL; } static void ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { if (msg->login.version != CTL_HA_VERSION) { printf("CTL HA peers have different versions %d != %d\n", msg->login.version, CTL_HA_VERSION); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_mode != softc->ha_mode) { printf("CTL HA peers have different ha_mode %d != %d\n", msg->login.ha_mode, softc->ha_mode); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.ha_id == softc->ha_id) { printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } if (msg->login.max_luns != ctl_max_luns || msg->login.max_ports != ctl_max_ports || msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) { printf("CTL HA peers have different limits\n"); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); return; } } static void ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; u_int i; uint32_t initidx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == msg->mode.page_code && lun->mode_pages.index[i].subpage == msg->mode.subpage) break; } if (i == CTL_NUM_MODE_PAGES) { mtx_unlock(&lun->lun_lock); return; } memcpy(lun->mode_pages.index[i].page_data, msg->mode.data, lun->mode_pages.index[i].page_len); initidx = ctl_get_initindex(&msg->hdr.nexus); if (initidx != -1) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); } /* * ISC (Inter Shelf 
Communication) event handler. Events from the HA * subsystem come in here. */ static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) { struct ctl_softc *softc = control_softc; union ctl_io *io; struct ctl_prio *presio; ctl_ha_status isc_status; CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event)); if (event == CTL_HA_EVT_MSG_RECV) { union ctl_ha_msg *msg, msgbuf; if (param > sizeof(msgbuf)) msg = malloc(param, M_CTL, M_WAITOK); else msg = &msgbuf; isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param, M_WAITOK); if (isc_status != CTL_HA_STATUS_SUCCESS) { printf("%s: Error receiving message: %d\n", __func__, isc_status); if (msg != &msgbuf) free(msg, M_CTL); return; } CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->hdr.msg_type)); switch (msg->hdr.msg_type) { case CTL_MSG_SERIALIZE: io = ctl_alloc_io(softc->othersc_pool); ctl_zero_io(io); // populate ctsio from msg io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.msg_type = CTL_MSG_SERIALIZE; io->io_hdr.remote_io = msg->hdr.original_sc; io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | CTL_FLAG_IO_ACTIVE; /* * If we're in serialization-only mode, we don't * want to go through full done processing. Thus * the COPY flag. * * XXX KDM add another flag that is more specific. */ if (softc->ha_mode != CTL_HA_MODE_XFER) io->io_hdr.flags |= CTL_FLAG_INT_COPY; io->io_hdr.nexus = msg->hdr.nexus; io->scsiio.priority = msg->scsi.priority; io->scsiio.tag_num = msg->scsi.tag_num; io->scsiio.tag_type = msg->scsi.tag_type; #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ io->scsiio.cdb_len = msg->scsi.cdb_len; memcpy(io->scsiio.cdb, msg->scsi.cdb, CTL_MAX_CDBLEN); if (softc->ha_mode == CTL_HA_MODE_XFER) { const struct ctl_cmd_entry *entry; entry = ctl_get_cmd_entry(&io->scsiio, NULL); io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; io->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; } ctl_enqueue_isc(io); break; /* Performed on the Originating SC, XFER mode only */ case CTL_MSG_DATAMOVE: { struct ctl_sg_entry *sgl; int i, j; io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM do something here */ break; } io->io_hdr.msg_type = CTL_MSG_DATAMOVE; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* * Keep track of this, we need to send it back over * when the datamove is complete. 
*/ io->io_hdr.remote_io = msg->hdr.serializing_sc; if (msg->hdr.status == CTL_SUCCESS) io->io_hdr.status = msg->hdr.status; if (msg->dt.sg_sequence == 0) { #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif i = msg->dt.kern_sg_entries + msg->dt.kern_data_len / CTL_HA_DATAMOVE_SEGMENT + 1; sgl = malloc(sizeof(*sgl) * i, M_CTL, M_WAITOK | M_ZERO); CTL_RSGL(io) = sgl; CTL_LSGL(io) = &sgl[msg->dt.kern_sg_entries]; io->scsiio.kern_data_ptr = (uint8_t *)sgl; io->scsiio.kern_sg_entries = msg->dt.kern_sg_entries; io->scsiio.rem_sg_entries = msg->dt.kern_sg_entries; io->scsiio.kern_data_len = msg->dt.kern_data_len; io->scsiio.kern_total_len = msg->dt.kern_total_len; io->scsiio.kern_data_resid = msg->dt.kern_data_resid; io->scsiio.kern_rel_offset = msg->dt.kern_rel_offset; io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR; io->io_hdr.flags |= msg->dt.flags & CTL_FLAG_BUS_ADDR; } else sgl = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; for (i = msg->dt.sent_sg_entries, j = 0; i < (msg->dt.sent_sg_entries + msg->dt.cur_sg_entries); i++, j++) { sgl[i].addr = msg->dt.sg_list[j].addr; sgl[i].len = msg->dt.sg_list[j].len; } /* * If this is the last piece of the I/O, we've got * the full S/G list. Queue processing in the thread. * Otherwise wait for the next piece. */ if (msg->dt.sg_last != 0) ctl_enqueue_isc(io); break; } /* Performed on the Serializing (primary) SC, XFER mode only */ case CTL_MSG_DATAMOVE_DONE: { if (msg->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? */ break; } /* * We grab the sense information here in case * there was a failure, so we can return status * back to the initiator. */ io = msg->hdr.serializing_sc; io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; io->io_hdr.port_status = msg->scsi.port_status; io->scsiio.kern_data_resid = msg->scsi.kern_data_resid; if (msg->hdr.status != CTL_STATUS_NONE) { io->io_hdr.status = msg->hdr.status; io->scsiio.scsi_status = msg->scsi.scsi_status; io->scsiio.sense_len = msg->scsi.sense_len; memcpy(&io->scsiio.sense_data, &msg->scsi.sense_data, msg->scsi.sense_len); if (msg->hdr.status == CTL_SUCCESS) io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } ctl_enqueue_isc(io); break; } /* Preformed on Originating SC, SER_ONLY mode */ case CTL_MSG_R2R: io = msg->hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); break; } io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; io->io_hdr.msg_type = CTL_MSG_R2R; io->io_hdr.remote_io = msg->hdr.serializing_sc; ctl_enqueue_isc(io); break; /* * Performed on Serializing(i.e. primary SC) SC in SER_ONLY * mode. * Performed on the Originating (i.e. secondary) SC in XFER * mode */ case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) ctl_isc_handler_finish_xfer(softc, msg); else ctl_isc_handler_finish_ser_only(softc, msg); break; /* Preformed on Originating SC */ case CTL_MSG_BAD_JUJU: io = msg->hdr.original_sc; if (io == NULL) { printf("%s: Bad JUJU!, original_sc is NULL!\n", __func__); break; } ctl_copy_sense_data(msg, io); /* * IO should have already been cleaned up on other * SC so clear this flag so we won't send a message * back to finish the IO there. 
*/ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* io = msg->hdr.serializing_sc; */ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_enqueue_isc(io); break; /* Handle resets sent from the other side */ case CTL_MSG_MANAGE_TASKS: { struct ctl_taskio *taskio; taskio = (struct ctl_taskio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)taskio); taskio->io_hdr.io_type = CTL_IO_TASK; taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; taskio->io_hdr.nexus = msg->hdr.nexus; taskio->task_action = msg->task.task_action; taskio->tag_num = msg->task.tag_num; taskio->tag_type = msg->task.tag_type; #ifdef CTL_TIME_IO taskio->io_hdr.start_time = time_uptime; getbinuptime(&taskio->io_hdr.start_bt); #endif /* CTL_TIME_IO */ ctl_run_task((union ctl_io *)taskio); break; } /* Persistent Reserve action which needs attention */ case CTL_MSG_PERS_ACTION: presio = (struct ctl_prio *)ctl_alloc_io( softc->othersc_pool); ctl_zero_io((union ctl_io *)presio); presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; presio->io_hdr.nexus = msg->hdr.nexus; presio->pr_msg = msg->pr; ctl_enqueue_isc((union ctl_io *)presio); break; case CTL_MSG_UA: ctl_isc_ua(softc, msg, param); break; case CTL_MSG_PORT_SYNC: ctl_isc_port_sync(softc, msg, param); break; case CTL_MSG_LUN_SYNC: ctl_isc_lun_sync(softc, msg, param); break; case CTL_MSG_IID_SYNC: ctl_isc_iid_sync(softc, msg, param); break; case CTL_MSG_LOGIN: ctl_isc_login(softc, msg, param); break; case CTL_MSG_MODE_SYNC: ctl_isc_mode_sync(softc, msg, param); break; default: printf("Received HA message of unknown type %d\n", msg->hdr.msg_type); ctl_ha_msg_abort(CTL_HA_CHAN_CTL); break; } if (msg != &msgbuf) free(msg, M_CTL); } else if (event == CTL_HA_EVT_LINK_CHANGE) { printf("CTL: HA link status changed from %d to %d\n", softc->ha_link, param); if (param == softc->ha_link) return; if (softc->ha_link == CTL_HA_LINK_ONLINE) { softc->ha_link = param; ctl_isc_ha_link_down(softc); } else { softc->ha_link = param; if (softc->ha_link == CTL_HA_LINK_ONLINE) ctl_isc_ha_link_up(softc); } return; } else { printf("ctl_isc_event_handler: Unknown event %d\n", event); return; } } static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) { memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data, src->scsi.sense_len); dest->scsiio.scsi_status = src->scsi.scsi_status; dest->scsiio.sense_len = src->scsi.sense_len; dest->io_hdr.status = src->hdr.status; } static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest) { memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data, src->scsiio.sense_len); dest->scsi.scsi_status = src->scsiio.scsi_status; dest->scsi.sense_len = src->scsiio.sense_len; dest->hdr.status = src->io_hdr.status; } void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; } void ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua) { int i; mtx_assert(&lun->lun_lock, MA_OWNED); if (lun->pending_ua[port] == NULL) return; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port * CTL_MAX_INIT_PER_PORT + i == except) continue; lun->pending_ua[port][i] |= ua; } } void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { 
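	/*
	 * Pending UAs are kept as one lazily allocated array of
	 * CTL_MAX_INIT_PER_PORT entries per port, so a flat initiator index
	 * decomposes as in ctl_est_ua() above (illustrative sketch only):
	 *
	 *	pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
	 *	pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
	 *
	 * This function just applies that to every port served by this head.
	 */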
struct ctl_softc *softc = lun->ctl_softc; int i; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) ctl_est_ua_port(lun, i, except, ua); } void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; ctl_ua_type *pu; if (initidx < softc->init_min || initidx >= softc->init_max) return; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } } static int ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; struct ctl_lun_req ireq; int error, value; value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 0 : 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_unlock(&softc->ctl_lock); bzero(&ireq, sizeof(ireq)); ireq.reqtype = CTL_LUNREQ_MODIFY; ireq.reqdata.modify.lun_id = lun->lun; lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, curthread); if (ireq.status != CTL_LUN_OK) { printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", __func__, ireq.status, ireq.error_str); } mtx_lock(&softc->ctl_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { struct make_dev_args args; struct ctl_softc *softc; int i, error; softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); make_dev_args_init(&args); args.mda_devsw = &ctl_cdevsw; args.mda_uid = UID_ROOT; args.mda_gid = GID_OPERATOR; args.mda_mode = 0600; args.mda_si_drv1 = softc; args.mda_si_drv2 = NULL; error = make_dev_s(&args, &softc->dev, "cam/ctl"); if (error != 0) { free(softc, M_DEVBUF); control_softc = NULL; return (error); } sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->flags = 0; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, "HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)"); if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) { printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n", ctl_max_luns, 
CTL_DEFAULT_MAX_LUNS); ctl_max_luns = CTL_DEFAULT_MAX_LUNS; } softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns, M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_lun_mask = malloc(sizeof(uint32_t) * ((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) { printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n", ctl_max_ports, CTL_DEFAULT_MAX_PORTS); ctl_max_ports = CTL_DEFAULT_MAX_PORTS; } softc->ctl_port_mask = malloc(sizeof(uint32_t) * ((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO); softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_cnt = ctl_max_ports; softc->port_min = 0; } else { softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES; softc->port_min = (softc->ha_id - 1) * softc->port_cnt; } softc->port_max = softc->port_min + softc->port_cnt; softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, "HA link state (0 - offline, 1 - unknown, 2 - online)"); STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); return (error); } } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, &softc->thresh_thread, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); return (error); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); if (softc->is_single == 0) { if (ctl_frontend_register(&ha_frontend) != 0) softc->is_single = 1; } return (0); } static int ctl_shutdown(void) { struct ctl_softc *softc = control_softc; int i; if (softc->is_single == 0) ctl_frontend_deregister(&ha_frontend); destroy_dev(softc->dev); /* Shutdown CTL threads. 
*/ softc->shutdown = 1; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; while (thr->thread != NULL) { wakeup(thr); if (thr->thread != NULL) pause("CTL thr shutdown", 1); } mtx_destroy(&thr->queue_lock); } while (softc->thresh_thread != NULL) { wakeup(softc->thresh_thread); if (softc->thresh_thread != NULL) pause("CTL thr shutdown", 1); } ctl_tpc_shutdown(softc); uma_zdestroy(softc->io_zone); mtx_destroy(&softc->ctl_lock); free(softc->ctl_luns, M_DEVBUF); free(softc->ctl_lun_mask, M_DEVBUF); free(softc->ctl_port_mask, M_DEVBUF); free(softc->ctl_ports, M_DEVBUF); sysctl_ctx_free(&softc->sysctl_ctx); free(softc, M_DEVBUF); control_softc = NULL; return (0); } static int ctl_module_event_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: return (ctl_init()); case MOD_UNLOAD: return (ctl_shutdown()); default: return (EOPNOTSUPP); } } /* * XXX KDM should we do some access checks here? Bump a reference count to * prevent a CTL module from being unloaded while someone has it open? */ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } /* * Remove an initiator by port number and initiator ID. * Returns 0 for success, -1 for failure. */ int ctl_remove_initiator(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; int last; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); if (iid > CTL_MAX_INIT_PER_PORT) { printf("%s: initiator ID %u > maximum %u!\n", __func__, iid, CTL_MAX_INIT_PER_PORT); return (-1); } mtx_lock(&softc->ctl_lock); last = (--port->wwpn_iid[iid].in_use == 0); port->wwpn_iid[iid].last_use = time_uptime; mtx_unlock(&softc->ctl_lock); if (last) ctl_i_t_nexus_loss(softc, iid, CTL_UA_POWERON); ctl_isc_announce_iid(port, iid); return (0); } /* * Add an initiator to the initiator map. * Returns iid for success, < 0 for failure. */ int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) { struct ctl_softc *softc = port->ctl_softc; time_t best_time; int i, best; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); if (iid >= CTL_MAX_INIT_PER_PORT) { printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); free(name, M_CTL); return (-1); } mtx_lock(&softc->ctl_lock); if (iid < 0 && (wwpn != 0 || name != NULL)) { for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { iid = i; break; } if (name != NULL && port->wwpn_iid[i].name != NULL && strcmp(name, port->wwpn_iid[i].name) == 0) { iid = i; break; } } } if (iid < 0) { for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use == 0 && port->wwpn_iid[i].wwpn == 0 && port->wwpn_iid[i].name == NULL) { iid = i; break; } } } if (iid < 0) { best = -1; best_time = INT32_MAX; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use == 0) { if (port->wwpn_iid[i].last_use < best_time) { best = i; best_time = port->wwpn_iid[i].last_use; } } } iid = best; } if (iid < 0) { mtx_unlock(&softc->ctl_lock); free(name, M_CTL); return (-2); } if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { /* * This is not an error yet.
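 * The same initiator may simply be logging in again: if either the
 * WWPN or the name matches the entry already registered at this ID,
 * the existing slot is reused below.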
*/ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { #if 0 printf("%s: port %d iid %u WWPN %#jx arrived" " again\n", __func__, port->targ_port, iid, (uintmax_t)wwpn); #endif goto take; } if (name != NULL && port->wwpn_iid[iid].name != NULL && strcmp(name, port->wwpn_iid[iid].name) == 0) { #if 0 printf("%s: port %d iid %u name '%s' arrived" " again\n", __func__, port->targ_port, iid, name); #endif goto take; } /* * This is an error, but what do we do about it? The * driver is telling us we have a new WWPN for this * initiator ID, so we pretty much need to use it. */ printf("%s: port %d iid %u WWPN %#jx '%s' arrived," " but WWPN %#jx '%s' is still at that address\n", __func__, port->targ_port, iid, wwpn, name, (uintmax_t)port->wwpn_iid[iid].wwpn, port->wwpn_iid[iid].name); } take: free(port->wwpn_iid[iid].name, M_CTL); port->wwpn_iid[iid].name = name; port->wwpn_iid[iid].wwpn = wwpn; port->wwpn_iid[iid].in_use++; mtx_unlock(&softc->ctl_lock); ctl_isc_announce_iid(port, iid); return (iid); } static int ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) { int len; switch (port->port_type) { case CTL_PORT_FC: { struct scsi_transportid_fcp *id = (struct scsi_transportid_fcp *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_FC; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); return (sizeof(*id)); } case CTL_PORT_ISCSI: { struct scsi_transportid_iscsi_port *id = (struct scsi_transportid_iscsi_port *)buf; if (port->wwpn_iid[iid].name == NULL) return (0); memset(id, 0, 256); id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | SCSI_PROTO_ISCSI; len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; len = roundup2(min(len, 252), 4); scsi_ulto2b(len, id->additional_length); return (sizeof(*id) + len); } case CTL_PORT_SAS: { struct scsi_transportid_sas *id = (struct scsi_transportid_sas *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SAS; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); return (sizeof(*id)); } default: { struct scsi_transportid_spi *id = (struct scsi_transportid_spi *)buf; memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SPI; scsi_ulto2b(iid, id->scsi_addr); scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); return (sizeof(*id)); } } } /* * Serialize a command that went down the "wrong" side, and so was sent to * this controller for execution. The logic is a little different than the * standard case in ctl_scsiio_precheck(). Errors in this case need to get * sent back to the other side, but in the success case, we execute the * command on this side (XFER mode) or tell the other side to execute it * (SER_ONLY mode). */ static void ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); union ctl_ha_msg msg_info; struct ctl_lun *lun; const struct ctl_cmd_entry *entry; union ctl_io *bio; uint32_t targ_lun; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; /* Make sure that we know about this port. */ if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 1); goto badjuju; } /* Make sure that we know about this LUN. 
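 * (targ_mapped_lun is used here, so any per-port LUN mapping has
 * already been applied to this nexus.)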
*/ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); /* * The other node would not have sent this request to us unless it * had received an announcement that we are the primary node for this * LUN. If the LUN does not exist now, that is probably the result of * a race, so respond to the initiator in the most opaque way. */ ctl_set_busy(ctsio); goto badjuju; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/Os have completed. */ if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); ctl_set_busy(ctsio); goto badjuju; } entry = ctl_get_cmd_entry(ctsio, NULL); ctsio->seridx = entry->seridx; if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); goto badjuju; } CTL_LUN(ctsio) = lun; CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a * particular LUN, and stays there until completion. */ #ifdef CTL_TIME_IO if (LIST_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { case CTL_ACTION_PASS: case CTL_ACTION_SKIP: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); mtx_unlock(&lun->lun_lock); } else { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); /* send msg back to other side */ msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_WAITOK); } break; case CTL_ACTION_BLOCK: ctsio->io_hdr.blocker = bio; TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); break; case CTL_ACTION_OVERLAP: LIST_REMOVE(&ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); goto badjuju; case CTL_ACTION_OVERLAP_TAG: LIST_REMOVE(&ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num); badjuju: ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); msg_info.hdr.original_sc = ctsio->io_hdr.remote_io; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); ctl_free_io((union ctl_io *)ctsio); break; default: __assert_unreachable(); } } /* * Fill in the OOA (order of arrival) entries for the given LUN. */ static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries) { struct ctl_io_hdr *ioh; mtx_lock(&lun->lun_lock); ioh = LIST_FIRST(&lun->ooa_queue); if (ioh == NULL) { mtx_unlock(&lun->lun_lock); return; } while (LIST_NEXT(ioh, ooa_links) != NULL) ioh = LIST_NEXT(ioh, ooa_links); for ( ; ioh; ioh = LIST_PREV(ioh, &lun->ooa_queue, ctl_io_hdr, ooa_links)) { union ctl_io *io = (union ctl_io *)ioh; struct ctl_ooa_entry *entry; /* * If we've got more than we can fit, just count the * remaining entries.
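 * The caller (the CTL_GET_OOA handler) reports the overflow back to
 * userland via dropped_num and CTL_OOA_NEED_MORE_SPACE, so a consumer
 * can grow its buffer and retry.  A hypothetical consumer (sketch only;
 * fd is assumed to be an open descriptor on the CTL control device,
 * all other names come from struct ctl_ooa used here) might do:
 *
 *	ooa.flags = CTL_OOA_FLAG_ALL_LUNS;
 *	ooa.alloc_num = 256;
 *	ooa.alloc_len = ooa.alloc_num * sizeof(struct ctl_ooa_entry);
 *	ooa.entries = calloc(ooa.alloc_num, sizeof(struct ctl_ooa_entry));
 *	if (ioctl(fd, CTL_GET_OOA, &ooa) == 0 &&
 *	    ooa.status == CTL_OOA_NEED_MORE_SPACE) {
 *		... grow alloc_num by dropped_num, reallocate, retry ...
 *	}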
*/ if (*cur_fill_num >= ooa_hdr->alloc_num) { (*cur_fill_num)++; continue; } entry = &kern_entries[*cur_fill_num]; entry->tag_num = io->scsiio.tag_num; entry->lun_num = lun->lun; #ifdef CTL_TIME_IO entry->start_bt = io->io_hdr.start_bt; #endif bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); entry->cdb_len = io->scsiio.cdb_len; if (io->io_hdr.blocker != NULL) entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; if (io->io_hdr.flags & CTL_FLAG_ABORT) entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; if (io->io_hdr.flags & CTL_FLAG_STATUS_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_QUEUED; if (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) entry->cmd_flags |= CTL_OOACMD_FLAG_STATUS_SENT; (*cur_fill_num)++; } mtx_unlock(&lun->lun_lock); } /* * Escape characters that are illegal or not recommended in XML. */ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) { char *end = str + size; int retval; retval = 0; for (; *str && str < end; str++) { switch (*str) { case '&': retval = sbuf_printf(sb, "&"); break; case '>': retval = sbuf_printf(sb, ">"); break; case '<': retval = sbuf_printf(sb, "<"); break; default: retval = sbuf_putc(sb, *str); break; } if (retval != 0) break; } return (retval); } static void ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) { struct scsi_vpd_id_descriptor *desc; int i; if (id == NULL || id->len < 4) return; desc = (struct scsi_vpd_id_descriptor *)id->data; switch (desc->id_type & SVPD_ID_TYPE_MASK) { case SVPD_ID_TYPE_T10: sbuf_printf(sb, "t10."); break; case SVPD_ID_TYPE_EUI64: sbuf_printf(sb, "eui."); break; case SVPD_ID_TYPE_NAA: sbuf_printf(sb, "naa."); break; case SVPD_ID_TYPE_SCSI_NAME: break; } switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { case SVPD_ID_CODESET_BINARY: for (i = 0; i < desc->length; i++) sbuf_printf(sb, "%02x", desc->identifier[i]); break; case SVPD_ID_CODESET_ASCII: sbuf_printf(sb, "%.*s", (int)desc->length, (char *)desc->identifier); break; case SVPD_ID_CODESET_UTF8: sbuf_printf(sb, "%s", (char *)desc->identifier); break; } } static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_softc *softc = dev->si_drv1; struct ctl_port *port; struct ctl_lun *lun; int retval; retval = 0; switch (cmd) { case CTL_IO: retval = ctl_ioctl_io(dev, cmd, addr, flag, td); break; case CTL_ENABLE_PORT: case CTL_DISABLE_PORT: case CTL_SET_PORT_WWNS: { struct ctl_port *port; struct ctl_port_entry *entry; entry = (struct ctl_port_entry *)addr; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { int action, done; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max) continue; action = 0; done = 0; if ((entry->port_type == CTL_PORT_NONE) && (entry->targ_port == port->targ_port)) { /* * If the user only wants to enable or * disable or set WWNs on a specific port, * do the operation and we're done. */ action = 1; done = 1; } else if (entry->port_type & port->port_type) { /* * Compare the user's type mask with the * particular frontend type to see if we * have a match. */ action = 1; done = 0; /* * Make sure the user isn't trying to set * WWNs on multiple ports at the same time. 
*/
*/ if (cmd == CTL_SET_PORT_WWNS) { printf("%s: Can't set WWNs on " "multiple ports\n", __func__); retval = EINVAL; break; } } if (action == 0) continue; /* * XXX KDM we have to drop the lock here, because * the online/offline operations can potentially * block. We need to reference count the frontends * so they can't go away, */ if (cmd == CTL_ENABLE_PORT) { mtx_unlock(&softc->ctl_lock); ctl_port_online(port); mtx_lock(&softc->ctl_lock); } else if (cmd == CTL_DISABLE_PORT) { mtx_unlock(&softc->ctl_lock); ctl_port_offline(port); mtx_lock(&softc->ctl_lock); } else if (cmd == CTL_SET_PORT_WWNS) { ctl_port_set_wwns(port, (entry->flags & CTL_PORT_WWNN_VALID) ? 1 : 0, entry->wwnn, (entry->flags & CTL_PORT_WWPN_VALID) ? 1 : 0, entry->wwpn); } if (done != 0) break; } mtx_unlock(&softc->ctl_lock); break; } case CTL_GET_OOA: { struct ctl_ooa *ooa_hdr; struct ctl_ooa_entry *entries; uint32_t cur_fill_num; ooa_hdr = (struct ctl_ooa *)addr; if ((ooa_hdr->alloc_len == 0) || (ooa_hdr->alloc_num == 0)) { printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " "must be non-zero\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num); retval = EINVAL; break; } if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * sizeof(struct ctl_ooa_entry))) { printf("%s: CTL_GET_OOA: alloc len %u must be alloc " "num %d * sizeof(struct ctl_ooa_entry) %zd\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); retval = EINVAL; break; } entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); if (entries == NULL) { printf("%s: could not allocate %d bytes for OOA " "dump\n", __func__, ooa_hdr->alloc_len); retval = ENOMEM; break; } mtx_lock(&softc->ctl_lock); if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 && (ooa_hdr->lun_num >= ctl_max_luns || softc->ctl_luns[ooa_hdr->lun_num] == NULL)) { mtx_unlock(&softc->ctl_lock); free(entries, M_CTL); printf("%s: CTL_GET_OOA: invalid LUN %ju\n", __func__, (uintmax_t)ooa_hdr->lun_num); retval = EINVAL; break; } cur_fill_num = 0; if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { STAILQ_FOREACH(lun, &softc->lun_list, links) { ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } } else { lun = softc->ctl_luns[ooa_hdr->lun_num]; ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); } mtx_unlock(&softc->ctl_lock); ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); ooa_hdr->fill_len = ooa_hdr->fill_num * sizeof(struct ctl_ooa_entry); retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); if (retval != 0) { printf("%s: error copying out %d bytes for OOA dump\n", __func__, ooa_hdr->fill_len); } getbinuptime(&ooa_hdr->cur_bt); if (cur_fill_num > ooa_hdr->alloc_num) { ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; } else { ooa_hdr->dropped_num = 0; ooa_hdr->status = CTL_OOA_OK; } free(entries, M_CTL); break; } case CTL_DELAY_IO: { struct ctl_io_delay_info *delay_info; delay_info = (struct ctl_io_delay_info *)addr; #ifdef CTL_IO_DELAY mtx_lock(&softc->ctl_lock); if (delay_info->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delay_info->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); delay_info->status = CTL_DELAY_STATUS_OK; switch (delay_info->delay_type) { case CTL_DELAY_TYPE_CONT: case CTL_DELAY_TYPE_ONESHOT: break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; break; } switch (delay_info->delay_loc) { case CTL_DELAY_LOC_DATAMOVE: 
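/* Delay applied while the command's data is being transferred; the CTL_DELAY_LOC_DONE case below delays delivery of final status instead. */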
lun->delay_info.datamove_type = delay_info->delay_type; lun->delay_info.datamove_delay = delay_info->delay_secs; break; case CTL_DELAY_LOC_DONE: lun->delay_info.done_type = delay_info->delay_type; lun->delay_info.done_delay = delay_info->delay_secs; break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; break; } mtx_unlock(&lun->lun_lock); #else delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; #endif /* CTL_IO_DELAY */ break; } case CTL_ERROR_INJECT: { struct ctl_error_desc *err_desc, *new_err_desc; err_desc = (struct ctl_error_desc *)addr; new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, M_WAITOK | M_ZERO); bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); mtx_lock(&softc->ctl_lock); if (err_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[err_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); free(new_err_desc, M_CTL); printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", __func__, (uintmax_t)err_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * We could do some checking here to verify the validity * of the request, but given the complexity of error * injection requests, the checking logic would be fairly * complex. * * For now, if the request is invalid, it just won't get * executed and might get deleted. */ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); /* * XXX KDM check to make sure the serial number is unique, * in case we somehow manage to wrap. That shouldn't * happen for a very long time, but it's the right thing to * do. */ new_err_desc->serial = lun->error_serial; err_desc->serial = lun->error_serial; lun->error_serial++; mtx_unlock(&lun->lun_lock); break; } case CTL_ERROR_INJECT_DELETE: { struct ctl_error_desc *delete_desc, *desc, *desc2; int delete_done; delete_desc = (struct ctl_error_desc *)addr; delete_done = 0; mtx_lock(&softc->ctl_lock); if (delete_desc->lun_id >= ctl_max_luns || (lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) { mtx_unlock(&softc->ctl_lock); printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", __func__, (uintmax_t)delete_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { if (desc->serial != delete_desc->serial) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); delete_done = 1; } mtx_unlock(&lun->lun_lock); if (delete_done == 0) { printf("%s: CTL_ERROR_INJECT_DELETE: can't find " "error serial %ju on LUN %u\n", __func__, delete_desc->serial, delete_desc->lun_id); retval = EINVAL; break; } break; } case CTL_DUMP_STRUCTS: { int j, k; struct ctl_port *port; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); printf("CTL Persistent Reservation information start:\n"); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) != 0) { mtx_unlock(&lun->lun_lock); continue; } for (j = 0; j < ctl_max_ports; j++) { if (lun->pr_keys[j] == NULL) continue; for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ if (lun->pr_keys[j][k] == 0) continue; printf(" LUN %ju port %d iid %d key " "%#jx\n", lun->lun, j, k, (uintmax_t)lun->pr_keys[j][k]); } } mtx_unlock(&lun->lun_lock); } printf("CTL Persistent Reservation information end\n"); printf("CTL Ports:\n"); STAILQ_FOREACH(port, &softc->port_list, links) { printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " "%#jx WWPN %#jx\n", port->targ_port, port->port_name, port->frontend->name, port->port_type, port->physical_port, 
port->virtual_port, (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 && port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL) continue; printf(" iid %u use %d WWPN %#jx '%s'\n", j, port->wwpn_iid[j].in_use, (uintmax_t)port->wwpn_iid[j].wwpn, port->wwpn_iid[j].name); } } printf("CTL Port information end\n"); mtx_unlock(&softc->ctl_lock); /* * XXX KDM calling this without a lock. We'd likely want * to drop the lock before calling the frontend's dump * routine anyway. */ printf("CTL Frontends:\n"); STAILQ_FOREACH(fe, &softc->fe_list, links) { printf(" Frontend '%s'\n", fe->name); if (fe->fe_dump != NULL) fe->fe_dump(); } printf("CTL Frontend information end\n"); break; } case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; struct ctl_backend_driver *backend; void *packed; nvlist_t *tmp_args_nvl; size_t packed_len; lun_req = (struct ctl_lun_req *)addr; tmp_args_nvl = lun_req->args_nvl; backend = ctl_backend_find(lun_req->backend); if (backend == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Backend \"%s\" not found.", lun_req->backend); break; } if (lun_req->args != NULL) { packed = malloc(lun_req->args_len, M_CTL, M_WAITOK); if (copyin(lun_req->args, packed, lun_req->args_len) != 0) { free(packed, M_CTL); lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyin args."); break; } lun_req->args_nvl = nvlist_unpack(packed, lun_req->args_len, 0); free(packed, M_CTL); if (lun_req->args_nvl == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot unpack args nvlist."); break; } } else lun_req->args_nvl = nvlist_create(0); retval = backend->ioctl(dev, cmd, addr, flag, td); nvlist_destroy(lun_req->args_nvl); lun_req->args_nvl = tmp_args_nvl; if (lun_req->result_nvl != NULL) { if (lun_req->result != NULL) { packed = nvlist_pack(lun_req->result_nvl, &packed_len); if (packed == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot pack result nvlist."); break; } if (packed_len > lun_req->result_len) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Result nvlist too large."); free(packed, M_NVLIST); break; } if (copyout(packed, lun_req->result, packed_len)) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Cannot copyout() the result."); free(packed, M_NVLIST); break; } lun_req->result_len = packed_len; free(packed, M_NVLIST); } nvlist_destroy(lun_req->result_nvl); } break; } case CTL_LUN_LIST: { struct sbuf *sb; struct ctl_lun_list *list; const char *name, *value; void *cookie; int type; list = (struct ctl_lun_list *)addr; /* * Allocate a fixed length sbuf here, based on the length * of the user's buffer. We could allocate an auto-extending * buffer, and then tell the user how much larger our * amount of data is than his buffer, but that presents * some problems: * * 1. The sbuf(9) routines use a blocking malloc, and so * we can't hold a lock while calling them with an * auto-extending buffer. * * 2. There is not currently a LUN reference counting * mechanism, outside of outstanding transactions on * the LUN's OOA queue. So a LUN could go away on us * while we're getting the LUN number, backend-specific * information, etc. Thus, given the way things * currently work, we need to hold the CTL lock while * grabbing LUN information. 
* * So, from the user's standpoint, the best thing to do is * allocate what he thinks is a reasonable buffer length, * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, * double the buffer length and try again. (And repeat * that until he succeeds.) */ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); retval = sbuf_printf(sb, "\n", (uintmax_t)lun->lun); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", (lun->backend == NULL) ? "none" : lun->backend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", lun->be_lun->lun_type); if (retval != 0) break; if (lun->backend == NULL) { retval = sbuf_printf(sb, "\n"); if (retval != 0) break; continue; } retval = sbuf_printf(sb, "\t%ju\n", (lun->be_lun->maxlba > 0) ? lun->be_lun->maxlba + 1 : 0); if (retval != 0) break; retval = sbuf_printf(sb, "\t%u\n", lun->be_lun->blocksize); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->serial_num, sizeof(lun->be_lun->serial_num)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->device_id, sizeof(lun->be_lun->device_id)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; if (lun->backend->lun_info != NULL) { retval = lun->backend->lun_info(lun->be_lun, sb); if (retval != 0) break; } cookie = NULL; while ((name = nvlist_next(lun->be_lun->options, &type, &cookie)) != NULL) { sbuf_printf(sb, "\t<%s>", name); if (type == NV_TYPE_STRING) { value = dnvlist_get_string( lun->be_lun->options, name, NULL); if (value != NULL) sbuf_printf(sb, "%s", value); } sbuf_printf(sb, "\n", name); } retval = sbuf_printf(sb, "\n"); if (retval != 0) break; mtx_unlock(&lun->lun_lock); } if (lun != NULL) mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_ISCSI: { struct ctl_iscsi *ci; struct ctl_frontend *fe; ci = (struct ctl_iscsi *)addr; fe = ctl_frontend_find("iscsi"); if (fe == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Frontend \"iscsi\" not found."); break; } retval = fe->ioctl(dev, cmd, addr, flag, td); break; } case CTL_PORT_REQ: { struct ctl_req *req; struct ctl_frontend *fe; void *packed; nvlist_t *tmp_args_nvl; size_t packed_len; req = (struct ctl_req *)addr; tmp_args_nvl = req->args_nvl; fe = ctl_frontend_find(req->driver); if (fe == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Frontend \"%s\" not found.", req->driver); break; } if (req->args != NULL) { packed = malloc(req->args_len, M_CTL, M_WAITOK); if (copyin(req->args, packed, req->args_len) != 0) { 
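/* copyin of the packed args nvlist failed; free the staging buffer and report the error to the caller. */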
free(packed, M_CTL); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot copyin args."); break; } req->args_nvl = nvlist_unpack(packed, req->args_len, 0); free(packed, M_CTL); if (req->args_nvl == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot unpack args nvlist."); break; } } else req->args_nvl = nvlist_create(0); if (fe->ioctl) retval = fe->ioctl(dev, cmd, addr, flag, td); else retval = ENODEV; nvlist_destroy(req->args_nvl); req->args_nvl = tmp_args_nvl; if (req->result_nvl != NULL) { if (req->result != NULL) { packed = nvlist_pack(req->result_nvl, &packed_len); if (packed == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot pack result nvlist."); break; } if (packed_len > req->result_len) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Result nvlist too large."); free(packed, M_NVLIST); break; } if (copyout(packed, req->result, packed_len)) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Cannot copyout() the result."); free(packed, M_NVLIST); break; } req->result_len = packed_len; free(packed, M_NVLIST); } nvlist_destroy(req->result_nvl); } break; } case CTL_PORT_LIST: { struct sbuf *sb; struct ctl_port *port; struct ctl_lun_list *list; const char *name, *value; void *cookie; int j, type; uint32_t plun; list = (struct ctl_lun_list *)addr; sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { retval = sbuf_printf(sb, "\n", (uintmax_t)port->targ_port); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", port->frontend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->port_type); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", port->port_name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->physical_port); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->virtual_port); if (retval != 0) break; if (port->target_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->target_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->port_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_info != NULL) { retval = port->port_info(port->onoff_arg, sb); if (retval != 0) break; } cookie = NULL; while ((name = nvlist_next(port->options, &type, &cookie)) != NULL) { sbuf_printf(sb, "\t<%s>", name); if (type == NV_TYPE_STRING) { value = dnvlist_get_string(port->options, name, NULL); if (value != NULL) sbuf_printf(sb, "%s", value); } sbuf_printf(sb, "\n", name); } if (port->lun_map != NULL) { sbuf_printf(sb, "\ton\n"); for (j = 0; j < port->lun_map_size; j++) { plun = ctl_lun_map_from_port(port, j); if (plun == UINT32_MAX) continue; sbuf_printf(sb, "\t%u\n", j, plun); } } for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 || (port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL)) continue; if (port->wwpn_iid[j].name != NULL) retval = sbuf_printf(sb, "\t%s\n", j, port->wwpn_iid[j].name); else retval = sbuf_printf(sb, "\tnaa.%08jx\n", j, port->wwpn_iid[j].wwpn); if (retval != 0) break; } if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; } mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_LUN_MAP: { struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; struct ctl_port *port; mtx_lock(&softc->ctl_lock); if (lm->port < softc->port_min || lm->port >= softc->port_max || (port = softc->ctl_ports[lm->port]) == NULL) { mtx_unlock(&softc->ctl_lock); return (ENXIO); } if (port->status & CTL_PORT_STATUS_ONLINE) { STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_port(lun, lm->port, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps if (lm->plun != UINT32_MAX) { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_unset(port, lm->plun); else if (lm->lun < ctl_max_luns && softc->ctl_luns[lm->lun] != NULL) retval = ctl_lun_map_set(port, lm->plun, lm->lun); else return (ENXIO); } else { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_deinit(port); else retval = ctl_lun_map_init(port); } if (port->status & CTL_PORT_STATUS_ONLINE) ctl_isc_announce_port(port); break; } case CTL_GET_LUN_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. 
*/ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(lun, &softc->lun_list, links) { if (lun->lun < stats->first_item) continue; if (stats->fill_len + sizeof(lun->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&lun->stats, &stats->stats[i++], sizeof(lun->stats)); if (retval != 0) break; stats->fill_len += sizeof(lun->stats); } stats->num_items = softc->num_luns; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } case CTL_GET_PORT_STATS: { struct ctl_get_io_stats *stats = (struct ctl_get_io_stats *)addr; int i; /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. */ i = 0; stats->status = CTL_SS_OK; stats->fill_len = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < stats->first_item) continue; if (stats->fill_len + sizeof(port->stats) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; break; } retval = copyout(&port->stats, &stats->stats[i++], sizeof(port->stats)); if (retval != 0) break; stats->fill_len += sizeof(port->stats); } stats->num_items = softc->num_ports; stats->flags = CTL_STATS_FLAG_NONE; #ifdef CTL_TIME_IO stats->flags |= CTL_STATS_FLAG_TIME_VALID; #endif getnanouptime(&stats->timestamp); break; } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. */ type = _IOC_TYPE(cmd); STAILQ_FOREACH(backend, &softc->be_list, links) { if (backend->type == type) { found = 1; break; } } if (found == 0) { printf("ctl: unknown ioctl command %#lx or backend " "%d\n", cmd, type); retval = EINVAL; break; } retval = backend->ioctl(dev, cmd, addr, flag, td); #endif retval = ENOTTY; break; } } return (retval); } uint32_t ctl_get_initindex(struct ctl_nexus *nexus) { return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); } int ctl_lun_map_init(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; int size = ctl_lun_map_size; uint32_t i; if (port->lun_map == NULL || port->lun_map_size < size) { port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = malloc(size * sizeof(uint32_t), M_CTL, M_NOWAIT); } if (port->lun_map == NULL) return (ENOMEM); for (i = 0; i < size; i++) port->lun_map[i] = UINT32_MAX; port->lun_map_size = size; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_disable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_disable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_deinit(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; struct ctl_lun *lun; if (port->lun_map == NULL) return (0); port->lun_map_size = 0; free(port->lun_map, M_CTL); port->lun_map = NULL; if (port->status & CTL_PORT_STATUS_ONLINE) { if (port->lun_enable != NULL) { STAILQ_FOREACH(lun, &softc->lun_list, links) port->lun_enable(port->targ_lun_arg, lun->lun); } ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) { int status; uint32_t old; if (port->lun_map == NULL) { status = ctl_lun_map_init(port); if (status != 0) return (status); } if (plun >= port->lun_map_size) return (EINVAL); old = port->lun_map[plun]; port->lun_map[plun] = glun; if ((port->status & 
CTL_PORT_STATUS_ONLINE) && old == UINT32_MAX) { if (port->lun_enable != NULL) port->lun_enable(port->targ_lun_arg, plun); ctl_isc_announce_port(port); } return (0); } int ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) { uint32_t old; if (port->lun_map == NULL || plun >= port->lun_map_size) return (0); old = port->lun_map[plun]; port->lun_map[plun] = UINT32_MAX; if ((port->status & CTL_PORT_STATUS_ONLINE) && old != UINT32_MAX) { if (port->lun_disable != NULL) port->lun_disable(port->targ_lun_arg, plun); ctl_isc_announce_port(port); } return (0); } uint32_t ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) { if (port == NULL) return (UINT32_MAX); if (port->lun_map == NULL) return (lun_id); if (lun_id > port->lun_map_size) return (UINT32_MAX); return (port->lun_map[lun_id]); } uint32_t ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) { uint32_t i; if (port == NULL) return (UINT32_MAX); if (port->lun_map == NULL) return (lun_id); for (i = 0; i < port->lun_map_size; i++) { if (port->lun_map[i] == lun_id) return (i); } return (UINT32_MAX); } uint32_t ctl_decode_lun(uint64_t encoded) { uint8_t lun[8]; uint32_t result = 0xffffffff; be64enc(lun, encoded); switch (lun[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) result = lun[1]; break; case RPL_LUNDATA_ATYP_FLAT: if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0) result = ((lun[0] & 0x3f) << 8) + lun[1]; break; case RPL_LUNDATA_ATYP_EXTLUN: switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) { case 0x02: switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) { case 0x00: result = lun[1]; break; case 0x10: result = (lun[1] << 16) + (lun[2] << 8) + lun[3]; break; case 0x20: if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0) result = (lun[2] << 24) + (lun[3] << 16) + (lun[4] << 8) + lun[5]; break; } break; case RPL_LUNDATA_EXT_EAM_NOT_SPEC: result = 0xffffffff; break; } break; } return (result); } uint64_t ctl_encode_lun(uint32_t decoded) { uint64_t l = decoded; if (l <= 0xff) return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48)); if (l <= 0x3fff) return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48)); if (l <= 0xffffff) return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN | 0x12) << 56) | (l << 32)); return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) | (l << 16)); } int ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last) { int i; for (i = first; i < last; i++) { if ((mask[i / 32] & (1 << (i % 32))) == 0) return (i); } return (-1); } int ctl_set_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) != 0) return (-1); else mask[chunk] |= (1 << piece); return (0); } int ctl_clear_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (-1); else mask[chunk] &= ~(1 << piece); return (0); } int ctl_is_set(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (0); else return (1); } static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return (0); return (t[residx % CTL_MAX_INIT_PER_PORT]); } static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = 
lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return; t[residx % CTL_MAX_INIT_PER_PORT] = 0; } static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *p; u_int i; i = residx/CTL_MAX_INIT_PER_PORT; if (lun->pr_keys[i] != NULL) return; mtx_unlock(&lun->lun_lock); p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, M_WAITOK | M_ZERO); mtx_lock(&lun->lun_lock); if (lun->pr_keys[i] == NULL) lun->pr_keys[i] = p; else free(p, M_CTL); } static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; KASSERT(t != NULL, ("prkey %d is not allocated", residx)); t[residx % CTL_MAX_INIT_PER_PORT] = key; } /* * ctl_softc, pool_name, total_ctl_io are passed in. * npool is passed out. */ int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, uint32_t total_ctl_io, void **npool) { struct ctl_io_pool *pool; pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, M_NOWAIT | M_ZERO); if (pool == NULL) return (ENOMEM); snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); pool->ctl_softc = ctl_softc; #ifdef IO_POOLS pool->zone = uma_zsecond_create(pool->name, NULL, NULL, NULL, NULL, ctl_softc->io_zone); /* uma_prealloc(pool->zone, total_ctl_io); */ #else pool->zone = ctl_softc->io_zone; #endif *npool = pool; return (0); } void ctl_pool_free(struct ctl_io_pool *pool) { if (pool == NULL) return; #ifdef IO_POOLS uma_zdestroy(pool->zone); #endif free(pool, M_CTL); } union ctl_io * ctl_alloc_io(void *pool_ref) { struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; union ctl_io *io; io = uma_zalloc(pool->zone, M_WAITOK); if (io != NULL) { io->io_hdr.pool = pool_ref; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } return (io); } union ctl_io * ctl_alloc_io_nowait(void *pool_ref) { struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; union ctl_io *io; io = uma_zalloc(pool->zone, M_NOWAIT); if (io != NULL) { io->io_hdr.pool = pool_ref; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } return (io); } void ctl_free_io(union ctl_io *io) { struct ctl_io_pool *pool; if (io == NULL) return; pool = (struct ctl_io_pool *)io->io_hdr.pool; uma_zfree(pool->zone, io); } void ctl_zero_io(union ctl_io *io) { struct ctl_io_pool *pool; if (io == NULL) return; /* * May need to preserve linked list pointers at some point too. */ pool = io->io_hdr.pool; memset(io, 0, sizeof(*io)); io->io_hdr.pool = pool; CTL_SOFTC(io) = pool->ctl_softc; TAILQ_INIT(&io->io_hdr.blocked_queue); } int ctl_expand_number(const char *buf, uint64_t *num) { char *endptr; uint64_t number; unsigned shift; number = strtoq(buf, &endptr, 0); switch (tolower((unsigned char)*endptr)) { case 'e': shift = 60; break; case 'p': shift = 50; break; case 't': shift = 40; break; case 'g': shift = 30; break; case 'm': shift = 20; break; case 'k': shift = 10; break; case 'b': case '\0': /* No unit. */ *num = number; return (0); default: /* Unrecognized unit. */ return (-1); } if ((number << shift) >> shift != number) { /* Overflow */ return (-1); } *num = number << shift; return (0); } /* * This routine could be used in the future to load default and/or saved * mode page parameters for a particuar lun. 
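 * For now it only copies the static template pages into the per-LUN
 * current/changeable/default/saved copies and points each page index
 * entry's page_data at the result.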
*/ static int ctl_init_page_index(struct ctl_lun *lun) { int i, page_code; struct ctl_page_index *page_index; const char *value; uint64_t ival; memcpy(&lun->mode_pages.index, page_index_template, sizeof(page_index_template)); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; page_code = page_index->page_code & SMPH_PC_MASK; switch (page_code) { case SMS_RW_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], &rw_er_page_changeable, sizeof(rw_er_page_changeable)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], &rw_er_page_default, sizeof(rw_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rw_er_page; break; } case SMS_FORMAT_DEVICE_PAGE: { struct scsi_format_page *format_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Sectors per track are set above. Bytes per * sector need to be set here on a per-LUN basis. */ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[ CTL_PAGE_CHANGEABLE], &format_page_changeable, sizeof(format_page_changeable)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], &format_page_default, sizeof(format_page_default)); format_page = &lun->mode_pages.format_page[ CTL_PAGE_CURRENT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_DEFAULT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_SAVED]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); page_index->page_data = (uint8_t *)lun->mode_pages.format_page; break; } case SMS_RIGID_DISK_PAGE: { struct scsi_rigid_disk_page *rigid_disk_page; uint32_t sectors_per_cylinder; uint64_t cylinders; #ifndef __XSCALE__ int shift; #endif /* !__XSCALE__ */ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); /* * Rotation rate and sectors per track are set * above. We calculate the cylinders here based on * capacity. Due to the number of heads and * sectors per track we're using, smaller arrays * may turn out to have 0 cylinders. Linux and * FreeBSD don't pay attention to these mode pages * to figure out capacity, but Solaris does. It * seems to deal with 0 cylinders just fine, and * works out a fake geometry based on the capacity. 
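 * (In particular, with the default heads and sectors-per-track values,
 * a LUN smaller than one cylinder's worth of blocks, i.e. fewer than
 * heads times sectors-per-track blocks, will report 0 cylinders.)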
*/ memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT], &rigid_disk_page_default, sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, sizeof(rigid_disk_page_changeable)); sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS; /* * The divide method here will be more accurate, * probably, but results in floating point being * used in the kernel on i386 (__udivdi3()). On the * XScale, though, __udivdi3() is implemented in * software. * * The shift method for cylinder calculation is * accurate if sectors_per_cylinder is a power of * 2. Otherwise it might be slightly off -- you * might have a bit of a truncation problem. */ #ifdef __XSCALE__ cylinders = (lun->be_lun->maxlba + 1) / sectors_per_cylinder; #else for (shift = 31; shift > 0; shift--) { if (sectors_per_cylinder & (1 << shift)) break; } cylinders = (lun->be_lun->maxlba + 1) >> shift; #endif /* * We've basically got 3 bytes, or 24 bits for the * cylinder size in the mode page. If we're over, * just round down to 2^24. */ if (cylinders > 0xffffff) cylinders = 0xffffff; rigid_disk_page = &lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT]; scsi_ulto3b(cylinders, rigid_disk_page->cylinders); if ((value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) { scsi_ulto2b(strtol(value, NULL, 0), rigid_disk_page->rotation_rate); } memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rigid_disk_page; break; } case SMS_VERIFY_ERROR_RECOVERY_PAGE: { KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CURRENT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_CHANGEABLE], &verify_er_page_changeable, sizeof(verify_er_page_changeable)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_DEFAULT], &verify_er_page_default, sizeof(verify_er_page_default)); memcpy(&lun->mode_pages.verify_er_page[CTL_PAGE_SAVED], &verify_er_page_default, sizeof(verify_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.verify_er_page; break; } case SMS_CACHING_PAGE: { struct scsi_caching_page *caching_page; KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], &caching_page_default, sizeof(caching_page_default)); memcpy(&lun->mode_pages.caching_page[ CTL_PAGE_CHANGEABLE], &caching_page_changeable, sizeof(caching_page_changeable)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], &caching_page_default, sizeof(caching_page_default)); caching_page = &lun->mode_pages.caching_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "writecache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 &= ~SCP_WCE; value = dnvlist_get_string(lun->be_lun->options, "readcache", NULL); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 |= SCP_RCD; memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], &lun->mode_pages.caching_page[CTL_PAGE_SAVED], sizeof(caching_page_default)); page_index->page_data = (uint8_t 
*)lun->mode_pages.caching_page; break; } case SMS_CONTROL_MODE_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: { struct scsi_control_page *control_page; memcpy(&lun->mode_pages.control_page[ CTL_PAGE_DEFAULT], &control_page_default, sizeof(control_page_default)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CHANGEABLE], &control_page_changeable, sizeof(control_page_changeable)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_SAVED], &control_page_default, sizeof(control_page_default)); control_page = &lun->mode_pages.control_page[ CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "reordering", NULL); if (value != NULL && strcmp(value, "unrestricted") == 0) { control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK; control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; } memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_page[ CTL_PAGE_SAVED], sizeof(control_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_page; break; } case 0x01: memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_DEFAULT], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CHANGEABLE], &control_ext_page_changeable, sizeof(control_ext_page_changeable)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], &control_ext_page_default, sizeof(control_ext_page_default)); memcpy(&lun->mode_pages.control_ext_page[ CTL_PAGE_CURRENT], &lun->mode_pages.control_ext_page[ CTL_PAGE_SAVED], sizeof(control_ext_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_ext_page; break; default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_INFO_EXCEPTIONS_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[ CTL_PAGE_CHANGEABLE], &ie_page_changeable, sizeof(ie_page_changeable)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], &ie_page_default, sizeof(ie_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.ie_page; break; case 0x02: { struct ctl_logical_block_provisioning_page *page; memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], &lbp_page_default, sizeof(lbp_page_default)); memcpy(&lun->mode_pages.lbp_page[ CTL_PAGE_CHANGEABLE], &lbp_page_changeable, sizeof(lbp_page_changeable)); memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], &lbp_page_default, sizeof(lbp_page_default)); page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; value = dnvlist_get_string(lun->be_lun->options, "avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[0].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[0].count); } value = dnvlist_get_string(lun->be_lun->options, "used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[1].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[1].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-avail-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[2].flags |= SLBPPD_ENABLED | 
SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[2].count); } value = dnvlist_get_string(lun->be_lun->options, "pool-used-threshold", NULL); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[3].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[3].count); } memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], sizeof(lbp_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.lbp_page; break; } default: panic("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code); } break; } case SMS_CDDVD_CAPS_PAGE:{ KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0, ("subpage %#x for page %#x is incorrect!", page_index->subpage, page_code)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[ CTL_PAGE_CHANGEABLE], &cddvd_page_changeable, sizeof(cddvd_page_changeable)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], &cddvd_page_default, sizeof(cddvd_page_default)); memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT], &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED], sizeof(cddvd_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.cddvd_page; break; } default: panic("invalid page code value %#x", page_code); } } return (CTL_RETVAL_COMPLETE); } static int ctl_init_log_page_index(struct ctl_lun *lun) { struct ctl_page_index *page_index; int i, j, k, prev; memcpy(&lun->log_pages.index, log_page_index_template, sizeof(log_page_index_template)); prev = -1; for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && lun->backend->lun_attr == NULL) continue; if (page_index->page_code != prev) { lun->log_pages.pages_page[j] = page_index->page_code; prev = page_index->page_code; j++; } lun->log_pages.subpages_page[k*2] = page_index->page_code; lun->log_pages.subpages_page[k*2+1] = page_index->subpage; k++; } lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; lun->log_pages.index[0].page_len = j; lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; lun->log_pages.index[1].page_len = k * 2; lun->log_pages.index[2].page_data = (uint8_t *)&lun->log_pages.temp_page; lun->log_pages.index[2].page_len = sizeof(lun->log_pages.temp_page); lun->log_pages.index[3].page_data = &lun->log_pages.lbp_page[0]; lun->log_pages.index[3].page_len = 12*CTL_NUM_LBP_PARAMS; lun->log_pages.index[4].page_data = (uint8_t *)&lun->log_pages.stat_page; lun->log_pages.index[4].page_len = sizeof(lun->log_pages.stat_page); lun->log_pages.index[5].page_data = (uint8_t *)&lun->log_pages.ie_page; lun->log_pages.index[5].page_len = sizeof(lun->log_pages.ie_page); return (CTL_RETVAL_COMPLETE); } static int hex2bin(const char *str, uint8_t *buf, int buf_size) { int i; u_char c; memset(buf, 0, buf_size); while (isspace(str[0])) str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; buf_size 
*= 2; for (i = 0; str[i] != 0 && i < buf_size; i++) { while (str[i] == '-') /* Skip dashes in UUIDs. */ str++; c = str[i]; if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else break; if (c >= 16) break; if ((i & 1) == 0) buf[i / 2] |= (c << 4); else buf[i / 2] |= c; } return ((i + 1) / 2); } /* * Add LUN. * * Returns 0 for success, non-zero (errno) for failure. */ int ctl_add_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *ctl_softc = control_softc; struct ctl_lun *nlun, *lun; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *uuid, *vendor, *value; int lun_number; int devidlen, idlen1, idlen2 = 0, len; /* * We support only Direct Access, CD-ROM or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: case T_PROCESSOR: case T_CDROM: break; case T_SEQUENTIAL: case T_CHANGER: default: return (EINVAL); } lun = malloc(sizeof(*lun), M_CTL, M_WAITOK | M_ZERO); lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO); /* Generate LUN ID. */ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = dnvlist_get_string(be_lun->options, "scsiname", NULL); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = dnvlist_get_string(be_lun->options, "eui", NULL); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = dnvlist_get_string(be_lun->options, "naa", NULL); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } uuid = dnvlist_get_string(be_lun->options, "uuid", NULL); if (uuid != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 18; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = dnvlist_get_string(be_lun->options, "vendor", NULL)) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 
12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } if (uuid != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_UUID; desc->identifier[0] = 0x10; hex2bin(uuid, &desc->identifier[2], 16); desc->length = 18; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (ctl_max_luns - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (ctl_max_luns - 1)) { printf("ctl: requested LUN ID %d is higher " "than ctl_max_luns - 1 (%d)\n", be_lun->req_lun_id, ctl_max_luns - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? */ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } fail: free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN, out of LUNs\n"); goto fail; } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_unlock(&ctl_softc->ctl_lock); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; if (be_lun->flags & CTL_LUN_FLAG_EJECTED) lun->flags |= CTL_LUN_EJECTED; if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) lun->flags |= CTL_LUN_NO_MEDIA; if (be_lun->flags & CTL_LUN_FLAG_STOPPED) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = dnvlist_get_string(be_lun->options, "removable", NULL); if (value != NULL) { if (strcmp(value, "on") == 0) lun->flags |= CTL_LUN_REMOVABLE; } else if (be_lun->lun_type == T_CDROM) lun->flags |= CTL_LUN_REMOVABLE; lun->ctl_softc = ctl_softc; #ifdef CTL_TIME_IO lun->last_busy = getsbinuptime(); #endif LIST_INIT(&lun->ooa_queue); STAILQ_INIT(&lun->error_list); lun->ie_reported = 1; callout_init_mtx(&lun->ie_callout, &lun->lun_lock, 0); ctl_tpc_lun_init(lun); if (lun->flags & CTL_LUN_REMOVABLE) { lun->prevent = malloc((CTL_MAX_INITIATORS + 31) / 32 * 4, M_CTL, M_WAITOK); } /* * Initialize the mode and log page index. */ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* Setup statistics gathering */ lun->stats.item = lun_number; /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. 
*/ mtx_lock(&ctl_softc->ctl_lock); STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; mtx_unlock(&ctl_softc->ctl_lock); /* * We successfully added the LUN, attempt to enable it. */ if (ctl_enable_lun(lun) != 0) { printf("%s: ctl_enable_lun() failed!\n", __func__); mtx_lock(&ctl_softc->ctl_lock); STAILQ_REMOVE(&ctl_softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(ctl_softc->ctl_lun_mask, lun_number); ctl_softc->ctl_luns[lun_number] = NULL; ctl_softc->num_luns--; mtx_unlock(&ctl_softc->ctl_lock); free(lun->lun_devid, M_CTL); free(lun, M_CTL); return (EIO); } return (0); } /* * Free LUN that has no active requests. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; struct ctl_lun *nlun; int i; KASSERT(LIST_EMPTY(&lun->ooa_queue), ("Freeing a LUN %p with outstanding I/O!\n", lun)); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; softc->num_luns--; STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } mtx_unlock(&softc->ctl_lock); /* * Tell the backend to free resources, if this LUN has a backend. */ lun->be_lun->lun_shutdown(lun->be_lun); lun->ie_reportcnt = UINT32_MAX; callout_drain(&lun->ie_callout); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < ctl_max_ports; i++) free(lun->pending_ua[i], M_CTL); free(lun->pending_ua, M_DEVBUF); for (i = 0; i < ctl_max_ports; i++) free(lun->pr_keys[i], M_CTL); free(lun->pr_keys, M_DEVBUF); free(lun->write_buffer, M_CTL); free(lun->prevent, M_CTL); free(lun, M_CTL); return (0); } static int ctl_enable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port, *nport; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) != 0, ("%s: LUN not disabled", __func__)); lun->flags &= ~CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_enable == NULL) continue; /* * Drop the lock while we call the FETD's enable routine. * This can lead to a callback into CTL (at least in the * case of the internal initiator frontend. 
*/ mtx_unlock(&softc->ctl_lock); retval = port->lun_enable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_enable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } static int ctl_disable_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_port *port; int retval; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); KASSERT((lun->flags & CTL_LUN_DISABLED) == 0, ("%s: LUN not enabled", __func__)); lun->flags |= CTL_LUN_DISABLED; mtx_unlock(&lun->lun_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 || port->lun_map != NULL || port->lun_disable == NULL) continue; /* * Drop the lock before we call the frontend's disable * routine, to avoid lock order reversals. * * XXX KDM what happens if the frontend list changes while * we're traversing it? It's unlikely, but should be handled. */ mtx_unlock(&softc->ctl_lock); retval = port->lun_disable(port->targ_lun_arg, lun->lun); mtx_lock(&softc->ctl_lock); if (retval != 0) { printf("%s: FETD %s port %d returned error " "%d for lun_disable on lun %jd\n", __func__, port->port_name, port->targ_port, retval, (intmax_t)lun->lun); } } mtx_unlock(&softc->ctl_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_start_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_stop_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_STOPPED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_no_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_NO_MEDIA; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_has_media(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED); if (lun->flags & CTL_LUN_REMOVABLE) ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE); mtx_unlock(&lun->lun_lock); if ((lun->flags & CTL_LUN_REMOVABLE) && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } return (0); } int ctl_lun_ejected(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_EJECTED; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_primary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); ctl_isc_announce_lun(lun); return (0); } int ctl_lun_secondary(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PRIMARY_SC; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); 
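	/* Let the HA peer know about the primary/secondary role change. */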
ctl_isc_announce_lun(lun); return (0); } /* * Remove LUN. If there are active requests, wait for completion. * * Returns 0 for success, non-zero (errno) for failure. * Completion is reported to backed via the lun_shutdown() method. */ int ctl_remove_lun(struct ctl_be_lun *be_lun) { struct ctl_lun *lun; lun = (struct ctl_lun *)be_lun->ctl_lun; ctl_disable_lun(lun); mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_INVALID; /* * If there is nothing in the OOA queue, go ahead and free the LUN. * If we have something in the OOA queue, we'll free it when the * last I/O completes. */ if (LIST_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); return (0); } void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; union ctl_ha_msg msg; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE); mtx_unlock(&lun->lun_lock); if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. */ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = 1; msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); } } /* * Backend "memory move is complete" callback for requests that never * make it down to say RAIDCore's configuration code. */ int ctl_config_move_done(union ctl_io *io, bool samethr) { int retval; CTL_DEBUG_PRINT(("ctl_config_move_done\n")); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); if (ctl_debug & CTL_DEBUG_CDB_DATA) ctl_data_print(io); if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { /* * XXX KDM just assuming a single pointer here, and not a * S/G list. If we start using S/G lists for config data, * we'll need to know how to clean them up here as well. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) free(io->scsiio.kern_data_ptr, M_CTL); ctl_done(io); retval = CTL_RETVAL_COMPLETE; } else { /* * XXX KDM now we need to continue data movement. Some * options: * - call ctl_scsiio() again? We don't do this for data * writes, because for those at least we know ahead of * time where the write will go and how long it is. For * config writes, though, that information is largely * contained within the write itself, thus we need to * parse out the data again. * * - Call some other function once the data is in? */ /* * XXX KDM call ctl_scsiio() again for now, and check flag * bits to see whether we're allocated or not. */ retval = ctl_scsiio(&io->scsiio); } return (retval); } /* * This gets called by a backend driver when it is done with a * data_submit method. */ void ctl_data_submit_done(union ctl_io *io) { /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. 
*/ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } ctl_done(io); } /* * This gets called by a backend driver when it is done with a * configuration write. */ void ctl_config_write_done(union ctl_io *io) { uint8_t *buf; /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } /* * Since a configuration write can be done for commands that actually * have data allocated, like write buffer, and commands that have * no data, like start/stop unit, we need to check here. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); } void ctl_config_read_done(union ctl_io *io) { uint8_t *buf; /* * If there is some error -- we are done, skip data transfer. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); return; } /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. */ if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { io->scsiio.io_cont(io); return; } ctl_datamove(io); } /* * SCSI release command. */ int ctl_scsi_release(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_scsi_release\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. */ mtx_lock(&lun->lun_lock); /* * According to SPC, it is not an error for an intiator to attempt * to release a reservation on a LUN that isn't reserved, or that * is reserved by another initiator. The reservation can only be * released, though, by the initiator who made it or by one of * several reset type events. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) lun->flags &= ~CTL_LUN_RESERVED; mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_scsi_reserve(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint32_t residx; CTL_DEBUG_PRINT(("ctl_reserve\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. 
*/ mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { ctl_set_reservation_conflict(ctsio); goto bailout; } /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. */ if (lun->flags & CTL_LUN_PR_RESERVED) { ctl_set_success(ctsio); goto bailout; } lun->flags |= CTL_LUN_RESERVED; lun->res_idx = residx; ctl_set_success(ctsio); bailout: mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_start_stop(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_start_stop_unit *cdb; int retval; CTL_DEBUG_PRINT(("ctl_start_stop\n")); cdb = (struct scsi_start_stop_unit *)ctsio->cdb; if ((cdb->how & SSS_PC_MASK) == 0) { if ((lun->flags & CTL_LUN_PR_RESERVED) && (cdb->how & SSS_START) == 0) { uint32_t residx; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if (ctl_get_prkey(lun, residx) == 0 || (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } if ((cdb->how & SSS_LOEJ) && (lun->flags & CTL_LUN_REMOVABLE) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 4, /*bit_valid*/ 1, /*bit*/ 1); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && lun->prevent_count > 0) { /* "Medium removal prevented" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x53, /*ascq*/ 0x02, SSD_ELEM_NONE); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_prevent_allow(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_prevent *cdb; int retval; uint32_t initidx; CTL_DEBUG_PRINT(("ctl_prevent_allow\n")); cdb = (struct scsi_prevent *)ctsio->cdb; if ((lun->flags & CTL_LUN_REMOVABLE) == 0 || lun->prevent == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&lun->lun_lock); if ((cdb->how & PR_PREVENT) && ctl_is_set(lun->prevent, initidx) == 0) { ctl_set_mask(lun->prevent, initidx); lun->prevent_count++; } else if ((cdb->how & PR_PREVENT) == 0 && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } /* * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but * we don't really do anything with the LBA and length fields if the user * passes them in. Instead we'll just flush out the cache for the entire * LUN. 
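 * Flushing the whole cache is a superset of any requested range, so the
 * request is still satisfied.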
*/ int ctl_sync_cache(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t starting_lba; uint32_t block_count; int retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_sync_cache\n")); retval = 0; switch (ctsio->cdb[0]) { case SYNCHRONIZE_CACHE: { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)ctsio->cdb; starting_lba = scsi_4btoul(cdb->begin_lba); block_count = scsi_2btoul(cdb->lb_count); byte2 = cdb->byte2; break; } case SYNCHRONIZE_CACHE_16: { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; starting_lba = scsi_8btou64(cdb->begin_lba); block_count = scsi_4btoul(cdb->lb_count); byte2 = cdb->byte2; break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; break; /* NOTREACHED */ } /* * We check the LBA and length, but don't do anything with them. * A SYNCHRONIZE CACHE will cause the entire cache for this lun to * get flushed. This check will just help satisfy anyone who wants * to see an error for an out of range LBA. */ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { ctl_set_lba_out_of_range(ctsio, MAX(starting_lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); goto bailout; } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = starting_lba; lbalen->len = block_count; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); bailout: return (retval); } int ctl_format(struct ctl_scsiio *ctsio) { struct scsi_format *cdb; int length, defect_list_len; CTL_DEBUG_PRINT(("ctl_format\n")); cdb = (struct scsi_format *)ctsio->cdb; length = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) length = sizeof(struct scsi_format_header_long); else length = sizeof(struct scsi_format_header_short); } if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) && (length > 0)) { ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); ctsio->kern_data_len = length; ctsio->kern_total_len = length; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } defect_list_len = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) { struct scsi_format_header_long *header; header = (struct scsi_format_header_long *) ctsio->kern_data_ptr; defect_list_len = scsi_4btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } else { struct scsi_format_header_short *header; header = (struct scsi_format_header_short *) ctsio->kern_data_ptr; defect_list_len = scsi_2btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } } ctl_set_success(ctsio); bailout: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); uint64_t buffer_offset; uint32_t len; uint8_t byte2; static uint8_t descr[4]; static uint8_t echo_descr[4] = { 0 }; CTL_DEBUG_PRINT(("ctl_read_buffer\n")); switch (ctsio->cdb[0]) { case READ_BUFFER: { struct scsi_read_buffer *cdb; cdb = (struct scsi_read_buffer 
*)ctsio->cdb; buffer_offset = scsi_3btoul(cdb->offset); len = scsi_3btoul(cdb->length); byte2 = cdb->byte2; break; } case READ_BUFFER_16: { struct scsi_read_buffer_16 *cdb; cdb = (struct scsi_read_buffer_16 *)ctsio->cdb; buffer_offset = scsi_8btou64(cdb->offset); len = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* This shouldn't happen. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (buffer_offset > CTL_WRITE_BUFFER_SIZE || buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) { descr[0] = 0; scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); ctsio->kern_data_ptr = descr; len = min(len, sizeof(descr)); } else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { ctsio->kern_data_ptr = echo_descr; len = min(len, sizeof(echo_descr)); } else { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; } ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctl_set_success(ctsio); ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_buffer(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_write_buffer *cdb; int buffer_offset, len; CTL_DEBUG_PRINT(("ctl_write_buffer\n")); cdb = (struct scsi_write_buffer *)ctsio->cdb; len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
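 * On the second pass, after ctl_datamove() has copied the data into
 * lun->write_buffer, we just complete the command with good status.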
*/ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_write_same_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba += lbalen->len; if ((lun->be_lun->maxlba + 1) - lbalen->lba <= UINT32_MAX) { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen->len = (lun->be_lun->maxlba + 1) - lbalen->lba; } CTL_DEBUG_PRINT(("ctl_write_same_cont: calling config_write()\n")); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_write_same(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; const char *val; uint64_t lba, ival; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_write_same\n")); switch (ctsio->cdb[0]) { case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); byte2 = cdb->byte2; break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* ANCHOR flag can be used only together with UNMAP */ if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); if ((lun->be_lun->maxlba + 1) - lba > ival) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ ctsio->cdb[0] == WRITE_SAME_10 ? 
7 : 10, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_write_same_cont; num_blocks = 1 << 31; } else num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((byte2 & SWS_NDOB) == 0 && (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_unmap(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_unmap *cdb; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_header *hdr; struct scsi_unmap_desc *buf, *end, *endnz, *range; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_unmap\n")); cdb = (struct scsi_unmap *)ctsio->cdb; len = scsi_2btoul(cdb->length); byte2 = cdb->byte2; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK); ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = ctsio->kern_total_len - ctsio->kern_data_resid; hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; if (len < sizeof (*hdr) || len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); goto done; } len = scsi_2btoul(hdr->desc_length); buf = (struct scsi_unmap_desc *)(hdr + 1); end = buf + len / sizeof(*buf); endnz = buf; for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); num_blocks = scsi_4btoul(range->length); if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (num_blocks != 0) endnz = range + 1; } /* * Block backend can not handle zero last range. * Filter it out and return if there is nothing left. 
*/ len = (uint8_t *)endnz - (uint8_t *)buf; if (len == 0) { ctl_set_success(ctsio); goto done; } mtx_lock(&lun->lun_lock); ptrlen = (struct ctl_ptr_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; ptrlen->ptr = (void *)buf; ptrlen->len = len; ptrlen->flags = byte2; ctl_try_unblock_others(lun, (union ctl_io *)ctsio, FALSE); mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); done: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_default_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); uint8_t *current_cp; int set_ua; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; current_cp = (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); mtx_lock(&lun->lun_lock); if (memcmp(current_cp, page_ptr, page_index->page_len)) { memcpy(current_cp, page_ptr, page_index->page_len); set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); if (set_ua) { ctl_isc_announce_mode(lun, ctl_get_initindex(&ctsio->io_hdr.nexus), page_index->page_code, page_index->subpage); } return (CTL_RETVAL_COMPLETE); } static void ctl_ie_timer(void *arg) { struct ctl_lun *lun = arg; uint64_t t; if (lun->ie_asc == 0) return; if (lun->MODE_IE.mrie == SIEP_MRIE_UA) ctl_est_ua_all(lun, -1, CTL_UA_IE); else lun->ie_reported = 0; if (lun->ie_reportcnt < scsi_4btoul(lun->MODE_IE.report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(lun->MODE_IE.interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_schedule(&lun->ie_callout, t * hz / 10); } } int ctl_ie_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_info_exceptions_page *pg; uint64_t t; (void)ctl_default_page_handler(ctsio, page_index, page_ptr); pg = (struct scsi_info_exceptions_page *)page_ptr; mtx_lock(&lun->lun_lock); if (pg->info_flags & SIEP_FLAGS_TEST) { lun->ie_asc = 0x5d; lun->ie_ascq = 0xff; if (pg->mrie == SIEP_MRIE_UA) { ctl_est_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = 1; } else { ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reported = -1; } lun->ie_reportcnt = 1; if (lun->ie_reportcnt < scsi_4btoul(pg->report_count)) { lun->ie_reportcnt++; t = scsi_4btoul(pg->interval_timer); if (t == 0 || t == UINT32_MAX) t = 3000; /* 5 min */ callout_reset(&lun->ie_callout, t * hz / 10, ctl_ie_timer, lun); } } else { lun->ie_asc = 0; lun->ie_ascq = 0; lun->ie_reported = 1; ctl_clr_ua_all(lun, -1, CTL_UA_IE); lun->ie_reportcnt = UINT32_MAX; callout_stop(&lun->ie_callout); } mtx_unlock(&lun->lun_lock); return (CTL_RETVAL_COMPLETE); } static int ctl_do_mode_select(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct scsi_mode_page_header *page_header; struct ctl_page_index *page_index; struct ctl_scsiio *ctsio; int page_len, page_len_offset, page_len_size; union ctl_modepage_info *modepage_info; uint16_t *len_left, *len_used; int retval, i; ctsio = &io->scsiio; page_index = NULL; page_len = 0; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; len_left = &modepage_info->header.len_left; len_used = &modepage_info->header.len_used; do_next_page: page_header = (struct scsi_mode_page_header *) (ctsio->kern_data_ptr + *len_used); 
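	/*
	 * Sanity check what is left of the parameter list: nothing left
	 * means we are done, while a residue shorter than a (sub)page
	 * header is a parameter list length error.
	 */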
if (*len_left == 0) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (*len_left < sizeof(struct scsi_mode_page_header)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if ((page_header->page_code & SMPH_SPF) && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM should we do something with the block descriptor? */ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if ((page_index->page_code & SMPH_PC_MASK) != (page_header->page_code & SMPH_PC_MASK)) continue; /* * If neither page has a subpage code, then we've got a * match. */ if (((page_index->page_code & SMPH_SPF) == 0) && ((page_header->page_code & SMPH_SPF) == 0)) { page_len = page_header->page_length; break; } /* * If both pages have subpages, then the subpage numbers * have to match. */ if ((page_index->page_code & SMPH_SPF) && (page_header->page_code & SMPH_SPF)) { struct scsi_mode_page_header_sp *sph; sph = (struct scsi_mode_page_header_sp *)page_header; if (page_index->subpage == sph->subpage) { page_len = scsi_2btoul(sph->page_length); break; } } } /* * If we couldn't find the page, or if we don't have a mode select * handler for it, send back an error to the user. */ if ((i >= CTL_NUM_MODE_PAGES) || (page_index->select_handler == NULL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (page_index->page_code & SMPH_SPF) { page_len_offset = 2; page_len_size = 2; } else { page_len_size = 1; page_len_offset = 1; } /* * If the length the initiator gives us isn't the one we specify in * the mode page header, or if they didn't specify enough data in * the CDB to avoid truncating this page, kick out the request. */ if (page_len != page_index->page_len - page_len_offset - page_len_size) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + page_len_offset, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (*len_left < page_index->page_len) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Run through the mode page, checking to make sure that the bits * the user changed are actually legal for him to change. */ for (i = 0; i < page_index->page_len; i++) { uint8_t *user_byte, *change_mask, *current_byte; int bad_bit; int j; user_byte = (uint8_t *)page_header + i; change_mask = page_index->page_data + (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; current_byte = page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT) + i; /* * Check to see whether the user set any bits in this byte * that he is not allowed to set. 
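 * Bits that are clear in the changeable mask must match the current
 * value; any difference outside the mask means an unchangeable bit
 * was modified.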
*/ if ((*user_byte & ~(*change_mask)) == (*current_byte & ~(*change_mask))) continue; /* * Go through bit by bit to determine which one is illegal. */ bad_bit = 0; for (j = 7; j >= 0; j--) { if ((((1 << i) & ~(*change_mask)) & *user_byte) != (((1 << i) & ~(*change_mask)) & *current_byte)) { bad_bit = i; break; } } ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + i, /*bit_valid*/ 1, /*bit*/ bad_bit); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Decrement these before we call the page handler, since we may * end up getting called back one way or another before the handler * returns to this context. */ *len_left -= page_index->page_len; *len_used += page_index->page_len; retval = page_index->select_handler(ctsio, page_index, (uint8_t *)page_header); /* * If the page handler returns CTL_RETVAL_QUEUED, then we need to * wait until this queued command completes to finish processing * the mode page. If it returns anything other than * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have * already set the sense information, freed the data pointer, and * completed the io for us. */ if (retval != CTL_RETVAL_COMPLETE) goto bailout_no_done; /* * If the initiator sent us more than one page, parse the next one. */ if (*len_left > 0) goto do_next_page; ctl_set_success(ctsio); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); bailout_no_done: return (CTL_RETVAL_COMPLETE); } int ctl_mode_select(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); union ctl_modepage_info *modepage_info; int bd_len, i, header_size, param_len, rtd; uint32_t initidx; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_select_6 *cdb; cdb = (struct scsi_mode_select_6 *)ctsio->cdb; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; param_len = cdb->length; header_size = sizeof(struct scsi_mode_header_6); break; } case MODE_SELECT_10: { struct scsi_mode_select_10 *cdb; cdb = (struct scsi_mode_select_10 *)ctsio->cdb; rtd = (cdb->byte2 & SMS_RTD) ? 1 : 0; param_len = scsi_2btoul(cdb->length); header_size = sizeof(struct scsi_mode_header_10); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (rtd) { if (param_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Revert to defaults. */ ctl_init_page_index(lun); mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * From SPC-3: * "A parameter list length of zero indicates that the Data-Out Buffer * shall be empty. This condition shall not be considered as an error." */ if (param_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Since we'll hit this the first time through, prior to * allocation, we don't need to free a data buffer here. */ if (param_len < header_size) { ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Allocate the data buffer and grab the user's data. 
In theory, * we shouldn't have to sanity check the parameter list length here * because the maximum size is 64K. We should be able to malloc * that much without too many problems. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_header_6 *mh6; mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; bd_len = mh6->blk_desc_len; break; } case MODE_SELECT_10: { struct scsi_mode_header_10 *mh10; mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; bd_len = scsi_2btoul(mh10->blk_desc_len); break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } if (param_len < (header_size + bd_len)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_config_write_done(), it'll get passed back to * ctl_do_mode_select() for further processing, or completion if * we're all done. */ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_do_mode_select; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; memset(modepage_info, 0, sizeof(*modepage_info)); modepage_info->header.len_left = param_len - header_size - bd_len; modepage_info->header.len_used = header_size + bd_len; return (ctl_do_mode_select((union ctl_io *)ctsio)); } int ctl_mode_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int pc, page_code, llba, subpage; int alloc_len, page_len, header_len, bd_len, total_len; void *block_desc; struct ctl_page_index *page_index; llba = 0; CTL_DEBUG_PRINT(("ctl_mode_sense\n")); switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_6); if (cdb->byte2 & SMS_DBD) bd_len = 0; else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = cdb->length; break; } case MODE_SENSE_10: { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_10); if (cdb->byte2 & SMS_DBD) { bd_len = 0; } else if (lun->be_lun->lun_type == T_DIRECT) { if (cdb->byte2 & SMS10_LLBAA) { llba = 1; bd_len = sizeof(struct scsi_mode_block_descr_dlong); } else bd_len = sizeof(struct scsi_mode_block_descr_dshort); } else bd_len = sizeof(struct scsi_mode_block_descr); header_len += bd_len; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * We have to make a first pass through to calculate the size of * the pages that match the user's query. Then we allocate enough * memory to hold it, and actually copy the data into the buffer. 
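 * The two switch statements on page_code below mirror each other: the
 * first sizes the reply, the second fills it in.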
*/ switch (page_code) { case SMS_ALL_PAGES_PAGE: { u_int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; page_len += page_index->page_len; } break; } default: { u_int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; page_len += page_index->page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } header->block_descr_len = bd_len; block_desc = &header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (llba) header->flags |= SMH_LONGLBA; scsi_ulto2b(bd_len, header->block_descr_len); block_desc = &header[1]; break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. 
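 * The long (LLBAA) descriptor carries a 64-bit block count, while the
 * short form caps the count at UINT32_MAX.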
*/ if (bd_len > 0) { if (lun->be_lun->lun_type == T_DIRECT) { if (llba) { struct scsi_mode_block_descr_dlong *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_u64to8b(lun->be_lun->maxlba + 1, bd->num_blocks); scsi_ulto4b(lun->be_lun->blocksize, bd->block_len); } else { struct scsi_mode_block_descr_dshort *bd = block_desc; if (lun->be_lun->maxlba != 0) scsi_ulto4b(MIN(lun->be_lun->maxlba+1, UINT32_MAX), bd->num_blocks); scsi_ulto3b(lun->be_lun->blocksize, bd->block_len); } } else { struct scsi_mode_block_descr *bd = block_desc; scsi_ulto3b(0, bd->block_len); } } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_temp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_temperature *data; const char *value; data = (struct scsi_log_temperature *)page_index->page_data; scsi_ulto2b(SLP_TEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; data++; scsi_ulto2b(SLP_REFTEMPERATURE, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_temperature) - sizeof(struct scsi_log_param_header); if ((value = dnvlist_get_string(lun->be_lun->options, "reftemperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; return (0); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct stat_page *data; struct bintime *t; data = (struct stat_page *)page_index->page_data; scsi_ulto2b(SLP_SAP, 
data->sap.hdr.param_code); data->sap.hdr.param_control = SLP_LBIN; data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - sizeof(struct scsi_log_param_header); scsi_u64to8b(lun->stats.operations[CTL_STATS_READ], data->sap.read_num); scsi_u64to8b(lun->stats.operations[CTL_STATS_WRITE], data->sap.write_num); if (lun->be_lun->blocksize > 0) { scsi_u64to8b(lun->stats.bytes[CTL_STATS_WRITE] / lun->be_lun->blocksize, data->sap.recvieved_lba); scsi_u64to8b(lun->stats.bytes[CTL_STATS_READ] / lun->be_lun->blocksize, data->sap.transmitted_lba); } t = &lun->stats.time[CTL_STATS_READ]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.read_int); t = &lun->stats.time[CTL_STATS_WRITE]; scsi_u64to8b((uint64_t)t->sec * 1000 + t->frac / (UINT64_MAX / 1000), data->sap.write_int); scsi_u64to8b(0, data->sap.weighted_num); scsi_u64to8b(0, data->sap.weighted_int); scsi_ulto2b(SLP_IT, data->it.hdr.param_code); data->it.hdr.param_control = SLP_LBIN; data->it.hdr.param_len = sizeof(struct scsi_log_idle_time) - sizeof(struct scsi_log_param_header); #ifdef CTL_TIME_IO scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); #endif scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); data->ti.hdr.param_control = SLP_LBIN; data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - sizeof(struct scsi_log_param_header); scsi_ulto4b(3, data->ti.exponent); scsi_ulto4b(1, data->ti.integer); return (0); } int ctl_ie_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_log_informational_exceptions *data; const char *value; data = (struct scsi_log_informational_exceptions *)page_index->page_data; scsi_ulto2b(SLP_IE_GEN, data->hdr.param_code); data->hdr.param_control = SLP_LBIN; data->hdr.param_len = sizeof(struct scsi_log_informational_exceptions) - sizeof(struct scsi_log_param_header); data->ie_asc = lun->ie_asc; data->ie_ascq = lun->ie_ascq; if ((value = dnvlist_get_string(lun->be_lun->options, "temperature", NULL)) != NULL) data->temperature = strtol(value, NULL, 0); else data->temperature = 0xff; return (0); } int ctl_log_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); int i, pc, page_code, subpage; int alloc_len, total_len; struct ctl_page_index *page_index; struct scsi_log_sense *cdb; struct scsi_log_header *header; CTL_DEBUG_PRINT(("ctl_log_sense\n")); cdb = (struct scsi_log_sense *)ctsio->cdb; pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SLS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); page_index = NULL; for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SL_PAGE_CODE) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if (page_index->subpage != subpage) continue; break; } if (i >= CTL_NUM_LOG_PAGES) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_log_header) + page_index->page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; header = (struct scsi_log_header *)ctsio->kern_data_ptr; header->page = page_index->page_code; if (page_index->page_code ==
SLS_LOGICAL_BLOCK_PROVISIONING) header->page |= SL_DS; if (page_index->subpage) { header->page |= SL_SPF; header->subpage = page_index->subpage; } scsi_ulto2b(page_index->page_len, header->datalen); /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index, pc); memcpy(header + 1, page_index->page_data, page_index->page_len); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity *cdb; struct scsi_read_capacity_data *data; uint32_t lba; CTL_DEBUG_PRINT(("ctl_read_capacity\n")); cdb = (struct scsi_read_capacity *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If the maximum LBA is greater than 0xfffffffe, the user must * issue a SERVICE ACTION IN (16) command, with the read capacity * service action set. */ if (lun->be_lun->maxlba > 0xfffffffe) scsi_ulto4b(0xffffffff, data->addr); else scsi_ulto4b(lun->be_lun->maxlba, data->addr); /* * XXX KDM this may not be 512 bytes... */ scsi_ulto4b(lun->be_lun->blocksize, data->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity_16(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_capacity_16 *cdb; struct scsi_read_capacity_data_long *data; uint64_t lba; uint32_t alloc_len; CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; alloc_len = scsi_4btoul(cdb->alloc_len); lba = scsi_8btou64(cdb->addr); if ((cdb->reladr & SRC16_PMI) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*data), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; scsi_u64to8b(lun->be_lun->maxlba, data->addr); /* XXX KDM this may not be 512 bytes...
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio, lba); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (retval); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_ident_info(struct ctl_scsiio *ctsio) { struct 
ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_ident_info *cdb; struct scsi_report_ident_info_data *rii_ptr; struct scsi_report_ident_info_descr *riid_ptr; const char *oii, *otii; int retval, alloc_len, total_len = 0, len = 0; CTL_DEBUG_PRINT(("ctl_report_ident_info\n")); cdb = (struct scsi_report_ident_info *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_ident_info_data); switch (cdb->type) { case RII_LUII: oii = dnvlist_get_string(lun->be_lun->options, "ident_info", NULL); if (oii) len = strlen(oii); /* Approximately */ break; case RII_LUTII: otii = dnvlist_get_string(lun->be_lun->options, "text_ident_info", NULL); if (otii) len = strlen(otii) + 1; /* NULL-terminated */ break; case RII_IIS: len = 2 * sizeof(struct scsi_report_ident_info_descr); break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 11, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return(retval); } total_len += len; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; rii_ptr = (struct scsi_report_ident_info_data *)ctsio->kern_data_ptr; switch (cdb->type) { case RII_LUII: if (oii) { if (oii[0] == '0' && oii[1] == 'x') len = hex2bin(oii, (uint8_t *)(rii_ptr + 1), len); else strncpy((uint8_t *)(rii_ptr + 1), oii, len); } break; case RII_LUTII: if (otii) strlcpy((uint8_t *)(rii_ptr + 1), otii, len); break; case RII_IIS: riid_ptr = (struct scsi_report_ident_info_descr *)(rii_ptr + 1); riid_ptr->type = RII_LUII; scsi_ulto2b(0xffff, riid_ptr->length); riid_ptr++; riid_ptr->type = RII_LUTII; scsi_ulto2b(0xffff, riid_ptr->length); } scsi_ulto2b(len, rii_ptr->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; int num_ha_groups, num_target_ports, shared_group; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } num_target_ports = 0; shared_group = (softc->is_single != 0); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->status & CTL_PORT_STATUS_HA_SHARED) shared_group = 1; } mtx_unlock(&softc->ctl_lock); num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * (shared_group + num_ha_groups) + sizeof(struct scsi_target_port_descriptor) * num_target_ports; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_min / softc->port_cnt; if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { /* Some shelf is known to be primary. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) os = TPG_ASYMMETRIC_ACCESS_STANDBY; else os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; if (lun->flags & CTL_LUN_PRIMARY_SC) { ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { /* No known primary shelf. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) { ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; } } if (shared_group) { tpg_desc->pref_state = ts; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (!softc->is_single && (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } for (g = 0; g < num_ha_groups; g++) { tpg_desc->pref_state = (g == pg) ? ts : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(2 + g, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < g * softc->port_cnt || port->targ_port >= (g + 1) * softc->port_cnt) continue; if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (port->status & CTL_PORT_STATUS_HA_SHARED) continue; if (ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* FALLTHROUGH */ case RSO_OPTIONS_OC_ASA: total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct 
scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; case RSO_OPTIONS_OC_ASA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; if (entry->flags & CTL_CMD_FLAG_SA5) { entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } else if (service_action != 0) { one->support = 1; break; } goto fill_one; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_ext_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; if (cdb->options & RST_REPD) total_len = sizeof(struct scsi_report_supported_tmf_ext_data); else total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_supported_tmf_ext_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | RST_TRS; data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; data->length = total_len - 4; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct 
scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct scsi_per_res_in_rsrv in_data; */ uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(total_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. */ if (key_count >= lun->pr_key_count) { key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->pr_generation, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. 
*/ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. */ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->pr_res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags1 = SPRI_CRH; res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->pr_res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. 
*/ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int i; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices either * generate a res conflict or check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. 
*/ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) != sa_res_key) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* Reserved but not all registrants */ /* sa_res_key is res holder */ if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Do the following: * if sa_res_key != res_key remove all * registrants w/sa_res_key and generate UA * for these registrants(Registrations * Preempted) if it wasn't an exclusive * reservation generate UA(Reservations * Preempted) for all other registered nexuses * if the type has changed. Establish the new * reservation and holder. If res_key and * sa_res_key are the same do the above * except don't unregister the res holder. 
*/ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } } return (0); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; } 
lun->pr_generation++; } int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); int retval; u_int32_t param_len; struct scsi_per_res_out *cdb; struct scsi_per_res_out_parms* param; uint32_t residx; uint64_t res_key, sa_res_key, key; uint8_t type; union ctl_ha_msg persis_io; int i; CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); cdb = (struct scsi_per_res_out *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; /* * We only support whole-LUN scope. The scope & type are ignored for * register, register and ignore existing key and clear. * We sometimes ignore scope and type on preempts too!! * Verify reservation type here as well. */ type = cdb->scope_type & SPR_TYPE_MASK; if ((cdb->action == SPRO_RESERVE) || (cdb->action == SPRO_RELEASE)) { if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (type>8 || type==2 || type==4 || type==0) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } param_len = scsi_4btoul(cdb->length); if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); res_key = scsi_8btou64(param->res_key.key); sa_res_key = scsi_8btou64(param->serv_act_res_key); /* * Validate the reservation key here except for SPRO_REG_IGNO * This must be done for all other service actions */ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { mtx_lock(&lun->lun_lock); if ((key = ctl_get_prkey(lun, residx)) != 0) { if (res_key != key) { /* * The current key passed in doesn't match * the one the initiator previously * registered. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { /* * We are not registered */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (res_key != 0) { /* * We are not registered and trying to register but * the register key isn't zero. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } switch (cdb->action & SPRO_ACTION_MASK) { case SPRO_REGISTER: case SPRO_REG_IGNO: { /* * We don't support any of these options, as we report in * the read capabilities request (see * ctl_persistent_reserve_in(), above). 
*/ if ((param->flags & SPR_SPEC_I_PT) || (param->flags & SPR_ALL_TG_PT) || (param->flags & SPR_APTPL)) { int bit_ptr; if (param->flags & SPR_APTPL) bit_ptr = 0; else if (param->flags & SPR_ALL_TG_PT) bit_ptr = 2; else /* SPR_SPEC_I_PT */ bit_ptr = 3; free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 20, /*bit_valid*/ 1, /*bit*/ bit_ptr); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_lock(&lun->lun_lock); /* * The initiator wants to clear the * key/unregister. */ if (sa_res_key == 0) { if ((res_key == 0 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO && ctl_get_prkey(lun, residx) == 0)) { mtx_unlock(&lun->lun_lock); goto done; } ctl_clr_prkey(lun, residx); lun->pr_key_count--; if (residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++){ if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; persis_io.pr.pr_info.residx = residx; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else /* sa_res_key != 0 */ { /* * If we aren't registered currently then increment * the key count and set the registered flag. 
*/ ctl_alloc_prkey(lun, residx); if (ctl_get_prkey(lun, residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, residx, sa_res_key); lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_REG_KEY; persis_io.pr.pr_info.residx = residx; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; } case SPRO_RESERVE: mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PR_RESERVED) { /* * if this isn't the reservation holder and it's * not a "all registrants" type or if the type is * different then we have a conflict */ if ((lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) || lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } else /* create a reservation */ { /* * If it's not an "all registrants" type record * reservation holder */ if (type != SPR_TYPE_WR_EX_AR && type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* Res holder */ else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = type; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RESERVE; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } break; case SPRO_RELEASE: mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { /* No reservation exists return good status */ mtx_unlock(&lun->lun_lock); goto done; } /* * Is this nexus a reservation holder? */ if (lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { /* * not a res holder return good status but * do nothing */ mtx_unlock(&lun->lun_lock); goto done; } if (lun->pr_res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_illegal_pr_release(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* okay to release */ lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. 
*/ if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } mtx_unlock(&lun->lun_lock); /* Send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RELEASE; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); break; case SPRO_CLEAR: /* send msg to other side */ mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; ctl_clr_prkey(lun, residx); for (i = 0; i < CTL_MAX_INITIATORS; i++) if (ctl_get_prkey(lun, i) != 0) { ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_CLEAR; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); break; case SPRO_PREEMPT: case SPRO_PRE_ABO: { int nretval; nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, residx, ctsio, cdb, param); if (nretval != 0) return (CTL_RETVAL_COMPLETE); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } done: free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } /* * This routine is for handling a message from the other SC pertaining to * persistent reserve out. All the error checking will have been done * so only performing the action need be done here to keep the two * in sync. */ static void ctl_hndl_per_res_out_on_other_sc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); union ctl_ha_msg *msg = (union ctl_ha_msg *)&io->presio.pr_msg; struct ctl_lun *lun; int i; uint32_t residx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } residx = ctl_get_initindex(&msg->hdr.nexus); switch(msg->pr.pr_info.action) { case CTL_PR_REG_KEY: ctl_alloc_prkey(lun, msg->pr.pr_info.residx); if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, msg->pr.pr_info.residx, scsi_8btou64(msg->pr.pr_info.sa_res_key)); lun->pr_generation++; break; case CTL_PR_UNREG_KEY: ctl_clr_prkey(lun, msg->pr.pr_info.residx); lun->pr_key_count--; /* XXX Need to see if the reservation has been released */ /* if so do we need to generate UA? */ if (msg->pr.pr_info.residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits.
The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * If this isn't an exclusive access reservation and NUAR * is not set, generate UA for all other registrants. */ if (lun->pr_res_type != SPR_TYPE_EX_AC && lun->pr_res_type != SPR_TYPE_WR_EX && (lun->MODE_CTRL.queue_flags & SCP_NUAR) == 0) { for (i = softc->init_min; i < softc->init_max; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ lba &= 0x1fffff; num_blocks = cdb->length; /* * This is correct according to SBC-2. 
*/ if (num_blocks == 0) num_blocks = 256; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; if (lun->be_lun->atomicblock == 0) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } cdb = (struct scsi_write_atomic_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_2btoul(cdb->length); if (num_blocks > lun->be_lun->atomicblock) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. * Note that this cannot happen with WRITE(6) or READ(6), since 0 * translates to 256 blocks for those commands. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA and/or DPO if caches are disabled. 
*/ if (isread) { if ((lun->MODE_CACHING.flags1 & SCP_RCD) != 0) flags |= CTL_LLF_FUA | CTL_LLF_DPO; } else { if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static int ctl_cnw_cont(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); struct ctl_scsiio *ctsio; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->flags &= ~CTL_LLF_COMPARE; lbalen->flags |= CTL_LLF_WRITE; CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_cnw(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); flags = 0; switch (ctsio->cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = cdb->length; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA if write cache is disabled. */ if ((lun->MODE_CACHING.flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_data_submit_done(), it'll get passed back to * ctl_cnw_cont() for further processing.
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio, MAX(lba, lun->be_lun->maxlba + 1)); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun, *request_lun = CTL_LUN(ctsio); struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; int num_filled, num_luns, num_port_luns, retval; uint32_t alloc_len, lun_datalen; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_report_luns *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_report_luns\n")); num_luns = 0; num_port_luns = port->lun_map ? 
port->lun_map_size : ctl_max_luns; mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) { if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX) num_luns++; } mtx_unlock(&softc->ctl_lock); switch (cdb->select_report) { case RPL_REPORT_DEFAULT: case RPL_REPORT_ALL: case RPL_REPORT_NONSUBSID: break; case RPL_REPORT_WELLKNOWN: case RPL_REPORT_ADMIN: case RPL_REPORT_CONGLOM: num_luns = 0; break; default: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); break; /* NOTREACHED */ } alloc_len = scsi_4btoul(cdb->length); /* * The initiator has to allocate at least 16 bytes for this request, * so he can at least get the header and the first LUN. Otherwise * we reject the request (per SPC-3 rev 14, section 6.21). */ if (alloc_len < (sizeof(struct scsi_report_luns_data) + sizeof(struct scsi_report_luns_lundata))) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); } lun_datalen = sizeof(*lun_data) + (num_luns * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0, num_filled = 0; targ_lun_id < num_port_luns && num_filled < num_luns; targ_lun_id++) { lun_id = ctl_lun_map_from_port(port, targ_lun_id); if (lun_id == UINT32_MAX) continue; lun = softc->ctl_luns[lun_id]; if (lun == NULL) continue; be64enc(lun_data->luns[num_filled++].lundata, ctl_encode_lun(targ_lun_id)); /* * According to SPC-3, rev 14 section 6.21: * * "The execution of a REPORT LUNS command to any valid and * installed logical unit shall clear the REPORTED LUNS DATA * HAS CHANGED unit attention condition for all logical * units of that target with respect to the requesting * initiator. A valid and installed logical unit is one * having a PERIPHERAL QUALIFIER of 000b in the standard * INQUIRY data (see 6.4.2)." * * If request_lun is NULL, the LUN this report luns command * was issued to is either disabled or doesn't exist. In that * case, we shouldn't clear any pending lun change unit * attention. */ if (request_lun != NULL) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); /* * It's quite possible that we've returned fewer LUNs than we allocated * space for. Trim it. */ lun_datalen = sizeof(*lun_data) + (num_filled * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(lun_datalen, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * We set this to the actual data length, regardless of how much * space we actually have to return results. If the user looks at * this value, he'll know whether or not he allocated enough space * and reissue the command if necessary. We don't support well * known logical units, so if the user asks for that, return none. */ scsi_ulto4b(lun_datalen - 8, lun_data->length); /* * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy * this request. 
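 * The LUN list built above is returned through ctl_datamove();
 * ctl_config_move_done() completes the command once the data has been
 * transferred to the initiator.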
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_request_sense(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_request_sense *cdb; struct scsi_sense_data *sense_ptr, *ps; uint32_t initidx; int have_error; u_int sense_len = SSD_FULL_SIZE; scsi_sense_data_type sense_format; ctl_ua_type ua_type; uint8_t asc = 0, ascq = 0; cdb = (struct scsi_request_sense *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_request_sense\n")); /* * Determine which sense format the user wants. */ if (cdb->byte2 & SRS_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; /* * struct scsi_sense_data, which is currently set to 256 bytes, is * larger than the largest allowed value for the length field in the * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. */ ctsio->kern_data_len = cdb->length; ctsio->kern_total_len = cdb->length; /* * If we don't have a LUN, we don't have any pending sense. */ if (lun == NULL || ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_link < CTL_HA_LINK_UNKNOWN)) { /* "Logical unit not supported" */ ctl_set_sense_data(sense_ptr, &sense_len, NULL, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x25, /*ascq*/ 0x00, SSD_ELEM_NONE); goto send; } have_error = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * Check for pending sense, and then for pending unit attentions. * Pending sense gets returned first, then pending unit attentions. */ mtx_lock(&lun->lun_lock); ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps += initidx % CTL_MAX_INIT_PER_PORT; if (ps != NULL && ps->error_code != 0) { scsi_sense_data_type stored_format; /* * Check to see which sense format was used for the stored * sense data. */ stored_format = scsi_sense_type(ps); /* * If the user requested a different sense format than the * one we stored, then we need to convert it to the other * format. If we're going from descriptor to fixed format * sense data, we may lose things in translation, depending * on what options were used. * * If the stored format is SSD_TYPE_NONE (i.e. invalid), * for some reason we'll just copy it out as-is. */ if ((stored_format == SSD_TYPE_FIXED) && (sense_format == SSD_TYPE_DESC)) ctl_sense_to_desc((struct scsi_sense_data_fixed *) ps, (struct scsi_sense_data_desc *)sense_ptr); else if ((stored_format == SSD_TYPE_DESC) && (sense_format == SSD_TYPE_FIXED)) ctl_sense_to_fixed((struct scsi_sense_data_desc *) ps, (struct scsi_sense_data_fixed *)sense_ptr); else memcpy(sense_ptr, ps, sizeof(*sense_ptr)); ps->error_code = 0; have_error = 1; } else { ua_type = ctl_build_ua(lun, initidx, sense_ptr, &sense_len, sense_format); if (ua_type != CTL_UA_NONE) have_error = 1; } if (have_error == 0) { /* * Report informational exception if have one and allowed. */ if (lun->MODE_IE.mrie != SIEP_MRIE_NO) { asc = lun->ie_asc; ascq = lun->ie_ascq; } ctl_set_sense_data(sense_ptr, &sense_len, lun, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NO_SENSE, /*asc*/ asc, /*ascq*/ ascq, SSD_ELEM_NONE); } mtx_unlock(&lun->lun_lock); send: /* * We report the SCSI status as OK, since the status of the command * itself is OK. We're reporting sense as parameter data. 
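 * Any pending sense consumed above has already been cleared, so a
 * repeated REQUEST SENSE returns NO SENSE unless a new error or unit
 * attention has been queued in the meantime.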
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x00, the Supported VPD Pages page. */ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_supported_pages *pages; int sup_page_size; int p; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sup_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; /* SCSI Feature Sets */ pages->page_list[p++] = SVPD_SCSI_SFS; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_unit_serial_number *sn_ptr; int data_len; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. 
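 * (0x20 is the ASCII space character; unused bytes in SCSI ASCII
 * fields are filled with spaces.)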
*/ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_extended_inquiry_data *eid_ptr; int data_len; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. */ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we report * it to that nexus once. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLR; /* * We support revert to defaults (RTD) bit in MODE SELECT. */ eid_ptr->flags5 = SVPD_EID_RTD_SUP; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_mode_page_policy *mpp_ptr; int data_len; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
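 * The single descriptor below uses page code 0x3f and subpage code
 * 0xff to cover all mode pages and subpages, reporting the SHARED
 * policy for every one of them.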
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; int data_len, g; uint8_t proto; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port && port->port_devid) data_len += port->port_devid->len; if (port && port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port && port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port && port->port_type == CTL_PORT_SAS) proto = SCSI_PROTO_SAS << 4; else if (port && port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. 
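 * The designator is copied verbatim from port->port_devid when the
 * frontend port has registered one.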
*/ if (port && port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; if (softc->is_single || (port && port->status & CTL_PORT_STATUS_HA_SHARED)) g = 1; else g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; scsi_ulto2b(g, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port && port->target_devid) { memcpy(desc, port->target_devid->data, port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
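 * Each online target port visible for this LUN contributes one port
 * designation descriptor below, carrying its relative port id, any
 * initiator port transport id registered by the frontend, and the
 * target port designator.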
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; scsi_ulto2b(port->targ_port, pd->relative_port_id); if (port->init_devid) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_sfs(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_sfs *sfs_ptr; int sfs_page_size, n; sfs_page_size = sizeof(*sfs_ptr) + 5 * 2; ctsio->kern_data_ptr = malloc(sfs_page_size, M_CTL, M_WAITOK | M_ZERO); sfs_ptr = (struct scsi_vpd_sfs *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sfs_page_size, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sfs_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sfs_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sfs_ptr->page_code = SVPD_SCSI_SFS; n = 0; /* Discovery 2016 */ scsi_ulto2b(0x0001, &sfs_ptr->codes[2 * n++]); if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* SBC Base 2016 */ scsi_ulto2b(0x0101, &sfs_ptr->codes[2 * n++]); /* SBC Base 2010 */ scsi_ulto2b(0x0102, &sfs_ptr->codes[2 * n++]); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { /* Basic Provisioning 2016 */ scsi_ulto2b(0x0103, &sfs_ptr->codes[2 * n++]); } /* Drive Maintenance 2016 */ //scsi_ulto2b(0x0104, &sfs_ptr->codes[2 * n++]); } scsi_ulto2b(4 + 2 * n, sfs_ptr->page_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_limits *bl_ptr; const char *val; uint64_t ival; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->kern_data_len = min(sizeof(*bl_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. 
The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_lba_cnt); ival = 0xffffffff; val = dnvlist_get_string(lun->be_lun->options, "unmap_max_descr", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_ulto4b(ival, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); ival = UINT64_MAX; val = dnvlist_get_string(lun->be_lun->options, "write_same_max_lba", NULL); if (val != NULL) ctl_expand_number(val, &ival); scsi_u64to8b(ival, bl_ptr->max_write_same_length); if (lun->be_lun->maxlba + 1 > ival) bl_ptr->flags |= SVPD_BL_WSNZ; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_block_device_characteristics *bdc_ptr; const char *value; u_int i; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*bdc_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
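 * The medium rotation rate and form factor reported below can be
 * overridden per LUN with the "rpm" and "formfactor" options;
 * otherwise the defaults are used.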
*/ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "rpm", NULL)) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = dnvlist_get_string(lun->be_lun->options, "formfactor", NULL)) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_RBWZ | SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_vpd_logical_block_prov *lbp_ptr; const char *value; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(sizeof(*lbp_ptr), alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; value = dnvlist_get_string(lun->be_lun->options, "provisioning_type", NULL); if (value != NULL) { if (strcmp(value, "resource") == 0) lbp_ptr->prov_type = SVPD_LBP_RESOURCE; else if (strcmp(value, "thin") == 0) lbp_ptr->prov_type = SVPD_LBP_THIN; } else lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
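 * Dispatches on the PAGE CODE field to the per-page handlers above.
 * Pages that only make sense for direct access LUNs (Block Limits,
 * Block Device Characteristics, Logical Block Provisioning) are
 * rejected with an invalid field error for other LUN types.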
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry *cdb; int alloc_len, retval; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_SCSI_SFS: retval = ctl_inquiry_evpd_sfs(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. */ static int ctl_inquiry_std(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_port *port = CTL_PORT(ctsio); struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_inquiry_data *inq_ptr; struct scsi_inquiry *cdb; const char *val; uint32_t alloc_len, data_len; ctl_port_type port_type; port_type = port->port_type; if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) port_type = CTL_PORT_SCSI; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); /* * We malloc the full inquiry data size here and fill it * in. If the user only asks for less, we'll give him * that much. */ data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; if (lun != NULL) { if ((lun->flags & CTL_LUN_PRIMARY_SC) || softc->ha_link >= CTL_HA_LINK_UNKNOWN) { inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; } else { inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | lun->be_lun->lun_type; } if (lun->flags & CTL_LUN_REMOVABLE) inq_ptr->dev_qual2 |= SID_RMB; } else inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ inq_ptr->version = SCSI_REV_SPC5; /* * According to SAM-3, even if a device only supports a single * level of LUN addressing, it should still set the HISUP bit: * * 4.9.1 Logical unit numbers overview * * All logical unit number formats described in this standard are * hierarchical in structure even when only a single level in that * hierarchy is used. The HISUP bit shall be set to one in the * standard INQUIRY data (see SPC-2) when any logical unit number * format described in this standard is used. Non-hierarchical * formats are outside the scope of this standard. 
* * Therefore we set the HiSup bit here. * * The response format is 2, per SPC-3. */ inq_ptr->response_format = SID_HiSup | 2; inq_ptr->additional_length = data_len - (offsetof(struct scsi_inquiry_data, additional_length) + 1); CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; inq_ptr->spc2_flags |= SPC2_SID_MultiP; inq_ptr->flags = SID_CmdQue; if (port_type == CTL_PORT_SCSI) inq_ptr->flags |= SID_WBus16 | SID_Sync; /* * Per SPC-3, unused bytes in ASCII strings are filled with spaces. * We have 8 bytes for the vendor name, and 16 bytes for the device * name and 4 bytes for the revision. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "vendor", NULL)) == NULL) { strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); } else { memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); strncpy(inq_ptr->vendor, val, min(sizeof(inq_ptr->vendor), strlen(val))); } if (lun == NULL) { strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); } else if ((val = dnvlist_get_string(lun->be_lun->options, "product", NULL)) == NULL) { switch (lun->be_lun->lun_type) { case T_DIRECT: strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); break; case T_PROCESSOR: strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, sizeof(inq_ptr->product)); break; case T_CDROM: strncpy(inq_ptr->product, CTL_CDROM_PRODUCT, sizeof(inq_ptr->product)); break; default: strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, sizeof(inq_ptr->product)); break; } } else { memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); strncpy(inq_ptr->product, val, min(sizeof(inq_ptr->product), strlen(val))); } /* * XXX make this a macro somewhere so it automatically gets * incremented when we make changes. */ if (lun == NULL || (val = dnvlist_get_string(lun->be_lun->options, "revision", NULL)) == NULL) { strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); } else { memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); strncpy(inq_ptr->revision, val, min(sizeof(inq_ptr->revision), strlen(val))); } /* * For parallel SCSI, we support double transition and single * transition clocking. We also support QAS (Quick Arbitration * and Selection) and Information Unit transfers on both the * control and array devices. 
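 * These bits are only reported for parallel SCSI ports; the field is
 * left zero for every other transport.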
*/ if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; /* SAM-6 (no version claimed) */ scsi_ulto2b(0x00C0, inq_ptr->version1); /* SPC-5 (no version claimed) */ scsi_ulto2b(0x05C0, inq_ptr->version2); if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); } else if (port_type == CTL_PORT_ISCSI) { /* iSCSI (no version claimed) */ scsi_ulto2b(0x0960, inq_ptr->version3); } else if (port_type == CTL_PORT_SAS) { /* SAS (no version claimed) */ scsi_ulto2b(0x0BE0, inq_ptr->version3); } else if (port_type == CTL_PORT_UMASS) { /* USB Mass Storage Class Bulk-Only Transport, Revision 1.0 */ scsi_ulto2b(0x1730, inq_ptr->version3); } if (lun == NULL) { /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); break; case T_PROCESSOR: break; case T_CDROM: /* MMC-6 (no version claimed) */ scsi_ulto2b(0x04E0, inq_ptr->version4); break; default: break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_inquiry(struct ctl_scsiio *ctsio) { struct scsi_inquiry *cdb; int retval; CTL_DEBUG_PRINT(("ctl_inquiry\n")); cdb = (struct scsi_inquiry *)ctsio->cdb; if (cdb->byte2 & SI_EVPD) retval = ctl_inquiry_evpd(ctsio); else if (cdb->page_code == 0) retval = ctl_inquiry_std(ctsio); else { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } return (retval); } int ctl_get_config(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_get_config_header *hdr; struct scsi_get_config_feature *feature; struct scsi_get_config *cdb; uint32_t alloc_len, data_len; int rt, starting; cdb = (struct scsi_get_config *)ctsio->cdb; rt = (cdb->rt & SGC_RT_MASK); starting = scsi_2btoul(cdb->starting_feature); alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_config_header) + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 8 + sizeof(struct scsi_get_config_feature) + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4 + sizeof(struct scsi_get_config_feature) + 4; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr; if (lun->flags & CTL_LUN_NO_MEDIA) scsi_ulto2b(0x0000, hdr->current_profile); else scsi_ulto2b(0x0010, hdr->current_profile); feature = (struct scsi_get_config_feature *)(hdr + 1); if (starting > 0x003b) goto done; if (starting > 0x003a) goto f3b; if (starting > 0x002b) goto f3a; if (starting > 0x002a) goto f2b; if (starting > 0x001f) goto f2a; if (starting > 0x001e) goto f1f; if (starting > 0x001d) goto f1e; if (starting > 0x0010) goto f1d; if (starting > 0x0003) goto f10; if (starting > 0x0002) goto f3; if 
(starting > 0x0001) goto f2; if (starting > 0x0000) goto f1; /* Profile List */ scsi_ulto2b(0x0000, feature->feature_code); feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto2b(0x0008, &feature->feature_data[0]); /* CD-ROM */ feature->feature_data[2] = 0x00; scsi_ulto2b(0x0010, &feature->feature_data[4]); /* DVD-ROM */ feature->feature_data[6] = 0x01; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1: /* Core */ scsi_ulto2b(0x0001, feature->feature_code); feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(0x00000000, &feature->feature_data[0]); feature->feature_data[4] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2: /* Morphing */ scsi_ulto2b(0x0002, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x02; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3: /* Removable Medium */ scsi_ulto2b(0x0003, feature->feature_code); feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x39; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA)) goto done; f10: /* Random Read */ scsi_ulto2b(0x0010, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 8; scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]); scsi_ulto2b(1, &feature->feature_data[4]); feature->feature_data[6] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1d: /* Multi-Read */ scsi_ulto2b(0x001D, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 0; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1e: /* CD Read */ scsi_ulto2b(0x001E, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f1f: /* DVD Read */ scsi_ulto2b(0x001F, feature->feature_code); feature->flags = 0x08; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x01; feature->feature_data[2] = 0x03; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2a: /* DVD+RW */ scsi_ulto2b(0x002A, feature->feature_code); feature->flags = 0x04; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f2b: /* DVD+R */ scsi_ulto2b(0x002B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3a: /* DVD+RW Dual Layer */ scsi_ulto2b(0x003A, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) 
feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature->feature_data[1] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; f3b: /* DVD+R Dual Layer */ scsi_ulto2b(0x003B, feature->feature_code); feature->flags = 0x00; if ((lun->flags & CTL_LUN_NO_MEDIA) == 0) feature->flags |= SGC_F_CURRENT; feature->add_length = 4; feature->feature_data[0] = 0x00; feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; done: data_len = (uint8_t *)feature - (uint8_t *)hdr; if (rt == SGC_RT_SPECIFIC && data_len > 4) { feature = (struct scsi_get_config_feature *)(hdr + 1); if (scsi_2btoul(feature->feature_code) == starting) feature = (struct scsi_get_config_feature *) &feature->feature_data[feature->add_length]; data_len = (uint8_t *)feature - (uint8_t *)hdr; } scsi_ulto4b(data_len - 4, hdr->data_length); ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_event_status(struct ctl_scsiio *ctsio) { struct scsi_get_event_status_header *hdr; struct scsi_get_event_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_get_event_status *)ctsio->cdb; if ((cdb->byte2 & SGESN_POLLED) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_get_event_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr; scsi_ulto2b(0, hdr->descr_length); hdr->nea_class = SGESN_NEA; hdr->supported_class = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_mechanism_status(struct ctl_scsiio *ctsio) { struct scsi_mechanism_status_header *hdr; struct scsi_mechanism_status *cdb; uint32_t alloc_len, data_len; cdb = (struct scsi_mechanism_status *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); data_len = sizeof(struct scsi_mechanism_status_header); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr; hdr->state1 = 0x00; hdr->state2 = 0xe0; scsi_ulto3b(0, hdr->lba); hdr->slots_num = 0; scsi_ulto2b(0, hdr->slots_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static void ctl_ultomsf(uint32_t lba, uint8_t *buf) { lba += 150; buf[0] = 0; buf[1] = bin2bcd((lba / 75) / 60); buf[2] = bin2bcd((lba / 75) % 60); buf[3] = bin2bcd(lba % 75); } int ctl_read_toc(struct ctl_scsiio *ctsio) { struct ctl_lun *lun = CTL_LUN(ctsio); struct scsi_read_toc_hdr *hdr; struct scsi_read_toc_type01_descr *descr; struct scsi_read_toc *cdb; uint32_t alloc_len, data_len; int 
format, msf; cdb = (struct scsi_read_toc *)ctsio->cdb; msf = (cdb->byte2 & CD_MSF) != 0; format = cdb->format; alloc_len = scsi_2btoul(cdb->data_len); data_len = sizeof(struct scsi_read_toc_hdr); if (format == 0) data_len += 2 * sizeof(struct scsi_read_toc_type01_descr); else data_len += sizeof(struct scsi_read_toc_type01_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_rel_offset = 0; ctsio->kern_data_len = min(data_len, alloc_len); ctsio->kern_total_len = ctsio->kern_data_len; hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr; if (format == 0) { scsi_ulto2b(0x12, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); descr++; descr->addr_ctl = 0x14; descr->track_number = 0xaa; if (msf) ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start); else scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start); } else { scsi_ulto2b(0x0a, hdr->data_length); hdr->first = 1; hdr->last = 1; descr = (struct scsi_read_toc_type01_descr *)(hdr + 1); descr->addr_ctl = 0x14; descr->track_number = 1; if (msf) ctl_ultomsf(0, descr->track_start); else scsi_ulto4b(0, descr->track_start); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * For known CDB types, parse the LBA and length. */ static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) { KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); switch (io->scsiio.cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = cdb->length; break; } case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)io->scsiio.cdb; *lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ *lba &= 0x1fffff; *len = cdb->length; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_write_atomic_16 *cdb; cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 
*)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_verify_16 *cdb; cdb = (struct scsi_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case UNMAP: { *lba = 0; *len = UINT64_MAX; break; } case SERVICE_ACTION_IN: { /* GET LBA STATUS */ struct scsi_get_lba_status *cdb; cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = UINT32_MAX; break; } default: *lba = 0; *len = UINT64_MAX; return (1); } return (0); } static ctl_action ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, bool seq) { uint64_t endlba1, endlba2; endlba1 = lba1 + len1 - (seq ? 0 : 1); endlba2 = lba2 + len2 - 1; if ((endlba1 < lba2) || (endlba2 < lba1)) return (CTL_ACTION_PASS); else return (CTL_ACTION_BLOCK); } static int ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) { struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end, *range; uint64_t lba; uint32_t len; KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); /* If not UNMAP -- go other way. */ if (io->scsiio.cdb[0] != UNMAP) return (CTL_ACTION_SKIP); /* If UNMAP without data -- block and wait for data. */ ptrlen = (struct ctl_ptr_len_flags *) &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || ptrlen->ptr == NULL) return (CTL_ACTION_BLOCK); /* UNMAP with data -- check for collision. 
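 * Walk the UNMAP block descriptor list and block if any descriptor
 * overlaps the LBA range of the other I/O; otherwise the two commands
 * may run concurrently.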
*/ buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); len = scsi_4btoul(range->length); if ((lba < lba2 + len2) && (lba + len > lba2)) return (CTL_ACTION_BLOCK); } return (CTL_ACTION_PASS); } static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; retval = ctl_get_lba_len(io2, &lba2, &len2); KASSERT(retval == 0, ("ctl_get_lba_len() error")); retval = ctl_extent_check_unmap(io1, lba2, len2); if (retval != CTL_ACTION_SKIP) return (retval); retval = ctl_get_lba_len(io1, &lba1, &len1); KASSERT(retval == 0, ("ctl_get_lba_len() error")); if (seq && (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)) seq = FALSE; return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); } static ctl_action ctl_seq_check(union ctl_io *io1, union ctl_io *io2) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE) return (CTL_ACTION_PASS); retval = ctl_get_lba_len(io1, &lba1, &len1); KASSERT(retval == 0, ("ctl_get_lba_len() error")); retval = ctl_get_lba_len(io2, &lba2, &len2); KASSERT(retval == 0, ("ctl_get_lba_len() error")); if (lba1 + len1 == lba2) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); } static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, const uint8_t *serialize_row, union ctl_io *ooa_io) { /* * The initiator attempted multiple untagged commands at the same * time. Can't do that. */ if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP); /* * The initiator attempted to send multiple tagged commands with * the same ID. (It's fine if different initiators have the same * tag ID.) * * Even if all of those conditions are true, we don't kill the I/O * if the command ahead of us has been aborted. We won't end up * sending it to the FETD, and it's perfectly legal to resend a * command with the same tag number as long as the previous * instance of this tag number has been aborted somehow. */ if (__predict_true(pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && __predict_true(ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && __predict_false(pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid == ooa_io->io_hdr.nexus.initid)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP_TAG); /* * If we get a head of queue tag, SAM-3 says that we should * immediately execute it. * * What happens if this command would normally block for some other * reason? e.g. a request sense with a head of queue tag * immediately after a write. Normally that would block, but this * will result in its getting executed immediately... * * We currently return "pass" instead of "skip", so we'll end up * going through the rest of the queue to check for overlapped tags. * * XXX KDM check for other types of blockage first?? 
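 * If none of the tag based checks below apply, the outcome comes from
 * the per-opcode serialization table, possibly refined by extent or
 * sequence checks for overlapping LBA ranges.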
*/ if (__predict_false(pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) return (CTL_ACTION_PASS); /* * Simple tags get blocked until all head of queue and ordered tags * ahead of them have completed. I'm lumping untagged commands in * with simple tags here. XXX KDM is that the right thing to do? */ if (__predict_false(ooa_io->scsiio.tag_type == CTL_TAG_ORDERED) || __predict_false(ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) return (CTL_ACTION_BLOCK); /* Unsupported command in OOA queue. */ if (__predict_false(ooa_io->scsiio.seridx == CTL_SERIDX_INVLD)) return (CTL_ACTION_PASS); switch (serialize_row[ooa_io->scsiio.seridx]) { case CTL_SER_SEQ: if (lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF) return (ctl_seq_check(ooa_io, pending_io)); /* FALLTHROUGH */ case CTL_SER_PASS: return (CTL_ACTION_PASS); case CTL_SER_EXTENTOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_PASS); /* FALLTHROUGH */ case CTL_SER_EXTENT: return (ctl_extent_check(ooa_io, pending_io, (lun->be_lun->serseq == CTL_LUN_SERSEQ_ON))); case CTL_SER_BLOCKOPT: if ((lun->MODE_CTRL.queue_flags & SCP_QUEUE_ALG_MASK) == SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_PASS); /* FALLTHROUGH */ case CTL_SER_BLOCK: return (CTL_ACTION_BLOCK); default: __assert_unreachable(); } } /* * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. * Assumptions: * - pending_io is generally either incoming, or on the blocked queue * - starting I/O is the I/O we want to start the check with. */ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io **starting_io) { union ctl_io *ooa_io = *starting_io; const uint8_t *serialize_row; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Aborted commands are not going to be executed and may even * not report completion, so we don't care about their order. * Let them complete ASAP to clean the OOA queue. */ if (__predict_false(pending_io->io_hdr.flags & CTL_FLAG_ABORT)) return (CTL_ACTION_SKIP); /* * Ordered tags have to block until all items ahead of them have * completed. If we get called with an ordered tag, we always * block, if something else is ahead of us in the queue. */ if ((pending_io->scsiio.tag_type == CTL_TAG_ORDERED) && (ooa_io != NULL)) return (CTL_ACTION_BLOCK); serialize_row = ctl_serialize_table[pending_io->scsiio.seridx]; /* * Run back along the OOA queue, starting with the current * blocked I/O and going through every I/O before it on the * queue. If starting_io is NULL, we'll just end up returning * CTL_ACTION_PASS. */ for (; ooa_io != NULL; ooa_io = (union ctl_io *)LIST_NEXT(&ooa_io->io_hdr, ooa_links)) { action = ctl_check_for_blockage(lun, pending_io, serialize_row, ooa_io); if (action != CTL_ACTION_PASS) { *starting_io = ooa_io; return (action); } } *starting_io = NULL; return (CTL_ACTION_PASS); } /* * Try to unblock the specified I/O. * * skip parameter allows explicitly skip present blocker of the I/O, * starting from the previous one on OOA queue. It can be used when * we know for sure that the blocker I/O does no longer count. 
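 * If the I/O turns out to be still blocked it is simply moved to the
 * blocked queue of its new blocker; otherwise it is released: passed
 * on for execution, or completed with an error if it overlaps or
 * fails the LUN state checks.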
*/ static void ctl_try_unblock_io(struct ctl_lun *lun, union ctl_io *io, bool skip) { struct ctl_softc *softc = lun->ctl_softc; union ctl_io *bio, *obio; const struct ctl_cmd_entry *entry; union ctl_ha_msg msg_info; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); if (io->io_hdr.blocker == NULL) return; obio = bio = io->io_hdr.blocker; if (skip) bio = (union ctl_io *)LIST_NEXT(&bio->io_hdr, ooa_links); action = ctl_check_ooa(lun, io, &bio); if (action == CTL_ACTION_BLOCK) { /* Still blocked, but may be by different I/O now. */ if (bio != obio) { TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = bio; } return; } /* No longer blocked, one way or another. */ TAILQ_REMOVE(&obio->io_hdr.blocked_queue, &io->io_hdr, blocked_links); io->io_hdr.blocker = NULL; switch (action) { case CTL_ACTION_PASS: case CTL_ACTION_SKIP: /* Serializing commands from the other SC retire there. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = io; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_NOWAIT); break; } /* * Check this I/O for LUN state changes that may have happened * while this command was blocked. The LUN state may have been * changed by a command ahead of us in the queue. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(io); break; default: __assert_unreachable(); case CTL_ACTION_OVERLAP: ctl_set_overlapped_cmd(&io->scsiio); goto error; case CTL_ACTION_OVERLAP_TAG: ctl_set_overlapped_tag(&io->scsiio, io->scsiio.tag_num & 0xff); error: /* Serializing commands from the other SC are done here. */ if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) && (softc->ha_mode != CTL_HA_MODE_XFER)) { ctl_try_unblock_others(lun, io, TRUE); LIST_REMOVE(&io->io_hdr, ooa_links); ctl_copy_sense_data_back(io, &msg_info); msg_info.hdr.original_sc = io->io_hdr.remote_io; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); ctl_free_io(io); break; } ctl_done(io); break; } } /* * Try to unblock I/Os blocked by the specified I/O. * * skip parameter allows explicitly skip the specified I/O as blocker, * starting from the previous one on the OOA queue. It can be used when * we know for sure that the specified I/O does no longer count (done). * It has to be still on OOA queue though so that we know where to start. */ static void ctl_try_unblock_others(struct ctl_lun *lun, union ctl_io *bio, bool skip) { union ctl_io *io, *next_io; mtx_assert(&lun->lun_lock, MA_OWNED); for (io = (union ctl_io *)TAILQ_FIRST(&bio->io_hdr.blocked_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, blocked_links); KASSERT(io->io_hdr.blocker != NULL, ("I/O %p on blocked list without blocker", io)); ctl_try_unblock_io(lun, io, skip); } KASSERT(!skip || TAILQ_EMPTY(&bio->io_hdr.blocked_queue), ("blocked_queue is not empty after skipping %p", bio)); } /* * This routine (with one exception) checks LUN flags that can be set by * commands ahead of us in the OOA queue. 
These flags have to be checked * when a command initially comes in, and when we pull a command off the * blocked queue and are preparing to execute it. The reason we have to * check these flags for commands on the blocked queue is that the LUN * state may have been changed by a command ahead of us while we're on the * blocked queue. * * Ordering is somewhat important with these checks, so please pay * careful attention to the placement of any new checks. */ static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) { struct ctl_softc *softc = lun->ctl_softc; int retval; uint32_t residx; retval = 0; mtx_assert(&lun->lun_lock, MA_OWNED); /* * If this shelf is a secondary shelf controller, we may have to * reject some commands disallowed by HA mode and link state. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { if (softc->ha_link == CTL_HA_LINK_OFFLINE && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_unavail(ctsio); retval = 1; goto bailout; } if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 && (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) { ctl_set_lun_transit(ctsio); retval = 1; goto bailout; } if (softc->ha_mode == CTL_HA_MODE_ACT_STBY && (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) { ctl_set_lun_standby(ctsio); retval = 1; goto bailout; } /* The rest of checks are only done on executing side */ if (softc->ha_mode == CTL_HA_MODE_XFER) goto bailout; } if (entry->pattern & CTL_LUN_PAT_WRITE) { if (lun->be_lun->flags & CTL_LUN_FLAG_READONLY) { ctl_set_hw_write_protected(ctsio); retval = 1; goto bailout; } if ((lun->MODE_CTRL.eca_and_aen & SCP_SWP) != 0) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); retval = 1; goto bailout; } } /* * Check for a reservation conflict. If this command isn't allowed * even on reserved LUNs, and if this initiator isn't the one who * reserved us, reject the command with a reservation conflict. */ residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if ((lun->flags & CTL_LUN_RESERVED) && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { if (lun->res_idx != residx) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { /* No reservation or command is allowed. */; } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && (lun->pr_res_type == SPR_TYPE_WR_EX || lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_WR_EX_AR)) { /* The command is allowed for Write Exclusive resv. */; } else { /* * if we aren't registered or it's a res holder type * reservation and this isn't the res holder then set a * conflict. 
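 *
 * (Clarifying note, not in the original comment: in the test below,
 * persistent reservation type codes below 4 are the single-holder types
 * per SPC -- Write Exclusive and Exclusive Access -- so only the holder
 * escapes the conflict there; for the registrants-only and all-registrants
 * types it is the registration (prkey) check that decides.)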
*/ if (ctl_get_prkey(lun, residx) == 0 || (residx != lun->pr_res_idx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) { if (lun->flags & CTL_LUN_EJECTED) ctl_set_lun_ejected(ctsio); else if (lun->flags & CTL_LUN_NO_MEDIA) { if (lun->flags & CTL_LUN_REMOVABLE) ctl_set_lun_no_media(ctsio); else ctl_set_lun_int_reqd(ctsio); } else if (lun->flags & CTL_LUN_STOPPED) ctl_set_lun_stopped(ctsio); else goto bailout; retval = 1; goto bailout; } bailout: return (retval); } static void ctl_failover_io(union ctl_io *io, int have_lock) { ctl_set_busy(&io->scsiio); ctl_done(io); } static void ctl_failover_lun(union ctl_io *rio) { struct ctl_softc *softc = CTL_SOFTC(rio); struct ctl_lun *lun; struct ctl_io_hdr *io, *next_io; uint32_t targ_lun; targ_lun = rio->io_hdr.nexus.targ_mapped_lun; CTL_DEBUG_PRINT(("FAILOVER for lun %u\n", targ_lun)); /* Find and lock the LUN. */ mtx_lock(&softc->ctl_lock); if (targ_lun > ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } if (softc->ha_mode == CTL_HA_MODE_XFER) { LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_ABORT | CTL_FLAG_FAILOVER; ctl_try_unblock_io(lun, (union ctl_io *)io, FALSE); } else { /* This can be only due to DATAMOVE */ io->msg_type = CTL_MSG_DATAMOVE_DONE; io->flags &= ~CTL_FLAG_DMA_INPROG; io->flags |= CTL_FLAG_IO_ACTIVE; io->port_status = 31340; ctl_enqueue_isc((union ctl_io *)io); } } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (io->flags & CTL_FLAG_IO_ACTIVE) { io->flags |= CTL_FLAG_FAILOVER; } else { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } else { /* SERIALIZE modes */ LIST_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) { /* We are master */ if (io->flags & CTL_FLAG_FROM_OTHER_SC) { if (io->blocker != NULL) { TAILQ_REMOVE(&io->blocker->io_hdr.blocked_queue, io, blocked_links); io->blocker = NULL; } ctl_try_unblock_others(lun, (union ctl_io *)io, TRUE); LIST_REMOVE(io, ooa_links); ctl_free_io((union ctl_io *)io); } else /* We are slave */ if (io->flags & CTL_FLAG_SENT_2OTHER_SC) { io->flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (!(io->flags & CTL_FLAG_IO_ACTIVE)) { ctl_set_busy(&((union ctl_io *)io)-> scsiio); ctl_done((union ctl_io *)io); } } } } mtx_unlock(&lun->lun_lock); } static void ctl_scsiio_precheck(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = CTL_SOFTC(ctsio); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; union ctl_io *bio; uint32_t initidx, targ_lun; lun = NULL; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; if (targ_lun < ctl_max_luns) lun = softc->ctl_luns[targ_lun]; if (lun) { /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; } } CTL_LUN(ctsio) = lun; if (lun) { CTL_BACKEND_LUN(ctsio) = lun->be_lun; /* * Every I/O goes into the OOA queue for a particular LUN, * and stays there until completion. 
*/ #ifdef CTL_TIME_IO if (LIST_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif LIST_INSERT_HEAD(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); } /* Get command entry and return error if it is unsuppotyed. */ entry = ctl_validate_command(ctsio); if (entry == NULL) { if (lun) mtx_unlock(&lun->lun_lock); return; } ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; /* * Check to see whether we can send this command to LUNs that don't * exist. This should pretty much only be the case for inquiry * and request sense. Further checks, below, really require having * a LUN, so we can't really check the command anymore. Just put * it on the rtr queue. */ if (lun == NULL) { if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); return; } ctl_set_unsupported_lun(ctsio); ctl_done((union ctl_io *)ctsio); CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); return; } else { /* * Make sure we support this particular command on this LUN. * e.g., we don't support writes to the control LUN. */ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return; } } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * If we've got a request sense, it'll clear the contingent * allegiance condition. Otherwise, if we have a CA condition for * this initiator, clear it, because it sent down a command other * than request sense. */ if (ctsio->cdb[0] != REQUEST_SENSE) { struct scsi_sense_data *ps; ps = lun->pending_sense[initidx / CTL_MAX_INIT_PER_PORT]; if (ps != NULL) ps[initidx % CTL_MAX_INIT_PER_PORT].error_code = 0; } /* * If the command has this flag set, it handles its own unit * attention reporting, we shouldn't do anything. Otherwise we * check for any pending unit attentions, and send them back to the * initiator. We only do this when a command initially comes in, * not when we pull it off the blocked queue. * * According to SAM-3, section 5.3.2, the order that things get * presented back to the host is basically unit attentions caused * by some sort of reset event, busy status, reservation conflicts * or task set full, and finally any other status. * * One issue here is that some of the unit attentions we report * don't fall into the "reset" category (e.g. "reported luns data * has changed"). So reporting it here, before the reservation * check, may be technically wrong. I guess the only thing to do * would be to check for and report the reset events here, and then * check for the other unit attention types after we check for a * reservation conflict. * * XXX KDM need to fix this */ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_ua_type ua_type; u_int sense_len = 0; ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, &sense_len, SSD_TYPE_NONE); if (ua_type != CTL_UA_NONE) { mtx_unlock(&lun->lun_lock); ctsio->scsi_status = SCSI_STATUS_CHECK_COND; ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; ctsio->sense_len = sense_len; ctl_done((union ctl_io *)ctsio); return; } } if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return; } /* * XXX CHD this is where we want to send IO to other side if * this LUN is secondary on this SC. We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and the flag * the copy we send as FROM_OTHER. 
* We also need to stuff the address of the original IO so we can * find it easily. Something similar will need be done on the other * side so when we are done we can find the copy. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); msg_info.scsi.cdb_len = ctsio->cdb_len; msg_info.scsi.priority = ctsio->priority; if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { ctl_set_busy(ctsio); ctl_done((union ctl_io *)ctsio); return; } return; } bio = (union ctl_io *)LIST_NEXT(&ctsio->io_hdr, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, &bio)) { case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_BLOCK: ctsio->io_hdr.blocker = bio; TAILQ_INSERT_TAIL(&bio->io_hdr.blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; default: __assert_unreachable(); } } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); ctsio->seridx = entry->seridx; if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("Not defined length for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { switch (lun_type) { case T_DIRECT: if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) return (0); break; case T_PROCESSOR: if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) return (0); break; case T_CDROM: if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) return (0); break; default: return (0); } return (1); } static int 
ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry *entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. */ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } static int ctl_target_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; uint32_t initidx; ctl_ua_type ua_type; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = io->taskio.task_action; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); if (io->taskio.task_action == CTL_TASK_TARGET_RESET) ua_type = CTL_UA_TARG_RESET; else ua_type = CTL_UA_BUS_RESET; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) == UINT32_MAX) continue; ctl_do_lun_reset(lun, initidx, ua_type); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static void ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_io_hdr *xioh; int i; mtx_lock(&lun->lun_lock); /* Abort tasks. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { xioh->flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; ctl_try_unblock_io(lun, (union ctl_io *)xioh, FALSE); } /* Clear CA. */ for (i = 0; i < ctl_max_ports; i++) { free(lun->pending_sense[i], M_CTL); lun->pending_sense[i] = NULL; } /* Clear reservation. */ lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent) { for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->prevent, i); lun->prevent_count = 0; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, -1); /* Establish UA. 
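 * Per the SAM-3 discussion above we currently establish it for every
 * initiator: the -1 argument below means no initiator is excepted, while
 * the disabled #if 0 branch shows the alternative of excluding the
 * initiator that requested the reset. (Clarifying note, not part of the
 * original comment.)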
*/ #if 0 ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif mtx_unlock(&lun->lun_lock); } static int ctl_lun_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } ctl_do_lun_reset(lun, initidx, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { union ctl_ha_msg msg_info; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } return (0); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { struct ctl_io_hdr *xioh; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((targ_port == UINT32_MAX || targ_port == xioh->nexus.targ_port) && (init_id == UINT32_MAX || init_id == xioh->nexus.initid)) { if (targ_port != xioh->nexus.targ_port || init_id != xioh->nexus.initid) xioh->flags |= CTL_FLAG_ABORT_STATUS; xioh->flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xioh->nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. 
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_i_t_nexus_loss(struct ctl_softc *softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t p, i; p = initidx / CTL_MAX_INIT_PER_PORT; i = initidx % CTL_MAX_INIT_PER_PORT; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); /* Abort tasks. */ ctl_abort_tasks_lun(lun, p, i, 1); /* Clear CA. */ ps = lun->pending_sense[p]; if (ps != NULL) ps[i].error_code = 0; /* Clear reservation. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) lun->flags &= ~CTL_LUN_RESERVED; /* Clear prevent media removal. */ if (lun->prevent && ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } /* Clear TPC status */ ctl_tpc_lun_clear(lun, initidx); /* Establish UA. */ ctl_est_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); uint32_t initidx; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_i_t_nexus_loss(softc, initidx, CTL_UA_I_T_NEXUS_LOSS); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_abort_task(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_io_hdr *xioh; struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) || (xioh->nexus.initid != io->io_hdr.nexus.initid) || (xioh->flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. Otherwise, * we just check to see whether the tag numbers * match. 
This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. */ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) { #else /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { #endif xioh->flags |= CTL_FLAG_ABORT; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } ctl_try_unblock_io(lun, xio, FALSE); } } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_task(union ctl_io *io, int task_set) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_io_hdr *xioh; struct ctl_lun *lun; int found = 0; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); LIST_FOREACH(xioh, &lun->ooa_queue, ooa_links) { union ctl_io *xio = (union ctl_io *)xioh; if ((xioh->nexus.targ_port != io->io_hdr.nexus.targ_port) || (xioh->nexus.initid != io->io_hdr.nexus.initid) || (xioh->flags & CTL_FLAG_ABORT)) continue; if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { found = 1; break; } } mtx_unlock(&lun->lun_lock); if (found) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_async_event(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; ctl_ua_type ua; uint32_t targ_lun, initidx; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); mtx_unlock(&lun->lun_lock); if (ua != CTL_UA_NONE) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_run_task(union ctl_io *io) { int retval = 1; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case 
CTL_TASK_LUN_RESET: retval = ctl_lun_reset(io); break; case CTL_TASK_TARGET_RESET: case CTL_TASK_BUS_RESET: retval = ctl_target_reset(io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; case CTL_TASK_QUERY_TASK: retval = ctl_query_task(io, 0); break; case CTL_TASK_QUERY_TASK_SET: retval = ctl_query_task(io, 1); break; case CTL_TASK_QUERY_ASYNC_EVENT: retval = ctl_query_async_event(io); break; default: printf("%s: got unknown task management event %d\n", __func__, io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller. */ static void ctl_handle_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */ entry = ctl_get_cmd_entry(&io->scsiio, NULL); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_done(io); break; } mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, &io->scsiio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctl_done(io); break; } if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { ctl_free_io(io); break; } mtx_lock(&lun->lun_lock); ctl_try_unblock_others(lun, io, TRUE); LIST_REMOVE(&io->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_free_io(io); break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc(io); ctl_free_io(io); break; case CTL_MSG_BAD_JUJU: ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ ctl_datamove_done(io, false); break; case CTL_MSG_FAILOVER: ctl_failover_lun(io); ctl_free_io(io); break; default: printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); ctl_free_io(io); break; } } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. * * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. */ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. 
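 *
 * Illustrative example (not from the original comment): the extent check
 * treats both ranges as half-open intervals, overlapping iff
 *
 *	lba1 < lba2 + len2 && lba2 < lba1 + len1
 *
 * so with an injection range of lba 100, len 16 (blocks 100-115), an
 * 8-block READ at LBA 108 matches, while one at LBA 116 does not.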
*/ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. */ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio, (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_OUT); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. */ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. 
*/ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ static void ctl_datamove_done_process(union ctl_io *io) { #ifdef CTL_TIME_IO struct bintime cur_bt; #endif KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; if ((io->io_hdr.port_status != 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } else if (io->scsiio.kern_data_resid != 0 && (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { ctl_set_invalid_field_ciu(&io->scsiio); } else if (ctl_debug & CTL_DEBUG_CDB_DATA) ctl_data_print(io); } void ctl_datamove_done(union ctl_io *io, bool samethr) { ctl_datamove_done_process(io); io->scsiio.be_move_done(io, samethr); } void ctl_datamove(union ctl_io *io) { void (*fe_datamove)(union ctl_io *io); mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); /* No data transferred yet. Frontend must update this when done. */ io->scsiio.kern_data_resid = io->scsiio.kern_data_len; #ifdef CTL_TIME_IO getbinuptime(&io->io_hdr.dma_start_bt); #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; ctl_datamove_done_process(io); io->scsiio.be_move_done(io, true); return; } /* Don't confuse frontend with zero length data move. 
*/ if (io->scsiio.kern_data_len == 0) { ctl_datamove_done_process(io); io->scsiio.be_move_done(io, true); return; } fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.kern_data_resid = io->scsiio.kern_data_resid; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.port_status = io->io_hdr.port_status; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; } /* * The DMA to the remote side is done, now we need to tell the other side * we're done so it can continue with its data movement. */ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; uint32_t i; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The data is in local and remote memory, so now we need to send * status (good or back) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int ctl_datamove_remote_dm_write_cb(union ctl_io *io, bool samethr) { int retval; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_dm_read_cb(union ctl_io *io, bool samethr) { uint32_t i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; /* * The read is done, now we need to send status (good or bad) back * to the other side. 
*/ ctl_send_datamove_done(io, /*have_lock*/ 0); return (0); } static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; void (*fe_datamove)(union ctl_io *io); io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA read failed with error %d\n", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)CTL_LSGL(io); /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; /* XXX KDM add checks like the ones in ctl_datamove? */ fe_datamove = CTL_PORT(io)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist; uint32_t len_to_go; int retval; int i; retval = 0; local_sglist = CTL_LSGL(io); len_to_go = io->scsiio.kern_data_len; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; len_to_go > 0; i++) { local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); local_sglist[i].addr = malloc(local_sglist[i].len, M_CTL, M_WAITOK); len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The original * number of S/G entries is available in rem_sg_entries. */ io->scsiio.kern_sg_entries = i; return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; uint32_t local_used, remote_used, total_used; int i, j, isc_ret; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (1); } local_sglist = CTL_LSGL(io); remote_sglist = CTL_RSGL(io); local_used = 0; remote_used = 0; total_used = 0; /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ isc_ret = CTL_HA_STATUS_SUCCESS; for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { uint32_t cur_len; uint8_t *tmp_ptr; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. 
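 *
 * Illustrative example (not part of the original comment): if the current
 * local segment has 4096 bytes left and the remote segment only 1024,
 * then cur_len = MIN(4096, 1024) = 1024; the remote index j advances to
 * the next segment while the local side keeps its 3072-byte remainder
 * for the following iteration.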
*/ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); rq->size = cur_len; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; #if 0 /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ rq->local = vtophys(tmp_ptr); } else rq->local = tmp_ptr; #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); rq->local = tmp_ptr; #endif tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; isc_ret = ctl_dt_single(rq); if (isc_ret > CTL_HA_STATUS_SUCCESS) break; } if (isc_ret != CTL_HA_STATUS_WAIT) { rq->ret = isc_ret; callback(rq); } return (0); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; uint32_t i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if (retval != 0) { /* * Make sure we free memory if there was an error.. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(CTL_LSGLT(io)[i].addr, M_CTL); free(CTL_RSGL(io), M_CTL); CTL_RSGL(io) = NULL; CTL_LSGL(io) = NULL; } } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { mtx_assert(&((struct ctl_softc *)CTL_SOFTC(io))->ctl_lock, MA_NOTOWNED); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 0); return; } /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller. 
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) ctl_datamove_remote_write(io); else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ctl_datamove_remote_read(io); else { io->io_hdr.port_status = 31339; ctl_send_datamove_done(io, /*have_lock*/ 0); } } static void ctl_process_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun = CTL_LUN(io); void (*fe_done)(union ctl_io *io); union ctl_ha_msg msg; CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = port->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x/%d, Prio: %d\n", io->scsiio.tag_num, io->scsiio.tag_type, io->scsiio.priority); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task Action: %d Tag: 0x%04x/%d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_INFO) ctl_io_error_print(io, NULL); fe_done(io); return; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any informational exception and status * of this command can be modified to report it in form of either * RECOVERED ERROR or NO SENSE, depending on MRIE mode page field. */ if (lun->ie_reported == 0 && lun->ie_asc != 0 && io->io_hdr.status == CTL_SUCCESS && (io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0) { uint8_t mrie = lun->MODE_IE.mrie; uint8_t per = ((lun->MODE_RWER.byte3 & SMS_RWER_PER) || (lun->MODE_VER.byte3 & SMS_VER_PER)); if (((mrie == SIEP_MRIE_REC_COND && per) || mrie == SIEP_MRIE_REC_UNCOND || mrie == SIEP_MRIE_NO_SENSE) && (ctl_get_cmd_entry(&io->scsiio, NULL)->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ (mrie == SIEP_MRIE_NO_SENSE) ? SSD_KEY_NO_SENSE : SSD_KEY_RECOVERED_ERROR, /*asc*/ lun->ie_asc, /*ascq*/ lun->ie_ascq, SSD_ELEM_NONE); lun->ie_reported = 1; } } else if (lun->ie_reported < 0) lun->ie_reported = 0; /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. */ if (!STAILQ_EMPTY(&lun->error_list) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency? 
*/ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { int type; #ifdef CTL_TIME_IO struct bintime bt; getbinuptime(&bt); bintime_sub(&bt, &io->io_hdr.start_bt); #endif if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; lun->stats.bytes[type] += io->scsiio.kern_total_len; lun->stats.operations[type] ++; lun->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&lun->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&lun->stats.time[type], &bt); #endif mtx_lock(&port->port_lock); port->stats.bytes[type] += io->scsiio.kern_total_len; port->stats.operations[type] ++; port->stats.dmas[type] += io->io_hdr.num_dmas; #ifdef CTL_TIME_IO bintime_add(&port->stats.dma_time[type], &io->io_hdr.dma_bt); bintime_add(&port->stats.time[type], &bt); #endif mtx_unlock(&port->port_lock); } /* * Run through the blocked queue of this I/O and see if anything * can be unblocked, now that this I/O is done and will be removed. * We need to do it before removal to have OOA position to start. */ ctl_try_unblock_others(lun, io, TRUE); /* * Remove this from the OOA queue. */ LIST_REMOVE(&io->io_hdr, ooa_links); #ifdef CTL_TIME_IO if (LIST_EMPTY(&lun->ooa_queue)) lun->last_busy = getsbinuptime(); #endif /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. */ if ((lun->flags & CTL_LUN_INVALID) && LIST_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); ctl_free_lun(lun); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && (ctl_debug & CTL_DEBUG_INFO) != 0) ctl_io_error_print(io, NULL); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. */ if ((softc->ha_mode != CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.serializing_sc = io->io_hdr.remote_io; msg.hdr.nexus = io->io_hdr.nexus; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), M_WAITOK); } fe_done(io); } /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. */ int ctl_queue_sense(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_port *port = CTL_PORT(io); struct ctl_lun *lun; struct scsi_sense_data *ps; uint32_t initidx, p, targ_lun; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. * If we don't have a LUN for this, just toss the sense information. 
*/ mtx_lock(&softc->ctl_lock); if (targ_lun >= ctl_max_luns || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); goto bailout; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); p = initidx / CTL_MAX_INIT_PER_PORT; if (lun->pending_sense[p] == NULL) { lun->pending_sense[p] = malloc(sizeof(*ps) * CTL_MAX_INIT_PER_PORT, M_CTL, M_NOWAIT | M_ZERO); } if ((ps = lun->pending_sense[p]) != NULL) { ps += initidx % CTL_MAX_INIT_PER_PORT; memset(ps, 0, sizeof(*ps)); memcpy(ps, &io->scsiio.sense_data, io->scsiio.sense_len); } mtx_unlock(&lun->lun_lock); bailout: ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_enqueue_incoming(io); break; default: printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } int ctl_run(union ctl_io *io) { struct ctl_port *port = CTL_PORT(io); CTL_DEBUG_PRINT(("ctl_run cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ io->io_hdr.nexus.targ_mapped_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_scsiio_precheck(&io->scsiio); break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_run_task(io); break; default: printf("ctl_run: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } #ifdef CTL_IO_DELAY static void ctl_done_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_done(io); } #endif /* CTL_IO_DELAY */ void ctl_serseq_done(union ctl_io *io) { struct ctl_lun *lun = CTL_LUN(io); - if (lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF) - return; - /* This is racy, but should not be a problem. */ if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) { mtx_lock(&lun->lun_lock); io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; ctl_try_unblock_others(lun, io, FALSE); mtx_unlock(&lun->lun_lock); } else io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE; } void ctl_done(union ctl_io *io) { /* * Enable this to catch duplicate completion issues. */ #if 0 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { printf("%s: type %d msg %d cdb %x iptl: " "%u:%u:%u tag 0x%04x " "flag %#x status %x\n", __func__, io->io_hdr.io_type, io->io_hdr.msg_type, io->scsiio.cdb[0], io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, (io->io_hdr.io_type == CTL_IO_TASK) ? io->taskio.tag_num : io->scsiio.tag_num, io->io_hdr.flags, io->io_hdr.status); } else io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; #endif /* * This is an internal copy of an I/O, and should not go through * the normal done processing logic. 
*/ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun = CTL_LUN(io); if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - incoming queue * - RtR queue * * If those queues are empty, we break out of the loop and * go to sleep. */ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(&io->scsiio); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } /* Sleep until we have something to do. 
*/ mtx_sleep(thr, &thr->queue_lock, PDROP, "-", 0); } thr->thread = NULL; kthread_exit(); } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct ctl_logical_block_provisioning_page *page; const char *attr; union ctl_ha_msg msg; uint64_t thres, val; int i, e, set; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); thread_lock(curthread); sched_prio(curthread, PUSER - 1); thread_unlock(curthread); while (!softc->shutdown) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_NO_MEDIA) || lun->backend->lun_attr == NULL) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_mode == CTL_HA_MODE_XFER) continue; if ((lun->MODE_RWER.byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->MODE_LBP; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr(lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e = (val >= thres); else e = (val <= thres); if (e) break; } mtx_lock(&lun->lun_lock); if (e) { scsi_u64to8b((uint8_t *)&page->descr[i] - (uint8_t *)page, lun->ua_tpt_info); if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = 1; } else set = 0; } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = -1; } mtx_unlock(&lun->lun_lock); if (set != 0 && lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. 
*/ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = (set > 0); msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); mtx_unlock(&softc->ctl_lock); // XXX ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); mtx_lock(&softc->ctl_lock); } } mtx_sleep(&softc->thresh_thread, &softc->ctl_lock, PDROP, "-", CTL_LBP_PERIOD * hz); } softc->thresh_thread = NULL; kthread_exit(); } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = CTL_SOFTC(io); struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl_backend.h b/sys/cam/ctl/ctl_backend.h index 05e65abe41f8..fe4e7f5df1d0 100644 --- a/sys/cam/ctl/ctl_backend.h +++ b/sys/cam/ctl/ctl_backend.h @@ -1,250 +1,251 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend.h#2 $ * $FreeBSD$ */ /* * CTL backend driver definitions * * Author: Ken Merry */ #ifndef _CTL_BACKEND_H_ #define _CTL_BACKEND_H_ #include #include typedef enum { CTL_LUN_SERSEQ_OFF, + CTL_LUN_SERSEQ_SOFT, CTL_LUN_SERSEQ_READ, CTL_LUN_SERSEQ_ON } ctl_lun_serseq; #ifdef _KERNEL #define CTL_BACKEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ return (ctl_backend_register( \ (struct ctl_backend_driver *)data)); \ break; \ case MOD_UNLOAD: \ return (ctl_backend_deregister( \ (struct ctl_backend_driver *)data)); \ break; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) struct ctl_be_lun; typedef void (*be_callback_t)(struct ctl_be_lun *be_lun); /* * The lun_type field is the SCSI device type of this particular LUN. In * general, this should be T_DIRECT, although backends will want to create * a processor LUN, typically at LUN 0. See scsi_all.h for the defines for * the various SCSI device types. * * The flags are described above. * * The be_lun field is the backend driver's own context that will get * passsed back so that it can tell which LUN CTL is referencing. * * maxlba is the maximum accessible LBA on the LUN. Note that this is * different from the capacity of the array. capacity = maxlba + 1 * * blocksize is the size, in bytes, of each LBA on the LUN. In general * this should be 512. In theory CTL should be able to handle other block * sizes. Host application software may not deal with it very well, though. * * pblockexp is the log2() of number of LBAs on the LUN per physical sector. * * pblockoff is the lowest LBA on the LUN aligned to physical sector. * * ublockexp is the log2() of number of LBAs on the LUN per UNMAP block. * * ublockoff is the lowest LBA on the LUN aligned to UNMAP block. * * atomicblock is the number of blocks that can be written atomically. * * opttxferlen is the number of blocks that can be written in one operation. * * req_lun_id is the requested LUN ID. CTL only pays attention to this * field if the CTL_LUN_FLAG_ID_REQ flag is set. If the requested LUN ID is * not available, the LUN addition will fail. If a particular LUN ID isn't * requested, the first available LUN ID will be allocated. * * serial_num is the device serial number returned in the SCSI INQUIRY VPD * page 0x80. This should be a unique, per-shelf value. The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded out with ASCII spaces. This field should NOT be NULL * terminated. * * device_id is the T10 device identifier returned in the SCSI INQUIRY VPD * page 0x83. This should be a unique, per-LUN value. 
The data inside * this field should be ASCII only, left aligned, and any unused space * should be padded with ASCII spaces. This field should NOT be NULL * terminated. * * The lun_shutdown() method is the callback for the ctl_remove_lun() * call. It is called when all outstanding I/O for that LUN has been * completed and CTL has deleted the resources for that LUN. When the CTL * backend gets this call, it can safely free its per-LUN resources. * * The be field is a pointer to the ctl_backend_driver structure, which * contains the backend methods to be called by CTL. * * The ctl_lun field is for CTL internal use only, and should not be used * by the backend. * * The links field is for CTL internal use only, and should not be used by * the backend. */ struct ctl_be_lun { uint8_t lun_type; /* passed to CTL */ ctl_backend_lun_flags flags; /* passed to CTL */ ctl_lun_serseq serseq; /* passed to CTL */ uint64_t maxlba; /* passed to CTL */ uint32_t blocksize; /* passed to CTL */ uint16_t pblockexp; /* passed to CTL */ uint16_t pblockoff; /* passed to CTL */ uint16_t ublockexp; /* passed to CTL */ uint16_t ublockoff; /* passed to CTL */ uint32_t atomicblock; /* passed to CTL */ uint32_t opttxferlen; /* passed to CTL */ uint32_t req_lun_id; /* passed to CTL */ uint32_t lun_id; /* returned from CTL */ uint8_t serial_num[CTL_SN_LEN]; /* passed to CTL */ uint8_t device_id[CTL_DEVID_LEN];/* passed to CTL */ be_callback_t lun_shutdown; /* passed to CTL */ struct ctl_backend_driver *be; /* passed to CTL */ void *ctl_lun; /* used by CTL */ nvlist_t *options; /* passed to CTL */ STAILQ_ENTRY(ctl_be_lun) links; /* used by CTL */ }; typedef enum { CTL_BE_FLAG_NONE = 0x00, /* no flags */ CTL_BE_FLAG_HAS_CONFIG = 0x01, /* can do config reads, writes */ } ctl_backend_flags; typedef int (*be_init_t)(void); typedef int (*be_shutdown_t)(void); typedef int (*be_func_t)(union ctl_io *io); typedef void (*be_vfunc_t)(union ctl_io *io); typedef int (*be_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); typedef int (*be_luninfo_t)(struct ctl_be_lun *be_lun, struct sbuf *sb); typedef uint64_t (*be_lunattr_t)(struct ctl_be_lun *be_lun, const char *attrname); struct ctl_backend_driver { char name[CTL_BE_NAME_LEN]; /* passed to CTL */ ctl_backend_flags flags; /* passed to CTL */ be_init_t init; /* passed to CTL */ be_shutdown_t shutdown; /* passed to CTL */ be_func_t data_submit; /* passed to CTL */ be_func_t config_read; /* passed to CTL */ be_func_t config_write; /* passed to CTL */ be_ioctl_t ioctl; /* passed to CTL */ be_luninfo_t lun_info; /* passed to CTL */ be_lunattr_t lun_attr; /* passed to CTL */ #ifdef CS_BE_CONFIG_MOVE_DONE_IS_NOT_USED be_func_t config_move_done; /* passed to backend */ #endif #if 0 be_vfunc_t config_write_done; /* passed to backend */ #endif STAILQ_ENTRY(ctl_backend_driver) links; /* used by CTL */ }; int ctl_backend_register(struct ctl_backend_driver *be); int ctl_backend_deregister(struct ctl_backend_driver *be); struct ctl_backend_driver *ctl_backend_find(char *backend_name); /* * To add a LUN, call ctl_add_lun(). */ int ctl_add_lun(struct ctl_be_lun *be_lun); /* * To remove a LUN, first call ctl_remove_lun(). * You will get the lun_shutdown() callback when all * I/O to the LUN has completed and the LUN has been deleted. */ int ctl_remove_lun(struct ctl_be_lun *be_lun); /* * To start a LUN (transition from powered off to powered on state) call * ctl_start_lun(). To stop a LUN (transition from powered on to powered * off state) call ctl_stop_lun(). 
*/ int ctl_start_lun(struct ctl_be_lun *be_lun); int ctl_stop_lun(struct ctl_be_lun *be_lun); /* * Methods to notify about media and tray status changes. */ int ctl_lun_no_media(struct ctl_be_lun *be_lun); int ctl_lun_has_media(struct ctl_be_lun *be_lun); int ctl_lun_ejected(struct ctl_be_lun *be_lun); /* * Called on LUN HA role change. */ int ctl_lun_primary(struct ctl_be_lun *be_lun); int ctl_lun_secondary(struct ctl_be_lun *be_lun); /* * Let the backend notify the initiators about changes. */ void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun); #endif /* _KERNEL */ #endif /* _CTL_BACKEND_H_ */ /* * vim: ts=8 */ diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c index 9f96c157d5ae..4ffaf0912915 100644 --- a/sys/cam/ctl/ctl_backend_block.c +++ b/sys/cam/ctl/ctl_backend_block.c @@ -1,2804 +1,2816 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2009-2011 Spectra Logic Corporation * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2015 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ */ /* * CAM Target Layer driver backend for block devices. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * The idea here is that we'll allocate enough S/G space to hold a 1MB * I/O. If we get an I/O larger than that, we'll split it. 
*/ #define CTLBLK_HALF_IO_SIZE (512 * 1024) #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) #define CTLBLK_MIN_SEG (128 * 1024) #define CTLBLK_MAX_SEG MIN(CTLBLK_HALF_IO_SIZE, maxphys) #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1) #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) #define CTLBLK_NUM_SEGS (CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG) #ifdef CTLBLK_DEBUG #define DPRINTF(fmt, args...) \ printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while(0) #endif #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) SDT_PROVIDER_DEFINE(cbb); typedef enum { CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, CTL_BE_BLOCK_LUN_WAITING = 0x04, } ctl_be_block_lun_flags; typedef enum { CTL_BE_BLOCK_NONE, CTL_BE_BLOCK_DEV, CTL_BE_BLOCK_FILE } ctl_be_block_type; struct ctl_be_block_filedata { struct ucred *cred; }; union ctl_be_block_bedata { struct ctl_be_block_filedata file; }; struct ctl_be_block_io; struct ctl_be_block_lun; typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun, const char *attrname); /* * Backend LUN structure. There is a 1:1 mapping between a block device * and a backend block LUN, and between a backend block LUN and a CTL LUN. */ struct ctl_be_block_lun { struct ctl_be_lun cbe_lun; /* Must be first element. */ struct ctl_lun_create_params params; char *dev_path; ctl_be_block_type dev_type; struct vnode *vn; union ctl_be_block_bedata backend; cbb_dispatch_t dispatch; cbb_dispatch_t lun_flush; cbb_dispatch_t unmap; cbb_dispatch_t get_lba_status; cbb_getattr_t getattr; uint64_t size_blocks; uint64_t size_bytes; struct ctl_be_block_softc *softc; struct devstat *disk_stats; ctl_be_block_lun_flags flags; SLIST_ENTRY(ctl_be_block_lun) links; struct taskqueue *io_taskqueue; struct task io_task; int num_threads; STAILQ_HEAD(, ctl_io_hdr) input_queue; STAILQ_HEAD(, ctl_io_hdr) config_read_queue; STAILQ_HEAD(, ctl_io_hdr) config_write_queue; STAILQ_HEAD(, ctl_io_hdr) datamove_queue; struct mtx_padalign io_lock; struct mtx_padalign queue_lock; }; /* * Overall softc structure for the block backend module. */ struct ctl_be_block_softc { struct sx modify_lock; struct mtx lock; int num_luns; SLIST_HEAD(, ctl_be_block_lun) lun_list; uma_zone_t beio_zone; uma_zone_t bufmin_zone; uma_zone_t bufmax_zone; }; static struct ctl_be_block_softc backend_block_softc; /* * Per-I/O information. 
*/ struct ctl_be_block_io { union ctl_io *io; struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; struct iovec xiovecs[CTLBLK_MAX_SEGS]; int refcnt; int bio_cmd; int two_sglists; int num_segs; int num_bios_sent; int num_bios_done; int send_complete; int first_error; uint64_t first_error_offset; struct bintime ds_t0; devstat_tag_type ds_tag_type; devstat_trans_flags ds_trans_type; uint64_t io_len; uint64_t io_offset; int io_arg; struct ctl_be_block_softc *softc; struct ctl_be_block_lun *lun; void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ }; extern struct ctl_softc *control_softc; static int cbb_num_threads = 32; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CAM Target Layer Block Backend"); SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, &cbb_num_threads, 0, "Number of threads per backing file"); static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); static void ctl_free_beio(struct ctl_be_block_io *beio); static void ctl_complete_beio(struct ctl_be_block_io *beio); static int ctl_be_block_move_done(union ctl_io *io, bool samethr); static void ctl_be_block_biodone(struct bio *bio); static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio); static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname); static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io); static void ctl_be_block_worker(void *context, int pending); static int ctl_be_block_submit(union ctl_io *io); static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req); static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req); static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun); static int ctl_be_block_config_write(union ctl_io *io); static int ctl_be_block_config_read(union ctl_io *io); static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb); static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname); static int ctl_be_block_init(void); static int ctl_be_block_shutdown(void); static struct ctl_backend_driver 
ctl_be_block_driver = { .name = "block", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_be_block_init, .shutdown = ctl_be_block_shutdown, .data_submit = ctl_be_block_submit, .config_read = ctl_be_block_config_read, .config_write = ctl_be_block_config_write, .ioctl = ctl_be_block_ioctl, .lun_info = ctl_be_block_lun_info, .lun_attr = ctl_be_block_lun_attr }; MALLOC_DEFINE(M_CTLBLK, "ctlblock", "Memory used for CTL block backend"); CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); static void ctl_alloc_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg, size_t len) { if (len <= CTLBLK_MIN_SEG) { sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK); } else { KASSERT(len <= CTLBLK_MAX_SEG, ("Too large alloc %zu > %lu", len, CTLBLK_MAX_SEG)); sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK); } sg->len = len; } static void ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg) { if (sg->len <= CTLBLK_MIN_SEG) { uma_zfree(softc->bufmin_zone, sg->addr); } else { KASSERT(sg->len <= CTLBLK_MAX_SEG, ("Too large free %zu > %lu", sg->len, CTLBLK_MAX_SEG)); uma_zfree(softc->bufmax_zone, sg->addr); } } static struct ctl_be_block_io * ctl_alloc_beio(struct ctl_be_block_softc *softc) { struct ctl_be_block_io *beio; beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO); beio->softc = softc; beio->refcnt = 1; return (beio); } static void ctl_real_free_beio(struct ctl_be_block_io *beio) { struct ctl_be_block_softc *softc = beio->softc; int i; for (i = 0; i < beio->num_segs; i++) { ctl_free_seg(softc, &beio->sg_segs[i]); /* For compare we had two equal S/G lists. */ if (beio->two_sglists) { ctl_free_seg(softc, &beio->sg_segs[i + CTLBLK_HALF_SEGS]); } } uma_zfree(softc->beio_zone, beio); } static void ctl_refcnt_beio(void *arg, int diff) { struct ctl_be_block_io *beio = arg; if (atomic_fetchadd_int(&beio->refcnt, diff) + diff == 0) ctl_real_free_beio(beio); } static void ctl_free_beio(struct ctl_be_block_io *beio) { ctl_refcnt_beio(beio, -1); } static void ctl_complete_beio(struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; if (beio->beio_cont != NULL) { beio->beio_cont(beio); } else { ctl_free_beio(beio); ctl_data_submit_done(io); } } static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static void ctl_be_block_compare(union ctl_io *io) { struct ctl_be_block_io *beio; uint64_t off, res; int i; uint8_t info[8]; beio = (struct ctl_be_block_io *)PRIV(io)->ptr; off = 0; for (i = 0; i < beio->num_segs; i++) { res = cmp(beio->sg_segs[i].addr, beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, beio->sg_segs[i].len); off += res; if (res < beio->sg_segs[i].len) break; } if (i < beio->num_segs) { scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); } else ctl_set_success(&io->scsiio); } static int ctl_be_block_move_done(union ctl_io *io, bool samethr) { struct ctl_be_block_io *beio; struct ctl_be_block_lun *be_lun; struct ctl_lba_len_flags *lbalen; beio = (struct ctl_be_block_io *)PRIV(io)->ptr; DPRINTF("entered\n"); io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; /* * We set status at this point for read and compare commands. 
*/ if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) { lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_READ) { ctl_set_success(&io->scsiio); } else if (lbalen->flags & CTL_LLF_COMPARE) { /* We have two data blocks ready for comparison. */ ctl_be_block_compare(io); } } /* * If this is a read, or a write with errors, it is done. */ if ((beio->bio_cmd == BIO_READ) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { ctl_complete_beio(beio); return (0); } /* * At this point, we have a write and the DMA completed successfully. * If we were called synchronously in the original thread then just * dispatch, otherwise we now have to queue it to the task queue to * execute the backend I/O. That is because we do blocking * memory allocations, and in the file backing case, blocking I/O. * This move done routine is generally called in the SIM's * interrupt context, and therefore we cannot block. */ be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); if (samethr) { be_lun->dispatch(be_lun, beio); } else { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); } return (0); } static void ctl_be_block_biodone(struct bio *bio) { - struct ctl_be_block_io *beio; - struct ctl_be_block_lun *be_lun; + struct ctl_be_block_io *beio = bio->bio_caller1; + struct ctl_be_block_lun *be_lun = beio->lun; + struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; union ctl_io *io; int error; - beio = bio->bio_caller1; - be_lun = beio->lun; io = beio->io; DPRINTF("entered\n"); error = bio->bio_error; mtx_lock(&be_lun->io_lock); if (error != 0 && (beio->first_error == 0 || bio->bio_offset < beio->first_error_offset)) { beio->first_error = error; beio->first_error_offset = bio->bio_offset; } beio->num_bios_done++; /* * XXX KDM will this cause WITNESS to complain? Holding a lock * during the free might cause it to complain. */ g_destroy_bio(bio); /* * If the send complete bit isn't set, or we aren't the last I/O to * complete, then we're done. */ if ((beio->send_complete == 0) || (beio->num_bios_done < beio->num_bios_sent)) { mtx_unlock(&be_lun->io_lock); return; } /* * At this point, we've verified that we are the last I/O to * complete, so it's safe to drop the lock. */ devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If there are any errors from the backing device, we fail the * entire I/O with a medium error. */ error = beio->first_error; if (error != 0) { if (error == EOPNOTSUPP) { ctl_set_invalid_opcode(&io->scsiio); } else if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else if (beio->bio_cmd == BIO_FLUSH) { /* XXX KDM is there a better error here? */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad2); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write, a flush, a delete or verify, we're all done. * If this is a read, we can now send the data to the user.
*/ if ((beio->bio_cmd == BIO_WRITE) || (beio->bio_cmd == BIO_FLUSH) || (beio->bio_cmd == BIO_DELETE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); - ctl_serseq_done(io); + if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); } ctl_datamove(io); } } static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct mount *mountpoint; int error, lock_flags; DPRINTF("entered\n"); binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT, curthread); VOP_UNLOCK(be_lun->vn); vn_finished_write(mountpoint); mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); if (error == 0) ctl_set_success(&io->scsiio); else { /* XXX KDM is there a better error here? */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 0xbad1); } ctl_complete_beio(beio); } SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, file_done, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t"); static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { + struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_filedata *file_data; union ctl_io *io; struct uio xuio; struct iovec *xiovec; size_t s; int error, flags, i; DPRINTF("entered\n"); file_data = &be_lun->backend.file; io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); if (beio->bio_cmd == BIO_READ) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); + if (beio->beio_cont == NULL && + cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); /* * UFS pays attention to IO_DIRECT for reads. If the * DIRECTIO option is configured into the kernel, it calls * ffs_rawread(). But that only works for single-segment * uios with user space addresses. In our case, with a * kernel uio, it still reads into the buffer cache, but it * will just try to release the buffer from the cache later * on in ffs_read(). * * ZFS does not pay attention to IO_DIRECT for reads. * * UFS does not pay attention to IO_SYNC for reads.
* * ZFS pays attention to IO_SYNC (which translates into the * Solaris define FRSYNC for zfs_read()) for reads. It * attempts to sync the file before reading. */ error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn); SDT_PROBE0(cbb, , read, file_done); if (error == 0 && xuio.uio_resid > 0) { /* * If we read less than requested (EOF), then * we should clean the rest of the buffer. */ s = beio->io_len - xuio.uio_resid; for (i = 0; i < beio->num_segs; i++) { if (s >= beio->sg_segs[i].len) { s -= beio->sg_segs[i].len; continue; } bzero((uint8_t *)beio->sg_segs[i].addr + s, beio->sg_segs[i].len - s); s = 0; } } } else { struct mount *mountpoint; int lock_flags; (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) lock_flags = LK_SHARED; else lock_flags = LK_EXCLUSIVE; vn_lock(be_lun->vn, lock_flags | LK_RETRY); /* * UFS pays attention to IO_DIRECT for writes. The write * is done asynchronously. (Normally the write would just * get put into cache.) * * UFS pays attention to IO_SYNC for writes. It will * attempt to write the buffer out synchronously if that * flag is set. * * ZFS does not pay attention to IO_DIRECT for writes. * * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) * for writes. It will flush the transaction from the * cache before returning. */ error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); VOP_UNLOCK(be_lun->vn); vn_finished_write(mountpoint); SDT_PROBE0(cbb, , write, file_done); } mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user.
*/ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); - ctl_serseq_done(io); + if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); } ctl_datamove(io); } } static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, status; DPRINTF("entered\n"); off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off, 0, curthread->td_ucred, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } VOP_UNLOCK(be_lun->vn); data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) { struct vattr vattr; struct statfs statfs; uint64_t val; int error; val = UINT64_MAX; if (be_lun->vn == NULL) return (val); vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); if (strcmp(attrname, "blocksused") == 0) { error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error == 0) val = vattr.va_bytes / be_lun->cbe_lun.blocksize; } if (strcmp(attrname, "blocksavail") == 0 && !VN_IS_DOOMED(be_lun->vn)) { error = VFS_STATFS(be_lun->vn->v_mount, &statfs); if (error == 0) val = statfs.f_bavail * statfs.f_bsize / be_lun->cbe_lun.blocksize; } VOP_UNLOCK(be_lun->vn); return (val); } static void ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { + struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; union ctl_io *io; struct cdevsw *csw; struct cdev *dev; struct uio xuio; struct iovec *xiovec; int error, flags, i, ref; DPRINTF("entered\n"); io = beio->io; flags = 0; if (ARGS(io)->flags & CTL_LLF_DPO) flags |= IO_DIRECT; if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) flags |= IO_SYNC; bzero(&xuio, sizeof(xuio)); if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, file_start); xuio.uio_rw = UIO_READ; } else { SDT_PROBE0(cbb, , write, file_start); xuio.uio_rw = UIO_WRITE; } xuio.uio_offset = beio->io_offset; xuio.uio_resid = beio->io_len; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = beio->xiovecs; xuio.uio_iovcnt = beio->num_segs; xuio.uio_td = curthread; for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { xiovec->iov_base = beio->sg_segs[i].addr; xiovec->iov_len = beio->sg_segs[i].len; } binuptime(&beio->ds_t0); devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { - if (beio->bio_cmd == BIO_READ) + if (beio->bio_cmd == BIO_READ) { + if (beio->beio_cont == NULL && + cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); error = csw->d_read(dev, &xuio, flags); - else + } else error = csw->d_write(dev, &xuio, flags); dev_relthread(dev, ref); } else error = 
ENXIO; if (beio->bio_cmd == BIO_READ) SDT_PROBE0(cbb, , read, file_done); else SDT_PROBE0(cbb, , write, file_done); mtx_lock(&be_lun->io_lock); devstat_end_transaction(beio->lun->disk_stats, beio->io_len, beio->ds_tag_type, beio->ds_trans_type, /*now*/ NULL, /*then*/&beio->ds_t0); mtx_unlock(&be_lun->io_lock); /* * If we got an error, set the sense data to "MEDIUM ERROR" and * return the I/O to the user. */ if (error != 0) { if (error == ENOSPC || error == EDQUOT) { ctl_set_space_alloc_fail(&io->scsiio); } else if (error == EROFS || error == EACCES) { ctl_set_hw_write_protected(&io->scsiio); } else { ctl_set_medium_error(&io->scsiio, beio->bio_cmd == BIO_READ); } ctl_complete_beio(beio); return; } /* * If this is a write or a verify, we're all done. * If this is a read, we can now send the data to the user. */ if ((beio->bio_cmd == BIO_WRITE) || (ARGS(io)->flags & CTL_LLF_VERIFY)) { ctl_set_success(&io->scsiio); ctl_complete_beio(beio); } else { if ((ARGS(io)->flags & CTL_LLF_READ) && beio->beio_cont == NULL) { ctl_set_success(&io->scsiio); - ctl_serseq_done(io); + if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); } ctl_datamove(io); } } static void ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io = beio->io; struct cdevsw *csw; struct cdev *dev; struct ctl_lba_len_flags *lbalen = ARGS(io); struct scsi_get_lba_status_data *data; off_t roff, off; int error, ref, status; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; goto done; } off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 0; /* mapped up to off */ else { error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD, curthread); if (error == 0 && off > roff) status = 1; /* deallocated up to off */ else { status = 0; /* unknown up to the end */ off = be_lun->size_bytes; } } dev_relthread(dev, ref); done: data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(lbalen->lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - lbalen->lba), data->descr[0].length); data->descr[0].status = status; ctl_complete_beio(beio); } static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { struct bio *bio; struct cdevsw *csw; struct cdev *dev; int ref; DPRINTF("entered\n"); /* This can't fail, it's a blocking allocation. */ bio = g_alloc_bio(); bio->bio_cmd = BIO_FLUSH; bio->bio_offset = 0; bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = 0; /* * We don't need to acquire the LUN lock here, because we are only * sending one bio, and so there is no other context to synchronize * with. 
*/ beio->num_bios_sent = 1; beio->send_complete = 1; binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw) { bio->bio_dev = dev; csw->d_strategy(bio); dev_relthread(dev, ref); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } static void ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio, uint64_t off, uint64_t len, int last) { struct bio *bio; uint64_t maxlen; struct cdevsw *csw; struct cdev *dev; int ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize); while (len > 0) { bio = g_alloc_bio(); bio->bio_cmd = BIO_DELETE; bio->bio_dev = dev; bio->bio_offset = off; bio->bio_length = MIN(len, maxlen); bio->bio_data = 0; bio->bio_done = ctl_be_block_biodone; bio->bio_caller1 = beio; bio->bio_pblkno = off / be_lun->cbe_lun.blocksize; off += bio->bio_length; len -= bio->bio_length; mtx_lock(&be_lun->io_lock); beio->num_bios_sent++; if (last && len == 0) beio->send_complete = 1; mtx_unlock(&be_lun->io_lock); if (csw) { csw->d_strategy(bio); } else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { union ctl_io *io; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end; uint64_t len; io = beio->io; DPRINTF("entered\n"); binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); if (beio->io_offset == -1) { beio->io_len = 0; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { len = (uint64_t)scsi_4btoul(buf->length) * be_lun->cbe_lun.blocksize; beio->io_len += len; ctl_be_block_unmap_dev_range(be_lun, beio, scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize, len, (end - buf < 2) ? TRUE : FALSE); } } else ctl_be_block_unmap_dev_range(be_lun, beio, beio->io_offset, beio->io_len, TRUE); } static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, struct ctl_be_block_io *beio) { TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); struct bio *bio; struct cdevsw *csw; struct cdev *dev; off_t cur_offset; int i, max_iosize, ref; DPRINTF("entered\n"); csw = devvn_refthread(be_lun->vn, &dev, &ref); /* * We have to limit our I/O size to the maximum supported by the * backend device. */ if (csw) { max_iosize = dev->si_iosize_max; if (max_iosize < PAGE_SIZE) max_iosize = DFLTPHYS; } else max_iosize = DFLTPHYS; cur_offset = beio->io_offset; for (i = 0; i < beio->num_segs; i++) { size_t cur_size; uint8_t *cur_ptr; cur_size = beio->sg_segs[i].len; cur_ptr = beio->sg_segs[i].addr; while (cur_size > 0) { /* This can't fail, it's a blocking allocation. 
*/ bio = g_alloc_bio(); KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); bio->bio_cmd = beio->bio_cmd; bio->bio_dev = dev; bio->bio_caller1 = beio; bio->bio_length = min(cur_size, max_iosize); bio->bio_offset = cur_offset; bio->bio_data = cur_ptr; bio->bio_done = ctl_be_block_biodone; bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize; cur_offset += bio->bio_length; cur_ptr += bio->bio_length; cur_size -= bio->bio_length; TAILQ_INSERT_TAIL(&queue, bio, bio_queue); beio->num_bios_sent++; } } beio->send_complete = 1; binuptime(&beio->ds_t0); devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); /* * Fire off all allocated requests! */ while ((bio = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bio, bio_queue); if (csw) csw->d_strategy(bio); else { bio->bio_error = ENXIO; ctl_be_block_biodone(bio); } } if (csw) dev_relthread(dev, ref); } static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname) { struct diocgattr_arg arg; struct cdevsw *csw; struct cdev *dev; int error, ref; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (UINT64_MAX); strlcpy(arg.name, attrname, sizeof(arg.name)); arg.len = sizeof(arg.value.off); if (csw->d_ioctl) { error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); } else error = ENODEV; dev_relthread(dev, ref); if (error != 0) return (UINT64_MAX); return (arg.value.off); } static void ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; beio->io_len = lbalen->len * cbe_lun->blocksize; beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_arg = (lbalen->flags & SSC_IMMED) != 0; beio->bio_cmd = BIO_FLUSH; beio->ds_trans_type = DEVSTAT_NO_DATA; DPRINTF("SYNC\n"); be_lun->lun_flush(be_lun, beio); } static void ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_config_write_done(io); return; } ctl_be_block_config_write(io); } static void ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_softc *softc = be_lun->softc; struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_lba_len_flags *lbalen; uint64_t len_left, lba; uint32_t pb, pbo, adj; int i, seglen; uint8_t *buf, *end; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; lbalen = ARGS(io); if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; be_lun->unmap(be_lun, beio); return; } beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; DPRINTF("WRITE SAME at LBA %jx len %u\n", (uintmax_t)lbalen->lba, lbalen->len); pb = 
cbe_lun->blocksize << be_lun->cbe_lun.pblockexp; if (be_lun->cbe_lun.pblockoff > 0) pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff; else pbo = 0; len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) { /* * Setup the S/G entry for this chunk. */ seglen = MIN(CTLBLK_MAX_SEG, len_left); if (pb > cbe_lun->blocksize) { adj = ((lbalen->lba + lba) * cbe_lun->blocksize + seglen - pbo) % pb; if (seglen > adj) seglen -= adj; else seglen -= seglen % cbe_lun->blocksize; } else seglen -= seglen % cbe_lun->blocksize; ctl_alloc_seg(softc, &beio->sg_segs[i], seglen); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); beio->num_segs++; len_left -= seglen; buf = beio->sg_segs[i].addr; end = buf + seglen; for (; buf < end; buf += cbe_lun->blocksize) { if (lbalen->flags & SWS_NDOB) { memset(buf, 0, cbe_lun->blocksize); } else { memcpy(buf, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lbalen->lba + lba, buf); lba++; } } beio->io_offset = lbalen->lba * cbe_lun->blocksize; beio->io_len = lba * cbe_lun->blocksize; /* We can not do all in one run. Correct and schedule rerun. */ if (len_left > 0) { lbalen->lba += lba; lbalen->len -= lba; beio->beio_cont = ctl_be_block_cw_done_ws; } be_lun->dispatch(be_lun, beio); } static void ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_ptr_len_flags *ptrlen; DPRINTF("entered\n"); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { ctl_free_beio(beio); ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } beio->io_len = 0; beio->io_offset = -1; beio->bio_cmd = BIO_DELETE; beio->ds_trans_type = DEVSTAT_FREE; DPRINTF("UNMAP\n"); be_lun->unmap(be_lun, beio); } static void ctl_be_block_cr_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_read_done(io); } static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cr_done; PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: /* GET LBA STATUS */ beio->bio_cmd = -1; beio->ds_trans_type = DEVSTAT_NO_DATA; beio->ds_tag_type = DEVSTAT_TAG_ORDERED; beio->io_len = 0; if (be_lun->get_lba_status) be_lun->get_lba_status(be_lun, beio); else ctl_be_block_cr_done(beio); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } static void ctl_be_block_cw_done(struct ctl_be_block_io *beio) { union ctl_io *io; io = beio->io; ctl_free_beio(beio); ctl_config_write_done(io); } static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; DPRINTF("entered\n"); softc = be_lun->softc; beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; beio->beio_cont = ctl_be_block_cw_done; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case 
CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } PRIV(io)->ptr = (void *)beio; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: ctl_be_block_cw_dispatch_sync(be_lun, io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_be_block_cw_dispatch_ws(be_lun, io); break; case UNMAP: ctl_be_block_cw_dispatch_unmap(be_lun, io); break; default: panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); break; } } SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t"); SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t"); static void ctl_be_block_next(struct ctl_be_block_io *beio) { struct ctl_be_block_lun *be_lun; union ctl_io *io; io = beio->io; be_lun = beio->lun; ctl_free_beio(beio); if ((io->io_hdr.flags & CTL_FLAG_ABORT) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { ctl_data_submit_done(io); return; } io->io_hdr.status &= ~CTL_STATUS_MASK; io->io_hdr.status |= CTL_STATUS_NONE; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); } static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, union ctl_io *io) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_be_block_io *beio; struct ctl_be_block_softc *softc; struct ctl_lba_len_flags *lbalen; struct ctl_ptr_len_flags *bptrlen; uint64_t len_left, lbas; int i; softc = be_lun->softc; DPRINTF("entered\n"); lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_WRITE) { SDT_PROBE0(cbb, , write, start); } else { SDT_PROBE0(cbb, , read, start); } beio = ctl_alloc_beio(softc); beio->io = io; beio->lun = be_lun; bptrlen = PRIV(io); bptrlen->ptr = (void *)beio; switch (io->scsiio.tag_type) { case CTL_TAG_ORDERED: beio->ds_tag_type = DEVSTAT_TAG_ORDERED; break; case CTL_TAG_HEAD_OF_QUEUE: beio->ds_tag_type = DEVSTAT_TAG_HEAD; break; case CTL_TAG_UNTAGGED: case CTL_TAG_SIMPLE: case CTL_TAG_ACA: default: beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; break; } if (lbalen->flags & CTL_LLF_WRITE) { beio->bio_cmd = BIO_WRITE; beio->ds_trans_type = DEVSTAT_WRITE; } else { beio->bio_cmd = BIO_READ; beio->ds_trans_type = DEVSTAT_READ; } DPRINTF("%s at LBA %jx len %u @%ju\n", (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); if (lbalen->flags & CTL_LLF_COMPARE) { beio->two_sglists = 1; lbas = CTLBLK_HALF_IO_SIZE; } else { lbas = CTLBLK_MAX_IO_SIZE; } lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize); beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize; beio->io_len = lbas * cbe_lun->blocksize; bptrlen->len += lbas; for (i = 0, len_left = beio->io_len; len_left > 0; i++) { KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", i, CTLBLK_MAX_SEGS)); /* * Setup the S/G entry for this chunk. */ ctl_alloc_seg(softc, &beio->sg_segs[i], MIN(CTLBLK_MAX_SEG, len_left)); DPRINTF("segment %d addr %p len %zd\n", i, beio->sg_segs[i].addr, beio->sg_segs[i].len); /* Set up second segment for compare operation. 
*/ if (beio->two_sglists) { ctl_alloc_seg(softc, &beio->sg_segs[i + CTLBLK_HALF_SEGS], beio->sg_segs[i].len); } beio->num_segs++; len_left -= beio->sg_segs[i].len; } if (bptrlen->len < lbalen->len) beio->beio_cont = ctl_be_block_next; io->scsiio.be_move_done = ctl_be_block_move_done; /* For compare we have separate S/G lists for read and datamove. */ if (beio->two_sglists) io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; else io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; io->scsiio.kern_data_len = beio->io_len; io->scsiio.kern_sg_entries = beio->num_segs; io->scsiio.kern_data_ref = ctl_refcnt_beio; io->scsiio.kern_data_arg = beio; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; /* * For the read case, we need to read the data into our buffers and * then we can send it back to the user. For the write case, we * need to get the data from the user first. */ if (beio->bio_cmd == BIO_READ) { SDT_PROBE0(cbb, , read, alloc_done); be_lun->dispatch(be_lun, beio); } else { SDT_PROBE0(cbb, , write, alloc_done); ctl_datamove(io); } } static void ctl_be_block_worker(void *context, int pending) { struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context; struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; union ctl_io *io; struct ctl_be_block_io *beio; DPRINTF("entered\n"); /* * Fetch and process I/Os from all queues. If we detect LUN * CTL_LUN_FLAG_NO_MEDIA status here -- it is result of a race, * so make response maximally opaque to not confuse initiator. */ for (;;) { mtx_lock(&be_lun->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); if (io != NULL) { DPRINTF("datamove queue\n"); STAILQ_REMOVE_HEAD(&be_lun->datamove_queue, links); mtx_unlock(&be_lun->queue_lock); beio = (struct ctl_be_block_io *)PRIV(io)->ptr; if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_complete_beio(beio); continue; } be_lun->dispatch(be_lun, beio); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); if (io != NULL) { DPRINTF("config write queue\n"); STAILQ_REMOVE_HEAD(&be_lun->config_write_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_write_done(io); continue; } ctl_be_block_cw_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue); if (io != NULL) { DPRINTF("config read queue\n"); STAILQ_REMOVE_HEAD(&be_lun->config_read_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_config_read_done(io); continue; } ctl_be_block_cr_dispatch(be_lun, io); continue; } io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); if (io != NULL) { DPRINTF("input queue\n"); STAILQ_REMOVE_HEAD(&be_lun->input_queue, links); mtx_unlock(&be_lun->queue_lock); if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { ctl_set_busy(&io->scsiio); ctl_data_submit_done(io); continue; } ctl_be_block_dispatch(be_lun, io); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. */ mtx_unlock(&be_lun->queue_lock); break; } } /* * Entry point from CTL to the backend for I/O. We queue everything to a * work thread, so this just puts the I/O on a queue and wakes up the * thread. 
*/ static int ctl_be_block_submit(union ctl_io *io) { struct ctl_be_block_lun *be_lun; DPRINTF("entered\n"); be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type)); PRIV(io)->len = 0; mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (CTL_RETVAL_COMPLETE); } static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_block_softc *softc = &backend_block_softc; int error; error = 0; switch (cmd) { case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: error = ctl_be_block_create(softc, lun_req); break; case CTL_LUNREQ_RM: error = ctl_be_block_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: error = ctl_be_block_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "invalid LUN request type %d", lun_req->reqtype); break; } break; } default: error = ENOTTY; break; } return (error); } static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_filedata *file_data; struct ctl_lun_create_params *params; const char *value; struct vattr vattr; off_t ps, pss, po, pos, us, uss, uo, uos; int error; cbe_lun = &be_lun->cbe_lun; file_data = &be_lun->backend.file; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_FILE; be_lun->dispatch = ctl_be_block_dispatch_file; be_lun->lun_flush = ctl_be_block_flush_file; be_lun->get_lba_status = ctl_be_block_gls_file; be_lun->getattr = ctl_be_block_getattr_file; be_lun->unmap = NULL; cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); if (error != 0) { snprintf(req->error_str, sizeof(req->error_str), "error calling VOP_GETATTR() for file %s", be_lun->dev_path); return (error); } file_data->cred = crhold(curthread->td_ucred); if (params->lun_size_bytes != 0) be_lun->size_bytes = params->lun_size_bytes; else be_lun->size_bytes = vattr.va_size; /* * For files we can use any logical block size. Prefer 512 bytes * for compatibility reasons. If file's vattr.va_blocksize * (preferred I/O block size) is bigger and multiple to chosen * logical block size -- report it as physical block size. */ if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 
0 : (be_lun->size_blocks - 1); us = ps = vattr.va_blocksize; uo = po = 0; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) ctl_expand_number(value, &ps); value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); if (value != NULL) ctl_expand_number(value, &us); value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } /* * Sanity check. The media size has to be at least one * sector long. */ if (be_lun->size_bytes < cbe_lun->blocksize) { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "file %s size %ju < block size %u", be_lun->dev_path, (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize); } cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize; return (error); } static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct ctl_lun_create_params *params; struct cdevsw *csw; struct cdev *dev; const char *value; int error, atomic, maxio, ref, unmap, tmp; off_t ps, pss, po, pos, us, uss, uo, uos, otmp; params = &be_lun->params; be_lun->dev_type = CTL_BE_BLOCK_DEV; csw = devvn_refthread(be_lun->vn, &dev, &ref); if (csw == NULL) return (ENXIO); if (strcmp(csw->d_name, "zvol") == 0) { be_lun->dispatch = ctl_be_block_dispatch_zvol; be_lun->get_lba_status = ctl_be_block_gls_zvol; atomic = maxio = CTLBLK_MAX_IO_SIZE; } else { be_lun->dispatch = ctl_be_block_dispatch_dev; be_lun->get_lba_status = NULL; atomic = 0; maxio = dev->si_iosize_max; if (maxio <= 0) maxio = DFLTPHYS; if (maxio > CTLBLK_MAX_SEG) maxio = CTLBLK_MAX_SEG; } be_lun->lun_flush = ctl_be_block_flush_dev; be_lun->getattr = ctl_be_block_getattr_dev; be_lun->unmap = ctl_be_block_unmap_dev; if (!csw->d_ioctl) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "no d_ioctl for device %s!", be_lun->dev_path); return (ENODEV); } error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGSECTORSIZE ioctl " "on %s!", error, be_lun->dev_path); return (error); } /* * If the user has asked for a blocksize that is greater than the * backing device's blocksize, we can do it only if the blocksize * the user is asking for is an even multiple of the underlying * device's blocksize. 
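Both open paths derive the physical block exponent and offset from the pblocksize/pblockoffset options using the sanity checks shown above (whole multiple of the logical block size, power of two, offset not larger than the size). The following is a standalone sketch of that conversion under the same checks; derive_pblock() and struct pblock are invented names for this example.

/*
 * Illustrative sketch only: turn a physical block size and offset given in
 * bytes into an exponent and offset expressed in logical blocks.
 */
#include <stdio.h>
#include <stdint.h>

struct pblock {
	unsigned exp;	/* physical blocks are (1 << exp) logical blocks */
	unsigned off;	/* offset of the first aligned LBA, in logical blocks */
};

static int
derive_pblock(uint64_t ps, uint64_t po, uint32_t blocksize, struct pblock *out)
{
	uint64_t pss = ps / blocksize;	/* physical block size in LBAs */
	uint64_t pos = po / blocksize;	/* physical block offset in LBAs */
	unsigned exp = 0;

	/* Reject values that are not whole, power-of-two LBA multiples. */
	if (pss == 0 || pss * blocksize != ps || pos * blocksize != po ||
	    pss < pos || (pss & (pss - 1)) != 0)
		return (-1);
	while ((1ULL << exp) < pss)
		exp++;
	out->exp = exp;
	out->off = (pss - pos) % pss;
	return (0);
}

int
main(void)
{
	struct pblock pb;

	/* 4096-byte physical sectors on a 512-byte logical block LUN. */
	if (derive_pblock(4096, 0, 512, &pb) == 0)
		printf("exp %u off %u\n", pb.exp, pb.off);	/* exp 3 off 0 */
	return (0);
}
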
*/ if ((params->blocksize_bytes != 0) && (params->blocksize_bytes >= tmp)) { if (params->blocksize_bytes % tmp == 0) { cbe_lun->blocksize = params->blocksize_bytes; } else { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u is not an even " "multiple of backing device blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } } else if (params->blocksize_bytes != 0) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested blocksize %u < backing device " "blocksize %u", params->blocksize_bytes, tmp); return (EINVAL); } else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = MAX(tmp, 2048); else cbe_lun->blocksize = tmp; error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD, curthread); if (error) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "error %d returned for DIOCGMEDIASIZE " " ioctl on %s!", error, be_lun->dev_path); return (error); } if (params->lun_size_bytes != 0) { if (params->lun_size_bytes > otmp) { dev_relthread(dev, ref); snprintf(req->error_str, sizeof(req->error_str), "requested LUN size %ju > backing device " "size %ju", (uintmax_t)params->lun_size_bytes, (uintmax_t)otmp); return (EINVAL); } be_lun->size_bytes = params->lun_size_bytes; } else be_lun->size_bytes = otmp; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD, curthread); if (error) ps = po = 0; else { error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po, FREAD, curthread); if (error) po = 0; } us = ps; uo = po; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) ctl_expand_number(value, &ps); value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); if (value != NULL) ctl_expand_number(value, &po); pss = ps / cbe_lun->blocksize; pos = po / cbe_lun->blocksize; if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { cbe_lun->pblockexp = fls(pss) - 1; cbe_lun->pblockoff = (pss - pos) % pss; } value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); if (value != NULL) ctl_expand_number(value, &us); value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); if (value != NULL) ctl_expand_number(value, &uo); uss = us / cbe_lun->blocksize; uos = uo / cbe_lun->blocksize; if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { cbe_lun->ublockexp = fls(uss) - 1; cbe_lun->ublockoff = (uss - uos) % uss; } cbe_lun->atomicblock = atomic / cbe_lun->blocksize; cbe_lun->opttxferlen = maxio / cbe_lun->blocksize; if (be_lun->dispatch == ctl_be_block_dispatch_zvol) { unmap = 1; } else { struct diocgattr_arg arg; strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name)); arg.len = sizeof(arg.value.i); error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, curthread); unmap = (error == 0) ? 
arg.value.i : 0; } value = dnvlist_get_string(cbe_lun->options, "unmap", NULL); if (value != NULL) unmap = (strcmp(value, "on") == 0); if (unmap) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; else cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; dev_relthread(dev, ref); return (0); } static int ctl_be_block_close(struct ctl_be_block_lun *be_lun) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; int flags; if (be_lun->vn) { flags = FREAD; if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0) flags |= FWRITE; (void)vn_close(be_lun->vn, flags, NOCRED, curthread); be_lun->vn = NULL; switch (be_lun->dev_type) { case CTL_BE_BLOCK_DEV: break; case CTL_BE_BLOCK_FILE: if (be_lun->backend.file.cred != NULL) { crfree(be_lun->backend.file.cred); be_lun->backend.file.cred = NULL; } break; case CTL_BE_BLOCK_NONE: break; default: panic("Unexpected backend type %d", be_lun->dev_type); break; } be_lun->dev_type = CTL_BE_BLOCK_NONE; } return (0); } static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; struct nameidata nd; const char *value; int error, flags; error = 0; if (rootvnode == NULL) { snprintf(req->error_str, sizeof(req->error_str), "Root filesystem is not mounted"); return (1); } pwd_ensure_dirs(); value = dnvlist_get_string(cbe_lun->options, "file", NULL); if (value == NULL) { snprintf(req->error_str, sizeof(req->error_str), "no file argument specified"); return (1); } free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = strdup(value, M_CTLBLK); flags = FREAD; value = dnvlist_get_string(cbe_lun->options, "readonly", NULL); if (value != NULL) { if (strcmp(value, "on") != 0) flags |= FWRITE; } else if (cbe_lun->lun_type == T_DIRECT) flags |= FWRITE; again: NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); error = vn_open(&nd, &flags, 0, NULL); if ((error == EROFS || error == EACCES) && (flags & FWRITE)) { flags &= ~FWRITE; goto again; } if (error) { /* * This is the only reasonable guess we can make as far as * path if the user doesn't give us a fully qualified path. * If they want to specify a file, they need to specify the * full path. */ if (be_lun->dev_path[0] != '/') { char *dev_name; asprintf(&dev_name, M_CTLBLK, "/dev/%s", be_lun->dev_path); free(be_lun->dev_path, M_CTLBLK); be_lun->dev_path = dev_name; goto again; } snprintf(req->error_str, sizeof(req->error_str), "error opening %s: %d", be_lun->dev_path, error); return (error); } if (flags & FWRITE) cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY; else cbe_lun->flags |= CTL_LUN_FLAG_READONLY; NDFREE(&nd, NDF_ONLY_PNBUF); be_lun->vn = nd.ni_vp; /* We only support disks and files. 
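The open logic above retries without write access when the backing store is read-only or access is denied, and retries a bare name under /dev/ before giving up. A minimal userspace analogue of that retry strategy, using open(2) instead of vn_open(); open_backing() is an invented helper, not a CTL interface.

/*
 * Illustrative sketch only: fall back to read-only on EROFS/EACCES and
 * retry bare names under /dev/, as the kernel open path above does.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
open_backing(const char *path, int *rdonly)
{
	char devpath[64];
	int flags = O_RDWR;
	int fd;

	fd = open(path, flags);
	if (fd < 0 && (errno == EROFS || errno == EACCES)) {
		flags = O_RDONLY;			/* retry read-only */
		fd = open(path, flags);
	}
	if (fd < 0 && path[0] != '/') {
		/* Bare names are guessed to live under /dev, as above. */
		snprintf(devpath, sizeof(devpath), "/dev/%s", path);
		return (open_backing(devpath, rdonly));
	}
	if (fd >= 0)
		*rdonly = (flags == O_RDONLY);
	return (fd);
}

int
main(int argc, char **argv)
{
	int rdonly, fd;

	fd = open_backing(argc > 1 ? argv[1] : "md0", &rdonly);
	if (fd < 0) {
		perror("open_backing");
		return (1);
	}
	printf("opened %s\n", rdonly ? "read-only" : "read-write");
	close(fd);
	return (0);
}
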
*/ if (vn_isdisk_error(be_lun->vn, &error)) { error = ctl_be_block_open_dev(be_lun, req); } else if (be_lun->vn->v_type == VREG) { error = ctl_be_block_open_file(be_lun, req); } else { error = EINVAL; snprintf(req->error_str, sizeof(req->error_str), "%s is not a disk or plain file", be_lun->dev_path); } VOP_UNLOCK(be_lun->vn); if (error != 0) ctl_be_block_close(be_lun); cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (be_lun->dispatch != ctl_be_block_dispatch_dev) - cbe_lun->serseq = CTL_LUN_SERSEQ_READ; + cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT; value = dnvlist_get_string(cbe_lun->options, "serseq", NULL); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; + else if (value != NULL && strcmp(value, "soft") == 0) + cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; return (0); } static int ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_be_lun *cbe_lun; struct ctl_be_block_lun *be_lun; struct ctl_lun_create_params *params; char num_thread_str[16]; char tmpstr[32]; const char *value; int retval, num_threads; int tmp_num_threads; params = &req->reqdata.create; retval = 0; req->status = CTL_LUN_OK; be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; be_lun->params = req->reqdata.create; be_lun->softc = softc; STAILQ_INIT(&be_lun->input_queue); STAILQ_INIT(&be_lun->config_read_queue); STAILQ_INIT(&be_lun->config_write_queue); STAILQ_INIT(&be_lun->datamove_queue); mtx_init(&be_lun->io_lock, "ctlblock io", NULL, MTX_DEF); mtx_init(&be_lun->queue_lock, "ctlblock queue", NULL, MTX_DEF); cbe_lun->options = nvlist_clone(req->args_nvl); if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = 0; cbe_lun->flags = 0; value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { be_lun->size_bytes = params->lun_size_bytes; if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 0 : (be_lun->size_blocks - 1); if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { retval = ctl_be_block_open(be_lun, req); if (retval != 0) { retval = 0; req->status = CTL_LUN_WARNING; } } num_threads = cbb_num_threads; } else { num_threads = 1; } value = dnvlist_get_string(cbe_lun->options, "num_threads", NULL); if (value != NULL) { tmp_num_threads = strtol(value, NULL, 0); /* * We don't let the user specify less than one * thread, but hope he's clueful enough not to * specify 1000 threads. 
*/ if (tmp_num_threads < 1) { snprintf(req->error_str, sizeof(req->error_str), "invalid number of threads %s", num_thread_str); goto bailout_error; } num_threads = tmp_num_threads; } if (be_lun->vn == NULL) cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; /* Tell the user the blocksize we ended up using */ params->lun_size_bytes = be_lun->size_bytes; params->blocksize_bytes = cbe_lun->blocksize; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown; cbe_lun->be = &ctl_be_block_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); be_lun->io_taskqueue = taskqueue_create("ctlblocktq", M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "unable to create taskqueue"); goto bailout_error; } /* * Note that we start the same number of threads by default for * both the file case and the block device case. For the file * case, we need multiple threads to allow concurrency, because the * vnode interface is designed to be a blocking interface. For the * block device case, ZFS zvols at least will block the caller's * context in many instances, and so we need multiple threads to * overcome that problem. Other block devices don't need as many * threads, but they shouldn't cause too many problems. * * If the user wants to just have a single thread for a block * device, he can specify that when the LUN is created, or change * the tunable/sysctl to alter the default number of threads. 
*/ retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue, /*num threads*/num_threads, /*priority*/PUSER, /*proc*/control_softc->ctl_proc, /*thread name*/"block"); if (retval != 0) goto bailout_error; be_lun->num_threads = num_threads; retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "ctl_add_lun() returned error %d, see dmesg for " "details", retval); retval = 0; goto bailout_error; } be_lun->disk_stats = devstat_new_entry("cbb", cbe_lun->lun_id, cbe_lun->blocksize, DEVSTAT_ALL_SUPPORTED, cbe_lun->lun_type | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); mtx_lock(&softc->lock); softc->num_luns++; SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); params->req_lun_id = cbe_lun->lun_id; return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); ctl_be_block_close(be_lun); if (be_lun->dev_path != NULL) free(be_lun->dev_path, M_CTLBLK); nvlist_destroy(cbe_lun->options); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); free(be_lun, M_CTLBLK); return (retval); } static int ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_rm_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; params = &req->reqdata.rm; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) { SLIST_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); softc->num_luns--; break; } } mtx_unlock(&softc->lock); sx_xunlock(&softc->modify_lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); ctl_be_block_close(be_lun); } mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_remove_lun(cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "error %d returned from ctl_remove_lun() for " "LUN %d", retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblockrm", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; if (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { mtx_unlock(&softc->lock); free(be_lun, M_CTLBLK); } else { mtx_unlock(&softc->lock); return (EINTR); } req->status = CTL_LUN_OK; return (0); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) { struct ctl_lun_modify_params *params; struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; const char *value; uint64_t oldsize; int error, wasprim; params = &req->reqdata.modify; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "LUN %u is not managed by the block backend", params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; 
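The removal path above raises CTL_BE_BLOCK_LUN_WAITING before asking CTL to remove the LUN, then sleeps until the LUN's shutdown callback marks it UNCONFIGURED; whichever side runs last frees the structure. Below is a minimal userspace sketch of that handshake, assuming a pthreads condition variable in place of msleep()/wakeup(); all names in it are invented for illustration.

/*
 * Illustrative sketch only: WAITING/UNCONFIGURED teardown handshake.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct lun {
	pthread_mutex_t lock;
	pthread_cond_t cv;
	bool waiting;		/* a remover is blocked on the handshake */
	bool unconfigured;	/* shutdown has finished tearing down */
};

/* Final teardown; may run from another thread once removal is initiated. */
static void *
lun_shutdown(void *arg)
{
	struct lun *l = arg;

	pthread_mutex_lock(&l->lock);
	l->unconfigured = true;
	if (l->waiting) {
		pthread_cond_signal(&l->cv);	/* the remover frees the LUN */
		pthread_mutex_unlock(&l->lock);
	} else {
		pthread_mutex_unlock(&l->lock);
		free(l);			/* nobody waits; free it here */
	}
	return (NULL);
}

/* Removal path: raise the waiting flag first, then sleep until torn down. */
static void
lun_remove(struct lun *l)
{
	pthread_t t;

	pthread_mutex_lock(&l->lock);
	l->waiting = true;			/* set before shutdown can run */
	pthread_mutex_unlock(&l->lock);

	pthread_create(&t, NULL, lun_shutdown, l);	/* ~ ctl_remove_lun() */

	pthread_mutex_lock(&l->lock);
	while (!l->unconfigured)
		pthread_cond_wait(&l->cv, &l->lock);
	l->waiting = false;
	pthread_mutex_unlock(&l->lock);
	pthread_join(t, NULL);
	free(l);
}

int
main(void)
{
	struct lun *l = calloc(1, sizeof(*l));

	pthread_mutex_init(&l->lock, NULL);
	pthread_cond_init(&l->cv, NULL);
	lun_remove(l);				/* returns after teardown */
	printf("LUN removed\n");
	return (0);
}
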
if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; if (req->args_nvl != NULL) { nvlist_destroy(cbe_lun->options); cbe_lun->options = nvlist_clone(req->args_nvl); } wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } oldsize = be_lun->size_blocks; if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { if (be_lun->vn == NULL) error = ctl_be_block_open(be_lun, req); else if (vn_isdisk_error(be_lun->vn, &error)) error = ctl_be_block_open_dev(be_lun, req); else if (be_lun->vn->v_type == VREG) { vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); error = ctl_be_block_open_file(be_lun, req); VOP_UNLOCK(be_lun->vn); } else error = EINVAL; if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) && be_lun->vn != NULL) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 && be_lun->vn == NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; } else { if (be_lun->vn != NULL) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); taskqueue_drain_all(be_lun->io_taskqueue); error = ctl_be_block_close(be_lun); } else error = 0; } if (be_lun->size_blocks != oldsize) ctl_lun_capacity_changed(cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; sx_xunlock(&softc->modify_lock); req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; return (0); bailout_error: sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_ERROR; return (0); } static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun) { struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)cbe_lun; struct ctl_be_block_softc *softc = be_lun->softc; taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); if (be_lun->disk_stats != NULL) devstat_remove_entry(be_lun->disk_stats); nvlist_destroy(be_lun->cbe_lun.options); free(be_lun->dev_path, M_CTLBLK); mtx_destroy(&be_lun->queue_lock); mtx_destroy(&be_lun->io_lock); mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING) wakeup(be_lun); else free(be_lun, M_CTLBLK); mtx_unlock(&softc->lock); } static int ctl_be_block_config_write(union ctl_io *io) { struct ctl_be_block_lun *be_lun; struct ctl_be_lun *cbe_lun; int retval; DPRINTF("entered\n"); cbe_lun = CTL_BACKEND_LUN(io); be_lun = (struct ctl_be_block_lun *)cbe_lun; retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: case WRITE_SAME_10: case WRITE_SAME_16: case UNMAP: /* * The upper level CTL code will filter out any CDBs with * the immediate bit set and return the proper error. * * We don't really need to worry about what LBA range the * user asked to be synced out. When they issue a sync * cache command, we'll sync out the whole thing. 
*/ mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; struct ctl_lun_req req; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) { retval = ctl_be_block_open(be_lun, &req); cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; if (retval == 0) { cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; ctl_lun_has_media(cbe_lun); } else { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; ctl_lun_no_media(cbe_lun); } } ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) { cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; cbe_lun->flags |= CTL_LUN_FLAG_EJECTED; ctl_lun_ejected(cbe_lun); if (be_lun->vn != NULL) ctl_be_block_close(be_lun); } } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_config_read(union ctl_io *io) { struct ctl_be_block_lun *be_lun; int retval = 0; DPRINTF("entered\n"); be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->config_read_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); retval = CTL_RETVAL_QUEUED; break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb) { struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; int retval; retval = sbuf_printf(sb, "\t"); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "%d", lun->num_threads); if (retval != 0) goto bailout; retval = sbuf_printf(sb, "\n"); bailout: return (retval); } static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname) { struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; if (lun->getattr == NULL) return (UINT64_MAX); return (lun->getattr(lun, attrname)); } static int ctl_be_block_init(void) { struct ctl_be_block_softc *softc = &backend_block_softc; sx_init(&softc->modify_lock, "ctlblock modify"); mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); SLIST_INIT(&softc->lun_list); return (0); } static int ctl_be_block_shutdown(void) { struct ctl_be_block_softc *softc = &backend_block_softc; struct ctl_be_block_lun *lun; mtx_lock(&softc->lock); while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) { 
SLIST_REMOVE_HEAD(&softc->lun_list, links); softc->num_luns--; /* * Drop our lock here. Since ctl_remove_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. */ mtx_unlock(&softc->lock); ctl_remove_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); uma_zdestroy(softc->bufmin_zone); if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG) uma_zdestroy(softc->bufmax_zone); uma_zdestroy(softc->beio_zone); mtx_destroy(&softc->lock); sx_destroy(&softc->modify_lock); return (0); } diff --git a/sys/cam/ctl/ctl_backend_ramdisk.c b/sys/cam/ctl/ctl_backend_ramdisk.c index e67d699bda70..6febdd469bdd 100644 --- a/sys/cam/ctl/ctl_backend_ramdisk.c +++ b/sys/cam/ctl/ctl_backend_ramdisk.c @@ -1,1239 +1,1242 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2003, 2008 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2014-2017 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $ */ /* * CAM Target Layer black hole and RAM disk backend. * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PRIV(io) \ ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) #define ARGS(io) \ ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) #define PPP (PAGE_SIZE / sizeof(uint8_t **)) #ifdef __LP64__ #define PPPS (PAGE_SHIFT - 3) #else #define PPPS (PAGE_SHIFT - 2) #endif #define SGPP (PAGE_SIZE / sizeof(struct ctl_sg_entry)) #define P_UNMAPPED NULL /* Page is unmapped. */ #define P_ANCHORED ((void *)(uintptr_t)1) /* Page is anchored. */ typedef enum { GP_READ, /* Return data page or zero page. 
*/ GP_WRITE, /* Return data page, try allocate if none. */ GP_ANCHOR, /* Return data page, try anchor if none. */ GP_OTHER, /* Return what present, do not allocate/anchor. */ } getpage_op_t; typedef enum { CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, CTL_BE_RAMDISK_LUN_WAITING = 0x04 } ctl_be_ramdisk_lun_flags; struct ctl_be_ramdisk_lun { struct ctl_be_lun cbe_lun; /* Must be first element. */ struct ctl_lun_create_params params; int indir; uint8_t **pages; uint8_t *zero_page; struct sx page_lock; u_int pblocksize; u_int pblockmul; uint64_t size_bytes; uint64_t size_blocks; uint64_t cap_bytes; uint64_t cap_used; struct ctl_be_ramdisk_softc *softc; ctl_be_ramdisk_lun_flags flags; SLIST_ENTRY(ctl_be_ramdisk_lun) links; struct taskqueue *io_taskqueue; struct task io_task; STAILQ_HEAD(, ctl_io_hdr) cont_queue; struct mtx_padalign queue_lock; }; struct ctl_be_ramdisk_softc { struct sx modify_lock; struct mtx lock; int num_luns; SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list; }; static struct ctl_be_ramdisk_softc rd_softc; extern struct ctl_softc *control_softc; static int ctl_backend_ramdisk_init(void); static int ctl_backend_ramdisk_shutdown(void); static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr); static void ctl_backend_ramdisk_compare(union ctl_io *io); static void ctl_backend_ramdisk_rw(union ctl_io *io); static int ctl_backend_ramdisk_submit(union ctl_io *io); static void ctl_backend_ramdisk_worker(void *context, int pending); static int ctl_backend_ramdisk_config_read(union ctl_io *io); static int ctl_backend_ramdisk_config_write(union ctl_io *io); static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname); static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req); static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun); static struct ctl_backend_driver ctl_be_ramdisk_driver = { .name = "ramdisk", .flags = CTL_BE_FLAG_HAS_CONFIG, .init = ctl_backend_ramdisk_init, .shutdown = ctl_backend_ramdisk_shutdown, .data_submit = ctl_backend_ramdisk_submit, .config_read = ctl_backend_ramdisk_config_read, .config_write = ctl_backend_ramdisk_config_write, .ioctl = ctl_backend_ramdisk_ioctl, .lun_attr = ctl_backend_ramdisk_lun_attr, }; MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk"); CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver); static int ctl_backend_ramdisk_init(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; memset(softc, 0, sizeof(*softc)); sx_init(&softc->modify_lock, "ctlrammod"); mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF); SLIST_INIT(&softc->lun_list); return (0); } static int ctl_backend_ramdisk_shutdown(void) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_be_ramdisk_lun *lun; mtx_lock(&softc->lock); while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) { SLIST_REMOVE_HEAD(&softc->lun_list, links); softc->num_luns--; /* * Drop our lock here. Since ctl_remove_lun() can call * back into us, this could potentially lead to a recursive * lock of the same mutex, which would cause a hang. 
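The shutdown loop above deliberately pops one LUN, drops the softc lock, calls out, and relocks, because the callout can re-enter code that takes the same lock. Here is a small standalone sketch of that pop-drop-call-relock pattern; the list and destroy_item() callback are invented for this example.

/*
 * Illustrative sketch only: drain a list when the per-item callout may
 * (indirectly) take the list lock again, so it must be called unlocked.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	struct item *next;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

static void
destroy_item(struct item *it)
{
	printf("destroying item %d\n", it->id);
	free(it);
}

static void
drain_list(void)
{
	struct item *it;

	pthread_mutex_lock(&list_lock);
	while ((it = head) != NULL) {
		head = it->next;		/* unlink while still locked */
		pthread_mutex_unlock(&list_lock);
		destroy_item(it);		/* call out with lock dropped */
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int
main(void)
{
	for (int id = 0; id < 3; id++) {
		struct item *it = malloc(sizeof(*it));

		it->id = id;
		it->next = head;
		head = it;
	}
	drain_list();
	return (0);
}
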
*/ mtx_unlock(&softc->lock); ctl_remove_lun(&lun->cbe_lun); mtx_lock(&softc->lock); } mtx_unlock(&softc->lock); mtx_destroy(&softc->lock); sx_destroy(&softc->modify_lock); return (0); } static uint8_t * ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn, getpage_op_t op) { uint8_t **p, ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) { switch (op) { case GP_READ: return (be_lun->zero_page); case GP_WRITE: return ((uint8_t *)be_lun->pages); case GP_ANCHOR: return (P_ANCHORED); default: return (P_UNMAPPED); } } if (op == GP_WRITE || op == GP_ANCHOR) { sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) { *pp = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK|M_ZERO); } i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { if (op == GP_WRITE) { *pp = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); } else *pp = P_ANCHORED; be_lun->cap_used += be_lun->pblocksize; } else if (*pp == P_ANCHORED && op == GP_WRITE) { *pp = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); } sx_xunlock(&be_lun->page_lock); return ((uint8_t *)*pp); } else { sx_slock(&be_lun->page_lock); p = be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (p == NULL) break; i = pn >> s; p = (uint8_t **)p[i]; pn -= i << s; } sx_sunlock(&be_lun->page_lock); if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ) return (be_lun->zero_page); return ((uint8_t *)p); } }; static void ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) { uint8_t ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) return; sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) goto noindir; i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_ANCHORED) { be_lun->cap_used -= be_lun->pblocksize; *pp = P_UNMAPPED; } else if (*pp != P_UNMAPPED) { free(*pp, M_RAMDISK); be_lun->cap_used -= be_lun->pblocksize; *pp = P_UNMAPPED; } noindir: sx_xunlock(&be_lun->page_lock); }; static void ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn) { uint8_t ***pp; off_t i; int s; if (be_lun->cap_bytes == 0) return; sx_xlock(&be_lun->page_lock); pp = &be_lun->pages; for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) { if (*pp == NULL) goto noindir; i = pn >> s; pp = (uint8_t ***)&(*pp)[i]; pn -= i << s; } if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) { be_lun->cap_used += be_lun->pblocksize; *pp = P_ANCHORED; } else if (*pp != P_ANCHORED) { free(*pp, M_RAMDISK); *pp = P_ANCHORED; } noindir: sx_xunlock(&be_lun->page_lock); }; static void ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir) { int i; if (p == NULL) return; if (indir == 0) { free(p, M_RAMDISK); return; } for (i = 0; i < PPP; i++) { if (p[i] == NULL) continue; ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1); } free(p, M_RAMDISK); }; static size_t cmp(uint8_t *a, uint8_t *b, size_t size) { size_t i; for (i = 0; i < size; i++) { if (a[i] != b[i]) break; } return (i); } static int ctl_backend_ramdisk_cmp(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint8_t *page; uint8_t info[8]; uint64_t lba; u_int lbaoff, lbas, res, off; lbas = io->scsiio.kern_data_len / cbe_lun->blocksize; lba = ARGS(io)->lba + PRIV(io)->len - lbas; off = 0; for (; lbas > 0; lbas--, 
lba++) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_READ); lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; res = cmp(io->scsiio.kern_data_ptr + off, page, cbe_lun->blocksize); off += res; if (res < cbe_lun->blocksize) break; } if (lbas > 0) { off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len; scsi_u64to8b(off, info); ctl_set_sense(&io->scsiio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_MISCOMPARE, /*asc*/ 0x1D, /*ascq*/ 0x00, /*type*/ SSD_ELEM_INFO, /*size*/ sizeof(info), /*data*/ &info, /*type*/ SSD_ELEM_NONE); return (1); } return (0); } static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io); CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n")); if (io->scsiio.kern_sg_entries > 0) free(io->scsiio.kern_data_ptr, M_RAMDISK); io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) { if (ARGS(io)->flags & CTL_LLF_COMPARE) { /* We have data block ready for comparison. */ if (ctl_backend_ramdisk_cmp(io)) goto done; } if (ARGS(io)->len > PRIV(io)->len) { mtx_lock(&be_lun->queue_lock); STAILQ_INSERT_TAIL(&be_lun->cont_queue, &io->io_hdr, links); mtx_unlock(&be_lun->queue_lock); taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); return (0); } ctl_set_success(&io->scsiio); } done: ctl_data_submit_done(io); return(0); } static void ctl_backend_ramdisk_compare(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); u_int lbas, len; lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, 131072 / cbe_lun->blocksize); len = lbas * cbe_lun->blocksize; io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK); io->scsiio.kern_data_len = len; io->scsiio.kern_sg_entries = 0; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; ctl_datamove(io); } static void ctl_backend_ramdisk_rw(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_sg_entry *sg_entries; uint8_t *page; uint64_t lba; u_int i, len, lbaoff, lbas, sgs, off; getpage_op_t op; lba = ARGS(io)->lba + PRIV(io)->len; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); lbas = ARGS(io)->len - PRIV(io)->len; lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff); sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp; off = lbaoff * cbe_lun->blocksize; op = (ARGS(io)->flags & CTL_LLF_WRITE) ? 
GP_WRITE : GP_READ; if (sgs > 1) { io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) * sgs, M_RAMDISK, M_WAITOK); sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; len = lbas * cbe_lun->blocksize; for (i = 0; i < sgs; i++) { page = ctl_backend_ramdisk_getpage(be_lun, (lba >> cbe_lun->pblockexp) + i, op); if (page == P_UNMAPPED || page == P_ANCHORED) { free(io->scsiio.kern_data_ptr, M_RAMDISK); nospc: ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } sg_entries[i].addr = page + off; sg_entries[i].len = MIN(len, be_lun->pblocksize - off); len -= sg_entries[i].len; off = 0; } } else { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, op); if (page == P_UNMAPPED || page == P_ANCHORED) goto nospc; sgs = 0; io->scsiio.kern_data_ptr = page + off; } io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; io->scsiio.kern_data_len = lbas * cbe_lun->blocksize; io->scsiio.kern_sg_entries = sgs; io->io_hdr.flags |= CTL_FLAG_ALLOCATED; PRIV(io)->len += lbas; if ((ARGS(io)->flags & CTL_LLF_READ) && ARGS(io)->len <= PRIV(io)->len) { ctl_set_success(&io->scsiio); - ctl_serseq_done(io); + if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT) + ctl_serseq_done(io); } ctl_datamove(io); } static int ctl_backend_ramdisk_submit(union ctl_io *io) { struct ctl_lba_len_flags *lbalen = ARGS(io); if (lbalen->flags & CTL_LLF_VERIFY) { ctl_set_success(&io->scsiio); ctl_data_submit_done(io); return (CTL_RETVAL_COMPLETE); } PRIV(io)->len = 0; if (lbalen->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); return (CTL_RETVAL_COMPLETE); } static void ctl_backend_ramdisk_worker(void *context, int pending) { struct ctl_be_ramdisk_lun *be_lun; union ctl_io *io; be_lun = (struct ctl_be_ramdisk_lun *)context; mtx_lock(&be_lun->queue_lock); for (;;) { io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links); mtx_unlock(&be_lun->queue_lock); if (ARGS(io)->flags & CTL_LLF_COMPARE) ctl_backend_ramdisk_compare(io); else ctl_backend_ramdisk_rw(io); mtx_lock(&be_lun->queue_lock); continue; } /* * If we get here, there is no work left in the queues, so * just break out and let the task queue go to sleep. 
*/ break; } mtx_unlock(&be_lun->queue_lock); } static int ctl_backend_ramdisk_gls(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct scsi_get_lba_status_data *data; uint8_t *page; u_int lbaoff; data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr); lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp); scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length); page = ctl_backend_ramdisk_getpage(be_lun, ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER); if (page == P_UNMAPPED) data->descr[0].status = 1; else if (page == P_ANCHORED) data->descr[0].status = 2; else data->descr[0].status = 0; ctl_config_read_done(io); return (CTL_RETVAL_COMPLETE); } static int ctl_backend_ramdisk_config_read(union ctl_io *io) { int retval = 0; switch (io->scsiio.cdb[0]) { case SERVICE_ACTION_IN: if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { retval = ctl_backend_ramdisk_gls(io); break; } ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_read_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static void ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len, int anchor) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint8_t *page; uint64_t p, lp; u_int lbaoff; getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER; /* Partially zero first partial page. */ p = lba >> cbe_lun->pblockexp; lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); if (lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, p, op); if (page != P_UNMAPPED && page != P_ANCHORED) { memset(page + lbaoff * cbe_lun->blocksize, 0, min(len, be_lun->pblockmul - lbaoff) * cbe_lun->blocksize); } p++; } /* Partially zero last partial page. */ lp = (lba + len) >> cbe_lun->pblockexp; lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp); if (p <= lp && lbaoff != 0) { page = ctl_backend_ramdisk_getpage(be_lun, lp, op); if (page != P_UNMAPPED && page != P_ANCHORED) memset(page, 0, lbaoff * cbe_lun->blocksize); } /* Delete remaining full pages. 
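The delete path above handles an arbitrary LBA range by zeroing the partial first and last backing pages and then deallocating only the whole pages in between. A standalone sketch of that head/middle/tail split follows; the constant and the zero_lbas()/unmap_page() callbacks are invented for this example.

/*
 * Illustrative sketch only: split an LBA range into a partial head page,
 * whole middle pages, and a partial tail page.
 */
#include <stdio.h>
#include <stdint.h>

#define PBLOCKEXP 3	/* 8 LBAs per backing page in this example */

static void
zero_lbas(uint64_t lba, uint64_t len)
{
	printf("zero   %ju..%ju\n", (uintmax_t)lba, (uintmax_t)(lba + len - 1));
}

static void
unmap_page(uint64_t page)
{
	printf("unmap  page %ju\n", (uintmax_t)page);
}

static void
delete_range(uint64_t lba, uint64_t len)
{
	uint64_t p = lba >> PBLOCKEXP;			/* first touched page */
	uint64_t lp = (lba + len) >> PBLOCKEXP;		/* first page past end */
	uint64_t off = lba & ((1 << PBLOCKEXP) - 1);

	if (off != 0) {					/* partial head page */
		uint64_t n = (1 << PBLOCKEXP) - off;

		zero_lbas(lba, n < len ? n : len);
		p++;
	}
	off = (lba + len) & ((1 << PBLOCKEXP) - 1);
	if (p <= lp && off != 0)			/* partial tail page */
		zero_lbas(lp << PBLOCKEXP, off);
	for (; p < lp; p++)				/* whole pages between */
		unmap_page(p);
}

int
main(void)
{
	delete_range(5, 20);	/* pages 0 and 3 partly zeroed, 1-2 unmapped */
	return (0);
}
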
*/ if (anchor) { for (; p < lp; p++) ctl_backend_ramdisk_anchorpage(be_lun, p); } else { for (; p < lp; p++) ctl_backend_ramdisk_unmappage(be_lun, p); } } static void ctl_backend_ramdisk_ws(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_lba_len_flags *lbalen = ARGS(io); uint8_t *page; uint64_t lba; u_int lbaoff, lbas; if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } if (lbalen->flags & SWS_UNMAP) { ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len, (lbalen->flags & SWS_ANCHOR) != 0); ctl_set_success(&io->scsiio); ctl_config_write_done(io); return; } for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) { page = ctl_backend_ramdisk_getpage(be_lun, lba >> cbe_lun->pblockexp, GP_WRITE); if (page == P_UNMAPPED || page == P_ANCHORED) { ctl_set_space_alloc_fail(&io->scsiio); ctl_data_submit_done(io); return; } lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp); page += lbaoff * cbe_lun->blocksize; if (lbalen->flags & SWS_NDOB) { memset(page, 0, cbe_lun->blocksize); } else { memcpy(page, io->scsiio.kern_data_ptr, cbe_lun->blocksize); } if (lbalen->flags & SWS_LBDATA) scsi_ulto4b(lba, page); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static void ctl_backend_ramdisk_unmap(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io); struct scsi_unmap_desc *buf, *end; if ((ptrlen->flags & ~SU_ANCHOR) != 0) { ctl_set_invalid_field(&io->scsiio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_config_write_done(io); return; } buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (; buf < end; buf++) { ctl_backend_ramdisk_delete(cbe_lun, scsi_8btou64(buf->lba), scsi_4btoul(buf->length), (ptrlen->flags & SU_ANCHOR) != 0); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); } static int ctl_backend_ramdisk_config_write(union ctl_io *io) { struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io); int retval = 0; switch (io->scsiio.cdb[0]) { case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: /* We have no cache to flush. 
*/ ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case START_STOP_UNIT: { struct scsi_start_stop_unit *cdb; cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; if ((cdb->how & SSS_PC_MASK) != 0) { ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } if (cdb->how & SSS_START) { if (cdb->how & SSS_LOEJ) ctl_lun_has_media(cbe_lun); ctl_start_lun(cbe_lun); } else { ctl_stop_lun(cbe_lun); if (cdb->how & SSS_LOEJ) ctl_lun_ejected(cbe_lun); } ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; } case PREVENT_ALLOW: ctl_set_success(&io->scsiio); ctl_config_write_done(io); break; case WRITE_SAME_10: case WRITE_SAME_16: ctl_backend_ramdisk_ws(io); break; case UNMAP: ctl_backend_ramdisk_unmap(io); break; default: ctl_set_invalid_opcode(&io->scsiio); ctl_config_write_done(io); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; uint64_t val; val = UINT64_MAX; if (be_lun->cap_bytes == 0) return (val); sx_slock(&be_lun->page_lock); if (strcmp(attrname, "blocksused") == 0) { val = be_lun->cap_used / be_lun->cbe_lun.blocksize; } else if (strcmp(attrname, "blocksavail") == 0) { val = (be_lun->cap_bytes - be_lun->cap_used) / be_lun->cbe_lun.blocksize; } sx_sunlock(&be_lun->page_lock); return (val); } static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_be_ramdisk_softc *softc = &rd_softc; struct ctl_lun_req *lun_req; int retval; retval = 0; switch (cmd) { case CTL_LUN_REQ: lun_req = (struct ctl_lun_req *)addr; switch (lun_req->reqtype) { case CTL_LUNREQ_CREATE: retval = ctl_backend_ramdisk_create(softc, lun_req); break; case CTL_LUNREQ_RM: retval = ctl_backend_ramdisk_rm(softc, lun_req); break; case CTL_LUNREQ_MODIFY: retval = ctl_backend_ramdisk_modify(softc, lun_req); break; default: lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "%s: invalid LUN request type %d", __func__, lun_req->reqtype); break; } break; default: retval = ENOTTY; break; } return (retval); } static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_lun_rm_params *params; int retval; params = &req->reqdata.rm; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) { SLIST_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, links); softc->num_luns--; break; } } mtx_unlock(&softc->lock); sx_xunlock(&softc->modify_lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } /* * Set the waiting flag before we invalidate the LUN. Our shutdown * routine can be called any time after we invalidate the LUN, * and can be called from our context. * * This tells the shutdown routine that we're waiting, or we're * going to wait for the shutdown to happen. 
*/ mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); retval = ctl_remove_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: error %d returned from ctl_remove_lun() for " "LUN %d", __func__, retval, params->lun_id); mtx_lock(&softc->lock); be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; mtx_unlock(&softc->lock); goto bailout_error; } mtx_lock(&softc->lock); while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) { retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0); if (retval == EINTR) break; } be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) { mtx_unlock(&softc->lock); free(be_lun, M_RAMDISK); } else { mtx_unlock(&softc->lock); return (EINTR); } req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; return (0); } static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_create_params *params; const char *value; char tmpstr[32]; uint64_t t; int retval; retval = 0; params = &req->reqdata.create; be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK); cbe_lun = &be_lun->cbe_lun; cbe_lun->options = nvlist_clone(req->args_nvl); be_lun->params = req->reqdata.create; be_lun->softc = softc; if (params->flags & CTL_LUN_FLAG_DEV_TYPE) cbe_lun->lun_type = params->device_type; else cbe_lun->lun_type = T_DIRECT; be_lun->flags = 0; cbe_lun->flags = 0; value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; be_lun->pblocksize = PAGE_SIZE; value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); if (value != NULL) { ctl_expand_number(value, &t); be_lun->pblocksize = t; } if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) { snprintf(req->error_str, sizeof(req->error_str), "%s: unsupported pblocksize %u", __func__, be_lun->pblocksize); goto bailout_error; } if (cbe_lun->lun_type == T_DIRECT || cbe_lun->lun_type == T_CDROM) { if (params->blocksize_bytes != 0) cbe_lun->blocksize = params->blocksize_bytes; else if (cbe_lun->lun_type == T_CDROM) cbe_lun->blocksize = 2048; else cbe_lun->blocksize = 512; be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize; if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) { snprintf(req->error_str, sizeof(req->error_str), "%s: pblocksize %u not exp2 of blocksize %u", __func__, be_lun->pblocksize, cbe_lun->blocksize); goto bailout_error; } if (params->lun_size_bytes < cbe_lun->blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, params->lun_size_bytes, cbe_lun->blocksize); goto bailout_error; } be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize; be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize; be_lun->indir = 0; t = be_lun->size_bytes / be_lun->pblocksize; while (t > 1) { t /= PPP; be_lun->indir++; } cbe_lun->maxlba = be_lun->size_blocks - 1; cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1; cbe_lun->pblockoff = 0; cbe_lun->ublockexp = cbe_lun->pblockexp; cbe_lun->ublockoff = 0; cbe_lun->atomicblock = be_lun->pblocksize; cbe_lun->opttxferlen = SGPP * be_lun->pblocksize; value = dnvlist_get_string(cbe_lun->options, "capacity", NULL); if (value != NULL) 
ctl_expand_number(value, &be_lun->cap_bytes); } else { be_lun->pblockmul = 1; cbe_lun->pblockexp = 0; } /* Tell the user the blocksize we ended up using */ params->blocksize_bytes = cbe_lun->blocksize; params->lun_size_bytes = be_lun->size_bytes; value = dnvlist_get_string(cbe_lun->options, "unmap", NULL); if (value == NULL || strcmp(value, "off") != 0) cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; value = dnvlist_get_string(cbe_lun->options, "readonly", NULL); if (value != NULL) { if (strcmp(value, "on") == 0) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; } else if (cbe_lun->lun_type != T_DIRECT) cbe_lun->flags |= CTL_LUN_FLAG_READONLY; cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; value = dnvlist_get_string(cbe_lun->options, "serseq", NULL); if (value != NULL && strcmp(value, "on") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_READ; + else if (value != NULL && strcmp(value, "soft") == 0) + cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT; else if (value != NULL && strcmp(value, "off") == 0) cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; if (params->flags & CTL_LUN_FLAG_ID_REQ) { cbe_lun->req_lun_id = params->req_lun_id; cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; } else cbe_lun->req_lun_id = 0; cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown; cbe_lun->be = &ctl_be_ramdisk_driver; if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns); strncpy((char *)cbe_lun->serial_num, tmpstr, MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); /* Tell the user what we used for a serial number */ strncpy((char *)params->serial_num, tmpstr, MIN(sizeof(params->serial_num), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->serial_num, params->serial_num, MIN(sizeof(cbe_lun->serial_num), sizeof(params->serial_num))); } if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); strncpy((char *)cbe_lun->device_id, tmpstr, MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); /* Tell the user what we used for a device ID */ strncpy((char *)params->device_id, tmpstr, MIN(sizeof(params->device_id), sizeof(tmpstr))); } else { strncpy((char *)cbe_lun->device_id, params->device_id, MIN(sizeof(cbe_lun->device_id), sizeof(params->device_id))); } STAILQ_INIT(&be_lun->cont_queue); sx_init(&be_lun->page_lock, "ctlram page"); if (be_lun->cap_bytes == 0) { be_lun->indir = 0; be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK); } be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK|M_ZERO); mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF); TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker, be_lun); be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK, taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); if (be_lun->io_taskqueue == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: Unable to create taskqueue", __func__); goto bailout_error; } retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue, /*num threads*/1, /*priority*/PUSER, /*proc*/control_softc->ctl_proc, /*thread name*/"ramdisk"); if (retval != 0) goto bailout_error; retval = ctl_add_lun(&be_lun->cbe_lun); if (retval != 0) { snprintf(req->error_str, sizeof(req->error_str), "%s: ctl_add_lun() returned error %d, see dmesg for " "details", __func__, retval); retval = 0; goto bailout_error; } mtx_lock(&softc->lock); softc->num_luns++; SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links); mtx_unlock(&softc->lock); 
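Earlier in ctl_backend_ramdisk_create() the "while (t > 1)" loop computes how many levels of PPP-ary indirect pages are needed to index all backing pages of the LUN. The following standalone sketch mirrors that computation; EX_PAGE_SIZE and EX_PPP are example constants, not the kernel definitions.

/*
 * Illustrative sketch only: depth of the indirect-page tree for a sparse
 * ramdisk LUN of a given size.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE	4096
#define EX_PPP		(EX_PAGE_SIZE / sizeof(void *))	/* pointers per page */

static int
indir_depth(uint64_t size_bytes, uint64_t pblocksize)
{
	uint64_t t = size_bytes / pblocksize;	/* number of backing pages */
	int indir = 0;

	while (t > 1) {			/* one more level per factor of EX_PPP */
		t /= EX_PPP;
		indir++;
	}
	return (indir);
}

int
main(void)
{
	/* A 1 TiB LUN with 4 KiB backing pages has 2^28 pages to index. */
	printf("indir = %d\n", indir_depth(1ULL << 40, EX_PAGE_SIZE));
	return (0);
}
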
params->req_lun_id = cbe_lun->lun_id; req->status = CTL_LUN_OK; return (retval); bailout_error: req->status = CTL_LUN_ERROR; if (be_lun != NULL) { if (be_lun->io_taskqueue != NULL) taskqueue_free(be_lun->io_taskqueue); nvlist_destroy(cbe_lun->options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); free(be_lun, M_RAMDISK); } return (retval); } static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, struct ctl_lun_req *req) { struct ctl_be_ramdisk_lun *be_lun; struct ctl_be_lun *cbe_lun; struct ctl_lun_modify_params *params; const char *value; uint32_t blocksize; int wasprim; params = &req->reqdata.modify; sx_xlock(&softc->modify_lock); mtx_lock(&softc->lock); SLIST_FOREACH(be_lun, &softc->lun_list, links) { if (be_lun->cbe_lun.lun_id == params->lun_id) break; } mtx_unlock(&softc->lock); if (be_lun == NULL) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN %u is not managed by the ramdisk backend", __func__, params->lun_id); goto bailout_error; } cbe_lun = &be_lun->cbe_lun; if (params->lun_size_bytes != 0) be_lun->params.lun_size_bytes = params->lun_size_bytes; if (req->args_nvl != NULL) { nvlist_destroy(cbe_lun->options); cbe_lun->options = nvlist_clone(req->args_nvl); } wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); if (value != NULL) { if (strcmp(value, "primary") == 0) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; else cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ctl_lun_primary(cbe_lun); else ctl_lun_secondary(cbe_lun); } blocksize = be_lun->cbe_lun.blocksize; if (be_lun->params.lun_size_bytes < blocksize) { snprintf(req->error_str, sizeof(req->error_str), "%s: LUN size %ju < blocksize %u", __func__, be_lun->params.lun_size_bytes, blocksize); goto bailout_error; } be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize; be_lun->size_bytes = be_lun->size_blocks * blocksize; be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1; ctl_lun_capacity_changed(&be_lun->cbe_lun); /* Tell the user the exact size we ended up using */ params->lun_size_bytes = be_lun->size_bytes; sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_OK; return (0); bailout_error: sx_xunlock(&softc->modify_lock); req->status = CTL_LUN_ERROR; return (0); } static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun) { struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun; struct ctl_be_ramdisk_softc *softc = be_lun->softc; taskqueue_drain_all(be_lun->io_taskqueue); taskqueue_free(be_lun->io_taskqueue); nvlist_destroy(be_lun->cbe_lun.options); free(be_lun->zero_page, M_RAMDISK); ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir); sx_destroy(&be_lun->page_lock); mtx_destroy(&be_lun->queue_lock); mtx_lock(&softc->lock); be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED; if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING) wakeup(be_lun); else free(be_lun, M_RAMDISK); mtx_unlock(&softc->lock); } diff --git a/usr.sbin/ctladm/ctladm.8 b/usr.sbin/ctladm/ctladm.8 index 25380c86441a..b8b90ad200fe 100644 --- a/usr.sbin/ctladm/ctladm.8 +++ b/usr.sbin/ctladm/ctladm.8 @@ -1,1097 +1,1098 @@ .\" .\" Copyright (c) 2003 Silicon Graphics International 
Corp. -.\" Copyright (c) 2015-2020 Alexander Motin +.\" Copyright (c) 2015-2021 Alexander Motin .\" Copyright (c) 2018 Marcelo Araujo .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions, and the following disclaimer, .\" without modification. .\" 2. Redistributions in binary form must reproduce at minimum a disclaimer .\" substantially similar to the "NO WARRANTY" disclaimer below .\" ("Disclaimer") and any redistribution must be conditioned upon .\" including a substantially similar Disclaimer requirement for further .\" binary redistribution. .\" .\" NO WARRANTY .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR .\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT .\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, .\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING .\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGES. .\" .\" ctladm utility man page. .\" .\" Author: Ken Merry .\" .\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $ .\" $FreeBSD$ .\" -.Dd November 23, 2020 +.Dd March 9, 2021 .Dt CTLADM 8 .Os .Sh NAME .Nm ctladm .Nd CAM Target Layer control utility .Sh SYNOPSIS .Nm .Aq Ar command .Op lun .Op generic args .Op command args .Nm .Ic tur .Aq lun .Op general options .Nm .Ic inquiry .Aq lun .Op general options .Nm .Ic reqsense .Aq lun .Op general options .Nm .Ic reportluns .Aq lun .Op general options .Nm .Ic read .Aq lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic write .Aq lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic readcap .Aq lun .Op general options .Op Fl c Ar cdbsize .Nm .Ic modesense .Aq lun .Aq Fl m Ar page | Fl l .Op Fl P Ar pc .Op Fl d .Op Fl S Ar subpage .Op Fl c Ar size .Nm .Ic start .Aq lun .Op general options .Op Fl i .Op Fl o .Nm .Ic stop .Aq lun .Op general options .Op Fl i .Op Fl o .Nm .Ic synccache .Aq lun .Op general options .Op Fl l Ar lba .Op Fl b Ar blockcount .Op Fl r .Op Fl i .Op Fl c Ar cdbsize .Nm .Ic lunlist .Nm .Ic delay .Aq lun .Aq Fl l Ar datamove|done .Aq Fl t Ar secs .Op Fl T Ar oneshot|cont .Nm .Ic inject .Aq Fl i Ar action .Aq Fl p Ar pattern .Op Fl r Ar lba,len .Op Fl s Ar len fmt Op Ar args .Op Fl c .Op Fl d Ar delete_id .Nm .Ic create .Aq Fl b Ar backend .Op Fl B Ar blocksize .Op Fl d Ar device_id .Op Fl l Ar lun_id .Op Fl o Ar name=value .Op Fl s Ar size_bytes .Op Fl S Ar serial_num .Op Fl t Ar device_type .Nm .Ic remove .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Op Fl o Ar name=value .Nm .Ic modify .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Op Fl o Ar name=value .Aq Fl s Ar size_bytes .Nm .Ic devlist .Op Fl b Ar backend .Op Fl v .Op Fl x .Nm .Ic port .Op Fl c .Op Fl o Ar on|off .Op Fl w Ar wwpn .Op Fl W Ar 
wwnn .Op Fl O Ar pp|vp .Op Fl p Ar targ_port .Op Fl r Ar targ_port .Op Fl t Ar fe_type .Nm .Ic portlist .Op Fl f Ar frontend .Op Fl i .Op Fl l .Op Fl p Ar targ_port .Op Fl q .Op Fl v .Op Fl x .Nm .Ic lunmap .Aq Fl p Ar targ_port .Op Fl l Ar pLUN .Op Fl L Ar cLUN .Nm .Ic dumpooa .Nm .Ic dumpstructs .Nm .Ic islist .Op Fl v .Op Fl x .Nm .Ic islogout .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal .Nm .Ic isterminate .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal .Nm .Ic help .Sh DESCRIPTION The .Nm utility is designed to provide a way to access and control the CAM Target Layer (CTL). It provides a way to send .Tn SCSI commands to the CTL layer, and also provides some meta-commands that utilize .Tn SCSI commands. (For instance, the .Ic lunlist command is implemented using the .Tn SCSI REPORT LUNS and INQUIRY commands.) .Pp The .Nm utility has a number of primary functions, many of which require a device identifier. The device identifier takes the following form: .Bl -tag -width 14n .It lun Specify the LUN number to operate on. .El Many of the primary functions of the .Nm utility take the following optional arguments: .Bl -tag -width 10n .It Fl C Ar retries Specify the number of times to retry a command in the event of failure. .It Fl D Ar device Specify the device to open. This allows a device other than the default device, .Pa /dev/cam/ctl , to be opened for sending commands. .It Fl I Ar id Specify the initiator number to use. By default, .Nm will use 7 as the initiator number. .El .Pp Primary commands: .Bl -tag -width 11n .It Ic tur Send the .Tn SCSI TEST UNIT READY command to the device and report whether or not it is ready. .It Ic inquiry Send the .Tn SCSI INQUIRY command to the device and display some of the returned inquiry data. .It Ic reqsense Send the .Tn SCSI REQUEST SENSE command to the device and display the returned sense information. .It Ic reportluns Send the .Tn SCSI REPORT LUNS command to the device and display supported LUNs. .It Ic read Send a .Tn SCSI READ command to the device, and write the requested data to a file or stdout. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the READ. This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar datalen Specify the length, in 512 byte blocks, of the READ request. .It Fl f Ar file Specify the destination for the data read by the READ command. Either a filename or .Sq - for stdout may be specified. .It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Descriptor Block) size to be used for the READ request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, READ(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data to .Nm from the kernel when doing a read, just execute the command without copying data. This is to be used for performance testing. .El .It Ic write Read data from a file or stdin, and write the data to the device using the .Tn SCSI WRITE command. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the WRITE.
This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar datalen Specify the length, in 512 byte blocks, of the WRITE request. .It Fl f Ar file Specify the source for the data to be written by the WRITE command. Either a filename or .Sq - for stdin may be specified. .It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Descriptor Block) size to be used for the WRITE request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, WRITE(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data from .Nm to the kernel when doing a write, just execute the command without copying data. This is to be used for performance testing. .El .It Ic readcap Send the .Tn SCSI READ CAPACITY command to the device and display the device size and device block size. By default, READ CAPACITY(10) is used. If the device returns a maximum LBA of 0xffffffff, however, .Nm will automatically issue a READ CAPACITY(16), which is implemented as a service action of the SERVICE ACTION IN(16) opcode. The user can specify the minimum CDB size with the .Fl c argument. Valid values for the .Fl c option are 10 and 16. If a 10 byte CDB is specified, the request will be automatically reissued with a 16 byte CDB if the maximum LBA returned is 0xffffffff. .It Ic modesense Send a .Tn SCSI MODE SENSE command to the device, and display the requested mode page(s) or page list. .Bl -tag -width 10n .It Fl m Ar page Specify the mode page to display. This option and the .Fl l option are mutually exclusive. One of the two must be specified, though. Mode page numbers may be specified in decimal or hexadecimal. .It Fl l Request that the list of mode pages supported by the device be returned. This option and the .Fl m option are mutually exclusive. One of the two must be specified, though. .It Fl P Ar pc Specify the mode page control value. Possible values are: .Bl -tag -width 2n -compact .It 0 Current values. .It 1 Changeable value bitmask. .It 2 Default values. .It 3 Saved values. .El .It Fl d Disable block descriptors when sending the mode sense request. .It Fl S Ar subpage Specify the subpage used with the mode sense request. .It Fl c Ar cdbsize Specify the CDB size used for the mode sense request. Supported values are 6 and 10. .El .It Ic start Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit set. .Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .El .It Ic stop Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit cleared. We use an ordered tag to stop the LUN, so we can guarantee that all pending I/O executes before it is stopped. (CTL guarantees this anyway, but .Nm sends an ordered tag for completeness.) .Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .El .It Ic synccache Send the .Tn SCSI SYNCHRONIZE CACHE command to the device. By default, SYNCHRONIZE CACHE(10) is used.
If the specified starting LBA is greater than 0xffffffff or the length is greater than 0xffff, though, SYNCHRONIZE CACHE(16) will be used. The 16 byte command will also be used if the user specifies a 16 byte CDB with the .Fl c argument. .Bl -tag -width 14n .It Fl l Ar lba Specify the starting LBA of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl b Ar blockcount Specify the length of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl r Specify relative addressing for the starting LBA. CTL does not support relative addressing, since it only works for linked commands, and CTL does not support linked commands. .It Fl i Tell the target to return status immediately after issuing the SYNCHRONIZE CACHE command rather than waiting for the cache to finish syncing. CTL does not support this bit. .It Fl c Ar cdbsize Specify the minimum CDB size. Valid values are 10 and 16 bytes. .El .It Ic lunlist List all LUNs registered with CTL. Because this command uses the ioctl port, it will only work when the FETDs (Front End Target Drivers) are enabled. This command is the equivalent of doing a REPORT LUNS on one LUN and then an INQUIRY on each LUN in the system. .It Ic delay Delay commands at the given location. There are two places where commands may be delayed currently: before data is transferred .Pq Dq datamove and just prior to sending status to the host .Pq Dq done . One of the two must be supplied as an argument to the .Fl l option. The .Fl t option must also be specified. .Bl -tag -width 12n .It Fl l Ar delayloc Delay command(s) at the specified location. This can either be at the data movement stage (datamove) or prior to command completion (done). .It Fl t Ar delaytime Delay command(s) for the specified number of seconds. This must be specified. If set to 0, it will clear out any previously set delay for this particular location (datamove or done). .It Fl T Ar delaytype Specify the delay type. By default, the .Ic delay option will delay the next command sent to the given LUN. With the .Fl T Ar cont option, every command will be delayed by the specified period of time. With the .Fl T Ar oneshot the next command sent to the given LUN will be delayed and all subsequent commands will be completed normally. This is the default. .El .It Ic inject Inject the specified type of error for the LUN specified, when a command that matches the given pattern is seen. The sense data returned is in either fixed or descriptor format, depending upon the status of the D_SENSE bit in the control mode page (page 0xa) for the LUN. .Pp Errors are only injected for commands that have not already failed for other reasons. By default, only the first command matching the pattern specified is returned with the supplied error. .Pp If the .Fl c flag is specified, all commands matching the pattern will be returned with the specified error until the error injection command is deleted with .Fl d flag. .Bl -tag -width 17n .It Fl i Ar action Specify the error to return: .Bl -tag -width 10n .It aborted Return the next matching command on the specified LUN with the sense key ABORTED COMMAND (0x0b), and the ASC/ASCQ 0x45,0x00 ("Select or reselect failure"). 
.It mediumerr Return the next matching command on the specified LUN with the sense key MEDIUM ERROR (0x03) and the ASC/ASCQ 0x11,0x00 ("Unrecovered read error") for reads, or ASC/ASCQ 0x0c,0x02 ("Write error - auto reallocation failed") for write errors. .It ua Return the next matching command on the specified LUN with the sense key UNIT ATTENTION (0x06) and the ASC/ASCQ 0x29,0x00 ("POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"). .It custom Return the next matching command on the specified LUN with the supplied sense data. The .Fl s argument must be specified. .El .It Fl p Ar pattern Specify which commands should be returned with the given error. .Bl -tag -width 10n .It read The error should apply to READ(6), READ(10), READ(12), READ(16), etc. .It write The error should apply to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE AND VERIFY(10), etc. .It rw The error should apply to both read and write type commands. .It readcap The error should apply to READ CAPACITY(10) and READ CAPACITY(16) commands. .It tur The error should apply to TEST UNIT READY commands. .It any The error should apply to any command. .El .It Fl r Ar lba,len Specify the starting LBA and length of the range of LBAs which should trigger an error. This option only applies when read and/or write patterns are specified. If used with other command types, the error will never be triggered. .It Fl s Ar len fmt Op Ar args Specify the sense data that is to be returned for custom actions. If the format is .Sq - , len bytes of sense data will be read from standard input and written to the sense buffer. If len is longer than 252 bytes (the maximum allowable .Tn SCSI sense data length), it will be truncated to that length. The sense data format is described in .Xr cam_cdbparse 3 . .It Fl c The error injection should be persistent, instead of happening once. Persistent errors must be deleted with the .Fl d argument. .It Fl d Ar delete_id Delete the specified error injection serial number. The serial number is returned when the error is injected. .El .It Ic port Perform one of several CTL frontend port operations. Either get a list of frontend ports .Pq Fl l , turn one or more frontends on or off .Pq Fl o Ar on|off , or set the World Wide Node Name .Pq Fl w Ar wwnn or World Wide Port Name .Pq Fl W Ar wwpn for a given port. One of .Fl l , .Fl o , or .Fl w or .Fl W must be specified. The WWNN and WWPN may both be specified at the same time, but cannot be combined with enabling/disabling or listing ports. .Bl -tag -width 12n .It Fl c Create a new frontend port using a free pp and vp=0. .It Fl o Ar on|off Turn the specified CTL frontend ports on or off. If no port number or port type is specified, all ports are turned on or off. .It Fl O Ar pp|vp Specify generic options on the ioctl frontend port. At present, only pp and vp port numbers can be set. .It Fl p Ar targ_port Specify the frontend port number. The port numbers can be found in the frontend port list. .It Fl r Remove port specified with .Pq Fl p Ar targ_port . .It Fl t Ar fe_type Specify the frontend type. Currently defined port types are .Dq fc (Fibre Channel), .Dq scsi (Parallel SCSI), .Dq ioctl (CTL ioctl interface), and .Dq internal (CTL CAM SIM). .It Fl w Ar wwnn Set the World Wide Node Name for the given port. The .Fl p argument must be specified, since this is only possible to implement on a single port. As a general rule, the WWNN should be the same across all ports on the system. .It Fl W Ar wwpn Set the World Wide Port Name for the given port.
The .Fl p argument must be specified, since this is only possible to implement on a single port. As a general rule, the WWPN must be different for every port in the system. .El .It Ic portlist List CTL frontend ports. .Bl -tag -width 12n .It Fl f Ar frontend Specify the frontend type. .It Fl i Report target and connected initiator addresses. .It Fl l Report LUN mapping. .It Fl p Ar targ_port Specify the frontend port number. .It Fl q Omit the header in the port list output. .It Fl v Enable verbose output (report all port options). .It Fl x Output the port list in XML format. .El .It Ic lunmap Change LUN mapping for the specified port. If both .Ar pLUN and .Ar cLUN are specified -- LUN will be mapped. If .Ar pLUN is specified, but .Ar cLUN is not -- LUN will be unmapped. If neither .Ar pLUN nor .Ar cLUN is specified -- LUN mapping will be disabled, exposing all CTL LUNs. .Bl -tag -width 12n .It Fl p Ar targ_port Specify the frontend port number. .It Fl l Ar pLUN LUN number visible to the specified port. .It Fl L Ar cLUN CTL LUN number. .El .It Ic dumpooa Dump the OOA (Order Of Arrival) queue for each LUN registered with CTL. .It Ic dumpstructs Dump the CTL structures to the console. .It Ic create Create a new LUN. The backend must be specified, and depending upon the backend requested, some of the other options may be required. If the LUN is created successfully, the LUN configuration will be displayed. If LUN creation fails, a message will be displayed describing the failure. .Bl -tag -width 14n .It Fl b Ar backend The .Fl b flag is required. This specifies the name of the backend to use when creating the LUN. Examples are .Dq ramdisk and .Dq block . .It Fl B Ar blocksize Specify the blocksize of the backend in bytes. .It Fl d Ar device_id Specify the LUN-associated string to use in the .Tn SCSI INQUIRY VPD page 0x83 data. .It Fl l Ar lun_id Request that a particular LUN number be assigned. If the requested LUN number is not available, the request will fail. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. Some backends may allow setting the size (e.g. the ramdisk backend) and for others the size may be implicit (e.g. the block backend). .It Fl S Ar serial_num Specify the serial number to be used in the .Tn SCSI INQUIRY VPD page 0x80 data. .It Fl t Ar device_type Specify the numeric SCSI device type to use when creating the LUN. If this flag is not used, the type of LUN created is backend-specific. Not all LUN types are supported. Currently CTL supports Direct Access (type 0), Processor (type 3) and CD/DVD (type 5) LUNs. The backend requested may or may not support all of the LUN types that CTL supports. .El .It Ic remove Remove a LUN. The backend must be specified, and the LUN number must also be specified. Backend-specific options may also be specified with the .Fl o flag. .Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be removed. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to remove. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .El .It Ic modify Modify a LUN size. The backend, the LUN number, and the size must be specified.
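To make the three required pieces concrete before the flag descriptions below, a hypothetical resize of a ramdisk-backed LUN might look like the following (the LUN number and size are made-up values):

    # Grow LUN 1, owned by the ramdisk backend, to 21474836480 bytes (20 GiB).
    ctladm modify -b ramdisk -l 1 -s 21474836480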
.Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be modified. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to modify. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. For the .Dq block backend, an .Dq auto keyword may be passed instead; this will make CTL use the size of the backing file or device. .El .It Ic devlist Get a list of all configured LUNs. This also includes the LUN size and blocksize, serial number and device ID. .Bl -tag -width 11n .It Fl b Ar backend Specify the backend. This restricts the LUN list to the named backend. Examples are .Dq ramdisk and .Dq block . .It Fl v Be verbose. This will also display any backend-specific LUN attributes in addition to the standard per-LUN information. .It Fl x Dump the raw XML. The LUN list information from the kernel comes in XML format, and this option allows the display of the raw XML data. This option and the .Fl v and .Fl b options are mutually exclusive. If you specify .Fl x , the entire LUN database is displayed in XML format. .El .It Ic islist Get a list of currently running iSCSI sessions. This includes initiator and target names and the unique connection IDs. .Bl -tag -width 11n .It Fl v Verbose mode. .It Fl x Dump the raw XML. The sessions list information from the kernel comes in XML format, and this option allows the display of the raw XML data. .El .It Ic islogout Ask the initiator to log out iSCSI sessions matching criteria. .Bl -tag -width 11n .It Fl a Log out all sessions. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic isterminate Forcibly terminate iSCSI sessions matching criteria. .Bl -tag -width 11n .It Fl a Terminate all sessions. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic help Display .Nm usage information. .El .Sh OPTIONS A number of additional configuration options may be specified for LUNs. Some options are global, others are backend-specific. .Pp Global options: .Bl -tag -width 12n .It Va vendor Specifies LUN vendor string up to 8 chars. .It Va product Specifies LUN product string up to 16 chars. .It Va revision Specifies LUN revision string up to 4 chars. .It Va scsiname Specifies LUN SCSI name string. .It Va eui Specifies LUN EUI-64 identifier. .It Va naa Specifies LUN NAA identifier. .It Va uuid Specifies LUN locally assigned RFC 4122 UUID identifier. The EUI, NAA or UUID identifier should be set to a unique value to allow the EXTENDED COPY command to access the LUN. Non-unique LUN identifiers may lead to data corruption. Some initiators may not support later introduced UUID identifiers. .It Va ident_info Specifies LUN identification information (string or 0x + hex). .It Va text_ident_info Specifies LUN text identification information (UTF-8 string). .It Va ha_role Setting to "primary" or "secondary" overrides default role of the node in HA cluster, set by kern.cam.ctl.ha_role sysctl. .It Va insecure_tpc Setting to "on" allows an EXTENDED COPY command sent to this LUN to access other LUNs on this host that are not otherwise accessible. This makes it possible to offload copying between different iSCSI targets residing on the same host in trusted environments.
.It Va readcache Set to "off", disables read caching for the LUN, if supported by the backend. .It Va readonly Set to "on", blocks all media write operations to the LUN, reporting it as write protected. .It Va removable Set to "on", makes LUN removable. .It Va reordering Set to "unrestricted", allows target to process commands with SIMPLE task attribute in arbitrary order. Any data integrity exposures related to command sequence order shall be explicitly handled by the application client through the selection of appropriate commands and task attributes. The default value is "restricted". It improves data integrity, but may introduce some additional delays. .It Va serseq -Set to "on" to serialize consecutive reads/writes. -Set to "read" to serialize consecutive reads. +Set to "on" to fully serialize consecutive reads/writes. +Set to "read" to fully serialize consecutive reads. +Set to "soft" to slightly serialize consecutive reads. Set to "off" to allow them to be issued in parallel. Parallel issue of consecutive operations may confuse logic of the backing file system, hurting performance; but it may improve performance of backing stores without prefetch/write-back. .It Va pblocksize .It Va pblockoffset Specify physical block size and offset of the device. .It Va ublocksize .It Va ublockoffset Specify UNMAP block size and offset of the device. .It Va rpm Specifies medium rotation rate of the device: 0 -- not reported, 1 -- non-rotating (SSD), >1024 -- value in revolutions per minute. .It Va formfactor Specifies nominal form factor of the device: 0 -- not reported, 1 -- 5.25", 2 -- 3.5", 3 -- 2.5", 4 -- 1.8", 5 -- less than 1.8". .It Va temperature .It Va reftemperature Specify current and reference (maximum) temperatures of the device. .It Va provisioning_type When UNMAP support is enabled, this option specifies provisioning type: "resource", "thin" or "unknown". Default value is "thin". Logical units without UNMAP support are reported as fully provisioned. .It Va unmap Setting to "on" or "off" controls UNMAP support for the logical unit. Default value is "on" if supported by the backend. .It Va unmap_max_lba .It Va unmap_max_descr Specify maximum allowed number of LBAs and block descriptors per UNMAP command to report in Block Limits VPD page. .It Va write_same_max_lba Specify maximum allowed number of LBAs per WRITE SAME command to report in Block Limits VPD page. .It Va avail-threshold .It Va used-threshold .It Va pool-avail-threshold .It Va pool-used-threshold Set per-LUN/-pool thin provisioning soft thresholds. The LUN will establish a UNIT ATTENTION condition if its or the pool's available space gets below the configured avail values, or its or the pool's used space gets above the configured used values. Pool thresholds work only for ZVOL-backed LUNs. .It Va writecache Set to "off", disables write caching for the LUN, if supported by the backend. .El .Pp Options specific for block backend: .Bl -tag -width 12n .It Va file Specifies file or device name to use for backing store. .It Va num_threads Specifies number of backend threads to use for this LUN. .El .Pp Options specific for ramdisk backend: .Bl -tag -width 12n .It Va capacity Specifies capacity of backing store (maximum RAM for data). The default value is zero, which disables the backing store completely, making all writes go nowhere, while all reads return zeroes. .El .Sh EXAMPLES .Pp Send a .Tn SCSI TEST UNIT READY command to LUN 1. .Pp .Dl ctladm tur 1 .Pp Display the list of mode pages supported by LUN 1.
.Pp .Dl ctladm modesense 1 -l .Pp Display the saved version of the Control mode page (page 10) on LUN 0. Disable fetching block descriptors, and use a 10 byte MODE SENSE command instead of the default 6 byte command. .Pp .Dl ctladm modesense 0 -m 10 -P 3 -d -c 10 .Pp Read the first 512 byte block from LUN 2 and dump it to the file .Pa foo . .Bd -literal .Dl ctladm read 2 -l 0 -d 1 -b 512 -f - > foo .Ed .Pp Read 10240 bytes from the file .Pa /tmp/bar and write it to LUN 3, starting at LBA 0xff432140. .Pp .Bd -literal .Dl ctladm write 3 -l 0xff432140 -d 20 -b 512 -f /tmp/bar .Ed .Pp Create a LUN with the .Dq fake ramdisk as a backing store. The LUN will claim to have a size of approximately 10 terabytes, while having no real data store (all written data are lost). .Pp .Dl ctladm create -b ramdisk -s 10485760000000000 .Pp Create a thin provisioned LUN with a ramdisk as a backing store. The LUN will have a maximal backing store capacity of 10 gigabytes, while reporting a size of 10 terabytes. .Pp .Dl ctladm create -b ramdisk -s 10T -o capacity=10G .Pp Create a LUN using the block backend, specify the ZFS volume .Pa tank/example as the backing store, and specify the .Tn SCSI VPD page 0x80 and 0x83 serial number .Fl ( S ) and device ID .Fl ( d ) . The size of the LUN will be derived from the size of the ZVOL. .Pp .Dl ctladm create -b block -o file=/dev/zvol/tank/example -S MYSERIAL321 -d MYDEVID123 .Pp Create a new ioctl frontend port and specify generic port options; currently only the pp and vp port numbers can be set. .Pp .Dl ctladm port -c -O pp=11 -O vp=12 .Pp Remove the frontend port with target port number 4. .Pp .Dl ctladm port -r -p 4 .Pp Remove LUN 12, which is handled by the block backend, from the system. .Pp .Dl ctladm remove -b block -l 12 .Pp List configured LUNs in the system, along with their backend and serial number. This works whether the Front End Target Drivers are enabled or disabled. .Pp .Dl ctladm devlist .Pp List all LUNs in the system, along with their inquiry data and device type. This only works when the FETDs are enabled, since the commands go through the ioctl port. .Pp .Dl ctladm lunlist .Pp Inject a medium error on LUN 6 for every read that covers the first 512 blocks of the LUN. .Pp .Dl ctladm inject 6 -i mediumerr -p read -r 0,512 -c .Pp Inject a custom error on LUN 6 for the next TEST UNIT READY command only. This will result in a sense key of NOT READY (0x02), and an ASC/ASCQ of 0x04,0x02 ("Logical unit not ready, initializing command required"). .Pp .Bd -literal -offset indent ctladm inject 6 -i custom -p tur -s 18 "f0 0 02 s12 04 02" .Ed .Sh SEE ALSO .Xr cam 3 , .Xr cam_cdbparse 3 , .Xr cam 4 , .Xr ctl 4 , .Xr xpt 4 , .Xr camcontrol 8 , .Xr ctld 8 , .Xr ctlstat 8 .Sh HISTORY The .Nm utility was originally written during the Winter/Spring of 2003 as an interface to CTL. .Sh AUTHORS .An Ken Merry Aq Mt ken@FreeBSD.org
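As a closing sketch complementing the EXAMPLES section above, the port mapping commands described earlier in this page could be exercised as follows; the port number is an arbitrary example and the mapping values simply mirror the lunmap description:

    # Expose CTL LUN 0 as LUN 0 of frontend port 5, then show the mapping.
    ctladm lunmap -p 5 -l 0 -L 0
    ctladm portlist -l -p 5
    # Disable LUN mapping on port 5 again, exposing all CTL LUNs.
    ctladm lunmap -p 5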