Index: head/sys/cam/ctl/ctl.c =================================================================== --- head/sys/cam/ctl/ctl.c (revision 293349) +++ head/sys/cam/ctl/ctl.c (revision 293350) @@ -1,13776 +1,13785 @@ /*- * Copyright (c) 2003-2009 Silicon Graphics International Corp. * Copyright (c) 2012 The FreeBSD Foundation * Copyright (c) 2015 Alexander Motin * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id$ */ /* * CAM Target Layer, a SCSI device emulation subsystem. 
* * Author: Ken Merry */ #define _CTL_C #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct ctl_softc *control_softc = NULL; /* * Template mode pages. */ /* * Note that these are default values only. The actual values will be * filled in when the user does a mode sense. */ const static struct copan_debugconf_subpage debugconf_page_default = { DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ DBGCNF_SUBPAGE_CODE, /* subpage */ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ DBGCNF_VERSION, /* page_version */ {CTL_TIME_IO_DEFAULT_SECS>>8, CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ }; const static struct copan_debugconf_subpage debugconf_page_changeable = { DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ DBGCNF_SUBPAGE_CODE, /* subpage */ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 0, /* page_version */ {0xff,0xff}, /* ctl_time_io_secs */ }; const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, 
/*data_strobe_offset_cnt*/0, /*byte8*/0, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, 
/*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; #define CTL_CEM_LEN (sizeof(struct scsi_control_ext_page) - 4) const static struct scsi_control_ext_page control_ext_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_control_ext_page 
control_ext_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE | SMPH_SPF, /*subpage_code*/0x01, /*page_length*/{CTL_CEM_LEN >> 8, CTL_CEM_LEN}, /*flags*/0, /*prio*/0, /*max_sense*/0 }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_DEXCPT, /*mrie*/0, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 0} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/0, /*mrie*/0, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 0} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct scsi_cddvd_capabilities_page 
cddvd_page_default = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0x3f, /*caps2*/0x00, /*caps3*/0xf0, /*caps4*/0x00, /*caps5*/0x29, /*caps6*/0x00, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{8, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; const static struct scsi_cddvd_capabilities_page cddvd_page_changeable = { /*page_code*/SMS_CDDVD_CAPS_PAGE, /*page_length*/sizeof(struct scsi_cddvd_capabilities_page) - 2, /*caps1*/0, /*caps2*/0, /*caps3*/0, /*caps4*/0, /*caps5*/0, /*caps6*/0, /*obsolete*/{0, 0}, /*nvol_levels*/{0, 0}, /*buffer_size*/{0, 0}, /*obsolete2*/{0, 0}, /*reserved*/0, /*digital*/0, /*obsolete3*/0, /*copy_management*/0, /*reserved2*/0, /*rotation_control*/0, /*cur_write_speed*/0, /*num_speed_descr*/0, }; SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); static int worker_threads = -1; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, &worker_threads, 1, "Number of worker threads"); static int ctl_debug = CTL_DEBUG_NONE; SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, &ctl_debug, 0, "Enabled debug flags"); /* * Supported pages (0x00), Serial number (0x80), Device ID (0x83), * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) */ #define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, int param); static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); static void ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest); static int ctl_init(void); void ctl_shutdown(void); static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); static int 
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); static void ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries); static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, struct ctl_be_lun *be_lun); static int ctl_free_lun(struct ctl_lun *lun); static void ctl_create_lun(struct ctl_be_lun *be_lun); static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr); static int ctl_do_mode_select(union ctl_io *io); static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param); static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg); static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); static int ctl_inquiry_std(struct ctl_scsiio *ctsio); static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); static ctl_action ctl_extent_check(union ctl_io 
*io1, union ctl_io *io2, bool seq); static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io); static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *starting_io); static int ctl_check_blocked(struct ctl_lun *lun); static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio); static void ctl_failover_lun(union ctl_io *io); static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio); static int ctl_scsiio(struct ctl_scsiio *ctsio); static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, ctl_ua_type ua_type); static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type); static int ctl_lun_reset(struct ctl_softc *ctl_softc, union ctl_io *io); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_query_task(union ctl_io *io, int task_set); static int ctl_i_t_nexus_reset(union ctl_io *io); static int ctl_query_async_event(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); static void ctl_datamove_remote_write(union ctl_io *io); static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); 
static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static void ctl_process_done(union ctl_io *io); static void ctl_lun_thread(void *arg); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx); static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ha_frontend = { .name = "ha", }; static void ctl_ha_datamove(union ctl_io *io) { struct ctl_lun *lun; struct ctl_sg_entry *sgl; union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy, i, j; lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; memset(&msg.dt, 0, sizeof(msg.dt)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* XXX KDM use busdma here! 
*/ msg.dt.sg_list[0].addr = (void *)vtophys(io->scsiio.kern_data_ptr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; #endif msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; do_sg_copy = 1; } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before processing. */ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; for (i = sg_entries_sent, j = 0; i < msg.dt.cur_sg_entries; i++, j++) { #if 0 if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[j].addr = sgl[i].addr; } else { /* XXX KDM use busdma here! */ msg.dt.sg_list[j].addr = (void *)vtophys(sgl[i].addr); } #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); msg.dt.sg_list[j].addr = sgl[i].addr; #endif msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; msg.dt.sg_last = (sg_entries_sent >= msg.dt.kern_sg_entries); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.dt) - sizeof(msg.dt.sg_list) + sizeof(struct ctl_sg_entry) * msg.dt.cur_sg_entries, M_WAITOK) > CTL_HA_STATUS_SUCCESS) { io->io_hdr.port_status = 31341; io->scsiio.be_move_done(io); return; } msg.dt.sent_sg_entries = sg_entries_sent; } /* * Officially handover the request from us to peer. * If failover has just happened, then we must return error. 
* If failover happen just after, then it is not our problem. */ if (lun) mtx_lock(&lun->lun_lock); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { if (lun) mtx_unlock(&lun->lun_lock); io->io_hdr.port_status = 31342; io->scsiio.be_move_done(io); return; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; io->io_hdr.flags |= CTL_FLAG_DMA_INPROG; if (lun) mtx_unlock(&lun->lun_lock); } static void ctl_ha_done(union ctl_io *io) { union ctl_ha_msg msg; if (io->io_hdr.io_type == CTL_IO_SCSI) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.sense_residual = io->scsiio.sense_residual; msg.scsi.residual = io->scsiio.residual; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); } ctl_free_io(io); } static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? 
*/ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; ctsio->sense_residual = msg_info->scsi.sense_residual; ctsio->residual = msg_info->scsi.residual; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, msg_info->scsi.sense_len); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } void ctl_isc_announce_lun(struct ctl_lun *lun) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg *msg; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&lun->lun_lock); i = sizeof(msg->lun); if (lun->lun_devid) i += lun->lun_devid->len; i += sizeof(pr_key) * lun->pr_key_count; alloc: mtx_unlock(&lun->lun_lock); msg = malloc(i, M_CTL, M_WAITOK); mtx_lock(&lun->lun_lock); k = sizeof(msg->lun); if (lun->lun_devid) k += lun->lun_devid->len; k += sizeof(pr_key) * lun->pr_key_count; if (i < k) { free(msg, M_CTL); i = k; goto alloc; } bzero(&msg->lun, sizeof(msg->lun)); msg->hdr.msg_type = CTL_MSG_LUN_SYNC; msg->hdr.nexus.targ_lun = lun->lun; msg->hdr.nexus.targ_mapped_lun = lun->lun; msg->lun.flags = lun->flags; msg->lun.pr_generation = lun->pr_generation; msg->lun.pr_res_idx = lun->pr_res_idx; msg->lun.pr_res_type = lun->pr_res_type; msg->lun.pr_key_count = lun->pr_key_count; i = 0; if (lun->lun_devid) { msg->lun.lun_devid_len = lun->lun_devid->len; memcpy(&msg->lun.data[i], lun->lun_devid->data, msg->lun.lun_devid_len); i 
+= msg->lun.lun_devid_len; } for (k = 0; k < CTL_MAX_INITIATORS; k++) { if ((pr_key.pr_key = ctl_get_prkey(lun, k)) == 0) continue; pr_key.pr_iid = k; memcpy(&msg->lun.data[i], &pr_key, sizeof(pr_key)); i += sizeof(pr_key); } mtx_unlock(&lun->lun_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); if (lun->flags & CTL_LUN_PRIMARY_SC) { for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { ctl_isc_announce_mode(lun, -1, lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].subpage); } } } void ctl_isc_announce_port(struct ctl_port *port) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; i = sizeof(msg->port) + strlen(port->port_name) + 1; if (port->lun_map) i += sizeof(uint32_t) * CTL_MAX_LUNS; if (port->port_devid) i += port->port_devid->len; if (port->target_devid) i += port->target_devid->len; if (port->init_devid) i += port->init_devid->len; msg = malloc(i, M_CTL, M_WAITOK); bzero(&msg->port, sizeof(msg->port)); msg->hdr.msg_type = CTL_MSG_PORT_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->port.port_type = port->port_type; msg->port.physical_port = port->physical_port; msg->port.virtual_port = port->virtual_port; msg->port.status = port->status; i = 0; msg->port.name_len = sprintf(&msg->port.data[i], "%d:%s", softc->ha_id, port->port_name) + 1; i += msg->port.name_len; if (port->lun_map) { msg->port.lun_map_len = sizeof(uint32_t) * CTL_MAX_LUNS; memcpy(&msg->port.data[i], port->lun_map, msg->port.lun_map_len); i += msg->port.lun_map_len; } if (port->port_devid) { msg->port.port_devid_len = port->port_devid->len; memcpy(&msg->port.data[i], port->port_devid->data, msg->port.port_devid_len); i += msg->port.port_devid_len; } if (port->target_devid) { msg->port.target_devid_len = port->target_devid->len; memcpy(&msg->port.data[i], 
port->target_devid->data, msg->port.target_devid_len); i += msg->port.target_devid_len; } if (port->init_devid) { msg->port.init_devid_len = port->init_devid->len; memcpy(&msg->port.data[i], port->init_devid->data, msg->port.init_devid_len); i += msg->port.init_devid_len; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->port, sizeof(msg->port) + i, M_WAITOK); free(msg, M_CTL); } void ctl_isc_announce_iid(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; union ctl_ha_msg *msg; int i, l; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max || softc->ha_link != CTL_HA_LINK_ONLINE) return; mtx_lock(&softc->ctl_lock); i = sizeof(msg->iid); l = 0; if (port->wwpn_iid[iid].name) l = strlen(port->wwpn_iid[iid].name) + 1; i += l; msg = malloc(i, M_CTL, M_NOWAIT); if (msg == NULL) { mtx_unlock(&softc->ctl_lock); return; } bzero(&msg->iid, sizeof(msg->iid)); msg->hdr.msg_type = CTL_MSG_IID_SYNC; msg->hdr.nexus.targ_port = port->targ_port; msg->hdr.nexus.initid = iid; msg->iid.in_use = port->wwpn_iid[iid].in_use; msg->iid.name_len = l; msg->iid.wwpn = port->wwpn_iid[iid].wwpn; if (port->wwpn_iid[iid].name) strlcpy(msg->iid.data, port->wwpn_iid[iid].name, l); mtx_unlock(&softc->ctl_lock); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg->iid, i, M_NOWAIT); free(msg, M_CTL); } void ctl_isc_announce_mode(struct ctl_lun *lun, uint32_t initidx, uint8_t page, uint8_t subpage) { struct ctl_softc *softc = lun->ctl_softc; union ctl_ha_msg msg; int i; if (softc->ha_link != CTL_HA_LINK_ONLINE) return; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) == page && lun->mode_pages.index[i].subpage == subpage) break; } if (i == CTL_NUM_MODE_PAGES) return; /* Don't try to replicate pages not present on this device. 
*/ if (lun->mode_pages.index[i].page_data == NULL) return; bzero(&msg.mode, sizeof(msg.mode)); msg.hdr.msg_type = CTL_MSG_MODE_SYNC; msg.hdr.nexus.targ_port = initidx / CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.initid = initidx % CTL_MAX_INIT_PER_PORT; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.mode.page_code = page; msg.mode.subpage = subpage; msg.mode.page_len = lun->mode_pages.index[i].page_len; memcpy(msg.mode.data, lun->mode_pages.index[i].page_data, msg.mode.page_len); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.mode, sizeof(msg.mode), M_WAITOK); } static void ctl_isc_ha_link_up(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_ha_msg msg; int i; /* Announce this node parameters to peer for validation. */ msg.login.msg_type = CTL_MSG_LOGIN; msg.login.version = CTL_HA_VERSION; msg.login.ha_mode = softc->ha_mode; msg.login.ha_id = softc->ha_id; msg.login.max_luns = CTL_MAX_LUNS; msg.login.max_ports = CTL_MAX_PORTS; msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login), M_WAITOK); STAILQ_FOREACH(port, &softc->port_list, links) { ctl_isc_announce_port(port); for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { if (port->wwpn_iid[i].in_use) ctl_isc_announce_iid(port, i); } } STAILQ_FOREACH(lun, &softc->lun_list, links) ctl_isc_announce_lun(lun); } static void ctl_isc_ha_link_down(struct ctl_softc *softc) { struct ctl_port *port; struct ctl_lun *lun; union ctl_io *io; int i; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PEER_SC_PRIMARY) { lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); io = ctl_alloc_io(softc->othersc_pool); mtx_lock(&softc->ctl_lock); ctl_zero_io(io); io->io_hdr.msg_type = CTL_MSG_FAILOVER; io->io_hdr.nexus.targ_mapped_lun = lun->lun; ctl_enqueue_isc(io); } 
STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port >= softc->port_min && port->targ_port < softc->port_max) continue; port->status &= ~CTL_PORT_STATUS_ONLINE; for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { port->wwpn_iid[i].in_use = 0; free(port->wwpn_iid[i].name, M_CTL); port->wwpn_iid[i].name = NULL; } } mtx_unlock(&softc->ctl_lock); } static void ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; uint32_t iid = ctl_get_initindex(&msg->hdr.nexus); mtx_lock(&softc->ctl_lock); if (msg->hdr.nexus.targ_lun < CTL_MAX_LUNS && (lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) != NULL) { mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (msg->ua.ua_type == CTL_UA_THIN_PROV_THRES && msg->ua.ua_set) memcpy(lun->ua_tpt_info, msg->ua.ua_info, 8); if (msg->ua.ua_all) { if (msg->ua.ua_set) ctl_est_ua_all(lun, iid, msg->ua.ua_type); else ctl_clr_ua_all(lun, iid, msg->ua.ua_type); } else { if (msg->ua.ua_set) ctl_est_ua(lun, iid, msg->ua.ua_type); else ctl_clr_ua(lun, iid, msg->ua.ua_type); } mtx_unlock(&lun->lun_lock); } else mtx_unlock(&softc->ctl_lock); } static void ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_lun *lun; struct ctl_ha_msg_lun_pr_key pr_key; int i, k; ctl_lun_flags oflags; uint32_t targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || ((lun = softc->ctl_luns[targ_lun]) == NULL)) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } i = (lun->lun_devid != NULL) ? lun->lun_devid->len : 0; if (msg->lun.lun_devid_len != i || (i > 0 && memcmp(&msg->lun.data[0], lun->lun_devid->data, i) != 0)) { mtx_unlock(&lun->lun_lock); printf("%s: Received conflicting HA LUN %d\n", __func__, msg->hdr.nexus.targ_lun); return; } else { /* Record whether peer is primary. 
*/ oflags = lun->flags; if ((msg->lun.flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_DISABLED) == 0) lun->flags |= CTL_LUN_PEER_SC_PRIMARY; else lun->flags &= ~CTL_LUN_PEER_SC_PRIMARY; if (oflags != lun->flags) ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); /* If peer is primary and we are not -- use data */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY)) { lun->pr_generation = msg->lun.pr_generation; lun->pr_res_idx = msg->lun.pr_res_idx; lun->pr_res_type = msg->lun.pr_res_type; lun->pr_key_count = msg->lun.pr_key_count; for (k = 0; k < CTL_MAX_INITIATORS; k++) ctl_clr_prkey(lun, k); for (k = 0; k < msg->lun.pr_key_count; k++) { memcpy(&pr_key, &msg->lun.data[i], sizeof(pr_key)); ctl_alloc_prkey(lun, pr_key.pr_iid); ctl_set_prkey(lun, pr_key.pr_iid, pr_key.pr_key); i += sizeof(pr_key); } } mtx_unlock(&lun->lun_lock); CTL_DEBUG_PRINT(("%s: Known LUN %d, peer is %s\n", __func__, msg->hdr.nexus.targ_lun, (msg->lun.flags & CTL_LUN_PRIMARY_SC) ? 
"primary" : "secondary")); /* If we are primary but peer doesn't know -- notify */ if ((lun->flags & CTL_LUN_PRIMARY_SC) && (msg->lun.flags & CTL_LUN_PEER_SC_PRIMARY) == 0) ctl_isc_announce_lun(lun); } } static void ctl_isc_port_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len) { struct ctl_port *port; struct ctl_lun *lun; int i, new; port = softc->ctl_ports[msg->hdr.nexus.targ_port]; if (port == NULL) { CTL_DEBUG_PRINT(("%s: New port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 1; port = malloc(sizeof(*port), M_CTL, M_WAITOK | M_ZERO); port->frontend = &ha_frontend; port->targ_port = msg->hdr.nexus.targ_port; port->fe_datamove = ctl_ha_datamove; port->fe_done = ctl_ha_done; } else if (port->frontend == &ha_frontend) { CTL_DEBUG_PRINT(("%s: Updated port %d\n", __func__, msg->hdr.nexus.targ_port)); new = 0; } else { printf("%s: Received conflicting HA port %d\n", __func__, msg->hdr.nexus.targ_port); return; } port->port_type = msg->port.port_type; port->physical_port = msg->port.physical_port; port->virtual_port = msg->port.virtual_port; port->status = msg->port.status; i = 0; free(port->port_name, M_CTL); port->port_name = strndup(&msg->port.data[i], msg->port.name_len, M_CTL); i += msg->port.name_len; if (msg->port.lun_map_len != 0) { if (port->lun_map == NULL) port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, M_CTL, M_WAITOK); memcpy(port->lun_map, &msg->port.data[i], sizeof(uint32_t) * CTL_MAX_LUNS); i += msg->port.lun_map_len; } else { free(port->lun_map, M_CTL); port->lun_map = NULL; } if (msg->port.port_devid_len != 0) { if (port->port_devid == NULL || port->port_devid->len != msg->port.port_devid_len) { free(port->port_devid, M_CTL); port->port_devid = malloc(sizeof(struct ctl_devid) + msg->port.port_devid_len, M_CTL, M_WAITOK); } memcpy(port->port_devid->data, &msg->port.data[i], msg->port.port_devid_len); port->port_devid->len = msg->port.port_devid_len; i += msg->port.port_devid_len; } else { free(port->port_devid, M_CTL); 
port->port_devid = NULL;
    }
    if (msg->port.target_devid_len != 0) {
        if (port->target_devid == NULL ||
            port->target_devid->len != msg->port.target_devid_len) {
            free(port->target_devid, M_CTL);
            port->target_devid = malloc(sizeof(struct ctl_devid) +
                msg->port.target_devid_len, M_CTL, M_WAITOK);
        }
        memcpy(port->target_devid->data, &msg->port.data[i],
            msg->port.target_devid_len);
        port->target_devid->len = msg->port.target_devid_len;
        i += msg->port.target_devid_len;
    } else {
        free(port->target_devid, M_CTL);
        port->target_devid = NULL;
    }
    if (msg->port.init_devid_len != 0) {
        if (port->init_devid == NULL ||
            port->init_devid->len != msg->port.init_devid_len) {
            free(port->init_devid, M_CTL);
            port->init_devid = malloc(sizeof(struct ctl_devid) +
                msg->port.init_devid_len, M_CTL, M_WAITOK);
        }
        memcpy(port->init_devid->data, &msg->port.data[i],
            msg->port.init_devid_len);
        port->init_devid->len = msg->port.init_devid_len;
        i += msg->port.init_devid_len;
    } else {
        free(port->init_devid, M_CTL);
        port->init_devid = NULL;
    }
    /* A freshly malloc'ed shadow port must be registered with CTL. */
    if (new) {
        if (ctl_port_register(port) != 0) {
            printf("%s: ctl_port_register() failed with error\n",
                __func__);
        }
    }
    /* Tell initiators about LUNs visible through this (updated) port. */
    mtx_lock(&softc->ctl_lock);
    STAILQ_FOREACH(lun, &softc->lun_list, links) {
        if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS)
            continue;
        mtx_lock(&lun->lun_lock);
        ctl_est_ua_all(lun, -1, CTL_UA_INQ_CHANGE);
        mtx_unlock(&lun->lun_lock);
    }
    mtx_unlock(&softc->ctl_lock);
}

/*
 * Import the peer's view of one initiator (WWPN/name/in_use) on an
 * HA-shadowed port.
 */
static void
ctl_isc_iid_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_port *port;
    int iid;

    port = softc->ctl_ports[msg->hdr.nexus.targ_port];
    if (port == NULL) {
        printf("%s: Received IID for unknown port %d\n",
            __func__, msg->hdr.nexus.targ_port);
        return;
    }
    /*
     * NOTE(review): initid is used to index wwpn_iid[] without a range
     * check against CTL_MAX_INIT_PER_PORT here -- presumably the peer
     * validated it; verify against the sender.
     */
    iid = msg->hdr.nexus.initid;
    port->wwpn_iid[iid].in_use = msg->iid.in_use;
    port->wwpn_iid[iid].wwpn = msg->iid.wwpn;
    free(port->wwpn_iid[iid].name, M_CTL);
    if (msg->iid.name_len) {
        port->wwpn_iid[iid].name = strndup(&msg->iid.data[0],
            msg->iid.name_len, M_CTL);
    } else
        port->wwpn_iid[iid].name = NULL;
}

static
void
/*
 * Validate an HA login from the peer: protocol version, HA mode, head ID
 * and compile-time limits must all be compatible, otherwise the HA channel
 * is aborted.
 */
ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{

    if (msg->login.version != CTL_HA_VERSION) {
        printf("CTL HA peers have different versions %d != %d\n",
            msg->login.version, CTL_HA_VERSION);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.ha_mode != softc->ha_mode) {
        printf("CTL HA peers have different ha_mode %d != %d\n",
            msg->login.ha_mode, softc->ha_mode);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.ha_id == softc->ha_id) {
        printf("CTL HA peers have same ha_id %d\n", msg->login.ha_id);
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
    if (msg->login.max_luns != CTL_MAX_LUNS ||
        msg->login.max_ports != CTL_MAX_PORTS ||
        msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
        printf("CTL HA peers have different limits\n");
        ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
        return;
    }
}

/*
 * Import a mode page update from the HA peer into the matching slot of the
 * LUN's mode page index, then raise MODE CHANGED UA for other initiators.
 */
static void
ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
{
    struct ctl_lun *lun;
    int i;
    uint32_t initidx, targ_lun;

    targ_lun = msg->hdr.nexus.targ_mapped_lun;
    mtx_lock(&softc->ctl_lock);
    if ((targ_lun >= CTL_MAX_LUNS) ||
        ((lun = softc->ctl_luns[targ_lun]) == NULL)) {
        mtx_unlock(&softc->ctl_lock);
        return;
    }
    mtx_lock(&lun->lun_lock);
    mtx_unlock(&softc->ctl_lock);
    if (lun->flags & CTL_LUN_DISABLED) {
        mtx_unlock(&lun->lun_lock);
        return;
    }
    /* Find the matching (page_code, subpage) entry; ignore if unknown. */
    for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {
        if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) ==
            msg->mode.page_code &&
            lun->mode_pages.index[i].subpage == msg->mode.subpage)
            break;
    }
    if (i == CTL_NUM_MODE_PAGES) {
        mtx_unlock(&lun->lun_lock);
        return;
    }
    memcpy(lun->mode_pages.index[i].page_data, msg->mode.data,
        lun->mode_pages.index[i].page_len);
    initidx = ctl_get_initindex(&msg->hdr.nexus);
    /* initidx is unsigned; -1 here compares against UINT32_MAX. */
    if (initidx != -1)
        ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
    mtx_unlock(&lun->lun_lock);
}

/*
 * ISC (Inter Shelf Communication) event handler.  Events from the HA
 * subsystem come in here.
*/
static void
ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param)
{
    struct ctl_softc *softc = control_softc;
    union ctl_io *io;
    struct ctl_prio *presio;
    ctl_ha_status isc_status;

    CTL_DEBUG_PRINT(("CTL: Isc Msg event %d\n", event));
    if (event == CTL_HA_EVT_MSG_RECV) {
        union ctl_ha_msg *msg, msgbuf;

        /* Use the stack buffer for typical messages, heap for large. */
        if (param > sizeof(msgbuf))
            msg = malloc(param, M_CTL, M_WAITOK);
        else
            msg = &msgbuf;
        isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, msg, param,
            M_WAITOK);
        if (isc_status != CTL_HA_STATUS_SUCCESS) {
            printf("%s: Error receiving message: %d\n",
                __func__, isc_status);
            if (msg != &msgbuf)
                free(msg, M_CTL);
            return;
        }

        CTL_DEBUG_PRINT(("CTL: msg_type %d\n", msg->msg_type));
        switch (msg->hdr.msg_type) {
        case CTL_MSG_SERIALIZE:
            /* Peer asks us to serialize/execute a SCSI command. */
            io = ctl_alloc_io(softc->othersc_pool);
            ctl_zero_io(io);
            // populate ctsio from msg
            io->io_hdr.io_type = CTL_IO_SCSI;
            io->io_hdr.msg_type = CTL_MSG_SERIALIZE;
            io->io_hdr.original_sc = msg->hdr.original_sc;
            io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC |
                CTL_FLAG_IO_ACTIVE;
            /*
             * If we're in serialization-only mode, we don't
             * want to go through full done processing.  Thus
             * the COPY flag.
             *
             * XXX KDM add another flag that is more specific.
             */
            if (softc->ha_mode != CTL_HA_MODE_XFER)
                io->io_hdr.flags |= CTL_FLAG_INT_COPY;
            io->io_hdr.nexus = msg->hdr.nexus;
#if 0
            printf("port %u, iid %u, lun %u\n",
                io->io_hdr.nexus.targ_port,
                io->io_hdr.nexus.initid,
                io->io_hdr.nexus.targ_lun);
#endif
            io->scsiio.tag_num = msg->scsi.tag_num;
            io->scsiio.tag_type = msg->scsi.tag_type;
#ifdef CTL_TIME_IO
            io->io_hdr.start_time = time_uptime;
            getbinuptime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
            io->scsiio.cdb_len = msg->scsi.cdb_len;
            memcpy(io->scsiio.cdb, msg->scsi.cdb,
                CTL_MAX_CDBLEN);
            if (softc->ha_mode == CTL_HA_MODE_XFER) {
                const struct ctl_cmd_entry *entry;

                entry = ctl_get_cmd_entry(&io->scsiio, NULL);
                io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
                io->io_hdr.flags |=
                    entry->flags & CTL_FLAG_DATA_MASK;
            }
            ctl_enqueue_isc(io);
            break;

        /* Performed on the Originating SC, XFER mode only */
        case CTL_MSG_DATAMOVE: {
            struct ctl_sg_entry *sgl;
            int i, j;

            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: original_sc == NULL!\n", __func__);
                /* XXX KDM do something here */
                break;
            }
            io->io_hdr.msg_type = CTL_MSG_DATAMOVE;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            /*
             * Keep track of this, we need to send it back over
             * when the datamove is complete.
             */
            io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
            if (msg->hdr.status == CTL_SUCCESS)
                io->io_hdr.status = msg->hdr.status;

            /* First fragment: allocate the combined S/G lists. */
            if (msg->dt.sg_sequence == 0) {
#ifdef CTL_TIME_IO
                getbinuptime(&io->io_hdr.dma_start_bt);
#endif
                i = msg->dt.kern_sg_entries +
                    msg->dt.kern_data_len /
                    CTL_HA_DATAMOVE_SEGMENT + 1;
                sgl = malloc(sizeof(*sgl) * i, M_CTL,
                    M_WAITOK | M_ZERO);
                io->io_hdr.remote_sglist = sgl;
                io->io_hdr.local_sglist =
                    &sgl[msg->dt.kern_sg_entries];

                io->scsiio.kern_data_ptr = (uint8_t *)sgl;
                io->scsiio.kern_sg_entries =
                    msg->dt.kern_sg_entries;
                io->scsiio.rem_sg_entries =
                    msg->dt.kern_sg_entries;
                io->scsiio.kern_data_len =
                    msg->dt.kern_data_len;
                io->scsiio.kern_total_len =
                    msg->dt.kern_total_len;
                io->scsiio.kern_data_resid =
                    msg->dt.kern_data_resid;
                io->scsiio.kern_rel_offset =
                    msg->dt.kern_rel_offset;
                io->io_hdr.flags &= ~CTL_FLAG_BUS_ADDR;
                io->io_hdr.flags |= msg->dt.flags &
                    CTL_FLAG_BUS_ADDR;
            } else
                sgl = (struct ctl_sg_entry *)
                    io->scsiio.kern_data_ptr;

            /* Append this fragment's S/G entries. */
            for (i = msg->dt.sent_sg_entries, j = 0;
                 i < (msg->dt.sent_sg_entries +
                 msg->dt.cur_sg_entries); i++, j++) {
                sgl[i].addr = msg->dt.sg_list[j].addr;
                sgl[i].len = msg->dt.sg_list[j].len;

#if 0
                printf("%s: DATAMOVE: %p,%lu j=%d, i=%d\n",
                    __func__, sgl[i].addr, sgl[i].len, j, i);
#endif
            }

            /*
             * If this is the last piece of the I/O, we've got
             * the full S/G list.  Queue processing in the thread.
             * Otherwise wait for the next piece.
             */
            if (msg->dt.sg_last != 0)
                ctl_enqueue_isc(io);
            break;
        }
        /* Performed on the Serializing (primary) SC, XFER mode only */
        case CTL_MSG_DATAMOVE_DONE: {
            if (msg->hdr.serializing_sc == NULL) {
                printf("%s: serializing_sc == NULL!\n",
                    __func__);
                /* XXX KDM now what? */
                break;
            }
            /*
             * We grab the sense information here in case
             * there was a failure, so we can return status
             * back to the initiator.
             */
            io = msg->hdr.serializing_sc;
            io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
            io->io_hdr.flags &= ~CTL_FLAG_DMA_INPROG;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            io->io_hdr.port_status = msg->scsi.fetd_status;
            io->scsiio.residual = msg->scsi.residual;
            if (msg->hdr.status != CTL_STATUS_NONE) {
                io->io_hdr.status = msg->hdr.status;
                io->scsiio.scsi_status = msg->scsi.scsi_status;
                io->scsiio.sense_len = msg->scsi.sense_len;
                io->scsiio.sense_residual =msg->scsi.sense_residual;
                memcpy(&io->scsiio.sense_data,
                    &msg->scsi.sense_data,
                    msg->scsi.sense_len);
                if (msg->hdr.status == CTL_SUCCESS)
                    io->io_hdr.flags |= CTL_FLAG_STATUS_SENT;
            }
            ctl_enqueue_isc(io);
            break;
        }

        /* Preformed on Originating SC, SER_ONLY mode */
        case CTL_MSG_R2R:
            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: original_sc == NULL!\n",
                    __func__);
                break;
            }
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;
            io->io_hdr.msg_type = CTL_MSG_R2R;
            io->io_hdr.serializing_sc = msg->hdr.serializing_sc;
            ctl_enqueue_isc(io);
            break;

        /*
         * Performed on Serializing(i.e. primary SC) SC in SER_ONLY
         * mode.
         * Performed on the Originating (i.e. secondary) SC in XFER
         * mode
         */
        case CTL_MSG_FINISH_IO:
            if (softc->ha_mode == CTL_HA_MODE_XFER)
                ctl_isc_handler_finish_xfer(softc, msg);
            else
                ctl_isc_handler_finish_ser_only(softc, msg);
            break;

        /* Preformed on Originating SC */
        case CTL_MSG_BAD_JUJU:
            io = msg->hdr.original_sc;
            if (io == NULL) {
                printf("%s: Bad JUJU!, original_sc is NULL!\n",
                    __func__);
                break;
            }
            ctl_copy_sense_data(msg, io);
            /*
             * IO should have already been cleaned up on other
             * SC so clear this flag so we won't send a message
             * back to finish the IO there.
             */
            io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
            io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE;

            /* io = msg->hdr.serializing_sc; */
            io->io_hdr.msg_type = CTL_MSG_BAD_JUJU;
            ctl_enqueue_isc(io);
            break;

        /* Handle resets sent from the other side */
        case CTL_MSG_MANAGE_TASKS: {
            struct ctl_taskio *taskio;
            taskio = (struct ctl_taskio *)ctl_alloc_io(
                softc->othersc_pool);
            ctl_zero_io((union ctl_io *)taskio);
            taskio->io_hdr.io_type = CTL_IO_TASK;
            taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
            taskio->io_hdr.nexus = msg->hdr.nexus;
            taskio->task_action = msg->task.task_action;
            taskio->tag_num = msg->task.tag_num;
            taskio->tag_type = msg->task.tag_type;
#ifdef CTL_TIME_IO
            taskio->io_hdr.start_time = time_uptime;
            getbinuptime(&taskio->io_hdr.start_bt);
#endif /* CTL_TIME_IO */
            ctl_run_task((union ctl_io *)taskio);
            break;
        }
        /* Persistent Reserve action which needs attention */
        case CTL_MSG_PERS_ACTION:
            presio = (struct ctl_prio *)ctl_alloc_io(
                softc->othersc_pool);
            ctl_zero_io((union ctl_io *)presio);
            presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION;
            presio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC;
            presio->io_hdr.nexus = msg->hdr.nexus;
            presio->pr_msg = msg->pr;
            ctl_enqueue_isc((union ctl_io *)presio);
            break;
        case CTL_MSG_UA:
            ctl_isc_ua(softc, msg, param);
            break;
        case CTL_MSG_PORT_SYNC:
            ctl_isc_port_sync(softc, msg, param);
            break;
        case CTL_MSG_LUN_SYNC:
            ctl_isc_lun_sync(softc, msg, param);
            break;
        case CTL_MSG_IID_SYNC:
            ctl_isc_iid_sync(softc, msg, param);
            break;
        case CTL_MSG_LOGIN:
            ctl_isc_login(softc, msg, param);
            break;
        case CTL_MSG_MODE_SYNC:
            ctl_isc_mode_sync(softc, msg, param);
            break;
        default:
            printf("Received HA message of unknown type %d\n",
                msg->hdr.msg_type);
            ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
            break;
        }
        if (msg != &msgbuf)
            free(msg, M_CTL);
    } else if (event == CTL_HA_EVT_LINK_CHANGE) {
        printf("CTL: HA link status changed from %d to %d\n",
            softc->ha_link, param);
        if (param == softc->ha_link)
            return;
        if (softc->ha_link == CTL_HA_LINK_ONLINE) {
            softc->ha_link = param;
            ctl_isc_ha_link_down(softc);
        } else {
            softc->ha_link = param;
            if (softc->ha_link == CTL_HA_LINK_ONLINE)
                ctl_isc_ha_link_up(softc);
        }
        return;
    } else {
        printf("ctl_isc_event_handler: Unknown event %d\n",
            event);
        return;
    }
}

/* Copy SCSI status/sense carried in an HA message into a local I/O. */
static void
ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest)
{

    memcpy(&dest->scsiio.sense_data, &src->scsi.sense_data,
        src->scsi.sense_len);
    dest->scsiio.scsi_status = src->scsi.scsi_status;
    dest->scsiio.sense_len = src->scsi.sense_len;
    dest->io_hdr.status = src->hdr.status;
}

/* Inverse of ctl_copy_sense_data(): local I/O -> outgoing HA message. */
static void
ctl_copy_sense_data_back(union ctl_io *src, union ctl_ha_msg *dest)
{

    memcpy(&dest->scsi.sense_data, &src->scsiio.sense_data,
        src->scsiio.sense_len);
    dest->scsi.scsi_status = src->scsiio.scsi_status;
    dest->scsi.sense_len = src->scsiio.sense_len;
    dest->hdr.status = src->io_hdr.status;
}

/*
 * Establish a unit attention for a single initiator index on this LUN.
 * Indices outside this head's range are silently ignored (the peer head
 * owns them).  Caller must hold the LUN lock.
 */
void
ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    ctl_ua_type *pu;

    if (initidx < softc->init_min || initidx >= softc->init_max)
        return;
    mtx_assert(&lun->lun_lock, MA_OWNED);
    /* pending_ua is a per-port array of per-initiator UA masks. */
    pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
    if (pu == NULL)
        return;
    pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua;
}

/*
 * Establish a unit attention for every initiator on one port, except the
 * (absolute) initiator index 'except'.  Caller must hold the LUN lock.
 */
void
ctl_est_ua_port(struct ctl_lun *lun, int port, uint32_t except, ctl_ua_type ua)
{
    int i;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    if (lun->pending_ua[port] == NULL)
        return;
    for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
        if (port * CTL_MAX_INIT_PER_PORT + i == except)
            continue;
        lun->pending_ua[port][i] |= ua;
    }
}

/*
 * Establish a unit attention for all initiators on all of this head's
 * ports, except initiator index 'except'.  Caller must hold the LUN lock.
 */
void
ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    int i;

    mtx_assert(&lun->lun_lock, MA_OWNED);
    for (i = softc->port_min; i < softc->port_max; i++)
        ctl_est_ua_port(lun, i, except, ua);
}

/* Clear a unit attention for one initiator index; see ctl_est_ua(). */
void
ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua)
{
    struct ctl_softc *softc = lun->ctl_softc;
    ctl_ua_type *pu;

    if (initidx < softc->init_min || initidx >= softc->init_max)
        return;
    mtx_assert(&lun->lun_lock, MA_OWNED);
    pu =
lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { struct ctl_softc *softc = lun->ctl_softc; int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = softc->port_min; i < softc->port_max; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } void ctl_clr_ua_allluns(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, ua_type); mtx_unlock(&lun->lun_lock); } } static int ctl_ha_role_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; struct ctl_lun_req ireq; int error, value; value = (softc->flags & CTL_FLAG_ACTIVE_SHELF) ? 
0 : 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_unlock(&softc->ctl_lock); bzero(&ireq, sizeof(ireq)); ireq.reqtype = CTL_LUNREQ_MODIFY; ireq.reqdata.modify.lun_id = lun->lun; lun->backend->ioctl(NULL, CTL_LUN_REQ, (caddr_t)&ireq, 0, curthread); if (ireq.status != CTL_LUN_OK) { printf("%s: CTL_LUNREQ_MODIFY returned %d '%s'\n", __func__, ireq.status, ireq.error_str); } mtx_lock(&softc->ctl_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { + struct make_dev_args args; struct ctl_softc *softc; void *other_pool; int i, error; softc = control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); - softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, - "cam/ctl"); - softc->dev->si_drv1 = softc; + make_dev_args_init(&args); + args.mda_devsw = &ctl_cdevsw; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = softc; + error = make_dev_s(&args, &softc->dev, "cam/ctl"); + if (error != 0) { + free(control_softc, M_DEVBUF); + return (error); + } sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(control_softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->flags = 0; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0, "HA mode (0 
- act/stby, 1 - serialize only, 2 - xfer)"); /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_cnt = CTL_MAX_PORTS; softc->port_min = 0; } else { softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES; softc->port_min = (softc->ha_id - 1) * softc->port_cnt; } softc->port_max = softc->port_min + softc->port_cnt; softc->init_min = softc->port_min * CTL_MAX_INIT_PER_PORT; softc->init_max = softc->port_max * CTL_MAX_INIT_PER_PORT; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_link", CTLFLAG_RD, (int *)&softc->ha_link, 0, "HA link state (0 - offline, 1 - unknown, 2 - online)"); STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->pending_lun_queue); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &other_pool) != 0) { printf("ctl: can't allocate %d entry other SC pool, " "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); return (ENOMEM); } softc->othersc_pool = other_pool; if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if 
(error != 0) { printf("error creating CTL work thread!\n"); ctl_pool_free(other_pool); return (error); } } error = kproc_kthread_add(ctl_lun_thread, softc, &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); if (error != 0) { printf("error creating CTL lun thread!\n"); ctl_pool_free(other_pool); return (error); } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); ctl_pool_free(other_pool); return (error); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_role", CTLTYPE_INT | CTLFLAG_RWTUN, softc, 0, ctl_ha_role_sysctl, "I", "HA role for this head"); if (softc->is_single == 0) { ctl_frontend_register(&ha_frontend); if (ctl_ha_msg_init(softc) != CTL_HA_STATUS_SUCCESS) { printf("ctl_init: ctl_ha_msg_init failed.\n"); softc->is_single = 1; } else if (ctl_ha_msg_register(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { printf("ctl_init: ctl_ha_msg_register failed.\n"); softc->is_single = 1; } } return (0); } void ctl_shutdown(void) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun, *next_lun; if (softc->is_single == 0) { ctl_ha_msg_shutdown(softc); if (ctl_ha_msg_deregister(CTL_HA_CHAN_CTL) != CTL_HA_STATUS_SUCCESS) printf("%s: ctl_ha_msg_deregister failed.\n", __func__); if (ctl_ha_msg_destroy(softc) != CTL_HA_STATUS_SUCCESS) printf("%s: ctl_ha_msg_destroy failed.\n", __func__); ctl_frontend_deregister(&ha_frontend); } mtx_lock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) ctl_free_lun(lun); mtx_unlock(&softc->ctl_lock); #if 0 ctl_shutdown_thread(softc->work_thread); mtx_destroy(&softc->queue_lock); #endif ctl_tpc_shutdown(softc); uma_zdestroy(softc->io_zone); mtx_destroy(&softc->ctl_lock); destroy_dev(softc->dev); sysctl_ctx_free(&softc->sysctl_ctx); free(control_softc, M_DEVBUF); control_softc = NULL; } static int ctl_module_event_handler(module_t mod, int 
what, void *arg) { switch (what) { case MOD_LOAD: return (ctl_init()); case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } } /* * XXX KDM should we do some access checks here? Bump a reference count to * prevent a CTL module from being unloaded while someone has it open? */ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } /* * Remove an initiator by port number and initiator ID. * Returns 0 for success, -1 for failure. */ int ctl_remove_initiator(struct ctl_port *port, int iid) { struct ctl_softc *softc = port->ctl_softc; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); if (iid > CTL_MAX_INIT_PER_PORT) { printf("%s: initiator ID %u > maximun %u!\n", __func__, iid, CTL_MAX_INIT_PER_PORT); return (-1); } mtx_lock(&softc->ctl_lock); port->wwpn_iid[iid].in_use--; port->wwpn_iid[iid].last_use = time_uptime; mtx_unlock(&softc->ctl_lock); ctl_isc_announce_iid(port, iid); return (0); } /* * Add an initiator to the initiator map. * Returns iid for success, < 0 for failure. 
*/
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
    struct ctl_softc *softc = port->ctl_softc;
    time_t best_time;
    int i, best;

    mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

    /* Note: this function takes ownership of 'name' on every path. */
    if (iid >= CTL_MAX_INIT_PER_PORT) {
        printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
            __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
        free(name, M_CTL);
        return (-1);
    }

    mtx_lock(&softc->ctl_lock);

    /* iid < 0: look for an existing slot with the same WWPN or name. */
    if (iid < 0 && (wwpn != 0 || name != NULL)) {
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
                iid = i;
                break;
            }
            if (name != NULL && port->wwpn_iid[i].name != NULL &&
                strcmp(name, port->wwpn_iid[i].name) == 0) {
                iid = i;
                break;
            }
        }
    }

    /* Still unassigned: find a completely empty slot. */
    if (iid < 0) {
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (port->wwpn_iid[i].in_use == 0 &&
                port->wwpn_iid[i].wwpn == 0 &&
                port->wwpn_iid[i].name == NULL) {
                iid = i;
                break;
            }
        }
    }

    /* Last resort: reuse the least-recently-used free slot. */
    if (iid < 0) {
        best = -1;
        best_time = INT32_MAX;
        for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
            if (port->wwpn_iid[i].in_use == 0) {
                if (port->wwpn_iid[i].last_use < best_time) {
                    best = i;
                    best_time = port->wwpn_iid[i].last_use;
                }
            }
        }
        iid = best;
    }

    if (iid < 0) {
        mtx_unlock(&softc->ctl_lock);
        free(name, M_CTL);
        return (-2);
    }

    if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
        /*
         * This is not an error yet.
         */
        if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) {
#if 0
            printf("%s: port %d iid %u WWPN %#jx arrived"
                " again\n", __func__, port->targ_port,
                iid, (uintmax_t)wwpn);
#endif
            goto take;
        }
        if (name != NULL && port->wwpn_iid[iid].name != NULL &&
            strcmp(name, port->wwpn_iid[iid].name) == 0) {
#if 0
            printf("%s: port %d iid %u name '%s' arrived"
                " again\n", __func__, port->targ_port,
                iid, name);
#endif
            goto take;
        }

        /*
         * This is an error, but what do we do about it?  The
         * driver is telling us we have a new WWPN for this
         * initiator ID, so we pretty much need to use it.
         */
        printf("%s: port %d iid %u WWPN %#jx '%s' arrived,"
            " but WWPN %#jx '%s' is still at that address\n",
            __func__, port->targ_port, iid, wwpn, name,
            (uintmax_t)port->wwpn_iid[iid].wwpn,
            port->wwpn_iid[iid].name);

        /*
         * XXX KDM clear have_ca and ua_pending on each LUN for
         * this initiator.
         */
    }
take:
    free(port->wwpn_iid[iid].name, M_CTL);
    port->wwpn_iid[iid].name = name;
    port->wwpn_iid[iid].wwpn = wwpn;
    port->wwpn_iid[iid].in_use++;
    mtx_unlock(&softc->ctl_lock);
    ctl_isc_announce_iid(port, iid);

    return (iid);
}

/*
 * Build a SCSI TransportID (SPC-4) for the given initiator into 'buf',
 * formatted per the port's transport type.  Returns the number of bytes
 * written, or 0 if no identity is known for the initiator.
 */
static int
ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf)
{
    int len;

    switch (port->port_type) {
    case CTL_PORT_FC:
    {
        struct scsi_transportid_fcp *id =
            (struct scsi_transportid_fcp *)buf;
        if (port->wwpn_iid[iid].wwpn == 0)
            return (0);
        memset(id, 0, sizeof(*id));
        id->format_protocol = SCSI_PROTO_FC;
        scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name);
        return (sizeof(*id));
    }
    case CTL_PORT_ISCSI:
    {
        struct scsi_transportid_iscsi_port *id =
            (struct scsi_transportid_iscsi_port *)buf;
        if (port->wwpn_iid[iid].name == NULL)
            return (0);
        memset(id, 0, 256);
        id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT |
            SCSI_PROTO_ISCSI;
        /* Name length includes the NUL, padded to a 4-byte multiple. */
        len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1;
        len = roundup2(min(len, 252), 4);
        scsi_ulto2b(len, id->additional_length);
        return (sizeof(*id) + len);
    }
    case CTL_PORT_SAS:
    {
        struct scsi_transportid_sas *id =
            (struct scsi_transportid_sas *)buf;
        if (port->wwpn_iid[iid].wwpn == 0)
            return (0);
        memset(id, 0, sizeof(*id));
        id->format_protocol = SCSI_PROTO_SAS;
        scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address);
        return (sizeof(*id));
    }
    default:
    {
        struct scsi_transportid_spi *id =
            (struct scsi_transportid_spi *)buf;
        memset(id, 0, sizeof(*id));
        id->format_protocol = SCSI_PROTO_SPI;
        scsi_ulto2b(iid, id->scsi_addr);
        scsi_ulto2b(port->targ_port, id->rel_trgt_port_id);
        return (sizeof(*id));
    }
    }
}

/*
 * Serialize a command that went down the "wrong" side, and so was sent to
The logic is a little different than the * standard case in ctl_scsiio_precheck(). Errors in this case need to get * sent back to the other side, but in the success case, we execute the * command on this side (XFER mode) or tell the other side to execute it * (SER_ONLY mode). */ static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = control_softc; union ctl_ha_msg msg_info; struct ctl_port *port; struct ctl_lun *lun; const struct ctl_cmd_entry *entry; int retval = 0; uint32_t targ_lun; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); /* Make sure that we know about this port. */ port = ctl_io_port(&ctsio->io_hdr); if (port == NULL || (port->status & CTL_PORT_STATUS_ONLINE) == 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 1); goto badjuju; } /* Make sure that we know about this LUN. */ if ((targ_lun < CTL_MAX_LUNS) && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; } } else { mtx_unlock(&softc->ctl_lock); lun = NULL; } if (lun == NULL) { /* * The other node would not send this request to us unless * received announce that we are primary node for this LUN. * If this LUN does not exist now, it is probably result of * a race, so respond to initiator in the most opaque way. */ ctl_set_busy(ctsio); goto badjuju; } entry = ctl_get_cmd_entry(ctsio, NULL); if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); goto badjuju; } ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun; /* * Every I/O goes into the OOA queue for a * particular LUN, and stays there until completion. 
*/ #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->idle_time += getsbinuptime() - lun->last_busy; #endif TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); mtx_unlock(&lun->lun_lock); } else { ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); /* send msg back to other side */ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; msg_info.hdr.msg_type = CTL_MSG_R2R; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.hdr), M_WAITOK); } break; case CTL_ACTION_OVERLAP: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); goto badjuju; case CTL_ACTION_OVERLAP_TAG: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num); goto badjuju; case CTL_ACTION_ERROR: default: TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); badjuju: ctl_copy_sense_data_back((union ctl_io *)ctsio, &msg_info); msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi), M_WAITOK); retval = 1; break; } return (retval); } /* * Returns 0 for success, errno for failure. 
*/
/*
 * Fill kern_entries[] with a snapshot of the commands outstanding (OOA =
 * Order Of Arrival) on one LUN; *cur_fill_num counts every command seen,
 * even those beyond ooa_hdr->alloc_num that don't fit.
 */
static void
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
    struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
    union ctl_io *io;

    mtx_lock(&lun->lun_lock);
    for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
        (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
        ooa_links)) {
        struct ctl_ooa_entry *entry;

        /*
         * If we've got more than we can fit, just count the
         * remaining entries.
         */
        if (*cur_fill_num >= ooa_hdr->alloc_num)
            continue;

        entry = &kern_entries[*cur_fill_num];

        /*
         * NOTE(review): cmd_flags is only OR-ed into, never cleared
         * here -- presumably kern_entries is zero-allocated by the
         * caller; verify.
         */
        entry->tag_num = io->scsiio.tag_num;
        entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
        entry->start_bt = io->io_hdr.start_bt;
#endif
        bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
        entry->cdb_len = io->scsiio.cdb_len;
        if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
            entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

        if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
            entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

        if (io->io_hdr.flags & CTL_FLAG_ABORT)
            entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

        if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
            entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

        if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
            entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
    }
    mtx_unlock(&lun->lun_lock);
}

/*
 * Allocate a kernel buffer of 'len' bytes and copy in the user data.
 * Returns NULL (with error_str filled in) if the copyin fails; the caller
 * owns the returned buffer and must free it with M_CTL.
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
         size_t error_str_len)
{
    void *kptr;

    kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

    if (copyin(user_addr, kptr, len) != 0) {
        snprintf(error_str, error_str_len, "Error copying %d bytes "
             "from user address %p to kernel address %p", len,
             user_addr, kptr);
        free(kptr, M_CTL);
        return (NULL);
    }

    return (kptr);
}

/* Free an argument array built by ctl_copyin_args(), including kname/kvalue. */
static void
ctl_free_args(int num_args, struct ctl_be_arg *args)
{
    int i;

    if (args == NULL)
        return;

    for (i = 0; i < num_args; i++) {
        free(args[i].kname, M_CTL);
        free(args[i].kvalue, M_CTL);
    }

    free(args, M_CTL);
}

/*
 * Copy in an array of backend arguments from userland, including each
 * argument's name and (for CTL_BEARG_RD) its value; ASCII names/values
 * must be NUL-terminated.  Returns NULL and fills error_str on failure.
 */
static struct ctl_be_arg *
ctl_copyin_args(int num_args, struct ctl_be_arg *uargs,
        char *error_str, size_t error_str_len)
{
    struct ctl_be_arg *args;
    int i;

    args = ctl_copyin_alloc(uargs, num_args * sizeof(*args),
    error_str, error_str_len);

    if (args == NULL)
        goto bailout;

    /* Clear kernel pointers first so a partial bailout frees safely. */
    for (i = 0; i < num_args; i++) {
        args[i].kname = NULL;
        args[i].kvalue = NULL;
    }

    for (i = 0; i < num_args; i++) {
        uint8_t *tmpptr;

        args[i].kname = ctl_copyin_alloc(args[i].name,
            args[i].namelen, error_str, error_str_len);
        if (args[i].kname == NULL)
            goto bailout;

        if (args[i].kname[args[i].namelen - 1] != '\0') {
            snprintf(error_str, error_str_len, "Argument %d "
                "name is not NUL-terminated", i);
            goto bailout;
        }

        if (args[i].flags & CTL_BEARG_RD) {
            tmpptr = ctl_copyin_alloc(args[i].value,
                args[i].vallen, error_str, error_str_len);
            if (tmpptr == NULL)
                goto bailout;
            if ((args[i].flags & CTL_BEARG_ASCII)
             && (tmpptr[args[i].vallen - 1] != '\0')) {
                snprintf(error_str, error_str_len, "Argument "
                    "%d value is not NUL-terminated", i);
                goto bailout;
            }
            args[i].kvalue = tmpptr;
        } else {
            /* Write-only argument: backend fills the buffer. */
            args[i].kvalue = malloc(args[i].vallen,
                M_CTL, M_WAITOK | M_ZERO);
        }
    }

    return (args);

bailout:

    ctl_free_args(num_args, args);

    return (NULL);
}

/* Copy writable (CTL_BEARG_WR) argument values back out to userland. */
static void
ctl_copyout_args(int num_args, struct ctl_be_arg *args)
{
    int i;

    for (i = 0; i < num_args; i++) {
        if (args[i].flags & CTL_BEARG_WR)
            copyout(args[i].kvalue, args[i].value, args[i].vallen);
    }
}

/*
 * Escape characters that are illegal or not recommended in XML.
*/ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) { char *end = str + size; int retval; retval = 0; for (; *str && str < end; str++) { switch (*str) { case '&': retval = sbuf_printf(sb, "&"); break; case '>': retval = sbuf_printf(sb, ">"); break; case '<': retval = sbuf_printf(sb, "<"); break; default: retval = sbuf_putc(sb, *str); break; } if (retval != 0) break; } return (retval); } static void ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) { struct scsi_vpd_id_descriptor *desc; int i; if (id == NULL || id->len < 4) return; desc = (struct scsi_vpd_id_descriptor *)id->data; switch (desc->id_type & SVPD_ID_TYPE_MASK) { case SVPD_ID_TYPE_T10: sbuf_printf(sb, "t10."); break; case SVPD_ID_TYPE_EUI64: sbuf_printf(sb, "eui."); break; case SVPD_ID_TYPE_NAA: sbuf_printf(sb, "naa."); break; case SVPD_ID_TYPE_SCSI_NAME: break; } switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { case SVPD_ID_CODESET_BINARY: for (i = 0; i < desc->length; i++) sbuf_printf(sb, "%02x", desc->identifier[i]); break; case SVPD_ID_CODESET_ASCII: sbuf_printf(sb, "%.*s", (int)desc->length, (char *)desc->identifier); break; case SVPD_ID_CODESET_UTF8: sbuf_printf(sb, "%s", (char *)desc->identifier); break; } } static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_softc *softc = dev->si_drv1; struct ctl_lun *lun; int retval; retval = 0; switch (cmd) { case CTL_IO: retval = ctl_ioctl_io(dev, cmd, addr, flag, td); break; case CTL_ENABLE_PORT: case CTL_DISABLE_PORT: case CTL_SET_PORT_WWNS: { struct ctl_port *port; struct ctl_port_entry *entry; entry = (struct ctl_port_entry *)addr; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { int action, done; if (port->targ_port < softc->port_min || port->targ_port >= softc->port_max) continue; action = 0; done = 0; if ((entry->port_type == CTL_PORT_NONE) && (entry->targ_port == port->targ_port)) { /* * If the user only wants to enable or * disable or set WWNs on a 
specific port,
				 * do the operation and we're done.
				 */
				action = 1;
				done = 1;
			} else if (entry->port_type & port->port_type) {
				/*
				 * Compare the user's type mask with the
				 * particular frontend type to see if we
				 * have a match.
				 */
				action = 1;
				done = 0;

				/*
				 * Make sure the user isn't trying to set
				 * WWNs on multiple ports at the same time.
				 */
				if (cmd == CTL_SET_PORT_WWNS) {
					printf("%s: Can't set WWNs on "
					       "multiple ports\n", __func__);
					retval = EINVAL;
					break;
				}
			}
			if (action == 0)
				continue;

			/*
			 * XXX KDM we have to drop the lock here, because
			 * the online/offline operations can potentially
			 * block.  We need to reference count the frontends
			 * so they can't go away,
			 */
			if (cmd == CTL_ENABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_online(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_DISABLE_PORT) {
				mtx_unlock(&softc->ctl_lock);
				ctl_port_offline(port);
				mtx_lock(&softc->ctl_lock);
			} else if (cmd == CTL_SET_PORT_WWNS) {
				ctl_port_set_wwns(port,
				    (entry->flags & CTL_PORT_WWNN_VALID) ?
				    1 : 0, entry->wwnn,
				    (entry->flags & CTL_PORT_WWPN_VALID) ?
				    1 : 0, entry->wwpn);
			}
			if (done != 0)
				break;
		}
		mtx_unlock(&softc->ctl_lock);
		break;
	}
	/* Dump the outstanding (order-of-arrival) commands for one/all LUNs. */
	case CTL_GET_OOA: {
		struct ctl_ooa *ooa_hdr;
		struct ctl_ooa_entry *entries;
		uint32_t cur_fill_num;

		ooa_hdr = (struct ctl_ooa *)addr;

		/* Validate the user-supplied buffer geometry first. */
		if ((ooa_hdr->alloc_len == 0)
		 || (ooa_hdr->alloc_num == 0)) {
			printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u "
			       "must be non-zero\n", __func__,
			       ooa_hdr->alloc_len, ooa_hdr->alloc_num);
			retval = EINVAL;
			break;
		}

		if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num *
		    sizeof(struct ctl_ooa_entry))) {
			printf("%s: CTL_GET_OOA: alloc len %u must be alloc "
			       "num %d * sizeof(struct ctl_ooa_entry) %zd\n",
			       __func__, ooa_hdr->alloc_len,
			       ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry));
			retval = EINVAL;
			break;
		}

		entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO);
		if (entries == NULL) {
			printf("%s: could not allocate %d bytes for OOA "
			       "dump\n", __func__, ooa_hdr->alloc_len);
			retval = ENOMEM;
			break;
		}

		mtx_lock(&softc->ctl_lock);
		if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0)
		 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS)
		  || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) {
			mtx_unlock(&softc->ctl_lock);
			free(entries, M_CTL);
			printf("%s: CTL_GET_OOA: invalid LUN %ju\n",
			       __func__, (uintmax_t)ooa_hdr->lun_num);
			retval = EINVAL;
			break;
		}

		cur_fill_num = 0;

		if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) {
			STAILQ_FOREACH(lun, &softc->lun_list, links) {
				ctl_ioctl_fill_ooa(lun, &cur_fill_num,
				    ooa_hdr, entries);
			}
		} else {
			lun = softc->ctl_luns[ooa_hdr->lun_num];
			ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr,
			    entries);
		}
		mtx_unlock(&softc->ctl_lock);

		ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num);
		ooa_hdr->fill_len = ooa_hdr->fill_num *
			sizeof(struct ctl_ooa_entry);
		retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len);
		if (retval != 0) {
			printf("%s: error copying out %d bytes for OOA dump\n",
			       __func__, ooa_hdr->fill_len);
		}

		getbinuptime(&ooa_hdr->cur_bt);

		if (cur_fill_num > ooa_hdr->alloc_num) {
			ooa_hdr->dropped_num = cur_fill_num
			    -ooa_hdr->alloc_num;
			ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE;
		} else {
			ooa_hdr->dropped_num = 0;
			ooa_hdr->status = CTL_OOA_OK;
		}

		free(entries, M_CTL);
		break;
	}
	/* Configure artificial I/O delays (only with CTL_IO_DELAY kernels). */
	case CTL_DELAY_IO: {
		struct ctl_io_delay_info *delay_info;

		delay_info = (struct ctl_io_delay_info *)addr;

#ifdef CTL_IO_DELAY
		mtx_lock(&softc->ctl_lock);

		if ((delay_info->lun_id >= CTL_MAX_LUNS)
		 || (softc->ctl_luns[delay_info->lun_id] == NULL)) {
			delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
		} else {
			lun = softc->ctl_luns[delay_info->lun_id];
			mtx_lock(&lun->lun_lock);

			delay_info->status = CTL_DELAY_STATUS_OK;

			switch (delay_info->delay_type) {
			case CTL_DELAY_TYPE_CONT:
				break;
			case CTL_DELAY_TYPE_ONESHOT:
				break;
			default:
				delay_info->status =
					CTL_DELAY_STATUS_INVALID_TYPE;
				break;
			}

			switch (delay_info->delay_loc) {
			case CTL_DELAY_LOC_DATAMOVE:
				lun->delay_info.datamove_type =
					delay_info->delay_type;
				lun->delay_info.datamove_delay =
					delay_info->delay_secs;
				break;
			case CTL_DELAY_LOC_DONE:
				lun->delay_info.done_type =
					delay_info->delay_type;
				lun->delay_info.done_delay =
					delay_info->delay_secs;
				break;
			default:
				delay_info->status =
					CTL_DELAY_STATUS_INVALID_LOC;
				break;
			}
			mtx_unlock(&lun->lun_lock);
		}

		mtx_unlock(&softc->ctl_lock);
#else
		delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED;
#endif /* CTL_IO_DELAY */
		break;
	}
	/* Copy out per-LUN I/O statistics. */
	case CTL_GETSTATS: {
		struct ctl_stats *stats;
		int i;

		stats = (struct ctl_stats *)addr;

		if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) >
		     stats->alloc_len) {
			stats->status = CTL_SS_NEED_MORE_SPACE;
			stats->num_luns = softc->num_luns;
			break;
		}
		/*
		 * XXX KDM no locking here.  If the LUN list changes,
		 * things can blow up.
		 */
		i = 0;
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			retval = copyout(&lun->stats, &stats->lun_stats[i++],
					 sizeof(lun->stats));
			if (retval != 0)
				break;
		}
		stats->num_luns = softc->num_luns;
		stats->fill_len = sizeof(struct ctl_lun_io_stats) *
			softc->num_luns;
		stats->status = CTL_SS_OK;
#ifdef CTL_TIME_IO
		stats->flags = CTL_STATS_FLAG_TIME_VALID;
#else
		stats->flags = CTL_STATS_FLAG_NONE;
#endif
		getnanouptime(&stats->timestamp);
		break;
	}
	/* Install an error-injection descriptor on a LUN's error list. */
	case CTL_ERROR_INJECT: {
		struct ctl_error_desc *err_desc, *new_err_desc;

		err_desc = (struct ctl_error_desc *)addr;

		new_err_desc = malloc(sizeof(*new_err_desc), M_CTL,
				      M_WAITOK | M_ZERO);
		bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));

		mtx_lock(&softc->ctl_lock);
		lun = softc->ctl_luns[err_desc->lun_id];
		if (lun == NULL) {
			mtx_unlock(&softc->ctl_lock);
			free(new_err_desc, M_CTL);
			printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n",
			       __func__, (uintmax_t)err_desc->lun_id);
			retval = EINVAL;
			break;
		}
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);

		/*
		 * We could do some checking here to verify the validity
		 * of the request, but given the complexity of error
		 * injection requests, the checking logic would be fairly
		 * complex.
		 *
		 * For now, if the request is invalid, it just won't get
		 * executed and might get deleted.
		 */
		STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links);

		/*
		 * XXX KDM check to make sure the serial number is unique,
		 * in case we somehow manage to wrap.  That shouldn't
		 * happen for a very long time, but it's the right thing to
		 * do.
		 */
		new_err_desc->serial = lun->error_serial;
		err_desc->serial = lun->error_serial;
		lun->error_serial++;

		mtx_unlock(&lun->lun_lock);
		break;
	}
	/* Remove a previously installed error-injection descriptor. */
	case CTL_ERROR_INJECT_DELETE: {
		struct ctl_error_desc *delete_desc, *desc, *desc2;
		int delete_done;

		delete_desc = (struct ctl_error_desc *)addr;
		delete_done = 0;

		mtx_lock(&softc->ctl_lock);
		lun = softc->ctl_luns[delete_desc->lun_id];
		if (lun == NULL) {
			mtx_unlock(&softc->ctl_lock);
			printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
			       __func__, (uintmax_t)delete_desc->lun_id);
			retval = EINVAL;
			break;
		}
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
			if (desc->serial != delete_desc->serial)
				continue;

			STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc,
				      links);
			free(desc, M_CTL);
			delete_done = 1;
		}
		mtx_unlock(&lun->lun_lock);
		if (delete_done == 0) {
			printf("%s: CTL_ERROR_INJECT_DELETE: can't find "
			       "error serial %ju on LUN %u\n", __func__,
			       delete_desc->serial, delete_desc->lun_id);
			retval = EINVAL;
			break;
		}
		break;
	}
	/* Dump persistent reservation, port and frontend state to console. */
	case CTL_DUMP_STRUCTS: {
		int i, j, k;
		struct ctl_port *port;
		struct ctl_frontend *fe;

		mtx_lock(&softc->ctl_lock);
		printf("CTL Persistent Reservation information start:\n");
		for (i = 0; i < CTL_MAX_LUNS; i++) {
			lun = softc->ctl_luns[i];

			if ((lun == NULL)
			 || ((lun->flags & CTL_LUN_DISABLED) != 0))
				continue;

			for (j = 0; j < CTL_MAX_PORTS; j++) {
				if (lun->pr_keys[j] == NULL)
					continue;
				for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
					if (lun->pr_keys[j][k] == 0)
						continue;
					printf(" LUN %d port %d iid %d key "
					       "%#jx\n", i, j, k,
					       (uintmax_t)lun->pr_keys[j][k]);
				}
			}
		}
		printf("CTL Persistent Reservation information end\n");
		printf("CTL Ports:\n");
		STAILQ_FOREACH(port, &softc->port_list, links) {
			printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN "
			       "%#jx WWPN %#jx\n", port->targ_port,
			       port->port_name, port->frontend->name,
			       port->port_type, port->physical_port,
			       port->virtual_port, (uintmax_t)port->wwnn,
			       (uintmax_t)port->wwpn);
			for (j = 0; j <
CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 && port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL) continue; printf(" iid %u use %d WWPN %#jx '%s'\n", j, port->wwpn_iid[j].in_use, (uintmax_t)port->wwpn_iid[j].wwpn, port->wwpn_iid[j].name); } } printf("CTL Port information end\n"); mtx_unlock(&softc->ctl_lock); /* * XXX KDM calling this without a lock. We'd likely want * to drop the lock before calling the frontend's dump * routine anyway. */ printf("CTL Frontends:\n"); STAILQ_FOREACH(fe, &softc->fe_list, links) { printf(" Frontend '%s'\n", fe->name); if (fe->fe_dump != NULL) fe->fe_dump(); } printf("CTL Frontend information end\n"); break; } case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; struct ctl_backend_driver *backend; lun_req = (struct ctl_lun_req *)addr; backend = ctl_backend_find(lun_req->backend); if (backend == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Backend \"%s\" not found.", lun_req->backend); break; } if (lun_req->num_be_args > 0) { lun_req->kern_be_args = ctl_copyin_args( lun_req->num_be_args, lun_req->be_args, lun_req->error_str, sizeof(lun_req->error_str)); if (lun_req->kern_be_args == NULL) { lun_req->status = CTL_LUN_ERROR; break; } } retval = backend->ioctl(dev, cmd, addr, flag, td); if (lun_req->num_be_args > 0) { ctl_copyout_args(lun_req->num_be_args, lun_req->kern_be_args); ctl_free_args(lun_req->num_be_args, lun_req->kern_be_args); } break; } case CTL_LUN_LIST: { struct sbuf *sb; struct ctl_lun_list *list; struct ctl_option *opt; list = (struct ctl_lun_list *)addr; /* * Allocate a fixed length sbuf here, based on the length * of the user's buffer. We could allocate an auto-extending * buffer, and then tell the user how much larger our * amount of data is than his buffer, but that presents * some problems: * * 1. The sbuf(9) routines use a blocking malloc, and so * we can't hold a lock while calling them with an * auto-extending buffer. * * 2. 
There is not currently a LUN reference counting * mechanism, outside of outstanding transactions on * the LUN's OOA queue. So a LUN could go away on us * while we're getting the LUN number, backend-specific * information, etc. Thus, given the way things * currently work, we need to hold the CTL lock while * grabbing LUN information. * * So, from the user's standpoint, the best thing to do is * allocate what he thinks is a reasonable buffer length, * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, * double the buffer length and try again. (And repeat * that until he succeeds.) */ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); retval = sbuf_printf(sb, "\n", (uintmax_t)lun->lun); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", (lun->backend == NULL) ? "none" : lun->backend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", lun->be_lun->lun_type); if (retval != 0) break; if (lun->backend == NULL) { retval = sbuf_printf(sb, "\n"); if (retval != 0) break; continue; } retval = sbuf_printf(sb, "\t%ju\n", (lun->be_lun->maxlba > 0) ? 
lun->be_lun->maxlba + 1 : 0); if (retval != 0) break; retval = sbuf_printf(sb, "\t%u\n", lun->be_lun->blocksize); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->serial_num, sizeof(lun->be_lun->serial_num)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->device_id, sizeof(lun->be_lun->device_id)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; if (lun->backend->lun_info != NULL) { retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); if (retval != 0) break; } STAILQ_FOREACH(opt, &lun->be_lun->options, links) { retval = sbuf_printf(sb, "\t<%s>%s\n", opt->name, opt->value, opt->name); if (retval != 0) break; } retval = sbuf_printf(sb, "\n"); if (retval != 0) break; mtx_unlock(&lun->lun_lock); } if (lun != NULL) mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_ISCSI: { struct ctl_iscsi *ci; struct ctl_frontend *fe; ci = (struct ctl_iscsi *)addr; fe = ctl_frontend_find("iscsi"); if (fe == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Frontend \"iscsi\" not found."); break; } retval = fe->ioctl(dev, cmd, addr, flag, td); break; } case CTL_PORT_REQ: { struct ctl_req *req; struct ctl_frontend *fe; req = (struct ctl_req *)addr; fe = ctl_frontend_find(req->driver); if (fe == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, 
sizeof(req->error_str), "Frontend \"%s\" not found.", req->driver); break; } if (req->num_args > 0) { req->kern_args = ctl_copyin_args(req->num_args, req->args, req->error_str, sizeof(req->error_str)); if (req->kern_args == NULL) { req->status = CTL_LUN_ERROR; break; } } if (fe->ioctl) retval = fe->ioctl(dev, cmd, addr, flag, td); else retval = ENODEV; if (req->num_args > 0) { ctl_copyout_args(req->num_args, req->kern_args); ctl_free_args(req->num_args, req->kern_args); } break; } case CTL_PORT_LIST: { struct sbuf *sb; struct ctl_port *port; struct ctl_lun_list *list; struct ctl_option *opt; int j; uint32_t plun; list = (struct ctl_lun_list *)addr; sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { retval = sbuf_printf(sb, "\n", (uintmax_t)port->targ_port); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", port->frontend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->port_type); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", port->port_name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->physical_port); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->virtual_port); if (retval != 0) break; if (port->target_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->target_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->port_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_info != NULL) { retval = port->port_info(port->onoff_arg, sb); if (retval != 0) break; } STAILQ_FOREACH(opt, &port->options, links) { retval = sbuf_printf(sb, "\t<%s>%s\n", opt->name, opt->value, opt->name); if (retval != 0) break; } if (port->lun_map != NULL) { sbuf_printf(sb, "\ton\n"); for (j = 0; j < CTL_MAX_LUNS; j++) { plun = ctl_lun_map_from_port(port, j); if (plun >= CTL_MAX_LUNS) continue; sbuf_printf(sb, "\t%u\n", j, plun); } } for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 || (port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL)) continue; if (port->wwpn_iid[j].name != NULL) retval = sbuf_printf(sb, "\t%s\n", j, port->wwpn_iid[j].name); else retval = sbuf_printf(sb, "\tnaa.%08jx\n", j, port->wwpn_iid[j].wwpn); if (retval != 0) break; } if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; } mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_LUN_MAP: { struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; struct ctl_port *port; mtx_lock(&softc->ctl_lock); if 
(lm->port < softc->port_min || lm->port >= softc->port_max || (port = softc->ctl_ports[lm->port]) == NULL) { mtx_unlock(&softc->ctl_lock); return (ENXIO); } if (port->status & CTL_PORT_STATUS_ONLINE) { STAILQ_FOREACH(lun, &softc->lun_list, links) { if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; mtx_lock(&lun->lun_lock); ctl_est_ua_port(lun, lm->port, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); // XXX: port_enable sleeps if (lm->plun < CTL_MAX_LUNS) { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_unset(port, lm->plun); else if (lm->lun < CTL_MAX_LUNS && softc->ctl_luns[lm->lun] != NULL) retval = ctl_lun_map_set(port, lm->plun, lm->lun); else return (ENXIO); } else if (lm->plun == UINT32_MAX) { if (lm->lun == UINT32_MAX) retval = ctl_lun_map_deinit(port); else retval = ctl_lun_map_init(port); } else return (ENXIO); if (port->status & CTL_PORT_STATUS_ONLINE) ctl_isc_announce_port(port); break; } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. 
		 */
		type = _IOC_TYPE(cmd);

		STAILQ_FOREACH(backend, &softc->be_list, links) {
			if (backend->type == type) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			printf("ctl: unknown ioctl command %#lx or backend "
			       "%d\n", cmd, type);
			retval = EINVAL;
			break;
		}
		retval = backend->ioctl(dev, cmd, addr, flag, td);
#endif
		retval = ENOTTY;
		break;
	}
	}
	return (retval);
}

/*
 * Flatten a nexus (port, initiator) pair into a single initiator index.
 */
uint32_t
ctl_get_initindex(struct ctl_nexus *nexus)
{
	return (nexus->initid + (nexus->targ_port * CTL_MAX_INIT_PER_PORT));
}

/*
 * Create an empty LUN map for a port (all entries unmapped) and, if the
 * port is online, disable all LUNs on it and announce the change to the
 * HA peer.  Returns ENOMEM if the map cannot be allocated.
 */
int
ctl_lun_map_init(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	struct ctl_lun *lun;
	uint32_t i;

	if (port->lun_map == NULL)
		port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS,
		    M_CTL, M_NOWAIT);
	if (port->lun_map == NULL)
		return (ENOMEM);
	for (i = 0; i < CTL_MAX_LUNS; i++)
		port->lun_map[i] = UINT32_MAX;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_disable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_disable(port->targ_lun_arg,
				    lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Destroy a port's LUN map and, if the port is online, re-enable all
 * LUNs on it and announce the change.
 */
int
ctl_lun_map_deinit(struct ctl_port *port)
{
	struct ctl_softc *softc = port->ctl_softc;
	struct ctl_lun *lun;

	if (port->lun_map == NULL)
		return (0);
	free(port->lun_map, M_CTL);
	port->lun_map = NULL;
	if (port->status & CTL_PORT_STATUS_ONLINE) {
		if (port->lun_enable != NULL) {
			STAILQ_FOREACH(lun, &softc->lun_list, links)
				port->lun_enable(port->targ_lun_arg,
				    lun->lun);
		}
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Map port-visible LUN 'plun' to global LUN 'glun', creating the map on
 * first use.  If the port is online and the slot was previously
 * unmapped, enable the LUN and announce the change.
 */
int
ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun)
{
	int status;
	uint32_t old;

	if (port->lun_map == NULL) {
		status = ctl_lun_map_init(port);
		if (status != 0)
			return (status);
	}
	old = port->lun_map[plun];
	port->lun_map[plun] = glun;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old >= CTL_MAX_LUNS) {
		if (port->lun_enable != NULL)
			port->lun_enable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Remove the mapping for port-visible LUN 'plun'.  If the port is
 * online and the slot was previously mapped, disable the LUN and
 * announce the change.
 */
int
ctl_lun_map_unset(struct ctl_port *port, uint32_t plun)
{
	uint32_t old;

	if (port->lun_map == NULL)
		return (0);
	old = port->lun_map[plun];
	port->lun_map[plun] = UINT32_MAX;
	if ((port->status & CTL_PORT_STATUS_ONLINE) && old < CTL_MAX_LUNS) {
		if (port->lun_disable != NULL)
			port->lun_disable(port->targ_lun_arg, plun);
		ctl_isc_announce_port(port);
	}
	return (0);
}

/*
 * Translate a port-visible LUN id to the global LUN id; identity when
 * the port has no map.  Returns UINT32_MAX for an unknown port.
 */
uint32_t
ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id)
{

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS)
		return (lun_id);
	return (port->lun_map[lun_id]);
}

/*
 * Translate a global LUN id to the port-visible LUN id by linear search
 * of the map; identity when the port has no map.  Returns UINT32_MAX
 * for an unknown port or an unmapped LUN.
 */
uint32_t
ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id)
{
	uint32_t i;

	if (port == NULL)
		return (UINT32_MAX);
	if (port->lun_map == NULL)
		return (lun_id);
	for (i = 0; i < CTL_MAX_LUNS; i++) {
		if (port->lun_map[i] == lun_id)
			return (i);
	}
	return (UINT32_MAX);
}

/*
 * Decode a SCSI 64-bit LUN structure into a flat 32-bit LUN number.
 * Handles peripheral, flat and extended LUN addressing methods;
 * returns 0xffffffff for encodings that cannot be represented.
 */
uint32_t
ctl_decode_lun(uint64_t encoded)
{
	uint8_t lun[8];
	uint32_t result = 0xffffffff;

	be64enc(lun, encoded);
	switch (lun[0] & RPL_LUNDATA_ATYP_MASK) {
	case RPL_LUNDATA_ATYP_PERIPH:
		if ((lun[0] & 0x3f) == 0 && lun[2] == 0 && lun[3] == 0 &&
		    lun[4] == 0 && lun[5] == 0 && lun[6] == 0 && lun[7] == 0)
			result = lun[1];
		break;
	case RPL_LUNDATA_ATYP_FLAT:
		if (lun[2] == 0 && lun[3] == 0 && lun[4] == 0 && lun[5] == 0 &&
		    lun[6] == 0 && lun[7] == 0)
			result = ((lun[0] & 0x3f) << 8) + lun[1];
		break;
	case RPL_LUNDATA_ATYP_EXTLUN:
		switch (lun[0] & RPL_LUNDATA_EXT_EAM_MASK) {
		case 0x02:
			switch (lun[0] & RPL_LUNDATA_EXT_LEN_MASK) {
			case 0x00:
				result = lun[1];
				break;
			case 0x10:
				result = (lun[1] << 16) + (lun[2] << 8) +
				    lun[3];
				break;
			case 0x20:
				if (lun[1] == 0 && lun[6] == 0 && lun[7] == 0)
					result = (lun[2] << 24) +
					    (lun[3] << 16) + (lun[4] << 8) +
					    lun[5];
				break;
			}
			break;
		case RPL_LUNDATA_EXT_EAM_NOT_SPEC:
			result = 0xffffffff;
			break;
		}
		break;
	}
	return (result);
}

/*
 * Encode a flat 32-bit LUN number into the smallest SCSI 64-bit LUN
 * structure that can represent it: peripheral, flat, or extended
 * addressing, in increasing order of range.
 */
uint64_t
ctl_encode_lun(uint32_t decoded)
{
	uint64_t l = decoded;

	if (l <= 0xff)		/* Peripheral addressing. */
		return (((uint64_t)RPL_LUNDATA_ATYP_PERIPH << 56) | (l << 48));
	if (l <= 0x3fff)	/* Flat addressing. */
		return (((uint64_t)RPL_LUNDATA_ATYP_FLAT << 56) | (l << 48));
	if (l <= 0xffffff)	/* Extended flat addressing (3 bytes). */
		return (((uint64_t)(RPL_LUNDATA_ATYP_EXTLUN |
		    0x12) << 56) | (l << 32));
	/* Extended flat addressing (4 bytes). */
	return ((((uint64_t)RPL_LUNDATA_ATYP_EXTLUN | 0x22) << 56) |
	    (l << 16));
}

/*
 * Look up the port an I/O arrived on, from the nexus in its header.
 */
static struct ctl_port *
ctl_io_port(struct ctl_io_hdr *io_hdr)
{

	return (control_softc->ctl_ports[io_hdr->nexus.targ_port]);
}

/*
 * Find the first zero bit in [first, last) of a bitmask array.
 * Returns the bit index, or -1 if every bit in the range is set.
 */
int
ctl_ffz(uint32_t *mask, uint32_t first, uint32_t last)
{
	int i;

	for (i = first; i < last; i++) {
		if ((mask[i / 32] & (1 << (i % 32))) == 0)
			return (i);
	}
	return (-1);
}

/*
 * Set a bit in a bitmask array.  Returns -1 if the bit was already set,
 * 0 otherwise.
 */
int
ctl_set_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) != 0)
		return (-1);
	else
		mask[chunk] |= (1 << piece);

	return (0);
}

/*
 * Clear a bit in a bitmask array.  Returns -1 if the bit was already
 * clear, 0 otherwise.
 */
int
ctl_clear_mask(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (-1);
	else
		mask[chunk] &= ~(1 << piece);

	return (0);
}

/*
 * Test a bit in a bitmask array; returns 1 if set, 0 if clear.
 */
int
ctl_is_set(uint32_t *mask, uint32_t bit)
{
	uint32_t chunk, piece;

	chunk = bit >> 5;
	piece = bit % (sizeof(uint32_t) * 8);

	if ((mask[chunk] & (1 << piece)) == 0)
		return (0);
	else
		return (1);
}

/*
 * Return the persistent reservation key for an initiator index, or 0
 * if no key table has been allocated for its port.
 */
static uint64_t
ctl_get_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return (0);
	return (t[residx % CTL_MAX_INIT_PER_PORT]);
}

/*
 * Clear the persistent reservation key for an initiator index; no-op
 * if no key table exists for its port.
 */
static void
ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	if (t == NULL)
		return;
	t[residx % CTL_MAX_INIT_PER_PORT] = 0;
}

/*
 * Ensure the per-port key table covering 'residx' exists.  Drops the
 * LUN lock around the blocking allocation and re-checks afterwards in
 * case another thread installed the table first.
 */
static void
ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx)
{
	uint64_t *p;
	u_int i;

	i = residx/CTL_MAX_INIT_PER_PORT;
	if (lun->pr_keys[i] != NULL)
		return;
	mtx_unlock(&lun->lun_lock);
	p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL,
	    M_WAITOK | M_ZERO);
	mtx_lock(&lun->lun_lock);
	if (lun->pr_keys[i] == NULL)
		lun->pr_keys[i] = p;
	else
		free(p, M_CTL);
}

/*
 * Set the persistent reservation key for an initiator index.  The key
 * table must already have been allocated with ctl_alloc_prkey().
 */
static void
ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key)
{
	uint64_t *t;

	t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT];
	KASSERT(t != NULL, ("prkey %d is not allocated", residx));
	t[residx % CTL_MAX_INIT_PER_PORT] = key;
}

/*
 * ctl_softc, pool_name, total_ctl_io are passed in.
 * npool is passed out.
 */
int
ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name,
		uint32_t total_ctl_io, void **npool)
{
#ifdef IO_POOLS
	struct ctl_io_pool *pool;

	pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL,
					    M_NOWAIT | M_ZERO);
	if (pool == NULL)
		return (ENOMEM);

	snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name);
	pool->ctl_softc = ctl_softc;
	pool->zone = uma_zsecond_create(pool->name, NULL,
	    NULL, NULL, NULL, ctl_softc->io_zone);
	/* uma_prealloc(pool->zone, total_ctl_io); */

	*npool = pool;
#else
	*npool = ctl_softc->io_zone;
#endif
	return (0);
}

/*
 * Tear down an I/O pool created by ctl_pool_create(); NULL is a no-op.
 */
void
ctl_pool_free(struct ctl_io_pool *pool)
{

	if (pool == NULL)
		return;

#ifdef IO_POOLS
	uma_zdestroy(pool->zone);
	free(pool, M_CTL);
#endif
}

/*
 * Allocate a ctl_io from the pool, sleeping if necessary, and record
 * the owning pool in the header for ctl_free_io().
 */
union ctl_io *
ctl_alloc_io(void *pool_ref)
{
	union ctl_io *io;
#ifdef IO_POOLS
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;

	io = uma_zalloc(pool->zone, M_WAITOK);
#else
	io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK);
#endif
	if (io != NULL)
		io->io_hdr.pool = pool_ref;
	return (io);
}

/*
 * Non-sleeping variant of ctl_alloc_io(); may return NULL.
 */
union ctl_io *
ctl_alloc_io_nowait(void *pool_ref)
{
	union ctl_io *io;
#ifdef IO_POOLS
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;

	io = uma_zalloc(pool->zone, M_NOWAIT);
#else
	io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
#endif
	if (io != NULL)
		io->io_hdr.pool = pool_ref;
	return (io);
}

/*
 * Return a ctl_io to the pool recorded in its header; NULL is a no-op.
 */
void
ctl_free_io(union ctl_io *io)
{
#ifdef IO_POOLS
	struct ctl_io_pool *pool;
#endif

	if (io == NULL)
		return;

#ifdef IO_POOLS
	pool = (struct ctl_io_pool *)io->io_hdr.pool;
	uma_zfree(pool->zone, io);
#else
	uma_zfree((uma_zone_t)io->io_hdr.pool, io);
#endif
}

/*
 * Zero a ctl_io while preserving its pool back-pointer.
 */
void
ctl_zero_io(union ctl_io *io)
{
	void *pool_ref;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool_ref = io->io_hdr.pool;

	memset(io, 0, sizeof(*io));

	io->io_hdr.pool = pool_ref;
}

/*
 * Parse a number with an optional size suffix (b/k/m/g/t/p/e,
 * case-insensitive, powers of two).  On success stores the scaled
 * value in *num and returns 0; returns -1 on an unrecognized suffix
 * or on overflow of the shifted value.
 */
int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}

/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular lun.
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i, page_code;
	struct ctl_page_index *page_index;
	const char *value;
	uint64_t ival;

	memcpy(&lun->mode_pages.index, page_index_template,
	       sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		page_index = &lun->mode_pages.index[i];
		/* Skip pages that don't apply to this LUN's device type. */
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		page_code = page_index->page_code & SMPH_PC_MASK;
		switch (page_code) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			       &rw_er_page_changeable,
			       sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));

			/*
			 * Sectors per track are set above.  Bytes per
			 * sector need to be set here on a per-LUN basis.
			 */
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[
			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
			       sizeof(format_page_changeable));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
			       &format_page_default,
			       sizeof(format_page_default));
			/* Patch the backend blocksize into each writable copy. */
			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_CURRENT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);
			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);
			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_SAVED];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);
			page_index->page_data =
				(uint8_t *)lun->mode_pages.format_page;
			break;
		}
		case SMS_RIGID_DISK_PAGE: {
			struct scsi_rigid_disk_page *rigid_disk_page;
			uint32_t sectors_per_cylinder;
			uint64_t cylinders;
#ifndef __XSCALE__
			int shift;
#endif /* !__XSCALE__ */

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));

			/*
			 * Rotation rate and sectors per track are set
			 * above.  We calculate the cylinders here based on
			 * capacity.  Due to the number of heads and
			 * sectors per track we're using, smaller arrays
			 * may turn out to have 0 cylinders.  Linux and
			 * FreeBSD don't pay attention to these mode pages
			 * to figure out capacity, but Solaris does.  It
			 * seems to deal with 0 cylinders just fine, and
			 * works out a fake geometry based on the capacity.
			 */
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
			       sizeof(rigid_disk_page_changeable));

			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
				CTL_DEFAULT_HEADS;

			/*
			 * The divide method here will be more accurate,
			 * probably, but results in floating point being
			 * used in the kernel on i386 (__udivdi3()).  On the
			 * XScale, though, __udivdi3() is implemented in
			 * software.
			 *
			 * The shift method for cylinder calculation is
			 * accurate if sectors_per_cylinder is a power of
			 * 2.  Otherwise it might be slightly off -- you
			 * might have a bit of a truncation problem.
			 */
#ifdef	__XSCALE__
			cylinders = (lun->be_lun->maxlba + 1) /
				sectors_per_cylinder;
#else
			/* Find the highest set bit: divide by rounding down
			 * to the nearest power of two via a right shift. */
			for (shift = 31; shift > 0; shift--) {
				if (sectors_per_cylinder & (1 << shift))
					break;
			}
			cylinders = (lun->be_lun->maxlba + 1) >> shift;
#endif

			/*
			 * We've basically got 3 bytes, or 24 bits for the
			 * cylinder size in the mode page.  If we're over,
			 * just round down to 2^24.
			 */
			if (cylinders > 0xffffff)
				cylinders = 0xffffff;

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			/* Optional per-LUN "rpm" option overrides rotation rate. */
			if ((value = ctl_get_opt(&lun->be_lun->options,
			    "rpm")) != NULL) {
				scsi_ulto2b(strtol(value, NULL, 0),
				    rigid_disk_page->rotation_rate);
			}

			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));

			page_index->page_data =
				(uint8_t *)lun->mode_pages.rigid_disk_page;
			break;
		}
		case SMS_CACHING_PAGE: {
			struct scsi_caching_page *caching_page;

			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
			       &caching_page_default,
			       sizeof(caching_page_default));
			memcpy(&lun->mode_pages.caching_page[
			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
			       sizeof(caching_page_changeable));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       &caching_page_default,
			       sizeof(caching_page_default));
			/* "writecache"/"readcache" LUN options tune the SAVED
			 * copy, which then becomes CURRENT. */
			caching_page = &lun->mode_pages.caching_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "writecache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 &= ~SCP_WCE;
			value = ctl_get_opt(&lun->be_lun->options, "readcache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 |= SCP_RCD;
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       sizeof(caching_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.caching_page;
			break;
		}
		case SMS_CONTROL_MODE_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0: {
				struct scsi_control_page *control_page;

				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_DEFAULT],
				       &control_page_default,
				       sizeof(control_page_default));
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_CHANGEABLE],
				       &control_page_changeable,
				       sizeof(control_page_changeable));
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_SAVED],
				       &control_page_default,
				       sizeof(control_page_default));
				/* "reordering=unrestricted" relaxes the task
				 * queue algorithm in the SAVED copy. */
				control_page = &lun->mode_pages.control_page[
				    CTL_PAGE_SAVED];
				value = ctl_get_opt(&lun->be_lun->options,
				    "reordering");
				if (value != NULL &&
				    strcmp(value, "unrestricted") == 0) {
					control_page->queue_flags &=
					    ~SCP_QUEUE_ALG_MASK;
					control_page->queue_flags |=
					    SCP_QUEUE_ALG_UNRESTRICTED;
				}
				memcpy(&lun->mode_pages.control_page[
				    CTL_PAGE_CURRENT],
				       &lun->mode_pages.control_page[
				    CTL_PAGE_SAVED],
				       sizeof(control_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.control_page;
				break;
			}
			case 0x01:	/* Control Extension subpage. */
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_DEFAULT],
				       &control_ext_page_default,
				       sizeof(control_ext_page_default));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_CHANGEABLE],
				       &control_ext_page_changeable,
				       sizeof(control_ext_page_changeable));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_SAVED],
				       &control_ext_page_default,
				       sizeof(control_ext_page_default));
				memcpy(&lun->mode_pages.control_ext_page[
				    CTL_PAGE_CURRENT],
				       &lun->mode_pages.control_ext_page[
				    CTL_PAGE_SAVED],
				       sizeof(control_ext_page_default));
				page_index->page_data =
				    (uint8_t *)lun->mode_pages.control_ext_page;
				break;
			default:
				panic("subpage %#x for page %#x is incorrect!",
				      page_index->subpage, page_code);
			}
			break;
		}
		case SMS_INFO_EXCEPTIONS_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0:
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[
				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
				       sizeof(ie_page_changeable));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
				       &ie_page_default,
				       sizeof(ie_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.ie_page;
				break;
			case 0x02: {	/* Logical Block Provisioning subpage. */
				struct ctl_logical_block_provisioning_page *page;

				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[
				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
				       sizeof(lbp_page_changeable));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				/*
				 * Each "*-threshold" LUN option arms one
				 * threshold descriptor.  Thresholds are given
				 * in bytes, converted to blocks (512 if the
				 * backend reports no blocksize), then scaled
				 * by 2^CTL_LBP_EXPONENT per SBC.
				 */
				page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
				value = ctl_get_opt(&lun->be_lun->options,
				    "avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[0].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[0].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[1].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[1].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[2].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[2].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[3].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[3].count);
				}
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
				       &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       sizeof(lbp_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.lbp_page;
				break;
			}
			default:
				panic("subpage %#x for page %#x is incorrect!",
				      page_index->subpage, page_code);
			}
			break;
		}
		case SMS_CDDVD_CAPS_PAGE:{
			KASSERT(page_index->subpage == SMS_SUBPAGE_PAGE_0,
			    ("subpage %#x for page %#x is incorrect!",
			    page_index->subpage, page_code));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_DEFAULT],
			       &cddvd_page_default,
			       sizeof(cddvd_page_default));
			memcpy(&lun->mode_pages.cddvd_page[
			       CTL_PAGE_CHANGEABLE], &cddvd_page_changeable,
			       sizeof(cddvd_page_changeable));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
			       &cddvd_page_default,
			       sizeof(cddvd_page_default));
			memcpy(&lun->mode_pages.cddvd_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.cddvd_page[CTL_PAGE_SAVED],
			       sizeof(cddvd_page_default));
			page_index->page_data =
			    (uint8_t *)lun->mode_pages.cddvd_page;
			break;
		}
		case SMS_VENDOR_SPECIFIC_PAGE:{
			switch (page_index->subpage) {
			case DBGCNF_SUBPAGE_CODE: {
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CURRENT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &debugconf_page_changeable,
				       sizeof(debugconf_page_changeable));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_DEFAULT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_SAVED],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.debugconf_subpage;
				break;
			}
			default:
				panic("subpage %#x for page %#x is incorrect!",
				      page_index->subpage, page_code);
			}
			break;
		}
		default:
			panic("invalid page code value %#x", page_code);
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the per-LUN log page index: the list of supported pages (index 0),
 * the page/subpage list (index 1), the logical block provisioning page
 * (index 2) and the statistics page (index 3).
 */
static int
ctl_init_log_page_index(struct ctl_lun *lun)
{
	struct ctl_page_index *page_index;
	int i, j, k, prev;

	memcpy(&lun->log_pages.index, log_page_index_template,
	       sizeof(log_page_index_template));

	prev = -1;
	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {
		page_index = &lun->log_pages.index[i];
		/* Skip pages that do not apply to this LUN's device type. */
		if (lun->be_lun->lun_type == T_DIRECT &&
		    (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0)
			continue;
		if (lun->be_lun->lun_type == T_PROCESSOR &&
		    (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0)
			continue;
		if (lun->be_lun->lun_type == T_CDROM &&
		    (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0)
			continue;

		/* LBP log page is only meaningful if the backend can report
		 * LUN attributes. */
		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
		    lun->backend->lun_attr == NULL)
			continue;

		/* Record each page code once (j) and every page/subpage
		 * pair (k). */
		if (page_index->page_code != prev) {
			lun->log_pages.pages_page[j] = page_index->page_code;
			prev = page_index->page_code;
			j++;
		}
		lun->log_pages.subpages_page[k*2] = page_index->page_code;
		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
		k++;
	}
	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
	lun->log_pages.index[0].page_len = j;
	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
	lun->log_pages.index[1].page_len = k * 2;
	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;
	lun->log_pages.index[3].page_data = (uint8_t *)&lun->log_pages.stat_page;
	lun->log_pages.index[3].page_len = sizeof(lun->log_pages.stat_page);

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Decode a hex string (optionally preceded by whitespace and "0x"/"0X")
 * into buf, two hex digits per output byte, stopping at the first
 * non-hex character or when buf_size bytes have been filled.  buf is
 * zeroed first.  Returns the number of bytes produced, rounding a
 * trailing odd digit up to a full byte.
 *
 * NOTE(review): isspace()/isdigit()/isalpha()/isupper() are called on
 * plain char values without an (unsigned char) cast — technically UB for
 * high-bit input bytes; inputs here come from LUN option strings, which
 * are presumably ASCII.  Verify before relying on non-ASCII input.
 */
static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	buf_size *= 2;	/* count hex digits, not bytes, from here on */
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);	/* high nibble first */
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
 *   wants us to allocate the LUN and he can block.
 * - ctl_softc is always set
 * - be_lun is set if the LUN has a backend (needed for disk LUNs)
 *
 * Returns 0 for success, non-zero (errno) for failure.
*/ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, struct ctl_be_lun *const be_lun) { struct ctl_lun *nlun, *lun; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *vendor, *value; int lun_number, i, lun_malloced; int devidlen, idlen1, idlen2 = 0, len; if (be_lun == NULL) return (EINVAL); /* * We currently only support Direct Access or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: case T_PROCESSOR: case T_CDROM: break; case T_SEQUENTIAL: case T_CHANGER: default: be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); break; } if (ctl_lun == NULL) { lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); lun_malloced = 1; } else { lun_malloced = 0; lun = ctl_lun; } memset(lun, 0, sizeof(*lun)); if (lun_malloced) lun->flags = CTL_LUN_MALLOCED; /* Generate LUN ID. */ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = ctl_get_opt(&be_lun->options, "scsiname"); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = ctl_get_opt(&be_lun->options, "eui"); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = ctl_get_opt(&be_lun->options, "naa"); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { 
strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { printf("ctl: requested LUN ID %d is higher " "than CTL_MAX_LUNS - 1 (%d)\n", be_lun->req_lun_id, CTL_MAX_LUNS - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? 
*/ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN, out of LUNs\n"); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); return (ENOSPC); } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; atomic_add_int(&be_lun->be->num_luns, 1); if (be_lun->flags & CTL_LUN_FLAG_EJECTED) lun->flags |= CTL_LUN_EJECTED; if (be_lun->flags & CTL_LUN_FLAG_NO_MEDIA) lun->flags |= CTL_LUN_NO_MEDIA; if (be_lun->flags & CTL_LUN_FLAG_STOPPED) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = ctl_get_opt(&be_lun->options, "removable"); if (value != NULL) { if (strcmp(value, "on") == 0) lun->flags |= CTL_LUN_REMOVABLE; } else if (be_lun->lun_type == T_CDROM) lun->flags |= CTL_LUN_REMOVABLE; lun->ctl_softc = ctl_softc; #ifdef CTL_TIME_IO lun->last_busy = getsbinuptime(); #endif TAILQ_INIT(&lun->ooa_queue); TAILQ_INIT(&lun->blocked_queue); STAILQ_INIT(&lun->error_list); ctl_tpc_lun_init(lun); /* * Initialize the mode and log page index. */ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. 
*/ STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; /* Setup statistics gathering */ lun->stats.device_type = be_lun->lun_type; lun->stats.lun_number = lun_number; lun->stats.blocksize = be_lun->blocksize; if (be_lun->blocksize == 0) lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; for (i = 0;i < CTL_MAX_PORTS;i++) lun->stats.ports[i].targ_port = i; mtx_unlock(&ctl_softc->ctl_lock); lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); return (0); } /* * Delete a LUN. * Assumptions: * - LUN has already been marked invalid and any pending I/O has been taken * care of. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc; struct ctl_lun *nlun; int i; softc = lun->ctl_softc; mtx_assert(&softc->ctl_lock, MA_OWNED); STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; if (!TAILQ_EMPTY(&lun->ooa_queue)) panic("Freeing a LUN %p with outstanding I/O!!\n", lun); softc->num_luns--; /* * Tell the backend to free resources, if this LUN has a backend. */ atomic_subtract_int(&lun->be_lun->be->num_luns, 1); lun->be_lun->lun_shutdown(lun->be_lun->be_lun); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < CTL_MAX_PORTS; i++) free(lun->pending_ua[i], M_CTL); for (i = 0; i < CTL_MAX_PORTS; i++) free(lun->pr_keys[i], M_CTL); free(lun->write_buffer, M_CTL); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } return (0); } static void ctl_create_lun(struct ctl_be_lun *be_lun) { /* * ctl_alloc_lun() should handle all potential failure cases. 
 */
	ctl_alloc_lun(control_softc, NULL, be_lun);
}

/*
 * Queue a backend LUN for creation by the CTL work thread and wake it.
 */
int
ctl_add_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc = control_softc;

	mtx_lock(&softc->ctl_lock);
	STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links);
	mtx_unlock(&softc->ctl_lock);
	wakeup(&softc->pending_lun_queue);

	return (0);
}

/*
 * Bring a LUN on line: clear CTL_LUN_DISABLED and ask every online
 * frontend port (without a LUN map, with an enable method) to enable it.
 */
int
ctl_enable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port, *nport;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		/*
		 * eh?  Why did we get called if the LUN is already
		 * enabled?
		 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH_SAFE(port, &softc->port_list, links, nport) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_enable == NULL)
			continue;

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			/* Log and keep going; other ports may still succeed. */
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/*
 * Take a LUN off line: set CTL_LUN_DISABLED and ask every online
 * frontend port to disable it.  Mirror image of ctl_enable_lun().
 */
int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		/* Already disabled; nothing to do. */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0 ||
		    port->lun_map != NULL || port->lun_disable == NULL)
			continue;

		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_disable(port->targ_lun_arg, lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_disable on lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);
	ctl_isc_announce_lun(lun);

	return (0);
}

/* Clear the STOPPED state (backend reports the LUN started). */
int
ctl_start_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Set the STOPPED state (backend reports the LUN stopped). */
int
ctl_stop_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/* Backend reports the LUN currently has no medium present. */
int
ctl_lun_no_media(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_NO_MEDIA;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Backend reports a medium is (again) present.  For removable LUNs,
 * establish a MEDIUM CHANGE unit attention for all initiators and, in
 * HA XFER mode, propagate it to the peer node.
 */
int
ctl_lun_has_media(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~(CTL_LUN_NO_MEDIA | CTL_LUN_EJECTED);
	if (lun->flags & CTL_LUN_REMOVABLE)
		ctl_est_ua_all(lun, -1, CTL_UA_MEDIUM_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_REMOVABLE) &&
	    lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;	/* wildcard: all initiators */
		msg.hdr.nexus.targ_port = -1;	/* wildcard: all ports */
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_MEDIUM_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
	return (0);
}

/* Backend reports the medium has been ejected. */
int
ctl_lun_ejected(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_EJECTED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

/*
 * Promote this LUN to primary on this HA node; initiators get an
 * asymmetric access change unit attention.
 */
int
ctl_lun_primary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Demote this LUN to secondary on this HA node; mirror image of
 * ctl_lun_primary().
 */
int
ctl_lun_secondary(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_PRIMARY_SC;
	ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
	mtx_unlock(&lun->lun_lock);
	ctl_isc_announce_lun(lun);
	return (0);
}

/*
 * Mark a (already disabled) LUN invalid and free it once no I/O remains
 * outstanding on it.  Returns -1 if the LUN is still enabled.
 */
int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
	 */
	if (TAILQ_EMPTY(&lun->ooa_queue)) {
		/* Drop lun_lock first: ctl_free_lun() needs ctl_lock. */
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Backend reports a capacity change: establish a CAPACITY CHANGE unit
 * attention for all initiators and, in HA XFER mode, tell the peer node.
 */
void
ctl_lun_capacity_changed(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;
	union ctl_ha_msg msg;

	mtx_lock(&lun->lun_lock);
	ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) {
		/* Send msg to other side. */
		bzero(&msg.ua, sizeof(msg.ua));
		msg.hdr.msg_type = CTL_MSG_UA;
		msg.hdr.nexus.initid = -1;
		msg.hdr.nexus.targ_port = -1;
		msg.hdr.nexus.targ_lun = lun->lun;
		msg.hdr.nexus.targ_mapped_lun = lun->lun;
		msg.ua.ua_all = 1;
		msg.ua.ua_set = 1;
		msg.ua.ua_type = CTL_UA_CAPACITY_CHANGE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua),
		    M_WAITOK);
	}
}

/*
 * Backend "memory move is complete" callback for requests that never
 * make it down to say RAIDCore's configuration code.
 */
int
ctl_config_move_done(union ctl_io *io)
{
	int retval;

	CTL_DEBUG_PRINT(("ctl_config_move_done\n"));
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("Config I/O type isn't CTL_IO_SCSI (%d)!",
	     io->io_hdr.io_type));

	/* A transport-level error with no SCSI status yet becomes an
	 * internal failure carrying the FETD's port_status in the sks. */
	if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	if (ctl_debug & CTL_DEBUG_CDB_DATA)
		ctl_data_print(io);
	/* Done if this was a read, or on error/abort; otherwise continue
	 * processing the config write now that the data has arrived. */
	if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ||
	    ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) {
		/*
		 * XXX KDM just assuming a single pointer here, and not a
		 * S/G list.  If we start using S/G lists for config data,
		 * we'll need to know how to clean them up here as well.
		 */
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			free(io->scsiio.kern_data_ptr, M_CTL);
		ctl_done(io);
		retval = CTL_RETVAL_COMPLETE;
	} else {
		/*
		 * XXX KDM now we need to continue data movement.  Some
		 * options:
		 * - call ctl_scsiio() again?
		 *   We don't do this for data
		 *   writes, because for those at least we know ahead of
		 *   time where the write will go and how long it is.  For
		 *   config writes, though, that information is largely
		 *   contained within the write itself, thus we need to
		 *   parse out the data again.
		 *
		 * - Call some other function once the data is in?
		 */

		/*
		 * XXX KDM call ctl_scsiio() again for now, and check flag
		 * bits to see whether we're allocated or not.
		 */
		retval = ctl_scsiio(&io->scsiio);
	}
	return (retval);
}

/*
 * This gets called by a backend driver when it is done with a
 * data_submit method.
 */
void
ctl_data_submit_done(union ctl_io *io)
{
	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	ctl_done(io);
}

/*
 * This gets called by a backend driver when it is done with a
 * configuration write.
 */
void
ctl_config_write_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 *
	 * If there is an error, though, we don't want to keep processing.
	 * Instead, just send status back to the initiator.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) &&
	    (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		io->scsiio.io_cont(io);
		return;
	}
	/*
	 * Since a configuration write can be done for commands that actually
	 * have data allocated, like write buffer, and commands that have
	 * no data, like start/stop unit, we need to check here.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
		buf = io->scsiio.kern_data_ptr;
	else
		buf = NULL;
	/* Grab the buffer pointer before ctl_done() so we can free it after. */
	ctl_done(io);
	if (buf)
		free(buf, M_CTL);
}

/*
 * Backend completion callback for a configuration read: on success,
 * either continue via io_cont or start moving the data to the initiator.
 */
void
ctl_config_read_done(union ctl_io *io)
{
	uint8_t *buf;

	/*
	 * If there is some error -- we are done, skip data transfer.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		if (io->io_hdr.flags & CTL_FLAG_ALLOCATED)
			buf = io->scsiio.kern_data_ptr;
		else
			buf = NULL;
		ctl_done(io);
		if (buf)
			free(buf, M_CTL);
		return;
	}

	/*
	 * If the IO_CONT flag is set, we need to call the supplied
	 * function to continue processing the I/O, instead of completing
	 * the I/O just yet.
	 */
	if (io->io_hdr.flags & CTL_FLAG_IO_CONT) {
		io->scsiio.io_cont(io);
		return;
	}

	ctl_datamove(io);
}

/*
 * SCSI release command.
 */
int
ctl_scsi_release(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	uint32_t residx;

	CTL_DEBUG_PRINT(("ctl_scsi_release\n"));

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
*/ mtx_lock(&lun->lun_lock); /* * According to SPC, it is not an error for an intiator to attempt * to release a reservation on a LUN that isn't reserved, or that * is reserved by another initiator. The reservation can only be * released, though, by the initiator who made it or by one of * several reset type events. */ if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) lun->flags &= ~CTL_LUN_RESERVED; mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_scsi_reserve(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; uint32_t residx; CTL_DEBUG_PRINT(("ctl_reserve\n")); residx = ctl_get_initindex(&ctsio->io_hdr.nexus); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. */ mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { ctl_set_reservation_conflict(ctsio); goto bailout; } /* SPC-3 exceptions to SPC-2 RESERVE and RELEASE behavior. 
*/ if (lun->flags & CTL_LUN_PR_RESERVED) { ctl_set_success(ctsio); goto bailout; } lun->flags |= CTL_LUN_RESERVED; lun->res_idx = residx; ctl_set_success(ctsio); bailout: mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_start_stop(struct ctl_scsiio *ctsio) { struct scsi_start_stop_unit *cdb; struct ctl_lun *lun; int retval; CTL_DEBUG_PRINT(("ctl_start_stop\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_start_stop_unit *)ctsio->cdb; if ((cdb->how & SSS_PC_MASK) == 0) { if ((lun->flags & CTL_LUN_PR_RESERVED) && (cdb->how & SSS_START) == 0) { uint32_t residx; residx = ctl_get_initindex(&ctsio->io_hdr.nexus); if (ctl_get_prkey(lun, residx) == 0 || (lun->pr_res_idx != residx && lun->pr_res_type < 4)) { ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } if ((cdb->how & SSS_LOEJ) && (lun->flags & CTL_LUN_REMOVABLE) == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 4, /*bit_valid*/ 1, /*bit*/ 1); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((cdb->how & SSS_START) == 0 && (cdb->how & SSS_LOEJ) && lun->prevent_count > 0) { /* "Medium removal prevented" */ ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/(lun->flags & CTL_LUN_NO_MEDIA) ? 
SSD_KEY_NOT_READY : SSD_KEY_ILLEGAL_REQUEST,
			    /*asc*/ 0x53, /*ascq*/ 0x02,
			    SSD_ELEM_NONE);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	/* Hand the actual start/stop work to the backend. */
	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle PREVENT ALLOW MEDIUM REMOVAL.  Tracks a per-initiator prevent
 * bit in lun->prevent and keeps lun->prevent_count in sync with the
 * number of bits set; the backend is notified via config_write().
 * Only valid on removable LUNs.
 */
int
ctl_prevent_allow(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_prevent *cdb;
	int retval;
	uint32_t initidx;		/* index of the requesting initiator */

	CTL_DEBUG_PRINT(("ctl_prevent_allow\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_prevent *)ctsio->cdb;

	/* The command only makes sense for removable media. */
	if ((lun->flags & CTL_LUN_REMOVABLE) == 0) {
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	mtx_lock(&lun->lun_lock);
	/*
	 * Only flip the bit (and adjust the count) when the request actually
	 * changes this initiator's current prevent state.
	 */
	if ((cdb->how & PR_PREVENT) &&
	    ctl_is_set(lun->prevent, initidx) == 0) {
		ctl_set_mask(lun->prevent, initidx);
		lun->prevent_count++;
	} else if ((cdb->how & PR_PREVENT) == 0 &&
	    ctl_is_set(lun->prevent, initidx)) {
		ctl_clear_mask(lun->prevent, initidx);
		lun->prevent_count--;
	}
	mtx_unlock(&lun->lun_lock);
	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);
}

/*
 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but
 * we don't really do anything with the LBA and length fields if the user
 * passes them in.  Instead we'll just flush out the cache for the entire
 * LUN.
 */
int
ctl_sync_cache(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	struct ctl_lba_len_flags *lbalen;
	uint64_t starting_lba;
	uint32_t block_count;
	int retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_sync_cache\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;
	retval = 0;

	/* Decode the LBA/length fields from the 10- or 16-byte CDB. */
	switch (ctsio->cdb[0]) {
	case SYNCHRONIZE_CACHE: {
		struct scsi_sync_cache *cdb;
		cdb = (struct scsi_sync_cache *)ctsio->cdb;

		starting_lba = scsi_4btoul(cdb->begin_lba);
		block_count = scsi_2btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	case SYNCHRONIZE_CACHE_16: {
		struct scsi_sync_cache_16 *cdb;
		cdb = (struct scsi_sync_cache_16 *)ctsio->cdb;

		starting_lba = scsi_8btou64(cdb->begin_lba);
		block_count = scsi_4btoul(cdb->lb_count);
		byte2 = cdb->byte2;
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
		break; /* NOTREACHED */
	}

	/*
	 * We check the LBA and length, but don't do anything with them.
	 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to
	 * get flushed.  This check will just help satisfy anyone who wants
	 * to see an error for an out of range LBA.
 */
	if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/* Pass the (unused) range to the backend, which does the flush. */
	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = starting_lba;
	lbalen->len = block_count;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

bailout:
	return (retval);
}

/*
 * Handle FORMAT UNIT.  This implementation does no actual formatting; it
 * only fetches the parameter list (if FMTDATA is set), rejects any
 * non-empty defect list, and reports success.
 */
int
ctl_format(struct ctl_scsiio *ctsio)
{
	struct scsi_format *cdb;
	struct ctl_lun *lun;
	int length, defect_list_len;

	CTL_DEBUG_PRINT(("ctl_format\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_format *)ctsio->cdb;

	/* Parameter list size depends on the LONGLIST bit. */
	length = 0;
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST)
			length = sizeof(struct scsi_format_header_long);
		else
			length = sizeof(struct scsi_format_header_short);
	}

	/*
	 * First pass: allocate a buffer and kick off the data-out transfer;
	 * we re-enter this function once the data has been moved.
	 */
	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Second pass: validate the header; a defect list is not supported. */
	defect_list_len = 0;
	if (cdb->byte2 & SF_FMTDATA) {
		if (cdb->byte2 & SF_LONGLIST) {
			struct scsi_format_header_long *header;

			header = (struct scsi_format_header_long *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_4btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		} else {
			struct scsi_format_header_short *header;

			header = (struct scsi_format_header_short *)
				ctsio->kern_data_ptr;

			defect_list_len = scsi_2btoul(header->defect_list_len);
			if (defect_list_len != 0) {
				ctl_set_invalid_field(ctsio,
						      /*sks_valid*/ 1,
						      /*command*/ 0,
						      /*field*/ 2,
						      /*bit_valid*/ 0,
						      /*bit*/ 0);
				goto bailout;
			}
		}
	}

	ctl_set_success(ctsio);

bailout:
	/* Free the parameter buffer on both the success and error paths. */
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Handle READ BUFFER (10 and 16 byte versions).  Supports the data,
 * descriptor, and echo-descriptor modes against the per-LUN write buffer
 * (allocated lazily on first use).
 */
int
ctl_read_buffer(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	uint64_t buffer_offset;
	uint32_t len;
	uint8_t byte2;
	/* Shared scratch descriptors; contents are rewritten on each use. */
	static uint8_t descr[4];
	static uint8_t echo_descr[4] = { 0 };

	CTL_DEBUG_PRINT(("ctl_read_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	switch (ctsio->cdb[0]) {
	case READ_BUFFER: {
		struct scsi_read_buffer *cdb;

		cdb = (struct scsi_read_buffer *)ctsio->cdb;
		buffer_offset = scsi_3btoul(cdb->offset);
		len = scsi_3btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case READ_BUFFER_16: {
		struct scsi_read_buffer_16 *cdb;

		cdb = (struct scsi_read_buffer_16 *)ctsio->cdb;
		buffer_offset = scsi_8btou64(cdb->offset);
		len = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default: /* This shouldn't happen. */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Only the three modes above are implemented. */
	if ((byte2 & RWB_MODE) != RWB_MODE_DATA &&
	    (byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR &&
	    (byte2 & RWB_MODE) != RWB_MODE_DESCR) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Requested range must lie within the buffer. */
	if (buffer_offset > CTL_WRITE_BUFFER_SIZE ||
	    buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if ((byte2 & RWB_MODE) == RWB_MODE_DESCR) {
		/* Descriptor: offset boundary 0, buffer capacity in bytes. */
		descr[0] = 0;
		scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]);
		ctsio->kern_data_ptr = descr;
		len = min(len, sizeof(descr));
	} else if ((byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) {
		ctsio->kern_data_ptr = echo_descr;
		len = min(len, sizeof(echo_descr));
	} else {
		/* Data mode: read straight out of the lazily allocated buffer. */
		if (lun->write_buffer == NULL) {
			lun->write_buffer =
			    malloc(CTL_WRITE_BUFFER_SIZE,
					       M_CTL, M_WAITOK);
		}
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
	}
	ctsio->kern_data_len = len;
	ctsio->kern_total_len = len;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;
	ctl_set_success(ctsio);
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Handle WRITE BUFFER.  Only the plain data mode is supported; the data
 * lands in the per-LUN write buffer (allocated lazily), where READ BUFFER
 * can retrieve it.
 */
int
ctl_write_buffer(struct ctl_scsiio *ctsio)
{
	struct scsi_write_buffer *cdb;
	struct ctl_lun *lun;
	int buffer_offset, len;

	CTL_DEBUG_PRINT(("ctl_write_buffer\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_write_buffer *)ctsio->cdb;

	/* Only the data mode is implemented for writes. */
	if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	len = scsi_3btoul(cdb->length);
	buffer_offset = scsi_3btoul(cdb->offset);

	/* Target range must fit in the buffer. */
	if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		if (lun->write_buffer == NULL) {
			lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE,
			    M_CTL, M_WAITOK);
		}
		/* DMA directly into the buffer at the requested offset. */
		ctsio->kern_data_ptr = lun->write_buffer + buffer_offset;
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Re-entered after the data move: nothing more to do. */
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Handle WRITE SAME (10 and 16 byte versions).  Decodes the range and
 * flags, fetches one block of data from the initiator (unless NDOB is
 * set), and hands the operation to the backend.
 */
int
ctl_write_same(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	CTL_DEBUG_PRINT(("ctl_write_same\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)ctsio->cdb;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)ctsio->cdb;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		byte2 = cdb->byte2;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/* ANCHOR flag can be used only together with UNMAP */
	if ((byte2 & SWS_UNMAP) == 0 && (byte2 & SWS_ANCHOR) != 0) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.
If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. */ if ((byte2 & SWS_NDOB) == 0 && (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = byte2; retval = lun->backend->config_write((union ctl_io *)ctsio); return (retval); } int ctl_unmap(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct scsi_unmap *cdb; struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_header *hdr; struct scsi_unmap_desc *buf, *end, *endnz, *range; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; CTL_DEBUG_PRINT(("ctl_unmap\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_unmap *)ctsio->cdb; len = scsi_2btoul(cdb->length); byte2 = cdb->byte2; /* * If we've got a kernel request that hasn't been malloced 
yet, * malloc it and tell the caller the data buffer is here. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = ctsio->kern_total_len - ctsio->kern_data_resid; hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; if (len < sizeof (*hdr) || len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 0, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); goto done; } len = scsi_2btoul(hdr->desc_length); buf = (struct scsi_unmap_desc *)(hdr + 1); end = buf + len / sizeof(*buf); endnz = buf; for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); num_blocks = scsi_4btoul(range->length); if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (num_blocks != 0) endnz = range + 1; } /* * Block backend can not handle zero last range. * Filter it out and return if there is nothing left. 
 */
	len = (uint8_t *)endnz - (uint8_t *)buf;
	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}

	mtx_lock(&lun->lun_lock);
	ptrlen = (struct ctl_ptr_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	ptrlen->ptr = (void *)buf;
	ptrlen->len = len;
	ptrlen->flags = byte2;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);

	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);

done:
	/* Error / nothing-to-do path: release the parameter buffer. */
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Note that this function currently doesn't actually do anything inside
 * CTL to enforce things if the DQue bit is turned on.
 *
 * Also note that this function can't be used in the default case, because
 * the DQue bit isn't set in the changeable mask for the control mode page
 * anyway.  This is just here as an example for how to implement a page
 * handler, and a placeholder in case we want to allow the user to turn
 * tagged queueing on and off.
 *
 * The D_SENSE bit handling is functional, however, and will turn
 * descriptor sense on and off for a given LUN.
 */
int
ctl_control_page_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
	struct ctl_lun *lun;
	int set_ua;			/* post a MODE CHANGE unit attention? */
	uint32_t initidx;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	/* Locate the current and saved copies of the control page. */
	user_cp = (struct scsi_control_page *)page_ptr;
	current_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_CURRENT));
	saved_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_SAVED));

	mtx_lock(&lun->lun_lock);
	if (((current_cp->rlec & SCP_DSENSE) == 0)
	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
		/*
		 * Descriptor sense is currently turned off and the user
		 * wants to turn it on.
		 */
		current_cp->rlec |= SCP_DSENSE;
		saved_cp->rlec |= SCP_DSENSE;
		lun->flags |= CTL_LUN_SENSE_DESC;
		set_ua = 1;
	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
	 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
		/*
		 * Descriptor sense is currently turned on, and the user
		 * wants to turn it off.
		 */
		current_cp->rlec &= ~SCP_DSENSE;
		saved_cp->rlec &= ~SCP_DSENSE;
		lun->flags &= ~CTL_LUN_SENSE_DESC;
		set_ua = 1;
	}
	/* Copy in a changed queue algorithm modifier. */
	if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
	    (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
		current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		set_ua = 1;
	}
	/* Copy in a changed software write protect (SWP) bit. */
	if ((current_cp->eca_and_aen & SCP_SWP) !=
	    (user_cp->eca_and_aen & SCP_SWP)) {
		current_cp->eca_and_aen &= ~SCP_SWP;
		current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		saved_cp->eca_and_aen &= ~SCP_SWP;
		saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		set_ua = 1;
	}
	if (set_ua != 0)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (set_ua) {
		/* Propagate the change to the HA peer. */
		ctl_isc_announce_mode(lun,
		    ctl_get_initindex(&ctsio->io_hdr.nexus),
		    page_index->page_code, page_index->subpage);
	}
	return (0);
}

/*
 * MODE SELECT handler for the caching page: applies changes to the WCE
 * and RCD bits and notifies other initiators / the HA peer.
 */
int
ctl_caching_sp_handler(struct ctl_scsiio *ctsio,
		     struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct scsi_caching_page *current_cp, *saved_cp, *user_cp;
	struct ctl_lun *lun;
	int set_ua;			/* post a MODE CHANGE unit attention? */
	uint32_t initidx;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	user_cp = (struct scsi_caching_page *)page_ptr;
	current_cp = (struct scsi_caching_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_CURRENT));
	saved_cp = (struct scsi_caching_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_SAVED));

	mtx_lock(&lun->lun_lock);
	/* Only WCE and RCD are honored from the user-supplied page. */
	if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) !=
	    (user_cp->flags1 & (SCP_WCE | SCP_RCD))) {
		current_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
		current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
		saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD);
		saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD);
		set_ua = 1;
	}
	if (set_ua != 0)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);
	if (set_ua) {
		ctl_isc_announce_mode(lun,
		    ctl_get_initindex(&ctsio->io_hdr.nexus),
		    page_index->page_code, page_index->subpage);
	}
	return (0);
}

/*
 * MODE SELECT handler for the vendor-specific debug configuration
 * subpage: updates the global I/O timing threshold from the page data.
 */
int
ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio,
				struct ctl_page_index *page_index,
				uint8_t *page_ptr)
{
	uint8_t *c;
	int i;

	/* The value is a 16-bit big-endian field in the subpage. */
	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
	ctl_time_io_secs =
		(c[0] << 8) |
		(c[1] << 0) |
		0;
	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
	printf("page data:");
	for (i=0; i<8; i++)
		printf(" %.2x",page_ptr[i]);
	printf("\n");
	return (0);
}

/*
 * MODE SENSE handler for the debug configuration subpage: refreshes the
 * "current" copy of the page from the global ctl_time_io_secs value.
 */
int
ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
			       struct ctl_page_index *page_index,
			       int pc)
{
	struct copan_debugconf_subpage *page;

	page = (struct copan_debugconf_subpage *)page_index->page_data +
		(page_index->page_len * pc);

	switch (pc) {
	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
	case SMS_PAGE_CTRL_DEFAULT >> 6:
	case SMS_PAGE_CTRL_SAVED >> 6:
		/*
		 * We don't update the changeable or default bits for this page.
*/ break; case SMS_PAGE_CTRL_CURRENT >> 6: page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; break; default: break; } return (0); } static int ctl_do_mode_select(union ctl_io *io) { struct scsi_mode_page_header *page_header; struct ctl_page_index *page_index; struct ctl_scsiio *ctsio; int page_len, page_len_offset, page_len_size; union ctl_modepage_info *modepage_info; struct ctl_lun *lun; int *len_left, *len_used; int retval, i; ctsio = &io->scsiio; page_index = NULL; page_len = 0; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; len_left = &modepage_info->header.len_left; len_used = &modepage_info->header.len_used; do_next_page: page_header = (struct scsi_mode_page_header *) (ctsio->kern_data_ptr + *len_used); if (*len_left == 0) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (*len_left < sizeof(struct scsi_mode_page_header)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if ((page_header->page_code & SMPH_SPF) && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM should we do something with the block descriptor? 
*/ for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; if ((page_index->page_code & SMPH_PC_MASK) != (page_header->page_code & SMPH_PC_MASK)) continue; /* * If neither page has a subpage code, then we've got a * match. */ if (((page_index->page_code & SMPH_SPF) == 0) && ((page_header->page_code & SMPH_SPF) == 0)) { page_len = page_header->page_length; break; } /* * If both pages have subpages, then the subpage numbers * have to match. */ if ((page_index->page_code & SMPH_SPF) && (page_header->page_code & SMPH_SPF)) { struct scsi_mode_page_header_sp *sph; sph = (struct scsi_mode_page_header_sp *)page_header; if (page_index->subpage == sph->subpage) { page_len = scsi_2btoul(sph->page_length); break; } } } /* * If we couldn't find the page, or if we don't have a mode select * handler for it, send back an error to the user. */ if ((i >= CTL_NUM_MODE_PAGES) || (page_index->select_handler == NULL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (page_index->page_code & SMPH_SPF) { page_len_offset = 2; page_len_size = 2; } else { page_len_size = 1; page_len_offset = 1; } /* * If the length the initiator gives us isn't the one we specify in * the mode page header, or if they didn't specify enough data in * the CDB to avoid truncating this page, kick out the request. 
*/ if ((page_len != (page_index->page_len - page_len_offset - page_len_size)) || (*len_left < page_index->page_len)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + page_len_offset, /*bit_valid*/ 0, /*bit*/ 0); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Run through the mode page, checking to make sure that the bits * the user changed are actually legal for him to change. */ for (i = 0; i < page_index->page_len; i++) { uint8_t *user_byte, *change_mask, *current_byte; int bad_bit; int j; user_byte = (uint8_t *)page_header + i; change_mask = page_index->page_data + (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; current_byte = page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT) + i; /* * Check to see whether the user set any bits in this byte * that he is not allowed to set. */ if ((*user_byte & ~(*change_mask)) == (*current_byte & ~(*change_mask))) continue; /* * Go through bit by bit to determine which one is illegal. */ bad_bit = 0; for (j = 7; j >= 0; j--) { if ((((1 << i) & ~(*change_mask)) & *user_byte) != (((1 << i) & ~(*change_mask)) & *current_byte)) { bad_bit = i; break; } } ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ *len_used + i, /*bit_valid*/ 1, /*bit*/ bad_bit); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Decrement these before we call the page handler, since we may * end up getting called back one way or another before the handler * returns to this context. */ *len_left -= page_index->page_len; *len_used += page_index->page_len; retval = page_index->select_handler(ctsio, page_index, (uint8_t *)page_header); /* * If the page handler returns CTL_RETVAL_QUEUED, then we need to * wait until this queued command completes to finish processing * the mode page. If it returns anything other than * CTL_RETVAL_COMPLETE (e.g. 
CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
	 */
	if (*len_left > 0)
		goto do_next_page;

	ctl_set_success(ctsio);
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_done((union ctl_io *)ctsio);

bailout_no_done:

	return (CTL_RETVAL_COMPLETE);

}

/*
 * Handle MODE SELECT (6 and 10 byte versions).  Validates the parameter
 * list length, fetches the data from the initiator, then hands page
 * parsing off to ctl_do_mode_select() (re-entered via io_cont as needed).
 */
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	int param_len, pf, sp;
	int header_size, bd_len;
	union ctl_modepage_info *modepage_info;

	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;
		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Re-entered with data present: pull the block descriptor length. */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]);
	}

	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_do_mode_select;

	/* Seed the parser state: skip the header and block descriptor(s). */
	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
	memset(modepage_info, 0, sizeof(*modepage_info));
	modepage_info->header.len_left = param_len - header_size - bd_len;
	modepage_info->header.len_used = header_size + bd_len;

	return (ctl_do_mode_select((union ctl_io *)ctsio));
}

int
ctl_mode_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int pc, page_code, dbd, llba, subpage;
	int alloc_len, page_len, header_len, total_len;
	struct scsi_mode_block_descr *block_desc;
	struct ctl_page_index *page_index;

	dbd = 0;
	llba = 0;
	block_desc = NULL;

	CTL_DEBUG_PRINT(("ctl_mode_sense\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	switch (ctsio->cdb[0]) {
	case MODE_SENSE_6: {
		struct scsi_mode_sense_6 *cdb;

		cdb = (struct scsi_mode_sense_6 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_6);
		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);

		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = cdb->length;
		break;
	}
	case MODE_SENSE_10: {
		struct scsi_mode_sense_10 *cdb;

		cdb = (struct scsi_mode_sense_10 *)ctsio->cdb;

		header_len = sizeof(struct scsi_mode_hdr_10);

		if (cdb->byte2 & SMS_DBD)
			dbd = 1;
		else
			header_len += sizeof(struct scsi_mode_block_descr);
		if (cdb->byte2 & SMS10_LLBAA)
			llba = 1;
		pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6;
		page_code = cdb->page & SMS_PAGE_CODE;
		subpage = cdb->subpage;
		alloc_len = scsi_2btoul(cdb->length);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * We have to make a first pass through to calculate the size of
	 * the pages that match the user's query.  Then we allocate enough
	 * memory to hold it, and actually copy the data into the buffer.
*/ switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; #if 0 printf("found page %#x len %d\n", page_index->page_code & SMPH_PC_MASK, page_index->page_len); #endif page_len += page_index->page_len; } break; } default: { int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; #if 0 printf("found page %#x len %d\n", page_index->page_code & SMPH_PC_MASK, page_index->page_len); #endif 
page_len += page_index->page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; #if 0 printf("header_len = %d, page_len = %d, total_len = %d\n", header_len, page_len, total_len); #endif ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->mode_pages.control_page[CTL_PAGE_CURRENT] .eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) header->block_descr_len = 0; else header->block_descr_len = sizeof(struct scsi_mode_block_descr); block_desc = (struct scsi_mode_block_descr *)&header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (lun->be_lun->lun_type == T_DIRECT) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->be_lun->flags & CTL_LUN_FLAG_READONLY) || (lun->mode_pages.control_page[CTL_PAGE_CURRENT] .eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) scsi_ulto2b(0, header->block_descr_len); else scsi_ulto2b(sizeof(struct scsi_mode_block_descr), header->block_descr_len); block_desc = (struct 
scsi_mode_block_descr *)&header[1]; break; } default: panic("%s: Invalid CDB type %#x", __func__, ctsio->cdb[0]); } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. */ if (dbd == 0) { if (lun->be_lun->lun_type == T_DIRECT) scsi_ulto3b(lun->be_lun->blocksize, block_desc->block_len); else scsi_ulto3b(0, block_desc->block_len); } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if (lun->be_lun->lun_type == T_DIRECT && (page_index->page_flags & CTL_PAGE_FLAG_DIRECT) == 0) continue; if (lun->be_lun->lun_type == T_PROCESSOR && (page_index->page_flags & CTL_PAGE_FLAG_PROC) == 0) continue; if (lun->be_lun->lun_type == T_CDROM && (page_index->page_flags & CTL_PAGE_FLAG_CDROM) == 0) continue; /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun; struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = 
lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_sap_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun; struct stat_page *data; uint64_t rn, wn, rb, wb; struct bintime rt, wt; int i; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data = (struct stat_page *)page_index->page_data; scsi_ulto2b(SLP_SAP, data->sap.hdr.param_code); data->sap.hdr.param_control = SLP_LBIN; data->sap.hdr.param_len = sizeof(struct scsi_log_stat_and_perf) - sizeof(struct scsi_log_param_header); rn = wn = rb = wb = 0; bintime_clear(&rt); bintime_clear(&wt); for (i = 0; i < CTL_MAX_PORTS; i++) { rn += lun->stats.ports[i].operations[CTL_STATS_READ]; wn += lun->stats.ports[i].operations[CTL_STATS_WRITE]; rb += lun->stats.ports[i].bytes[CTL_STATS_READ]; wb += lun->stats.ports[i].bytes[CTL_STATS_WRITE]; bintime_add(&rt, &lun->stats.ports[i].time[CTL_STATS_READ]); bintime_add(&wt, &lun->stats.ports[i].time[CTL_STATS_WRITE]); } scsi_u64to8b(rn, data->sap.read_num); scsi_u64to8b(wn, data->sap.write_num); if (lun->stats.blocksize > 0) { scsi_u64to8b(wb / lun->stats.blocksize, data->sap.recvieved_lba); scsi_u64to8b(rb / lun->stats.blocksize, data->sap.transmitted_lba); } scsi_u64to8b((uint64_t)rt.sec * 1000 + rt.frac / (UINT64_MAX / 1000), data->sap.read_int); scsi_u64to8b((uint64_t)wt.sec * 1000 + wt.frac / (UINT64_MAX / 1000), data->sap.write_int); scsi_u64to8b(0, data->sap.weighted_num); scsi_u64to8b(0, data->sap.weighted_int); scsi_ulto2b(SLP_IT, data->it.hdr.param_code); data->it.hdr.param_control = SLP_LBIN; data->it.hdr.param_len = 
sizeof(struct scsi_log_idle_time) - sizeof(struct scsi_log_param_header); #ifdef CTL_TIME_IO scsi_u64to8b(lun->idle_time / SBT_1MS, data->it.idle_int); #endif scsi_ulto2b(SLP_TI, data->ti.hdr.param_code); data->it.hdr.param_control = SLP_LBIN; data->ti.hdr.param_len = sizeof(struct scsi_log_time_interval) - sizeof(struct scsi_log_param_header); scsi_ulto4b(3, data->ti.exponent); scsi_ulto4b(1, data->ti.integer); page_index->page_len = sizeof(*data); return (0); } int ctl_log_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; int i, pc, page_code, subpage; int alloc_len, total_len; struct ctl_page_index *page_index; struct scsi_log_sense *cdb; struct scsi_log_header *header; CTL_DEBUG_PRINT(("ctl_log_sense\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_log_sense *)ctsio->cdb; pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SLS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); page_index = NULL; for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SL_PAGE_CODE) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if (page_index->subpage != subpage) continue; break; } if (i >= CTL_NUM_LOG_PAGES) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_log_header) + page_index->page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } header = (struct scsi_log_header 
*)ctsio->kern_data_ptr; header->page = page_index->page_code; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING) header->page |= SL_DS; if (page_index->subpage) { header->page |= SL_SPF; header->subpage = page_index->subpage; } scsi_ulto2b(page_index->page_len, header->datalen); /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index, pc); memcpy(header + 1, page_index->page_data, page_index->page_len); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity(struct ctl_scsiio *ctsio) { struct scsi_read_capacity *cdb; struct scsi_read_capacity_data *data; struct ctl_lun *lun; uint32_t lba; CTL_DEBUG_PRINT(("ctl_read_capacity\n")); cdb = (struct scsi_read_capacity *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; ctsio->residual = 0; ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If the maximum LBA is greater than 0xfffffffe, the user must * issue a SERVICE ACTION IN (16) command, with the read capacity * serivce action set. */ if (lun->be_lun->maxlba > 0xfffffffe) scsi_ulto4b(0xffffffff, data->addr); else scsi_ulto4b(lun->be_lun->maxlba, data->addr); /* * XXX KDM this may not be 512 bytes... 
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity_16(struct ctl_scsiio *ctsio) { struct scsi_read_capacity_16 *cdb; struct scsi_read_capacity_data_long *data; struct ctl_lun *lun; uint64_t lba; uint32_t alloc_len; CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; alloc_len = scsi_4btoul(cdb->alloc_len); lba = scsi_8btou64(cdb->addr); if ((cdb->reladr & SRC16_PMI) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; if (sizeof(*data) < alloc_len) { ctsio->residual = alloc_len - sizeof(*data); ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; scsi_u64to8b(lun->be_lun->maxlba, data->addr); /* XXX KDM this may not be 512 bytes... 
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. 
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union 
ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, pc, pg, ts, os; int num_ha_groups, num_target_ports, shared_group; struct ctl_lun *lun; struct ctl_softc *softc; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } num_target_ports = 0; shared_group = (softc->is_single != 0); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; num_target_ports++; if (port->status & CTL_PORT_STATUS_HA_SHARED) shared_group = 1; } mtx_unlock(&softc->ctl_lock); num_ha_groups = (softc->is_single) ? 
0 : NUM_HA_SHELVES; if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * (shared_group + num_ha_groups) + sizeof(struct scsi_target_port_descriptor) * num_target_ports; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_min / softc->port_cnt; if (lun->flags & (CTL_LUN_PRIMARY_SC | CTL_LUN_PEER_SC_PRIMARY)) { /* Some shelf is known to be primary. */ if (softc->ha_link == CTL_HA_LINK_OFFLINE) os = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; else if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) os = TPG_ASYMMETRIC_ACCESS_STANDBY; else os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; if (lun->flags & CTL_LUN_PRIMARY_SC) { ts = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { /* No known primary shelf. 
*/ if (softc->ha_link == CTL_HA_LINK_OFFLINE) { ts = TPG_ASYMMETRIC_ACCESS_UNAVAILABLE; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else if (softc->ha_link == CTL_HA_LINK_UNKNOWN) { ts = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } else { ts = os = TPG_ASYMMETRIC_ACCESS_TRANSITIONING; } } if (shared_group) { tpg_desc->pref_state = ts; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (!softc->is_single && (port->status & CTL_PORT_STATUS_HA_SHARED) == 0) continue; if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } for (g = 0; g < num_ha_groups; g++) { tpg_desc->pref_state = (g == pg) ? ts : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP | TPG_U_SUP | TPG_T_SUP; scsi_ulto2b(2 + g, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if (port->targ_port < g * softc->port_cnt || port->targ_port >= (g + 1) * softc->port_cnt) continue; if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (port->status & CTL_PORT_STATUS_HA_SHARED) continue; if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; scsi_ulto2b(port->targ_port, tpg_desc->descriptors[pc]. 
relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || 
service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) 
ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_QTS | RST_TRS; data->byte2 |= RST_QAES | RST_QTSS | RST_ITNRS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp 
*)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct scsi_per_res_in_rsrv in_data; */ struct ctl_lun *lun; struct ctl_softc *softc; uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = 
sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. 
*/ if (key_count >= lun->pr_key_count) { key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->pr_generation, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. */ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. 
*/ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->pr_res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags1 = SPRI_CRH; res_cap->flags2 = SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) 
*/ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->pr_generation, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->pr_res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[i / CTL_MAX_INIT_PER_PORT]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: panic("%s: Invalid PR type %#x", __func__, cdb->action); } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. 
*/ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int i; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if 
(lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices either * generate a res conflict or check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) != sa_res_key) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* Reserved but not all registrants */ /* sa_res_key is res holder */ if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { 
mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Do the following: * if sa_res_key != res_key remove all * registrants w/sa_res_key and generate UA * for these registrants(Registrations * Preempted) if it wasn't an exclusive * reservation generate UA(Reservations * Preempted) for all other registered nexuses * if the type has changed. Establish the new * reservation and holder. If res_key and * sa_res_key are the same do the above * except don't unregister the res holder. */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, 
M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } lun->pr_generation++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io.pr), M_WAITOK); } } return (0); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->pr_res_type = msg->pr.pr_info.res_type; if (lun->pr_res_type != SPR_TYPE_WR_EX_AR && lun->pr_res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->pr_res_type && (lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = msg->pr.pr_info.res_type; if 
/*
 * Handle the SCSI PERSISTENT RESERVE OUT command (SPC-3/SPC-4): REGISTER,
 * REGISTER AND IGNORE EXISTING KEY, RESERVE, RELEASE, CLEAR, PREEMPT and
 * PREEMPT AND ABORT.  The command is processed in two passes: the first
 * invocation allocates the data-out buffer and starts the data phase; the
 * second (CTL_FLAG_ALLOCATED set) validates and executes the action.
 * Each executed action is also mirrored to the peer controller via
 * CTL_MSG_PERS_ACTION so both HA sides stay in sync.
 *
 * Returns CTL_RETVAL_COMPLETE in all cases; completion status is delivered
 * through ctl_done()/ctl_datamove().
 */
int
ctl_persistent_reserve_out(struct ctl_scsiio *ctsio)
{
	int retval;
	u_int32_t param_len;
	struct scsi_per_res_out *cdb;
	struct ctl_lun *lun;
	struct scsi_per_res_out_parms* param;
	struct ctl_softc *softc;
	uint32_t residx;
	uint64_t res_key, sa_res_key, key;
	uint8_t type;
	union ctl_ha_msg persis_io;
	int i;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n"));

	retval = CTL_RETVAL_COMPLETE;

	cdb = (struct scsi_per_res_out *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	/*
	 * We only support whole-LUN scope.  The scope & type are ignored for
	 * register, register and ignore existing key and clear.
	 * We sometimes ignore scope and type on preempts too!!
	 * Verify reservation type here as well.
	 */
	type = cdb->scope_type & SPR_TYPE_MASK;
	if ((cdb->action == SPRO_RESERVE)
	 || (cdb->action == SPRO_RELEASE)) {
		if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Reject reservation types not defined by SPC (2, 4, 0, >8). */
		if (type>8 || type==2 || type==4 || type==0) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	param_len = scsi_4btoul(cdb->length);

	/* First pass: allocate the buffer and kick off the data-out phase. */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;

	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	res_key = scsi_8btou64(param->res_key.key);
	sa_res_key = scsi_8btou64(param->serv_act_res_key);

	/*
	 * Validate the reservation key here except for SPRO_REG_IGNO
	 * This must be done for all other service actions
	 */
	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
		mtx_lock(&lun->lun_lock);
		if ((key = ctl_get_prkey(lun, residx)) != 0) {
			if (res_key != key) {
				/*
				 * The current key passed in doesn't match
				 * the one the initiator previously
				 * registered.
				 */
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		} else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
			/*
			 * We are not registered
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		} else if (res_key != 0) {
			/*
			 * We are not registered and trying to register but
			 * the register key isn't zero.
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		mtx_unlock(&lun->lun_lock);
	}

	switch (cdb->action & SPRO_ACTION_MASK) {
	case SPRO_REGISTER:
	case SPRO_REG_IGNO: {
#if 0
		printf("Registration received\n");
#endif

		/*
		 * We don't support any of these options, as we report in
		 * the read capabilities request (see
		 * ctl_persistent_reserve_in(), above).
		 */
		if ((param->flags & SPR_SPEC_I_PT)
		 || (param->flags & SPR_ALL_TG_PT)
		 || (param->flags & SPR_APTPL)) {
			int bit_ptr;

			if (param->flags & SPR_APTPL)
				bit_ptr = 0;
			else if (param->flags & SPR_ALL_TG_PT)
				bit_ptr = 2;
			else /* SPR_SPEC_I_PT */
				bit_ptr = 3;

			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 20,
					      /*bit_valid*/ 1,
					      /*bit*/ bit_ptr);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		mtx_lock(&lun->lun_lock);

		/*
		 * The initiator wants to clear the
		 * key/unregister.
		 */
		if (sa_res_key == 0) {
			/*
			 * Unregistering an unregistered nexus (or a zero
			 * res_key REGISTER) is a successful no-op.
			 */
			if ((res_key == 0
			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
			  && ctl_get_prkey(lun, residx) == 0)) {
				mtx_unlock(&lun->lun_lock);
				goto done;
			}

			ctl_clr_prkey(lun, residx);
			lun->pr_key_count--;

			if (residx == lun->pr_res_idx) {
				/* The holder unregistered: drop reservation. */
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;

				if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
				     lun->pr_res_type == SPR_TYPE_EX_AC_RO) &&
				    lun->pr_key_count) {
					/*
					 * If the reservation is a registrants
					 * only type we need to generate a UA
					 * for other registered inits.  The
					 * sense code should be RESERVATIONS
					 * RELEASED
					 */
					for (i = softc->init_min; i < softc->init_max; i++){
						if (ctl_get_prkey(lun, i) == 0)
							continue;
						ctl_est_ua(lun, i,
						    CTL_UA_RES_RELEASE);
					}
				}
				lun->pr_res_type = 0;
			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
				if (lun->pr_key_count==0) {
					lun->flags &= ~CTL_LUN_PR_RESERVED;
					lun->pr_res_type = 0;
					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
				}
			}
			lun->pr_generation++;
			mtx_unlock(&lun->lun_lock);

			/* Mirror the unregistration to the peer controller. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
			persis_io.pr.pr_info.residx = residx;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		} else /* sa_res_key != 0 */ {
			/*
			 * If we aren't registered currently then increment
			 * the key count and set the registered flag.
			 */
			ctl_alloc_prkey(lun, residx);
			if (ctl_get_prkey(lun, residx) == 0)
				lun->pr_key_count++;
			ctl_set_prkey(lun, residx, sa_res_key);
			lun->pr_generation++;
			mtx_unlock(&lun->lun_lock);

			/* Mirror the registration to the peer controller. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
			persis_io.pr.pr_info.residx = residx;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		}

		break;
	}
	case SPRO_RESERVE:
#if 0
		printf("Reserve executed type %d\n", type);
#endif
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PR_RESERVED) {
			/*
			 * if this isn't the reservation holder and it's
			 * not a "all registrants" type or if the type is
			 * different then we have a conflict
			 */
			if ((lun->pr_res_idx != residx
			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
			 || lun->pr_res_type != type) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
			mtx_unlock(&lun->lun_lock);
		} else /* create a reservation */ {
			/*
			 * If it's not an "all registrants" type record
			 * reservation holder
			 */
			if (type != SPR_TYPE_WR_EX_AR
			 && type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx; /* Res holder */
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			lun->flags |= CTL_LUN_PR_RESERVED;
			lun->pr_res_type = type;

			mtx_unlock(&lun->lun_lock);

			/* send msg to other side */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
			    sizeof(persis_io.pr), M_WAITOK);
		}
		break;

	case SPRO_RELEASE:
		mtx_lock(&lun->lun_lock);
		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
			/* No reservation exists return good status */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}
		/*
		 * Is this nexus a reservation holder?
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * not a res holder return good status but
			 * do nothing
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		/* Releasing with the wrong type is an error per SPC. */
		if (lun->pr_res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->pr_res_type = 0;

		/*
		 * if this isn't an exclusive access
		 * res generate UA for all other
		 * registrants.
		 */
		if (type != SPR_TYPE_EX_AC
		 && type != SPR_TYPE_WR_EX) {
			for (i = softc->init_min; i < softc->init_max; i++) {
				if (i == residx || ctl_get_prkey(lun, i) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);

		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		ctl_clr_prkey(lun, residx);
		/* Drop every remaining key and post REGISTRATIONS PREEMPTED. */
		for (i = 0; i < CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->pr_generation++;
		mtx_unlock(&lun->lun_lock);

		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io.pr), M_WAITOK);
		break;

	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		/* Non-zero means ctl_pro_preempt() already completed the I/O. */
		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key,
					  type, residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("%s: Invalid PR type %#x", __func__, cdb->action);
	}

done:
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}
cdb->action); } done: free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } /* * This routine is for handling a message from the other SC pertaining to * persistent reserve out. All the error checking will have been done * so only perorming the action need be done here to keep the two * in sync. */ static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun; int i; uint32_t residx, targ_lun; targ_lun = msg->hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || ((lun = softc->ctl_luns[targ_lun]) == NULL)) { mtx_unlock(&softc->ctl_lock); return; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); return; } residx = ctl_get_initindex(&msg->hdr.nexus); switch(msg->pr.pr_info.action) { case CTL_PR_REG_KEY: ctl_alloc_prkey(lun, msg->pr.pr_info.residx); if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, msg->pr.pr_info.residx, scsi_8btou64(msg->pr.pr_info.sa_res_key)); lun->pr_generation++; break; case CTL_PR_UNREG_KEY: ctl_clr_prkey(lun, msg->pr.pr_info.residx); lun->pr_key_count--; /* XXX Need to see if the reservation has been released */ /* if so do we need to generate UA? */ if (msg->pr.pr_info.residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->pr_res_type == SPR_TYPE_WR_EX_RO || lun->pr_res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. 
The * sense code should be RESERVATIONS * RELEASED */ for (i = softc->init_min; i < softc->init_max; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->pr_res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->pr_generation++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->pr_res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * if this isn't an exclusive access res generate UA for all * other registrants. */ if (lun->pr_res_type != SPR_TYPE_EX_AC && lun->pr_res_type != SPR_TYPE_WR_EX) { for (i = softc->init_min; i < softc->init_max; i++) if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->pr_res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_generation++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = 
/*
 * Handle the SCSI READ/WRITE family (READ/WRITE 6/10/12/16, WRITE AND
 * VERIFY 10/12/16, WRITE ATOMIC 16): decode the LBA, transfer length and
 * FUA/DPO flags from the CDB, validate the range against the LUN, then
 * hand the I/O to the backend via data_submit().
 */
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	isread = ctsio->cdb[0] == READ_6  || ctsio->cdb[0] == READ_10
	      || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16;
	switch (ctsio->cdb[0]) {
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)ctsio->cdb;

		lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		lba &= 0x1fffff;
		num_blocks = cdb->length;
		/*
		 * This is correct according to SBC-2.
		 */
		if (num_blocks == 0)
			num_blocks = 256;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)ctsio->cdb;
		/* WRITE AND VERIFY always behaves as FUA. */
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_write_atomic_16 *cdb;

		/* Backend advertises no atomic capability: not supported. */
		if (lun->be_lun->atomicblock == 0) {
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		cdb = (struct scsi_write_atomic_16 *)ctsio->cdb;
		if (cdb->byte2 & SRW12_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW12_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		/* Request larger than the atomic write granularity. */
		if (num_blocks > lun->be_lun->atomicblock) {
			ctl_set_invalid_field(ctsio, /*sks_valid*/ 1,
			    /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0,
			    /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)ctsio->cdb;
		flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SWV_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 * Note that this cannot happen with WRITE(6) or READ(6), since 0
	 * translates to 256 blocks for those commands.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA and/or DPO if caches are disabled. */
	if (isread) {
		if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
		    SCP_RCD) != 0)
			flags |= CTL_LLF_FUA | CTL_LLF_DPO;
	} else {
		if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
		    SCP_WCE) == 0)
			flags |= CTL_LLF_FUA;
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags;

	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}
CTL_LLF_READ : CTL_LLF_WRITE) | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static int ctl_cnw_cont(union ctl_io *io) { struct ctl_scsiio *ctsio; struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->flags &= ~CTL_LLF_COMPARE; lbalen->flags |= CTL_LLF_WRITE; CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_cnw(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); flags = 0; switch (ctsio->cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = cdb->length; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. 
*/ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA if write cache is disabled. */ if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_data_submit_done(), it'll get passed back to * ctl_ctl_cnw_cont() for further processing. */ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); 
break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc; struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; struct ctl_lun *lun, *request_lun; struct ctl_port *port; int num_luns, retval; uint32_t alloc_len, lun_datalen; int num_filled; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_report_luns *)ctsio->cdb; port = ctl_io_port(&ctsio->io_hdr); softc = port->ctl_softc; 
	CTL_DEBUG_PRINT(("ctl_report_luns\n"));

	/* Count the LUNs this port can see, so we know how much to allocate. */
	mtx_lock(&softc->ctl_lock);
	num_luns = 0;
	for (targ_lun_id = 0; targ_lun_id < CTL_MAX_LUNS; targ_lun_id++) {
		if (ctl_lun_map_from_port(port, targ_lun_id) < CTL_MAX_LUNS)
			num_luns++;
	}
	mtx_unlock(&softc->ctl_lock);

	switch (cdb->select_report) {
	case RPL_REPORT_DEFAULT:
	case RPL_REPORT_ALL:
	case RPL_REPORT_NONSUBSID:
		break;
	case RPL_REPORT_WELLKNOWN:
	case RPL_REPORT_ADMIN:
	case RPL_REPORT_CONGLOM:
		/* We have no well-known/administrative LUNs to report. */
		num_luns = 0;
		break;
	default:
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
		break; /* NOTREACHED */
	}

	alloc_len = scsi_4btoul(cdb->length);
	/*
	 * The initiator has to allocate at least 16 bytes for this request,
	 * so he can at least get the header and the first LUN.  Otherwise
	 * we reject the request (per SPC-3 rev 14, section 6.21).
	 */
	if (alloc_len < (sizeof(struct scsi_report_luns_data) +
	    sizeof(struct scsi_report_luns_lundata))) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 6,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	request_lun = (struct ctl_lun *)
	    ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	lun_datalen = sizeof(*lun_data) +
		(num_luns * sizeof(struct scsi_report_luns_lundata));

	ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO);
	lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

	mtx_lock(&softc->ctl_lock);
	for (targ_lun_id = 0, num_filled = 0;
	    targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns;
	    targ_lun_id++) {
		lun_id = ctl_lun_map_from_port(port, targ_lun_id);
		if (lun_id >= CTL_MAX_LUNS)
			continue;
		lun = softc->ctl_luns[lun_id];
		if (lun == NULL)
			continue;

		be64enc(lun_data->luns[num_filled++].lundata,
		    ctl_encode_lun(targ_lun_id));

		/*
		 * According to SPC-3, rev 14 section 6.21:
		 *
		 * "The execution of a REPORT LUNS command to any valid and
		 * installed logical unit shall clear the REPORTED LUNS DATA
		 * HAS CHANGED unit attention condition for all logical
		 * units of that target with respect to the requesting
		 * initiator. A valid and installed logical unit is one
		 * having a PERIPHERAL QUALIFIER of 000b in the standard
		 * INQUIRY data (see 6.4.2)."
		 *
		 * If request_lun is NULL, the LUN this report luns command
		 * was issued to is either disabled or doesn't exist. In that
		 * case, we shouldn't clear any pending lun change unit
		 * attention.
		 */
		if (request_lun != NULL) {
			mtx_lock(&lun->lun_lock);
			ctl_clr_ua(lun, initidx, CTL_UA_LUN_CHANGE);
			mtx_unlock(&lun->lun_lock);
		}
	}
	mtx_unlock(&softc->ctl_lock);

	/*
	 * It's quite possible that we've returned fewer LUNs than we allocated
	 * space for.  Trim it.
	 */
	lun_datalen = sizeof(*lun_data) +
		(num_filled * sizeof(struct scsi_report_luns_lundata));

	if (lun_datalen < alloc_len) {
		ctsio->residual = alloc_len - lun_datalen;
		ctsio->kern_data_len = lun_datalen;
		ctsio->kern_total_len = lun_datalen;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * We set this to the actual data length, regardless of how much
	 * space we actually have to return results.  If the user looks at
	 * this value, he'll know whether or not he allocated enough space
	 * and reissue the command if necessary.  We don't support well
	 * known logical units, so if the user asks for that, return none.
	 */
	scsi_ulto4b(lun_datalen - 8, lun_data->length);

	/*
	 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy
	 * this request.
 */
	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SCSI REQUEST SENSE command.  Returns any pending sense data
 * or unit attention condition for the requesting initiator, in either
 * fixed or descriptor format as selected by the DESC bit in the CDB; if
 * nothing is pending, NO SENSE data is returned.
 */
int
ctl_request_sense(struct ctl_scsiio *ctsio)
{
	struct scsi_request_sense *cdb;
	struct scsi_sense_data *sense_ptr;
	struct ctl_softc *ctl_softc;
	struct ctl_lun *lun;
	uint32_t initidx;
	int have_error;
	scsi_sense_data_type sense_format;
	ctl_ua_type ua_type;

	cdb = (struct scsi_request_sense *)ctsio->cdb;

	ctl_softc = control_softc;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_request_sense\n"));

	/*
	 * Determine which sense format the user wants.
	 */
	if (cdb->byte2 & SRS_DESC)
		sense_format = SSD_TYPE_DESC;
	else
		sense_format = SSD_TYPE_FIXED;

	ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK);
	sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/*
	 * struct scsi_sense_data, which is currently set to 256 bytes, is
	 * larger than the largest allowed value for the length field in the
	 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4.
	 */
	ctsio->residual = 0;
	ctsio->kern_data_len = cdb->length;
	ctsio->kern_total_len = cdb->length;

	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If we don't have a LUN, we don't have any pending sense.
	 */
	if (lun == NULL)
		goto no_sense;

	have_error = 0;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	/*
	 * Check for pending sense, and then for pending unit attentions.
	 * Pending sense gets returned first, then pending unit attentions.
	 */
	mtx_lock(&lun->lun_lock);
#ifdef CTL_WITH_CA
	if (ctl_is_set(lun->have_ca, initidx)) {
		scsi_sense_data_type stored_format;

		/*
		 * Check to see which sense format was used for the stored
		 * sense data.
		 */
		stored_format = scsi_sense_type(&lun->pending_sense[initidx]);

		/*
		 * If the user requested a different sense format than the
		 * one we stored, then we need to convert it to the other
		 * format.  If we're going from descriptor to fixed format
		 * sense data, we may lose things in translation, depending
		 * on what options were used.
		 *
		 * If the stored format is SSD_TYPE_NONE (i.e. invalid),
		 * for some reason we'll just copy it out as-is.
		 */
		if ((stored_format == SSD_TYPE_FIXED)
		 && (sense_format == SSD_TYPE_DESC))
			ctl_sense_to_desc((struct scsi_sense_data_fixed *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_desc *)sense_ptr);
		else if ((stored_format == SSD_TYPE_DESC)
		      && (sense_format == SSD_TYPE_FIXED))
			ctl_sense_to_fixed((struct scsi_sense_data_desc *)
			    &lun->pending_sense[initidx],
			    (struct scsi_sense_data_fixed *)sense_ptr);
		else
			memcpy(sense_ptr, &lun->pending_sense[initidx],
			       MIN(sizeof(*sense_ptr),
			       sizeof(lun->pending_sense[initidx])));

		ctl_clear_mask(lun->have_ca, initidx);
		have_error = 1;
	} else
#endif
	{
		ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format);
		if (ua_type != CTL_UA_NONE)
			have_error = 1;
		if (ua_type == CTL_UA_LUN_CHANGE) {
			/*
			 * Reporting a LUN CHANGE UA clears it on all LUNs for
			 * this initiator.  Drop the per-LUN lock before taking
			 * the softc-wide lock to preserve lock ordering, then
			 * reacquire it afterwards.
			 */
			mtx_unlock(&lun->lun_lock);
			mtx_lock(&ctl_softc->ctl_lock);
			ctl_clr_ua_allluns(ctl_softc, initidx, ua_type);
			mtx_unlock(&ctl_softc->ctl_lock);
			mtx_lock(&lun->lun_lock);
		}
	}
	mtx_unlock(&lun->lun_lock);

	/*
	 * We already have a pending error, return it.
	 */
	if (have_error != 0) {
		/*
		 * We report the SCSI status as OK, since the status of the
		 * request sense command itself is OK.
		 * We report 0 for the sense length, because we aren't doing
		 * autosense in this case.  We're reporting sense as
		 * parameter data.
		 */
		ctl_set_success(ctsio);
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

no_sense:

	/*
	 * No sense information to report, so we report that everything is
	 * okay.
	 */
	ctl_set_sense_data(sense_ptr,
			   lun,
			   sense_format,
			   /*current_error*/ 1,
			   /*sense_key*/ SSD_KEY_NO_SENSE,
			   /*asc*/ 0x00,
			   /*ascq*/ 0x00,
			   SSD_ELEM_NONE);

	/*
	 * We report 0 for the sense length, because we aren't doing
	 * autosense in this case.
We're reporting sense as parameter data. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x00, the Supported VPD Pages page. */ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_supported_pages *pages; int sup_page_size; struct ctl_lun *lun; int p; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sup_page_size < alloc_len) { ctsio->residual = alloc_len - sup_page_size; ctsio->kern_data_len = sup_page_size; ctsio->kern_total_len = sup_page_size; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_unit_serial_number *sn_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. */ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_extended_inquiry_data *eid_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. 
*/ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we report * it to that nexus once. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLT; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_mode_page_policy *mpp_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; struct ctl_softc *softc; struct ctl_lun *lun; struct ctl_port *port; int data_len, g; uint8_t proto; softc = control_softc; port = ctl_io_port(&ctsio->io_hdr); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port && port->port_devid) data_len += port->port_devid->len; if (port && port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
*/ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port && port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port && port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. */ if (port && port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; if (softc->is_single || (port && port->status & CTL_PORT_STATUS_HA_SHARED)) g = 1; else g = 2 + ctsio->io_hdr.nexus.targ_port / softc->port_cnt; scsi_ulto2b(g, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port && port->target_devid) { memcpy(desc, port->target_devid->data, 
port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = control_softc; struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_lun *lun; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; scsi_ulto2b(port->targ_port, pd->relative_port_id); if (port->init_devid) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_block_limits *bl_ptr; struct ctl_lun *lun; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sizeof(*bl_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*bl_ptr); ctsio->kern_data_len = sizeof(*bl_ptr); ctsio->kern_total_len = sizeof(*bl_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = 
alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); scsi_ulto4b(0, bl_ptr->max_atomic_transfer_length_with_atomic_boundary); scsi_ulto4b(0, bl_ptr->max_atomic_boundary_size); } scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_block_device_characteristics *bdc_ptr; struct ctl_lun *lun; const char *value; u_int i; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; 
ctsio->kern_sg_entries = 0; if (sizeof(*bdc_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*bdc_ptr); ctsio->kern_data_len = sizeof(*bdc_ptr); ctsio->kern_total_len = sizeof(*bdc_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_logical_block_prov *lbp_ptr; struct ctl_lun *lun; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sizeof(*lbp_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*lbp_ptr); ctsio->kern_data_len = sizeof(*lbp_ptr); ctsio->kern_total_len = sizeof(*lbp_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = 
alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct scsi_inquiry *cdb; int alloc_len, retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. 
 */
/*
 * Build standard INQUIRY data (SPC-4) for the addressed LUN, or
 * peripheral-qualifier "bad LUN" data when no LUN is present.
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	port = ctl_io_port(&ctsio->io_hdr);
	port_type = port->port_type;
	/* Internal/ioctl ports report themselves as parallel SCSI. */
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	if (lun != NULL) {
		/*
		 * Report the LUN as connected when this side is primary
		 * or the HA link is up enough to serve it; otherwise as
		 * offline.
		 */
		if ((lun->flags & CTL_LUN_PRIMARY_SC) ||
		    softc->ha_link >= CTL_HA_LINK_UNKNOWN) {
			inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
			    lun->be_lun->lun_type;
		} else {
			inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) |
			    lun->be_lun->lun_type;
		}
		if (lun->flags & CTL_LUN_REMOVABLE)
			inq_ptr->dev_qual2 |= SID_RMB;
	} else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;
	inq_ptr->flags = SID_CmdQue;
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags |= SID_WBus16 | SID_Sync;

	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) ==
	    NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_CDROM:
			strncpy(inq_ptr->product, CTL_CDROM_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
		    SID_SPI_IUS;

	/* SAM-5 (no version claimed) */
	scsi_ulto2b(0x00A0, inq_ptr->version1);
	/* SPC-4 (no version claimed) */
	scsi_ulto2b(0x0460, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
			break;
		case T_CDROM:
			/* MMC-6 (no version claimed) */
			scsi_ulto2b(0x04E0, inq_ptr->version4);
			break;
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Dispatch INQUIRY: EVPD requests go to the vital-product-data handler,
 * page code 0 without EVPD is standard INQUIRY, anything else is an
 * invalid-field error on CDB byte 2.
 */
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 2,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}

/*
 * MMC GET CONFIGURATION: emit the feature descriptor list for the
 * emulated CD/DVD device, starting at the requested feature number.
 * The descriptors are written in ascending feature-code order; the
 * cascade of "goto fNN" below skips straight to the first feature
 * >= the starting one.
 */
int
ctl_get_config(struct ctl_scsiio *ctsio)
{
	struct scsi_get_config_header *hdr;
	struct scsi_get_config_feature *feature;
	struct scsi_get_config *cdb;
	struct ctl_lun *lun;
	uint32_t alloc_len, data_len;
	int rt, starting;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_get_config *)ctsio->cdb;
	rt = (cdb->rt & SGC_RT_MASK);
	starting = scsi_2btoul(cdb->starting_feature);
	alloc_len = scsi_2btoul(cdb->length);

	/* Worst-case size: header plus every feature descriptor below. */
	data_len = sizeof(struct scsi_get_config_header) +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 8 +
	    sizeof(struct scsi_get_config_feature) +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4 +
	    sizeof(struct scsi_get_config_feature) + 4;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	hdr = (struct scsi_get_config_header *)ctsio->kern_data_ptr;
	if (lun->flags & CTL_LUN_NO_MEDIA)
		scsi_ulto2b(0x0000, hdr->current_profile);
	else
		scsi_ulto2b(0x0010, hdr->current_profile);
	feature = (struct scsi_get_config_feature *)(hdr + 1);

	if (starting > 0x003b)
		goto done;
	if (starting > 0x003a)
		goto f3b;
	if (starting > 0x002b)
		goto f3a;
	if (starting > 0x002a)
		goto f2b;
	if (starting > 0x001f)
		goto f2a;
	if (starting > 0x001e)
		goto f1f;
	if (starting > 0x001d)
		goto f1e;
	if (starting > 0x0010)
		goto f1d;
	if (starting > 0x0003)
		goto f10;
	if (starting > 0x0002)
		goto f3;
	if (starting > 0x0001)
		goto f2;
	if (starting > 0x0000)
		goto f1;

	/* Profile List */
	scsi_ulto2b(0x0000, feature->feature_code);
	feature->flags = SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto2b(0x0008, &feature->feature_data[0]);	/* CD-ROM */
	feature->feature_data[2] = 0x00;
	scsi_ulto2b(0x0010, &feature->feature_data[4]);	/* DVD-ROM */
	feature->feature_data[6] = 0x01;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1:	/* Core */
	scsi_ulto2b(0x0001, feature->feature_code);
	feature->flags = 0x08 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(0x00000000, &feature->feature_data[0]);
	feature->feature_data[4] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2:	/* Morphing */
	scsi_ulto2b(0x0002, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x02;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3:	/* Removable Medium */
	scsi_ulto2b(0x0003, feature->feature_code);
	feature->flags = 0x04 | SGC_F_PERSISTENT | SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x39;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

	/* Remaining features are only "current" with media present. */
	if (rt == SGC_RT_CURRENT && (lun->flags & CTL_LUN_NO_MEDIA))
		goto done;

f10:	/* Random Read */
	scsi_ulto2b(0x0010, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 8;
	scsi_ulto4b(lun->be_lun->blocksize, &feature->feature_data[0]);
	scsi_ulto2b(1, &feature->feature_data[4]);
	feature->feature_data[6] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1d:	/* Multi-Read */
	scsi_ulto2b(0x001D, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 0;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1e:	/* CD Read */
	scsi_ulto2b(0x001E, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f1f:	/* DVD Read */
	scsi_ulto2b(0x001F, feature->feature_code);
	feature->flags = 0x08;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x01;
	feature->feature_data[2] = 0x03;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2a:	/* DVD+RW */
	scsi_ulto2b(0x002A, feature->feature_code);
	feature->flags = 0x04;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f2b:	/* DVD+R */
	scsi_ulto2b(0x002B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3a:	/* DVD+RW Dual Layer */
	scsi_ulto2b(0x003A, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature->feature_data[1] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

f3b:	/* DVD+R Dual Layer */
	scsi_ulto2b(0x003B, feature->feature_code);
	feature->flags = 0x00;
	if ((lun->flags & CTL_LUN_NO_MEDIA) == 0)
		feature->flags |= SGC_F_CURRENT;
	feature->add_length = 4;
	feature->feature_data[0] = 0x00;
	feature = (struct scsi_get_config_feature *)
	    &feature->feature_data[feature->add_length];

done:
	data_len = (uint8_t *)feature - (uint8_t *)hdr;
	/*
	 * For RT "specific", keep only the descriptor matching the
	 * requested starting feature (if the first one matches).
	 */
	if (rt == SGC_RT_SPECIFIC && data_len > 4) {
		feature = (struct scsi_get_config_feature *)(hdr + 1);
		if (scsi_2btoul(feature->feature_code) == starting)
			feature = (struct scsi_get_config_feature *)
			    &feature->feature_data[feature->add_length];
		data_len = (uint8_t *)feature - (uint8_t *)hdr;
	}
	scsi_ulto4b(data_len - 4, hdr->data_length);
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * MMC GET EVENT STATUS NOTIFICATION: only the polled mode is supported;
 * we always answer "no event available" with an empty descriptor list.
 */
int
ctl_get_event_status(struct ctl_scsiio *ctsio)
{
	struct scsi_get_event_status_header *hdr;
	struct scsi_get_event_status *cdb;
	struct ctl_lun *lun;
	uint32_t alloc_len, data_len;
	int notif_class;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_get_event_status *)ctsio->cdb;
	if ((cdb->byte2 & SGESN_POLLED) == 0) {
		/* Asynchronous operation is not supported. */
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	notif_class = cdb->notif_class;
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_get_event_status_header);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	hdr = (struct scsi_get_event_status_header *)ctsio->kern_data_ptr;
	scsi_ulto2b(0, hdr->descr_length);
	hdr->nea_class = SGESN_NEA;
	hdr->supported_class = 0;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * MMC MECHANISM STATUS: report a fixed tray-style mechanism with no
 * changer slots.
 */
int
ctl_mechanism_status(struct ctl_scsiio *ctsio)
{
	struct scsi_mechanism_status_header *hdr;
	struct scsi_mechanism_status *cdb;
	struct ctl_lun *lun;
	uint32_t alloc_len, data_len;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_mechanism_status *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	data_len = sizeof(struct scsi_mechanism_status_header);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	hdr = (struct scsi_mechanism_status_header *)ctsio->kern_data_ptr;
	hdr->state1 = 0x00;
	hdr->state2 = 0xe0;
	scsi_ulto3b(0, hdr->lba);
	hdr->slots_num = 0;
	scsi_ulto2b(0, hdr->slots_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Convert an LBA to a BCD-coded MSF (minute/second/frame) address.
 * The +150 accounts for the standard 2-second (150-frame) lead-in
 * offset used by CD addressing.
 */
static void
ctl_ultomsf(uint32_t lba, uint8_t *buf)
{

	lba += 150;
	buf[0] = 0;
	buf[1] = bin2bcd((lba / 75) / 60);
	buf[2] = bin2bcd((lba / 75) % 60);
	buf[3] = bin2bcd(lba % 75);
}

/*
 * MMC READ TOC/PMA/ATIP: synthesize a single-track TOC.  Format 0
 * returns the track descriptor plus the lead-out; other formats get
 * just the single session/track descriptor.
 */
int
ctl_read_toc(struct ctl_scsiio *ctsio)
{
	struct scsi_read_toc_hdr *hdr;
	struct scsi_read_toc_type01_descr *descr;
	struct scsi_read_toc *cdb;
	struct ctl_lun *lun;
	uint32_t alloc_len, data_len;
	int format, msf;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_read_toc *)ctsio->cdb;
	msf = (cdb->byte2 & CD_MSF) != 0;
	format = cdb->format;
	alloc_len = scsi_2btoul(cdb->data_len);

	data_len = sizeof(struct scsi_read_toc_hdr);
	if (format == 0)
		data_len += 2 * sizeof(struct scsi_read_toc_type01_descr);
	else
		data_len += sizeof(struct scsi_read_toc_type01_descr);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	hdr = (struct scsi_read_toc_hdr *)ctsio->kern_data_ptr;
	if (format == 0) {
		scsi_ulto2b(0x12, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
		descr++;
		descr->addr_ctl = 0x14;
		descr->track_number = 0xaa;	/* lead-out */
		if (msf)
			ctl_ultomsf(lun->be_lun->maxlba+1, descr->track_start);
		else
			scsi_ulto4b(lun->be_lun->maxlba+1, descr->track_start);
	} else {
		scsi_ulto2b(0x0a, hdr->data_length);
		hdr->first = 1;
		hdr->last = 1;
		descr = (struct scsi_read_toc_type01_descr *)(hdr + 1);
		descr->addr_ctl = 0x14;
		descr->track_number = 1;
		if (msf)
			ctl_ultomsf(0, descr->track_start);
		else
			scsi_ulto4b(0, descr->track_start);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * For known CDB types, parse the LBA and length.
 */
/*
 * Extract *lba/*len from the CDB of a SCSI I/O.  Returns 0 on success,
 * 1 for non-SCSI I/O or an unrecognized opcode.  Commands without a
 * precise extent (UNMAP, GET LBA STATUS) report a conservative
 * maximal-length range.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;

		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_ATOMIC_16: {
		struct scsi_write_atomic_16 *cdb;

		cdb = (struct scsi_write_atomic_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

/*
 * Decide whether two LBA extents overlap.  For sequential-access
 * serialization (seq) the first extent is treated as open-ended by one
 * block so that a command starting right after it still blocks.
 */
static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2)
	 || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}

/*
 * Check an UNMAP command's descriptor list against the extent
 * [lba2, lba2+len2).  Returns CTL_ACTION_ERROR when io is not an
 * UNMAP, CTL_ACTION_BLOCK while the descriptor data has not arrived
 * yet or when any range collides, CTL_ACTION_PASS otherwise.
 * (Declared int, but it returns ctl_action values throughout.)
 */
static int
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision. */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}

/*
 * Extent-based serialization check between two I/Os.  io1 may be an
 * UNMAP (handled descriptor-by-descriptor); otherwise both extents are
 * parsed from the CDBs and compared.
 */
static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		seq = FALSE;

	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}

/*
 * Sequential-access check: block io2 only when it starts exactly where
 * io1 ends (back-to-back sequential ordering).
 */
static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (io1->io_hdr.flags & CTL_FLAG_SERSEQ_DONE)
		return (CTL_ACTION_PASS);
	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

/*
 * Determine how pending_io must be serialized against ooa_io, which is
 * ahead of it on the OOA queue: pass, block, skip, or flag an
 * overlapped (un)tagged command.
 */
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	const ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
	 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid ==
	      ooa_io->io_hdr.nexus.initid))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	KASSERT(pending_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for pending CDB %02x %02x @ %p",
	     __func__, pending_entry->seridx, pending_io->scsiio.cdb[0],
	     pending_io->scsiio.cdb[1], pending_io));
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);
	if (ooa_entry->seridx == CTL_SERIDX_INVLD)
		return (CTL_ACTION_PASS); /* Unsupported command in OOA queue */
	KASSERT(ooa_entry->seridx < CTL_SERIDX_COUNT,
	    ("%s: Invalid seridx %d for ooa CDB %02x %02x @ %p",
	     __func__, ooa_entry->seridx, ooa_io->scsiio.cdb[0],
	     ooa_io->scsiio.cdb[1], ooa_io));

	/* Look up the pair's action in the serialization table. */
	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->be_lun && lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->be_lun &&
			     lun->be_lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->be_lun && lun->be_lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("%s: Invalid serialization value %d for %d => %d",
		    __func__, serialize_row[pending_entry->seridx],
		    pending_entry->seridx, ooa_entry->seridx);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival)
 * queue.  Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)){

		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("%s: Invalid action %d\n", __func__, action);
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	struct ctl_softc *softc = lun->ctl_softc;
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
		 */
		action = ctl_check_ooa(lun, cur_blocked, prev_ooa);

		switch (action) {
		case CTL_ACTION_BLOCK:
			/* Nothing to do here, still blocked */
			break;
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
			/*
			 * This shouldn't happen!  In theory we've already
			 * checked this command for overlap...
			 */
			break;
		case CTL_ACTION_PASS:
		case CTL_ACTION_SKIP: {
			const struct ctl_cmd_entry *entry;

			/*
			 * The skip case shouldn't happen, this transaction
			 * should have never made it onto the blocked queue.
			 */
			/*
			 * This I/O is no longer blocked, we can remove it
			 * from the blocked queue.  Since this is a TAILQ
			 * (doubly linked list), we can do O(1) removals
			 * from any place on the list.
			 */
			TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr,
				     blocked_links);
			cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED;

			if ((softc->ha_mode != CTL_HA_MODE_XFER) &&
			    (cur_blocked->io_hdr.flags &
			     CTL_FLAG_FROM_OTHER_SC)) {
				/*
				 * Need to send IO back to original side to
				 * run
				 */
				union ctl_ha_msg msg_info;

				cur_blocked->io_hdr.flags &=
				    ~CTL_FLAG_IO_ACTIVE;
				msg_info.hdr.original_sc =
				    cur_blocked->io_hdr.original_sc;
				msg_info.hdr.serializing_sc = cur_blocked;
				msg_info.hdr.msg_type = CTL_MSG_R2R;
				ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
				    sizeof(msg_info.hdr), M_NOWAIT);
				break;
			}
			entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL);

			/*
			 * Check this I/O for LUN state changes that may
			 * have happened while this command was blocked.
			 * The LUN state may have been changed by a command
			 * ahead of us in the queue, so we need to re-check
			 * for any states that can be caused by SCSI
			 * commands.
			 */
			if (ctl_scsiio_lun_check(lun, entry,
						 &cur_blocked->scsiio) == 0) {
				cur_blocked->io_hdr.flags |=
				                      CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(cur_blocked);
			} else
				ctl_done(cur_blocked);
			break;
		}
		default:
			/*
			 * This probably shouldn't happen -- we shouldn't
			 * get CTL_ACTION_ERROR, or anything else.
			 */
			break;
		}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * This routine (with one exception) checks LUN flags that can be set by
 * commands ahead of us in the OOA queue.  These flags have to be checked
 * when a command initially comes in, and when we pull a command off the
 * blocked queue and are preparing to execute it.  The reason we have to
 * check these flags for commands on the blocked queue is that the LUN
 * state may have been changed by a command ahead of us while we're on the
 * blocked queue.
 *
 * Ordering is somewhat important with these checks, so please pay
 * careful attention to the placement of any new checks.
 */
static int
ctl_scsiio_lun_check(struct ctl_lun *lun,
    const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = lun->ctl_softc;
	int retval;
	uint32_t residx;

	retval = 0;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * If this shelf is a secondary shelf controller, we may have to
	 * reject some commands disallowed by HA mode and link state.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		if (softc->ha_link == CTL_HA_LINK_OFFLINE &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_unavail(ctsio);
			retval = 1;
			goto bailout;
		}
		if ((lun->flags & CTL_LUN_PEER_SC_PRIMARY) == 0 &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_UNAVAIL) == 0) {
			ctl_set_lun_transit(ctsio);
			retval = 1;
			goto bailout;
		}
		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY &&
		    (entry->flags & CTL_CMD_FLAG_OK_ON_STANDBY) == 0) {
			ctl_set_lun_standby(ctsio);
			retval = 1;
			goto bailout;
		}

		/* The rest of checks are only done on executing side */
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			goto bailout;
	}

	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		if (lun->be_lun &&
		    lun->be_lun->flags & CTL_LUN_FLAG_READONLY) {
			ctl_set_hw_write_protected(ctsio);
			retval = 1;
			goto bailout;
		}
		/* Software write protect (SWP) from the control mode page. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->pr_res_type == SPR_TYPE_WR_EX ||
	     lun->pr_res_type == SPR_TYPE_WR_EX_RO ||
	     lun->pr_res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 */
		if (ctl_get_prkey(lun, residx) == 0 ||
		    (residx != lun->pr_res_idx && lun->pr_res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	if ((entry->flags & CTL_CMD_FLAG_OK_ON_NO_MEDIA) == 0) {
		if (lun->flags & CTL_LUN_EJECTED)
			ctl_set_lun_ejected(ctsio);
		else if (lun->flags & CTL_LUN_NO_MEDIA) {
			if (lun->flags & CTL_LUN_REMOVABLE)
				ctl_set_lun_no_media(ctsio);
			else
				ctl_set_lun_int_reqd(ctsio);
		} else if (lun->flags & CTL_LUN_STOPPED)
			ctl_set_lun_stopped(ctsio);
		else
			goto bailout;
		retval = 1;
		goto bailout;
	}

bailout:
	return (retval);
}

/*
 * Fail an I/O during failover: complete it with BUSY status.
 * (have_lock is currently unused.)
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Handle HA failover for one LUN: depending on the HA mode, abort,
 * re-dispatch, free, or fail with BUSY the I/Os that were associated
 * with the departed peer controller.
 */
static void
ctl_failover_lun(union ctl_io *rio)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_lun *lun;
	struct ctl_io_hdr *io, *next_io;
	uint32_t targ_lun;

	targ_lun = rio->io_hdr.nexus.targ_mapped_lun;
	CTL_DEBUG_PRINT(("FAILOVER for lun %ju\n", targ_lun));

	/* Find and lock the LUN. */
	mtx_lock(&softc->ctl_lock);
	if ((targ_lun < CTL_MAX_LUNS) &&
	    ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		mtx_lock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			return;
		}
	} else {
		mtx_unlock(&softc->ctl_lock);
		return;
	}

	if (softc->ha_mode == CTL_HA_MODE_XFER) {
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_ABORT;
					io->flags |= CTL_FLAG_FAILOVER;
				} else { /* This can be only due to DATAMOVE */
					io->msg_type = CTL_MSG_DATAMOVE_DONE;
					io->flags &= ~CTL_FLAG_DMA_INPROG;
					io->flags |= CTL_FLAG_IO_ACTIVE;
					io->port_status = 31340;
					ctl_enqueue_isc((union ctl_io *)io);
				}
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (io->flags & CTL_FLAG_IO_ACTIVE) {
					io->flags |= CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
	} else { /* SERIALIZE modes */
		TAILQ_FOREACH_SAFE(io, &lun->blocked_queue, blocked_links,
		    next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->blocked_queue, io,
				    blocked_links);
				io->flags &= ~CTL_FLAG_BLOCKED;
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
		}
		TAILQ_FOREACH_SAFE(io, &lun->ooa_queue, ooa_links, next_io) {
			/* We are master */
			if (io->flags & CTL_FLAG_FROM_OTHER_SC) {
				TAILQ_REMOVE(&lun->ooa_queue, io, ooa_links);
				ctl_free_io((union ctl_io *)io);
			}
			/* We are slave */
			if (io->flags & CTL_FLAG_SENT_2OTHER_SC) {
				io->flags &= ~CTL_FLAG_SENT_2OTHER_SC;
				if (!(io->flags & CTL_FLAG_IO_ACTIVE)) {
					ctl_set_busy(&((union ctl_io *)io)->
					    scsiio);
					ctl_done((union ctl_io *)io);
				}
			}
		}
		ctl_check_blocked(lun);
	}
	mtx_unlock(&lun->lun_lock);
}

static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval;

	retval = 0;

	lun = NULL;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS)
	 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
		} else {
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
				lun->be_lun;

			/*
			 * Every I/O goes into the OOA queue for a
			 * particular LUN, and stays there until completion.
*/ #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) { lun->idle_time += getsbinuptime() - lun->last_busy; } #endif TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); } } else { ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; } /* Get command entry and return error if it is unsupported. */ entry = ctl_validate_command(ctsio); if (entry == NULL) { if (lun) mtx_unlock(&lun->lun_lock); return (retval); } ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; /* * Check to see whether we can send this command to LUNs that don't * exist. This should pretty much only be the case for inquiry * and request sense. Further checks, below, really require having * a LUN, so we can't really check the command anymore. Just put * it on the rtr queue. */ if (lun == NULL) { if (entry->flags & CTL_CMD_FLAG_OK_ON_NO_LUN) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); return (retval); } ctl_set_unsupported_lun(ctsio); ctl_done((union ctl_io *)ctsio); CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); return (retval); } else { /* * Make sure we support this particular command on this LUN. * e.g., we don't support writes to the control LUN. */ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); #ifdef CTL_WITH_CA /* * If we've got a request sense, it'll clear the contingent * allegiance condition. Otherwise, if we have a CA condition for * this initiator, clear it, because it sent down a command other * than request sense.
*/ if ((ctsio->cdb[0] != REQUEST_SENSE) && (ctl_is_set(lun->have_ca, initidx))) ctl_clear_mask(lun->have_ca, initidx); #endif /* * If the command has this flag set, it handles its own unit * attention reporting, we shouldn't do anything. Otherwise we * check for any pending unit attentions, and send them back to the * initiator. We only do this when a command initially comes in, * not when we pull it off the blocked queue. * * According to SAM-3, section 5.3.2, the order that things get * presented back to the host is basically unit attentions caused * by some sort of reset event, busy status, reservation conflicts * or task set full, and finally any other status. * * One issue here is that some of the unit attentions we report * don't fall into the "reset" category (e.g. "reported luns data * has changed"). So reporting it here, before the reservation * check, may be technically wrong. I guess the only thing to do * would be to check for and report the reset events here, and then * check for the other unit attention types after we check for a * reservation conflict. * * XXX KDM need to fix this */ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_ua_type ua_type; ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, SSD_TYPE_NONE); if (ua_type != CTL_UA_NONE) { mtx_unlock(&lun->lun_lock); ctsio->scsi_status = SCSI_STATUS_CHECK_COND; ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; ctsio->sense_len = SSD_FULL_SIZE; ctl_done((union ctl_io *)ctsio); return (retval); } } if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (retval); } /* * XXX CHD this is where we want to send IO to other side if * this LUN is secondary on this SC. We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and the flag * the copy we send as FROM_OTHER. * We also need to stuff the address of the original IO so we can * find it easily. 
Something similar will need be done on the other * side so when we are done we can find the copy. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && (lun->flags & CTL_LUN_PEER_SC_PRIMARY) != 0 && (entry->flags & CTL_CMD_FLAG_RUN_HERE) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; mtx_unlock(&lun->lun_lock); msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; msg_info.scsi.cdb_len = ctsio->cdb_len; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.scsi) - sizeof(msg_info.scsi.sense_data), M_WAITOK)) > CTL_HA_STATUS_SUCCESS) { ctl_set_busy(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } return (retval); } switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); return (retval); case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_ERROR: default: mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done((union ctl_io *)ctsio); break; } return (retval); } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio 
*ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("Not defined length for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { switch (lun_type) { case T_DIRECT: if ((entry->flags & CTL_CMD_FLAG_OK_ON_DIRECT) == 0) return (0); break; case T_PROCESSOR: if ((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) return (0); break; case T_CDROM: if ((entry->flags & CTL_CMD_FLAG_OK_ON_CDROM) == 0) return (0); break; default: return (0); } return (1); } static int ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry *entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. 
*/ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } /* * Since we only implement one target right now, a bus reset simply resets * our single target. */ static int ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) { return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); } static int ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, ctl_ua_type ua_type) { struct ctl_port *port; struct ctl_lun *lun; int retval; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; if (ua_type==CTL_UA_TARG_RESET) msg_info.task.task_action = CTL_TASK_TARGET_RESET; else msg_info.task.task_action = CTL_TASK_BUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } retval = 0; mtx_lock(&softc->ctl_lock); port = ctl_io_port(&io->io_hdr); STAILQ_FOREACH(lun, &softc->lun_list, links) { if (port != NULL && ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; retval += ctl_do_lun_reset(lun, io, ua_type); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (retval); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. 
Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static int ctl_do_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) { union ctl_io *xio; #if 0 uint32_t initidx; #endif int i; mtx_lock(&lun->lun_lock); /* * Run through the OOA queue and abort each I/O. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; } /* * This version sets unit attention for every initiator. */ #if 0 initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif /* * A reset (any kind, really) clears reservations established with * RESERVE/RELEASE. It does not clear reservations established * with PERSISTENT RESERVE OUT, but we don't support that at the * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address * reservations made with the RESERVE/RELEASE commands, because * those commands are obsolete in SPC-3.
*/ lun->flags &= ~CTL_LUN_RESERVED; #ifdef CTL_WITH_CA for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->have_ca, i); #endif lun->prevent_count = 0; for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->prevent, i); mtx_unlock(&lun->lun_lock); return (0); } static int ctl_lun_reset(struct ctl_softc *softc, union ctl_io *io) { struct ctl_lun *lun; uint32_t targ_lun; int retval; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } retval = ctl_do_lun_reset(lun, io, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0) { union ctl_ha_msg msg_info; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } return (retval); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { union ctl_io *xio; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. 
*/ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((targ_port == UINT32_MAX || targ_port == xio->io_hdr.nexus.targ_port) && (init_id == UINT32_MAX || init_id == xio->io_hdr.nexus.initid)) { if (targ_port != xio->io_hdr.nexus.targ_port || init_id != xio->io_hdr.nexus.initid) xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; xio->io_hdr.flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xio->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN. 
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun; uint32_t initidx; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_I_T_NEXUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_WAITOK); } initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid, 1); #ifdef CTL_WITH_CA ctl_clear_mask(lun->have_ca, initidx); #endif if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == initidx)) lun->flags &= ~CTL_LUN_RESERVED; if (ctl_is_set(lun->prevent, initidx)) { ctl_clear_mask(lun->prevent, initidx); lun->prevent_count--; } ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_abort_task(union ctl_io *io) { union ctl_io *xio; struct ctl_lun *lun; struct 
ctl_softc *softc; #if 0 struct sbuf sb; char printbuf[128]; #endif int found; uint32_t targ_lun; softc = control_softc; found = 0; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } #if 0 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", lun->lun, io->taskio.tag_num, io->taskio.tag_type); #endif mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { #if 0 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", lun->lun, xio->scsiio.tag_num, xio->scsiio.tag_type, (xio->io_hdr.blocked_links.tqe_prev == NULL) ? "" : " BLOCKED", (xio->io_hdr.flags & CTL_FLAG_DMA_INPROG) ? " DMA" : "", (xio->io_hdr.flags & CTL_FLAG_ABORT) ? " ABORT" : "", (xio->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); ctl_scsi_command_string(&xio->scsiio, NULL, &sb); sbuf_finish(&sb); printf("%s\n", sbuf_data(&sb)); #endif if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. Otherwise, * we just check to see whether the tag numbers * match. This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. 
*/ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) #endif /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { xio->io_hdr.flags |= CTL_FLAG_ABORT; found = 1; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; #if 0 printf("Sent Abort to other side\n"); #endif ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info.task), M_NOWAIT); } #if 0 printf("ctl_abort_task: found I/O to abort\n"); #endif } } mtx_unlock(&lun->lun_lock); if (found == 0) { /* * This isn't really an error. It's entirely possible for * the abort and command completion to cross on the wire. * This is more of an informative/diagnostic error. 
*/ #if 0 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " "%u:%u:%u tag %d type %d\n", io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun, io->taskio.tag_num, io->taskio.tag_type); #endif } io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_task(union ctl_io *io, int task_set) { union ctl_io *xio; struct ctl_lun *lun; struct ctl_softc *softc; int found = 0; uint32_t targ_lun; softc = control_softc; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid != io->io_hdr.nexus.initid) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; if (task_set || xio->scsiio.tag_num == io->taskio.tag_num) { found = 1; break; } } mtx_unlock(&lun->lun_lock); if (found) io->taskio.task_status = CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static int ctl_query_async_event(union ctl_io *io) { struct ctl_lun *lun; struct ctl_softc *softc; ctl_ua_type ua; uint32_t targ_lun, initidx; softc = control_softc; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun >= CTL_MAX_LUNS) || (lun = softc->ctl_luns[targ_lun]) == NULL) { mtx_unlock(&softc->ctl_lock); io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST; return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); initidx = ctl_get_initindex(&io->io_hdr.nexus); ua = ctl_build_qae(lun, initidx, io->taskio.task_resp); mtx_unlock(&lun->lun_lock); if (ua != CTL_UA_NONE) io->taskio.task_status = 
CTL_TASK_FUNCTION_SUCCEEDED; else io->taskio.task_status = CTL_TASK_FUNCTION_COMPLETE; return (0); } static void ctl_run_task(union ctl_io *io) { struct ctl_softc *softc = control_softc; int retval = 1; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unextected io_type %d\n", io->io_hdr.io_type)); io->taskio.task_status = CTL_TASK_FUNCTION_NOT_SUPPORTED; bzero(io->taskio.task_resp, sizeof(io->taskio.task_resp)); switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case CTL_TASK_LUN_RESET: retval = ctl_lun_reset(softc, io); break; case CTL_TASK_TARGET_RESET: retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); break; case CTL_TASK_BUS_RESET: retval = ctl_bus_reset(softc, io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; case CTL_TASK_QUERY_TASK: retval = ctl_query_task(io, 0); break; case CTL_TASK_QUERY_TASK_SET: retval = ctl_query_task(io, 1); break; case CTL_TASK_QUERY_ASYNC_EVENT: retval = ctl_query_async_event(io); break; default: printf("%s: got unknown task management event %d\n", __func__, io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller. */ static void ctl_handle_isc(union ctl_io *io) { int free_io; struct ctl_lun *lun; struct ctl_softc *softc = control_softc; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; lun = softc->ctl_luns[targ_lun]; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: free_io = ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: { const struct ctl_cmd_entry *entry; /* * This is only used in SER_ONLY mode. 
*/ free_io = 0; entry = ctl_get_cmd_entry(&io->scsiio, NULL); mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, (struct ctl_scsiio *)io) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; } case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { free_io = 0; ctl_done(io); } else { free_io = 1; mtx_lock(&lun->lun_lock); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_check_blocked(lun); mtx_unlock(&lun->lun_lock); } break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc( (union ctl_ha_msg *)&io->presio.pr_msg); free_io = 1; break; case CTL_MSG_BAD_JUJU: free_io = 0; ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ free_io = 0; ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ free_io = 0; io->scsiio.be_move_done(io); break; case CTL_MSG_FAILOVER: ctl_failover_lun(io); free_io = 1; break; default: free_io = 1; printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); break; } if (free_io) ctl_free_io(io); } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. 
* * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. */ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. */ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. */ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio, (io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_OUT); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. 
*/ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. */ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ void ctl_datamove(union ctl_io *io) { struct ctl_lun *lun; void (*fe_datamove)(union ctl_io *io); mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", 
sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%u:%u:%u) aborted\n", io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; /* * Note that the backend, in this case, will get the * callback in its context. In other cases it may get * called in the frontend's interrupt thread context. */ io->scsiio.be_move_done(io); return; } /* Don't confuse frontend with zero length data move. 
*/ if (io->scsiio.kern_data_len == 0) { io->scsiio.be_move_done(io); return; } fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; fe_datamove(io); } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; #ifdef CTL_TIME_IO struct bintime cur_bt; #endif memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.serializing_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, io->scsiio.sense_len); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.sense_residual = io->scsiio.sense_residual; msg.scsi.fetd_status = io->io_hdr.port_status; msg.scsi.residual = io->scsiio.residual; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data) + msg.scsi.sense_len, M_WAITOK); #ifdef CTL_TIME_IO getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); bintime_add(&io->io_hdr.dma_bt, &cur_bt); #endif io->io_hdr.num_dmas++; } /* * The DMA to the remote side is done, now we need to tell the other side * we're done so it can continue with its data movement. 
*/ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; int i; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); free(io->io_hdr.remote_sglist, M_CTL); io->io_hdr.remote_sglist = NULL; io->io_hdr.local_sglist = NULL; /* * The data is in local and remote memory, so now we need to send * status (good or back) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int ctl_datamove_remote_dm_write_cb(union ctl_io *io) { int retval; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. 
*/
	io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb;

	fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove;
	fe_datamove(io);
}

/*
 * "be_move_done" callback for the read path: the frontend has pushed the
 * remotely-fetched data out to the initiator, so free the local bounce
 * buffers and report final status to the peer controller.
 */
static int
ctl_datamove_remote_dm_read_cb(union ctl_io *io)
{
#if 0
	char str[256];
	char path_str[64];
	struct sbuf sb;
#endif
	int i;

	/* Release the local bounce buffers and both S/G lists. */
	for (i = 0; i < io->scsiio.kern_sg_entries; i++)
		free(io->io_hdr.local_sglist[i].addr, M_CTL);
	free(io->io_hdr.remote_sglist, M_CTL);
	io->io_hdr.remote_sglist = NULL;
	io->io_hdr.local_sglist = NULL;

#if 0
	/*
	 * NOTE(review): dead debug code; "printk" is a Linux-ism and would
	 * not link on FreeBSD if this block were ever enabled.
	 */
	scsi_path_string(io, path_str, sizeof(path_str));
	sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);
	sbuf_cat(&sb, path_str);
	scsi_command_string(&io->scsiio, NULL, &sb);
	sbuf_printf(&sb, "\n");
	sbuf_cat(&sb, path_str);
	sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
		    io->scsiio.tag_num, io->scsiio.tag_type);
	sbuf_cat(&sb, path_str);
	sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__,
		    io->io_hdr.flags, io->io_hdr.status);
	sbuf_finish(&sb);
	printk("%s", sbuf_data(&sb));
#endif

	/*
	 * The read is done, now we need to send status (good or bad) back
	 * to the other side.
	 */
	ctl_send_datamove_done(io, /*have_lock*/ 0);

	return (0);
}

/*
 * HA data-transfer completion for the read path: the peer's data has
 * arrived in our local bounce buffers, so hand it to the frontend to
 * push out to the initiator.
 */
static void
ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq)
{
	union ctl_io *io;
	void (*fe_datamove)(union ctl_io *io);

	io = rq->context;

	if (rq->ret != CTL_HA_STATUS_SUCCESS) {
		printf("%s: ISC DMA read failed with error %d\n", __func__,
		       rq->ret);
		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
					 /*retry_count*/ rq->ret);
	}

	ctl_dt_req_free(rq);

	/* Switch the pointer over so the FETD knows what to do */
	io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist;

	/*
	 * Use a custom move done callback, since we need to send completion
	 * back to the other controller, not to the backend on this side.
	 */
	io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb;

	/* XXX KDM add checks like the ones in ctl_datamove?
*/ fe_datamove = ctl_io_port(&io->io_hdr)->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist; uint32_t len_to_go; int retval; int i; retval = 0; local_sglist = io->io_hdr.local_sglist; len_to_go = io->scsiio.kern_data_len; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; len_to_go > 0; i++) { local_sglist[i].len = MIN(len_to_go, CTL_HA_DATAMOVE_SEGMENT); local_sglist[i].addr = malloc(local_sglist[i].len, M_CTL, M_WAITOK); len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The original * number of S/G entries is available in rem_sg_entries. */ io->scsiio.kern_sg_entries = i; #if 0 printf("%s: kern_sg_entries = %d\n", __func__, io->scsiio.kern_sg_entries); for (i = 0; i < io->scsiio.kern_sg_entries; i++) printf("%s: sg[%d] = %p, %lu\n", __func__, i, local_sglist[i].addr, local_sglist[i].len); #endif return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; uint32_t local_used, remote_used, total_used; int i, j, isc_ret; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. 
*/ ctl_send_datamove_done(io, /*have_lock*/ 0); return (1); } local_sglist = io->io_hdr.local_sglist; remote_sglist = io->io_hdr.remote_sglist; local_used = 0; remote_used = 0; total_used = 0; /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ isc_ret = CTL_HA_STATUS_SUCCESS; for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { uint32_t cur_len; uint8_t *tmp_ptr; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. */ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); rq->size = cur_len; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; #if 0 /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ rq->local = vtophys(tmp_ptr); } else rq->local = tmp_ptr; #else KASSERT((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0, ("HA does not support BUS_ADDR")); rq->local = tmp_ptr; #endif tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; #if 0 printf("%s: %s: local %p remote %p size %d\n", __func__, (command == CTL_HA_DT_CMD_WRITE) ? 
"WRITE" : "READ", rq->local, rq->remote, rq->size); #endif isc_ret = ctl_dt_single(rq); if (isc_ret > CTL_HA_STATUS_SUCCESS) break; } if (isc_ret != CTL_HA_STATUS_WAIT) { rq->ret = isc_ret; callback(rq); } return (0); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; int i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if (retval != 0) { /* * Make sure we free memory if there was an error.. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); free(io->io_hdr.remote_sglist, M_CTL); io->io_hdr.remote_sglist = NULL; io->io_hdr.local_sglist = NULL; } } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 0); return; } /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller. 
*/ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%u:%u:%u) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) ctl_datamove_remote_write(io); else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) ctl_datamove_remote_read(io); else { io->io_hdr.port_status = 31339; ctl_send_datamove_done(io, /*have_lock*/ 0); } } static void ctl_process_done(union ctl_io *io) { struct ctl_lun *lun; struct ctl_softc *softc = control_softc; void (*fe_done)(union ctl_io *io); union ctl_ha_msg msg; uint32_t targ_port = io->io_hdr.nexus.targ_port; CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = softc->ctl_ports[targ_port]->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: panic("%s: Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_INFO) ctl_io_error_print(io, NULL); fe_done(io); return; default: panic("%s: 
Invalid CTL I/O type %d\n", __func__, io->io_hdr.io_type); } lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. */ if (!STAILQ_EMPTY(&lun->error_list) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency? */ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { #ifdef CTL_TIME_IO struct bintime cur_bt; #endif int type; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; lun->stats.ports[targ_port].bytes[type] += io->scsiio.kern_total_len; lun->stats.ports[targ_port].operations[type]++; #ifdef CTL_TIME_IO bintime_add(&lun->stats.ports[targ_port].dma_time[type], &io->io_hdr.dma_bt); getbinuptime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.start_bt); bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); #endif lun->stats.ports[targ_port].num_dmas[type] += io->io_hdr.num_dmas; } /* * Remove this from the OOA queue. */ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); #ifdef CTL_TIME_IO if (TAILQ_EMPTY(&lun->ooa_queue)) lun->last_busy = getsbinuptime(); #endif /* * Run through the blocked queue on this LUN and see if anything * has become unblocked, now that this transaction is done. */ ctl_check_blocked(lun); /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. 
*/ if ((lun->flags & CTL_LUN_INVALID) && TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); mtx_lock(&softc->ctl_lock); ctl_free_lun(lun); mtx_unlock(&softc->ctl_lock); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. */ if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS && (ctl_debug & CTL_DEBUG_INFO) != 0) ctl_io_error_print(io, NULL); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. */ if ((softc->ha_mode != CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)) { memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.serializing_sc = io->io_hdr.serializing_sc; msg.hdr.nexus = io->io_hdr.nexus; ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.scsi) - sizeof(msg.scsi.sense_data), M_WAITOK); } fe_done(io); } #ifdef CTL_WITH_CA /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. */ int ctl_queue_sense(union ctl_io *io) { struct ctl_lun *lun; struct ctl_port *port; struct ctl_softc *softc; uint32_t initidx, targ_lun; softc = control_softc; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. */ mtx_lock(&softc->ctl_lock); /* * If we don't have a LUN for this, just toss the sense * information. 
*/ port = ctl_io_port(&ctsio->io_hdr); targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) lun = softc->ctl_luns[targ_lun]; else goto bailout; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&lun->lun_lock); /* * Already have CA set for this LUN...toss the sense information. */ if (ctl_is_set(lun->have_ca, initidx)) { mtx_unlock(&lun->lun_lock); goto bailout; } memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, MIN(sizeof(lun->pending_sense[initidx]), sizeof(io->scsiio.sense_data))); ctl_set_mask(lun->have_ca, initidx); mtx_unlock(&lun->lun_lock); bailout: mtx_unlock(&softc->ctl_lock); ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } #endif /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { struct ctl_port *port; CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbinuptime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. 
*/
	port = ctl_io_port(&io->io_hdr);
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
/* Callout handler: complete an I/O whose "done" was artificially delayed. */
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Mark this I/O's serialized-sequence phase complete and re-run the
 * blocked queue, which may release commands that were serialized behind
 * it.  No-op unless the LUN's backend requests sequential serialization.
 */
void
ctl_serseq_done(union ctl_io *io)
{
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun->be_lun == NULL ||
	    lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
		return;
	mtx_lock(&lun->lun_lock);
	io->io_hdr.flags |= CTL_FLAG_SERSEQ_DONE;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);
}

/*
 * Completion entry point for an I/O: hand it to the "done" worker queue
 * (or delay it first, if CTL_IO_DELAY debugging is configured).
 */
void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%u:%u:%u tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			io->io_hdr.io_type,
			io->io_hdr.msg_type,
			io->scsiio.cdb[0],
			io->io_hdr.nexus.initid,
			io->io_hdr.nexus.targ_port,
			io->io_hdr.nexus.targ_lun,
			(io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			io->taskio.tag_num :
			io->scsiio.tag_num,
			io->io_hdr.flags,
			io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
*/ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { callout_init(&io->io_hdr.delay_callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(&io->io_hdr.delay_callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); for (;;) { /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - RtR queue * - incoming queue * * If those queues are empty, we break out of the loop and * go to sleep. 
*/ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(softc, &io->scsiio); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } /* Sleep until we have something to do. */ mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); } } static void ctl_lun_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_be_lun *be_lun; CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); for (;;) { mtx_lock(&softc->ctl_lock); be_lun = STAILQ_FIRST(&softc->pending_lun_queue); if (be_lun != NULL) { STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); mtx_unlock(&softc->ctl_lock); ctl_create_lun(be_lun); continue; } /* Sleep until we have something to do. 
*/ mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, PDROP | PRIBIO, "-", 0); } } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct scsi_da_rw_recovery_page *rwpage; struct ctl_logical_block_provisioning_page *page; const char *attr; union ctl_ha_msg msg; uint64_t thres, val; int i, e, set; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); for (;;) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_NO_MEDIA) || lun->backend->lun_attr == NULL) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0 && softc->ha_mode == CTL_HA_MODE_XFER) continue; rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr( lun->be_lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e = (val >= thres); else e = (val <= thres); if (e) break; } mtx_lock(&lun->lun_lock); if (e) { scsi_u64to8b((uint8_t *)&page->descr[i] - (uint8_t *)page, lun->ua_tpt_info); if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = 1; } else set = 0; } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); set = -1; } mtx_unlock(&lun->lun_lock); if (set != 0 && 
lun->ctl_softc->ha_mode == CTL_HA_MODE_XFER) { /* Send msg to other side. */ bzero(&msg.ua, sizeof(msg.ua)); msg.hdr.msg_type = CTL_MSG_UA; msg.hdr.nexus.initid = -1; msg.hdr.nexus.targ_port = -1; msg.hdr.nexus.targ_lun = lun->lun; msg.hdr.nexus.targ_mapped_lun = lun->lun; msg.ua.ua_all = 1; msg.ua.ua_set = (set > 0); msg.ua.ua_type = CTL_UA_THIN_PROV_THRES; memcpy(msg.ua.ua_info, lun->ua_tpt_info, 8); mtx_unlock(&softc->ctl_lock); // XXX ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg.ua), M_WAITOK); mtx_lock(&softc->ctl_lock); } } mtx_unlock(&softc->ctl_lock); pause("-", CTL_LBP_PERIOD * hz); } } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* * vim: ts=8 */ Index: head/sys/cam/scsi/scsi_ch.c 
=================================================================== --- head/sys/cam/scsi/scsi_ch.c (revision 293349) +++ head/sys/cam/scsi/scsi_ch.c (revision 293350) @@ -1,1934 +1,1940 @@ /*- * Copyright (c) 1997 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Derived from the NetBSD SCSI changer driver. * * $NetBSD: ch.c,v 1.32 1998/01/12 09:49:12 thorpej Exp $ * */ /*- * Copyright (c) 1996, 1997 Jason R. Thorpe * All rights reserved. * * Partially based on an autochanger driver written by Stefan Grefen * and on an autochanger driver written by the Systems Programming Group * at the University of Utah Computer Science Department. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgements: * This product includes software developed by Jason R. Thorpe * for And Communications, http://www.and.com/ * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Timeout definitions for various changer related commands. They may * be too short for some devices (especially the timeout for INITIALIZE * ELEMENT STATUS). 
*/ static const u_int32_t CH_TIMEOUT_MODE_SENSE = 6000; static const u_int32_t CH_TIMEOUT_MOVE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_EXCHANGE_MEDIUM = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_POSITION_TO_ELEMENT = 15 * 60 * 1000; static const u_int32_t CH_TIMEOUT_READ_ELEMENT_STATUS = 5 * 60 * 1000; static const u_int32_t CH_TIMEOUT_SEND_VOLTAG = 10000; static const u_int32_t CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS = 500000; typedef enum { CH_FLAG_INVALID = 0x001 } ch_flags; typedef enum { CH_STATE_PROBE, CH_STATE_NORMAL } ch_state; typedef enum { CH_CCB_PROBE } ch_ccb_types; typedef enum { CH_Q_NONE = 0x00, CH_Q_NO_DBD = 0x01, CH_Q_NO_DVCID = 0x02 } ch_quirks; #define CH_Q_BIT_STRING \ "\020" \ "\001NO_DBD" \ "\002NO_DVCID" #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct scsi_mode_sense_data { struct scsi_mode_header_6 header; struct scsi_mode_blk_desc blk_desc; union { struct page_element_address_assignment ea; struct page_transport_geometry_parameters tg; struct page_device_capabilities cap; } pages; }; struct ch_softc { ch_flags flags; ch_state state; ch_quirks quirks; union ccb saved_ccb; struct devstat *device_stats; struct cdev *dev; int open_count; int sc_picker; /* current picker */ /* * The following information is obtained from the * element address assignment page. */ int sc_firsts[CHET_MAX + 1]; /* firsts */ int sc_counts[CHET_MAX + 1]; /* counts */ /* * The following mask defines the legal combinations * of elements for the MOVE MEDIUM command. */ u_int8_t sc_movemask[CHET_MAX + 1]; /* * As above, but for EXCHANGE MEDIUM. */ u_int8_t sc_exchangemask[CHET_MAX + 1]; /* * Quirks; see below. 
XXX KDM not implemented yet */ int sc_settledelay; /* delay for settle */ }; static d_open_t chopen; static d_close_t chclose; static d_ioctl_t chioctl; static periph_init_t chinit; static periph_ctor_t chregister; static periph_oninv_t choninvalidate; static periph_dtor_t chcleanup; static periph_start_t chstart; static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void chdone(struct cam_periph *periph, union ccb *done_ccb); static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int chmove(struct cam_periph *periph, struct changer_move *cm); static int chexchange(struct cam_periph *periph, struct changer_exchange *ce); static int chposition(struct cam_periph *periph, struct changer_position *cp); static int chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *csr); static int chsetvoltag(struct cam_periph *periph, struct changer_set_voltag_request *csvr); static int chielem(struct cam_periph *periph, unsigned int timeout); static int chgetparams(struct cam_periph *periph); static int chscsiversion(struct cam_periph *periph); static struct periph_driver chdriver = { chinit, "ch", TAILQ_HEAD_INITIALIZER(chdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(ch, chdriver); static struct cdevsw ch_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = chopen, .d_close = chclose, .d_ioctl = chioctl, .d_name = "ch", }; static MALLOC_DEFINE(M_SCSICH, "scsi_ch", "scsi_ch buffers"); static void chinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". 
*/ status = xpt_register_async(AC_FOUND_DEVICE, chasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ch: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void chdevgonecb(void *arg) { struct ch_softc *softc; struct cam_periph *periph; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void choninvalidate(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, chasync, periph, periph->path); softc->flags |= CH_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. 
*/ destroy_dev_sched_cb(softc->dev, chdevgonecb, periph); } static void chcleanup(struct cam_periph *periph) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void chasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data)!= T_CHANGER) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(chregister, choninvalidate, chcleanup, chstart, "ch", CAM_PERIPH_BIO, path, chasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("chasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status chregister(struct cam_periph *periph, void *arg) { struct ch_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; + struct make_dev_args args; + int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("chregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ch_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("chregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = CH_STATE_PROBE; periph->softc = softc; softc->quirks = CH_Q_NONE; /* * The DVCID and CURDATA bits were not introduced until the SMC * spec. If this device claims SCSI-2 or earlier support, then it * very likely does not support these bits. 
*/ if (cgd->inq_data.version <= SCSI_REV_2) softc->quirks |= CH_Q_NO_DVCID; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * Changers don't have a blocksize, and obviously don't support * tagged queueing. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("ch", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | DEVSTAT_NO_ORDERED_TAGS, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ - softc->dev = make_dev(&ch_cdevsw, periph->unit_number, UID_ROOT, - GID_OPERATOR, 0600, "%s%d", periph->periph_name, - periph->unit_number); + make_dev_args_init(&args); + args.mda_devsw = &ch_cdevsw; + args.mda_unit = periph->unit_number; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = periph; + error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, + periph->unit_number); cam_periph_lock(periph); - softc->dev->si_drv1 = periph; + if (error != 0) { + cam_periph_release_locked(periph); + return (CAM_REQ_CMP_ERR); + } /* * Add an async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, chasync, periph, periph->path); /* * Lock this periph until we are setup. 
* This first call can't block */ (void)cam_periph_hold(periph, PRIBIO); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static int chopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); softc = (struct ch_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & CH_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * Load information about this changer device into the softc. */ if ((error = chgetparams(periph)) != 0) { cam_periph_unhold(periph); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } cam_periph_unhold(periph); softc->open_count++; cam_periph_unlock(periph); return(error); } static int chclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return(ENXIO); mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct ch_softc *)periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return(0); } static void chstart(struct cam_periph *periph, union ccb *start_ccb) { struct ch_softc *softc; softc = (struct ch_softc *)periph->softc; switch (softc->state) { case CH_STATE_NORMAL: { xpt_release_ccb(start_ccb); break; } case CH_STATE_PROBE: { int mode_buffer_len; void *mode_buffer; /* * Include the block descriptor when calculating the mode * buffer length, */ mode_buffer_len = sizeof(struct scsi_mode_header_6) + sizeof(struct scsi_mode_blk_desc) + sizeof(struct page_element_address_assignment); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chstart: couldn't malloc mode sense data\n"); break; } bzero(mode_buffer, mode_buffer_len); /* * Get the element address assignment page. */ scsi_mode_sense(&start_ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ (softc->quirks & CH_Q_NO_DBD) ? FALSE : TRUE, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = CH_CCB_PROBE; xpt_action(start_ccb); break; } } } static void chdone(struct cam_periph *periph, union ccb *done_ccb) { struct ch_softc *softc; struct ccb_scsiio *csio; softc = (struct ch_softc *)periph->softc; csio = &done_ccb->csio; switch(done_ccb->ccb_h.ccb_state) { case CH_CCB_PROBE: { struct scsi_mode_header_6 *mode_header; struct page_element_address_assignment *ea; char announce_buf[80]; mode_header = (struct scsi_mode_header_6 *)csio->data_ptr; ea = (struct page_element_address_assignment *) find_mode_page_6(mode_header); if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP){ softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = 
scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); softc->sc_picker = softc->sc_firsts[CHET_MT]; #define PLURAL(c) (c) == 1 ? "" : "s" snprintf(announce_buf, sizeof(announce_buf), "%d slot%s, %d drive%s, " "%d picker%s, %d portal%s", softc->sc_counts[CHET_ST], PLURAL(softc->sc_counts[CHET_ST]), softc->sc_counts[CHET_DT], PLURAL(softc->sc_counts[CHET_DT]), softc->sc_counts[CHET_MT], PLURAL(softc->sc_counts[CHET_MT]), softc->sc_counts[CHET_IE], PLURAL(softc->sc_counts[CHET_IE])); #undef PLURAL } else { int error; error = cherror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ if (error == ERESTART) { /* * A retry was scheuled, so * just return. */ return; } else if (error != 0) { struct scsi_mode_sense_6 *sms; int frozen, retry_scheduled; sms = (struct scsi_mode_sense_6 *) done_ccb->csio.cdb_io.cdb_bytes; frozen = (done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; /* * Check to see if block descriptors were * disabled. Some devices don't like that. * We're taking advantage of the fact that * the first few bytes of the 6 and 10 byte * mode sense commands are the same. If * block descriptors were disabled, enable * them and re-send the command. 
*/ if ((sms->byte2 & SMS_DBD) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0) { sms->byte2 &= ~SMS_DBD; xpt_action(done_ccb); softc->quirks |= CH_Q_NO_DBD; retry_scheduled = 1; } else retry_scheduled = 0; /* Don't wedge this device's queue */ if (frozen) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (retry_scheduled) return; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) scsi_sense_print(&done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, failed " "to attach to device\n"); cam_periph_invalidate(periph); announce_buf[0] = '\0'; } } if (announce_buf[0] != '\0') { xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, CH_Q_BIT_STRING); } softc->state = CH_STATE_NORMAL; free(mode_header, M_SCSICH); /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. 
*/ xpt_release_ccb(done_ccb); cam_periph_unhold(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static int cherror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct ch_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ch_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static int chioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct ch_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return(ENXIO); - cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering chioctl\n")); softc = (struct ch_softc *)periph->softc; error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, we must * have the device open for writing. */ switch (cmd) { case CHIOGPICKER: case CHIOGPARAMS: case OCHIOGSTATUS: case CHIOGSTATUS: break; default: if ((flag & FWRITE) == 0) { cam_periph_unlock(periph); return (EBADF); } } switch (cmd) { case CHIOMOVE: error = chmove(periph, (struct changer_move *)addr); break; case CHIOEXCHANGE: error = chexchange(periph, (struct changer_exchange *)addr); break; case CHIOPOSITION: error = chposition(periph, (struct changer_position *)addr); break; case CHIOGPICKER: *(int *)addr = softc->sc_picker - softc->sc_firsts[CHET_MT]; break; case CHIOSPICKER: { int new_picker = *(int *)addr; if (new_picker > (softc->sc_counts[CHET_MT] - 1)) { error = EINVAL; break; } softc->sc_picker = softc->sc_firsts[CHET_MT] + new_picker; break; } case CHIOGPARAMS: { struct changer_params *cp = (struct changer_params *)addr; cp->cp_npickers = softc->sc_counts[CHET_MT]; cp->cp_nslots = softc->sc_counts[CHET_ST]; cp->cp_nportals = softc->sc_counts[CHET_IE]; cp->cp_ndrives = softc->sc_counts[CHET_DT]; break; } case CHIOIELEM: error = chielem(periph, 
*(unsigned int *)addr); break; case OCHIOGSTATUS: { error = chgetelemstatus(periph, SCSI_REV_2, cmd, (struct changer_element_status_request *)addr); break; } case CHIOGSTATUS: { int scsi_version; scsi_version = chscsiversion(periph); if (scsi_version >= SCSI_REV_0) { error = chgetelemstatus(periph, scsi_version, cmd, (struct changer_element_status_request *)addr); } else { /* unable to determine the SCSI version */ cam_periph_unlock(periph); return (ENXIO); } break; } case CHIOSETVOLTAG: { error = chsetvoltag(periph, (struct changer_set_voltag_request *) addr); break; } /* Implement prevent/allow? */ default: error = cam_periph_ioctl(periph, cmd, addr, cherror); break; } cam_periph_unlock(periph); return (error); } static int chmove(struct cam_periph *periph, struct changer_move *cm) { struct ch_softc *softc; u_int16_t fromelem, toelem; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if ((cm->cm_fromtype > CHET_DT) || (cm->cm_totype > CHET_DT)) return (EINVAL); if ((cm->cm_fromunit > (softc->sc_counts[cm->cm_fromtype] - 1)) || (cm->cm_tounit > (softc->sc_counts[cm->cm_totype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if ((softc->sc_movemask[cm->cm_fromtype] & (1 << cm->cm_totype)) == 0) return (ENODEV); /* * Calculate the source and destination elements. */ fromelem = softc->sc_firsts[cm->cm_fromtype] + cm->cm_fromunit; toelem = softc->sc_firsts[cm->cm_totype] + cm->cm_tounit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_move_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ fromelem, /* dst */ toelem, /* invert */ (cm->cm_flags & CM_INVERT) ? 
TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MOVE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chexchange(struct cam_periph *periph, struct changer_exchange *ce) { struct ch_softc *softc; u_int16_t src, dst1, dst2; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if ((ce->ce_srctype > CHET_DT) || (ce->ce_fdsttype > CHET_DT) || (ce->ce_sdsttype > CHET_DT)) return (EINVAL); if ((ce->ce_srcunit > (softc->sc_counts[ce->ce_srctype] - 1)) || (ce->ce_fdstunit > (softc->sc_counts[ce->ce_fdsttype] - 1)) || (ce->ce_sdstunit > (softc->sc_counts[ce->ce_sdsttype] - 1))) return (ENODEV); /* * Check the request against the changer's capabilities. */ if (((softc->sc_exchangemask[ce->ce_srctype] & (1 << ce->ce_fdsttype)) == 0) || ((softc->sc_exchangemask[ce->ce_fdsttype] & (1 << ce->ce_sdsttype)) == 0)) return (ENODEV); /* * Calculate the source and destination elements. */ src = softc->sc_firsts[ce->ce_srctype] + ce->ce_srcunit; dst1 = softc->sc_firsts[ce->ce_fdsttype] + ce->ce_fdstunit; dst2 = softc->sc_firsts[ce->ce_sdsttype] + ce->ce_sdstunit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_exchange_medium(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* src */ src, /* dst1 */ dst1, /* dst2 */ dst2, /* invert1 */ (ce->ce_flags & CE_INVERT1) ? TRUE : FALSE, /* invert2 */ (ce->ce_flags & CE_INVERT2) ? 
TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_EXCHANGE_MEDIUM); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chposition(struct cam_periph *periph, struct changer_position *cp) { struct ch_softc *softc; u_int16_t dst; union ccb *ccb; int error; error = 0; softc = (struct ch_softc *)periph->softc; /* * Check arguments. */ if (cp->cp_type > CHET_DT) return (EINVAL); if (cp->cp_unit > (softc->sc_counts[cp->cp_type] - 1)) return (ENODEV); /* * Calculate the destination element. */ dst = softc->sc_firsts[cp->cp_type] + cp->cp_unit; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_position_to_element(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* tea */ softc->sc_picker, /* dst */ dst, /* invert */ (cp->cp_flags & CP_INVERT) ? TRUE : FALSE, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_POSITION_TO_ELEMENT); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } /* * Copy a volume tag to a volume_tag struct, converting SCSI byte order * to host native byte order in the volume serial number. The volume * label as returned by the changer is transferred to user mode as * nul-terminated string. Volume labels are truncated at the first * space, as suggested by SCSI-2. */ static void copy_voltag(struct changer_voltag *uvoltag, struct volume_tag *voltag) { int i; for (i=0; ivif[i]; if (c && c != ' ') uvoltag->cv_volid[i] = c; else break; } uvoltag->cv_serial = scsi_2btoul(voltag->vsn); } /* * Copy an element status descriptor to a user-mode * changer_element_status structure. 
*/ static void copy_element_status(struct ch_softc *softc, u_int16_t flags, struct read_element_status_descriptor *desc, struct changer_element_status *ces, int scsi_version) { u_int16_t eaddr = scsi_2btoul(desc->eaddr); u_int16_t et; struct volume_tag *pvol_tag = NULL, *avol_tag = NULL; struct read_element_status_device_id *devid = NULL; ces->ces_int_addr = eaddr; /* set up logical address in element status */ for (et = CHET_MT; et <= CHET_DT; et++) { if ((softc->sc_firsts[et] <= eaddr) && ((softc->sc_firsts[et] + softc->sc_counts[et]) > eaddr)) { ces->ces_addr = eaddr - softc->sc_firsts[et]; ces->ces_type = et; break; } } ces->ces_flags = desc->flags1; ces->ces_sensecode = desc->sense_code; ces->ces_sensequal = desc->sense_qual; if (desc->flags2 & READ_ELEMENT_STATUS_INVERT) ces->ces_flags |= CES_INVERT; if (desc->flags2 & READ_ELEMENT_STATUS_SVALID) { eaddr = scsi_2btoul(desc->ssea); /* convert source address to logical format */ for (et = CHET_MT; et <= CHET_DT; et++) { if ((softc->sc_firsts[et] <= eaddr) && ((softc->sc_firsts[et] + softc->sc_counts[et]) > eaddr)) { ces->ces_source_addr = eaddr - softc->sc_firsts[et]; ces->ces_source_type = et; ces->ces_flags |= CES_SOURCE_VALID; break; } } if (!(ces->ces_flags & CES_SOURCE_VALID)) printf("ch: warning: could not map element source " "address %ud to a valid element type\n", eaddr); } /* * pvoltag and avoltag are common between SCSI-2 and later versions */ if (flags & READ_ELEMENT_STATUS_PVOLTAG) pvol_tag = &desc->voltag_devid.pvoltag; if (flags & READ_ELEMENT_STATUS_AVOLTAG) avol_tag = (flags & READ_ELEMENT_STATUS_PVOLTAG) ? &desc->voltag_devid.voltag[1] :&desc->voltag_devid.pvoltag; /* * For SCSI-3 and later, element status can carry designator and * other information. 
*/ if (scsi_version >= SCSI_REV_SPC) { if ((flags & READ_ELEMENT_STATUS_PVOLTAG) ^ (flags & READ_ELEMENT_STATUS_AVOLTAG)) devid = &desc->voltag_devid.pvol_and_devid.devid; else if (!(flags & READ_ELEMENT_STATUS_PVOLTAG) && !(flags & READ_ELEMENT_STATUS_AVOLTAG)) devid = &desc->voltag_devid.devid; else /* Have both PVOLTAG and AVOLTAG */ devid = &desc->voltag_devid.vol_tags_and_devid.devid; } if (pvol_tag) copy_voltag(&(ces->ces_pvoltag), pvol_tag); if (avol_tag) copy_voltag(&(ces->ces_pvoltag), avol_tag); if (devid != NULL) { if (devid->designator_length > 0) { bcopy((void *)devid->designator, (void *)ces->ces_designator, devid->designator_length); ces->ces_designator_length = devid->designator_length; /* * Make sure we are always NUL terminated. The * This won't matter for the binary code set, * since the user will only pay attention to the * length field. */ ces->ces_designator[devid->designator_length]= '\0'; } if (devid->piv_assoc_designator_type & READ_ELEMENT_STATUS_PIV_SET) { ces->ces_flags |= CES_PIV; ces->ces_protocol_id = READ_ELEMENT_STATUS_PROTOCOL_ID( devid->prot_code_set); } ces->ces_code_set = READ_ELEMENT_STATUS_CODE_SET(devid->prot_code_set); ces->ces_assoc = READ_ELEMENT_STATUS_ASSOCIATION( devid->piv_assoc_designator_type); ces->ces_designator_type = READ_ELEMENT_STATUS_DESIGNATOR_TYPE( devid->piv_assoc_designator_type); } else if (scsi_version > SCSI_REV_2) { /* SCSI-SPC and No devid, no designator */ ces->ces_designator_length = 0; ces->ces_designator[0] = '\0'; ces->ces_protocol_id = CES_PROTOCOL_ID_FCP_4; } if (scsi_version <= SCSI_REV_2) { if (desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_IDVALID) { ces->ces_flags |= CES_SCSIID_VALID; ces->ces_scsi_id = desc->dt_or_obsolete.scsi_2.dt_scsi_addr; } if (desc->dt_or_obsolete.scsi_2.dt_scsi_addr & READ_ELEMENT_STATUS_DT_LUVALID) { ces->ces_flags |= CES_LUN_VALID; ces->ces_scsi_lun = desc->dt_or_obsolete.scsi_2.dt_scsi_flags & READ_ELEMENT_STATUS_DT_LUNMASK; } } } static int 
chgetelemstatus(struct cam_periph *periph, int scsi_version, u_long cmd, struct changer_element_status_request *cesr) { struct read_element_status_header *st_hdr; struct read_element_status_page_header *pg_hdr; struct read_element_status_descriptor *desc; caddr_t data = NULL; size_t size, desclen; int avail, i, error = 0; int curdata, dvcid, sense_flags; int try_no_dvcid = 0; struct changer_element_status *user_data = NULL; struct ch_softc *softc; union ccb *ccb; int chet = cesr->cesr_element_type; int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0; softc = (struct ch_softc *)periph->softc; /* perform argument checking */ /* * Perform a range check on the cesr_element_{base,count} * request argument fields. */ if ((softc->sc_counts[chet] - cesr->cesr_element_base) <= 0 || (cesr->cesr_element_base + cesr->cesr_element_count) > softc->sc_counts[chet]) return (EINVAL); /* * Request one descriptor for the given element type. This * is used to determine the size of the descriptor so that * we can allocate enough storage for all of them. We assume * that the first one can fit into 1k. */ cam_periph_unlock(periph); data = (caddr_t)malloc(1024, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); sense_flags = SF_RETRY_UA; if (softc->quirks & CH_Q_NO_DVCID) { dvcid = 0; curdata = 0; } else { dvcid = 1; curdata = 1; /* * Don't print anything for an Illegal Request, because * these flags can cause some changers to complain. We'll * retry without them if we get an error. 
*/ sense_flags |= SF_QUIET_IR; } retry_einval: scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet], /* curdata */ curdata, /* dvcid */ dvcid, /* count */ 1, /* data_ptr */ data, /* dxfer_len */ 1024, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ sense_flags, softc->device_stats); /* * An Illegal Request sense key (only used if there is no asc/ascq) * or 0x24,0x00 for an ASC/ASCQ both map to EINVAL. If dvcid or * curdata are set (we set both or neither), try turning them off * and see if the command is successful. */ if ((error == EINVAL) && (dvcid || curdata)) { dvcid = 0; curdata = 0; error = 0; /* At this point we want to report any Illegal Request */ sense_flags &= ~SF_QUIET_IR; try_no_dvcid = 1; goto retry_einval; } /* * In this case, we tried a read element status with dvcid and * curdata set, and it failed. We retried without those bits, and * it succeeded. Suggest to the user that he set a quirk, so we * don't go through the retry process the first time in the future. * This should only happen on changers that claim SCSI-3 or higher, * but don't support these bits. */ if ((try_no_dvcid != 0) && (error == 0)) softc->quirks |= CH_Q_NO_DVCID; if (error) goto done; cam_periph_unlock(periph); st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); desclen = scsi_2btoul(pg_hdr->edl); size = sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header) + (desclen * cesr->cesr_element_count); /* * Reallocate storage for descriptors and get them from the * device. 
*/ free(data, M_DEVBUF); data = (caddr_t)malloc(size, M_DEVBUF, M_WAITOK); cam_periph_lock(periph); scsi_read_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* voltag */ want_voltags, /* sea */ softc->sc_firsts[chet] + cesr->cesr_element_base, /* curdata */ curdata, /* dvcid */ dvcid, /* count */ cesr->cesr_element_count, /* data_ptr */ data, /* dxfer_len */ size, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_READ_ELEMENT_STATUS); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); if (error) goto done; cam_periph_unlock(periph); /* * Fill in the user status array. */ st_hdr = (struct read_element_status_header *)data; pg_hdr = (struct read_element_status_page_header *)((uintptr_t)st_hdr + sizeof(struct read_element_status_header)); avail = scsi_2btoul(st_hdr->count); if (avail != cesr->cesr_element_count) { xpt_print(periph->path, "warning, READ ELEMENT STATUS avail != count\n"); } user_data = (struct changer_element_status *) malloc(avail * sizeof(struct changer_element_status), M_DEVBUF, M_WAITOK | M_ZERO); desc = (struct read_element_status_descriptor *)((uintptr_t)data + sizeof(struct read_element_status_header) + sizeof(struct read_element_status_page_header)); /* * Set up the individual element status structures */ for (i = 0; i < avail; ++i) { struct changer_element_status *ces; /* * In the changer_element_status structure, fields from * the beginning to the field of ces_scsi_lun are common * between SCSI-2 and SCSI-3, while all the rest are new * from SCSI-3. In order to maintain backward compatibility * of the chio command, the ces pointer, below, is computed * such that it lines up with the structure boundary * corresponding to the SCSI version. */ ces = cmd == OCHIOGSTATUS ? 
(struct changer_element_status *) ((unsigned char *)user_data + i * (offsetof(struct changer_element_status,ces_scsi_lun)+1)): &user_data[i]; copy_element_status(softc, pg_hdr->flags, desc, ces, scsi_version); desc = (struct read_element_status_descriptor *) ((unsigned char *)desc + desclen); } /* Copy element status structures out to userspace. */ if (cmd == OCHIOGSTATUS) error = copyout(user_data, cesr->cesr_element_status, avail* (offsetof(struct changer_element_status, ces_scsi_lun) + 1)); else error = copyout(user_data, cesr->cesr_element_status, avail * sizeof(struct changer_element_status)); cam_periph_lock(periph); done: xpt_release_ccb(ccb); if (data != NULL) free(data, M_DEVBUF); if (user_data != NULL) free(user_data, M_DEVBUF); return (error); } static int chielem(struct cam_periph *periph, unsigned int timeout) { union ccb *ccb; struct ch_softc *softc; int error; if (!timeout) { timeout = CH_TIMEOUT_INITIALIZE_ELEMENT_STATUS; } else { timeout *= 1000; } error = 0; softc = (struct ch_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_initialize_element_status(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); return(error); } static int chsetvoltag(struct cam_periph *periph, struct changer_set_voltag_request *csvr) { union ccb *ccb; struct ch_softc *softc; u_int16_t ea; u_int8_t sac; struct scsi_send_volume_tag_parameters ssvtp; int error; int i; error = 0; softc = (struct ch_softc *)periph->softc; bzero(&ssvtp, sizeof(ssvtp)); for (i=0; icsvr_type > CHET_DT) return EINVAL; if (csvr->csvr_addr > (softc->sc_counts[csvr->csvr_type] - 1)) return ENODEV; ea = softc->sc_firsts[csvr->csvr_type] + csvr->csvr_addr; if (csvr->csvr_flags & CSVR_ALTERNATE) { switch (csvr->csvr_flags & CSVR_MODE_MASK) { 
case CSVR_MODE_SET: sac = SEND_VOLUME_TAG_ASSERT_ALTERNATE; break; case CSVR_MODE_REPLACE: sac = SEND_VOLUME_TAG_REPLACE_ALTERNATE; break; case CSVR_MODE_CLEAR: sac = SEND_VOLUME_TAG_UNDEFINED_ALTERNATE; break; default: error = EINVAL; goto out; } } else { switch (csvr->csvr_flags & CSVR_MODE_MASK) { case CSVR_MODE_SET: sac = SEND_VOLUME_TAG_ASSERT_PRIMARY; break; case CSVR_MODE_REPLACE: sac = SEND_VOLUME_TAG_REPLACE_PRIMARY; break; case CSVR_MODE_CLEAR: sac = SEND_VOLUME_TAG_UNDEFINED_PRIMARY; break; default: error = EINVAL; goto out; } } memcpy(ssvtp.vitf, csvr->csvr_voltag.cv_volid, min(strlen(csvr->csvr_voltag.cv_volid), sizeof(ssvtp.vitf))); scsi_ulto2b(csvr->csvr_voltag.cv_serial, ssvtp.minvsn); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_send_volume_tag(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* element_address */ ea, /* send_action_code */ sac, /* parameters */ &ssvtp, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_SEND_VOLTAG); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); out: return error; } static int chgetparams(struct cam_periph *periph) { union ccb *ccb; struct ch_softc *softc; void *mode_buffer; int mode_buffer_len; struct page_element_address_assignment *ea; struct page_device_capabilities *cap; int error, from, dbd; u_int8_t *moves, *exchanges; error = 0; softc = (struct ch_softc *)periph->softc; ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); /* * The scsi_mode_sense_data structure is just a convenience * structure that allows us to easily calculate the worst-case * storage size of the mode sense buffer. 
*/ mode_buffer_len = sizeof(struct scsi_mode_sense_data); mode_buffer = malloc(mode_buffer_len, M_SCSICH, M_NOWAIT); if (mode_buffer == NULL) { printf("chgetparams: couldn't malloc mode sense data\n"); return(ENOSPC); } bzero(mode_buffer, mode_buffer_len); if (softc->quirks & CH_Q_NO_DBD) dbd = FALSE; else dbd = TRUE; /* * Get the element address assignment page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_ELEMENT_ADDR_ASSIGN_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA|SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. 
*/ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting element " "address page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } ea = (struct page_element_address_assignment *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); softc->sc_firsts[CHET_MT] = scsi_2btoul(ea->mtea); softc->sc_counts[CHET_MT] = scsi_2btoul(ea->nmte); softc->sc_firsts[CHET_ST] = scsi_2btoul(ea->fsea); softc->sc_counts[CHET_ST] = scsi_2btoul(ea->nse); softc->sc_firsts[CHET_IE] = scsi_2btoul(ea->fieea); softc->sc_counts[CHET_IE] = scsi_2btoul(ea->niee); softc->sc_firsts[CHET_DT] = scsi_2btoul(ea->fdtea); softc->sc_counts[CHET_DT] = scsi_2btoul(ea->ndte); bzero(mode_buffer, mode_buffer_len); /* * Now get the device capabilities page. */ scsi_mode_sense(&ccb->csio, /* retries */ 1, /* cbfcnp */ chdone, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* page_code */ SMS_PAGE_CTRL_CURRENT, /* page */ CH_DEVICE_CAP_PAGE, /* param_buf */ (u_int8_t *)mode_buffer, /* param_len */ mode_buffer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ CH_TIMEOUT_MODE_SENSE); error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /* sense_flags */ SF_RETRY_UA | SF_NO_PRINT, softc->device_stats); if (error) { if (dbd) { struct scsi_mode_sense_6 *sms; sms = (struct scsi_mode_sense_6 *) ccb->csio.cdb_io.cdb_bytes; sms->byte2 &= ~SMS_DBD; error = cam_periph_runccb(ccb, cherror, /*cam_flags*/ CAM_RETRY_SELTO, /*sense_flags*/ SF_RETRY_UA, softc->device_stats); } else { /* * Since we disabled sense printing above, print * out the sense here since we got an error. 
*/ scsi_sense_print(&ccb->csio); } if (error) { xpt_print(periph->path, "chgetparams: error getting device " "capabilities page\n"); xpt_release_ccb(ccb); free(mode_buffer, M_SCSICH); return(error); } } xpt_release_ccb(ccb); cap = (struct page_device_capabilities *) find_mode_page_6((struct scsi_mode_header_6 *)mode_buffer); bzero(softc->sc_movemask, sizeof(softc->sc_movemask)); bzero(softc->sc_exchangemask, sizeof(softc->sc_exchangemask)); moves = cap->move_from; exchanges = cap->exchange_with; for (from = CHET_MT; from <= CHET_MAX; ++from) { softc->sc_movemask[from] = moves[from]; softc->sc_exchangemask[from] = exchanges[from]; } free(mode_buffer, M_SCSICH); return(error); } static int chscsiversion(struct cam_periph *periph) { struct scsi_inquiry_data *inq_data; struct ccb_getdev *cgd; int dev_scsi_version; cam_periph_assert(periph, MA_OWNED); if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) == NULL) return (-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); if (cgd->ccb_h.status != CAM_REQ_CMP) { xpt_free_ccb((union ccb *)cgd); return -1; } inq_data = &cgd->inq_data; dev_scsi_version = inq_data->version; xpt_free_ccb((union ccb *)cgd); return dev_scsi_version; } void scsi_move_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_move_medium *scsi_cmd; scsi_cmd = (struct scsi_move_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MOVE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= MOVE_MEDIUM_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, 
sizeof(*scsi_cmd), timeout); } void scsi_exchange_medium(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t src, u_int32_t dst1, u_int32_t dst2, int invert1, int invert2, u_int8_t sense_len, u_int32_t timeout) { struct scsi_exchange_medium *scsi_cmd; scsi_cmd = (struct scsi_exchange_medium *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = EXCHANGE_MEDIUM; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(src, scsi_cmd->src); scsi_ulto2b(dst1, scsi_cmd->fdst); scsi_ulto2b(dst2, scsi_cmd->sdst); if (invert1) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV1; if (invert2) scsi_cmd->invert |= EXCHANGE_MEDIUM_INV2; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_position_to_element(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t tea, u_int32_t dst, int invert, u_int8_t sense_len, u_int32_t timeout) { struct scsi_position_to_element *scsi_cmd; scsi_cmd = (struct scsi_position_to_element *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = POSITION_TO_ELEMENT; scsi_ulto2b(tea, scsi_cmd->tea); scsi_ulto2b(dst, scsi_cmd->dst); if (invert) scsi_cmd->invert |= POSITION_TO_ELEMENT_INVERT; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int voltag, u_int32_t sea, int curdata, int dvcid, u_int32_t count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_element_status *scsi_cmd; scsi_cmd = (struct scsi_read_element_status *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, 
sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_ELEMENT_STATUS; scsi_ulto2b(sea, scsi_cmd->sea); scsi_ulto2b(count, scsi_cmd->count); scsi_ulto3b(dxfer_len, scsi_cmd->len); if (dvcid) scsi_cmd->flags |= READ_ELEMENT_STATUS_DVCID; if (curdata) scsi_cmd->flags |= READ_ELEMENT_STATUS_CURDATA; if (voltag) scsi_cmd->byte2 |= READ_ELEMENT_STATUS_VOLTAG; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_initialize_element_status(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_initialize_element_status *scsi_cmd; scsi_cmd = (struct scsi_initialize_element_status *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INITIALIZE_ELEMENT_STATUS; cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_NONE, tag_action, /* data_ptr */ NULL, /* dxfer_len */ 0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_volume_tag(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t element_address, u_int8_t send_action_code, struct scsi_send_volume_tag_parameters *parameters, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_volume_tag *scsi_cmd; scsi_cmd = (struct scsi_send_volume_tag *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_VOLUME_TAG; scsi_ulto2b(element_address, scsi_cmd->ea); scsi_cmd->sac = send_action_code; scsi_ulto2b(sizeof(*parameters), scsi_cmd->pll); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_OUT, tag_action, /* data_ptr */ (u_int8_t *) parameters, sizeof(*parameters), sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_enc.c =================================================================== --- head/sys/cam/scsi/scsi_enc.c (revision 293349) +++ head/sys/cam/scsi/scsi_enc.c 
(revision 293350) @@ -1,1037 +1,1037 @@ /*- * Copyright (c) 2000 Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_SCSIENC, "SCSI ENC", "SCSI ENC buffers"); /* Enclosure type independent driver */ static d_open_t enc_open; static d_close_t enc_close; static d_ioctl_t enc_ioctl; static periph_init_t enc_init; static periph_ctor_t enc_ctor; static periph_oninv_t enc_oninvalidate; static periph_dtor_t enc_dtor; static void enc_async(void *, uint32_t, struct cam_path *, void *); static enctyp enc_type(struct ccb_getdev *); SYSCTL_NODE(_kern_cam, OID_AUTO, enc, CTLFLAG_RD, 0, "CAM Enclosure Services driver"); static struct periph_driver encdriver = { enc_init, "ses", TAILQ_HEAD_INITIALIZER(encdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(enc, encdriver); static struct cdevsw enc_cdevsw = { .d_version = D_VERSION, .d_open = enc_open, .d_close = enc_close, .d_ioctl = enc_ioctl, .d_name = "ses", .d_flags = D_TRACKCLOSE, }; static void enc_init(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, enc_async, NULL, NULL); if (status != CAM_REQ_CMP) { printf("enc: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void enc_devgonecb(void *arg) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = (struct enc_softc *)periph->softc; /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. 
*/ for (i = 0; i < enc->open_count; i++) cam_periph_release_locked(periph); enc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void enc_oninvalidate(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; enc->enc_flags |= ENC_FLAG_INVALID; /* If the sub-driver has an invalidate routine, call it */ if (enc->enc_vec.softc_invalidate != NULL) enc->enc_vec.softc_invalidate(enc); /* * Unregister any async callbacks. */ xpt_register_async(0, enc_async, periph, periph->path); /* * Shutdown our daemon. */ enc->enc_flags |= ENC_FLAG_SHUTDOWN; if (enc->enc_daemon != NULL) { /* Signal the ses daemon to terminate. */ wakeup(enc->enc_daemon); } callout_drain(&enc->status_updater); destroy_dev_sched_cb(enc->enc_dev, enc_devgonecb, periph); } static void enc_dtor(struct cam_periph *periph) { struct enc_softc *enc; enc = periph->softc; /* If the sub-driver has a cleanup routine, call it */ if (enc->enc_vec.softc_cleanup != NULL) enc->enc_vec.softc_cleanup(enc); if (enc->enc_boot_hold_ch.ich_func != NULL) { config_intrhook_disestablish(&enc->enc_boot_hold_ch); enc->enc_boot_hold_ch.ich_func = NULL; } ENC_FREE(enc); } static void enc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch(code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; path_id_t path_id; cgd = (struct ccb_getdev *)arg; if (arg == NULL) { break; } if (enc_type(cgd) == ENC_NONE) { /* * Schedule announcement of the ENC bindings for * this device if it is managed by a SEP. 
*/ path_id = xpt_path_path_id(path); xpt_lock_buses(); TAILQ_FOREACH(periph, &encdriver.units, unit_links) { struct enc_softc *softc; softc = (struct enc_softc *)periph->softc; if (xpt_path_path_id(periph->path) != path_id || softc == NULL || (softc->enc_flags & ENC_FLAG_INITIALIZED) == 0 || softc->enc_vec.device_found == NULL) continue; softc->enc_vec.device_found(softc); } xpt_unlock_buses(); return; } status = cam_periph_alloc(enc_ctor, enc_oninvalidate, enc_dtor, NULL, "ses", CAM_PERIPH_BIO, path, enc_async, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { printf("enc_async: Unable to probe new device due to " "status 0x%x\n", status); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static int enc_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) { - return (ENXIO); - } - if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_lock(periph); softc = (struct enc_softc *)periph->softc; if ((softc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { error = ENXIO; goto out; } if (softc->enc_flags & ENC_FLAG_INVALID) { error = ENXIO; goto out; } out: if (error != 0) cam_periph_release_locked(periph); else softc->open_count++; cam_periph_unlock(periph); return (error); } static int enc_close(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct enc_softc *enc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); mtx = cam_periph_mtx(periph); mtx_lock(mtx); enc = periph->softc; enc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. 
If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } int enc_error(union ccb *ccb, uint32_t cflags, uint32_t sflags) { struct enc_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct enc_softc *)periph->softc; return (cam_periph_error(ccb, cflags, sflags, &softc->saved_ccb)); } static int enc_ioctl(struct cdev *dev, u_long cmd, caddr_t arg_addr, int flag, struct thread *td) { struct cam_periph *periph; encioc_enc_status_t tmp; encioc_string_t sstr; encioc_elm_status_t elms; encioc_elm_desc_t elmd; encioc_elm_devnames_t elmdn; encioc_element_t *uelm; enc_softc_t *enc; enc_cache_t *cache; void *addr; int error, i; if (arg_addr) addr = *((caddr_t *) arg_addr); else addr = NULL; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering encioctl\n")); cam_periph_lock(periph); enc = (struct enc_softc *)periph->softc; cache = &enc->enc_cache; /* * Now check to see whether we're initialized or not. * This actually should never fail as we're not supposed * to get past enc_open w/o successfully initializing * things. */ if ((enc->enc_flags & ENC_FLAG_INITIALIZED) == 0) { cam_periph_unlock(periph); return (ENXIO); } cam_periph_unlock(periph); error = 0; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("trying to do ioctl %#lx\n", cmd)); /* * If this command can change the device's state, * we must have the device open for writing. * * For commands that get information about the * device- we don't need to lock the peripheral * if we aren't running a command. The periph * also can't go away while a user process has * it open. 
*/ switch (cmd) { case ENCIOC_GETNELM: case ENCIOC_GETELMMAP: case ENCIOC_GETENCSTAT: case ENCIOC_GETELMSTAT: case ENCIOC_GETELMDESC: case ENCIOC_GETELMDEVNAMES: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: break; default: if ((flag & FWRITE) == 0) { return (EBADF); } } /* * XXX The values read here are only valid for the current * configuration generation. We need these ioctls * to also pass in/out a generation number. */ sx_slock(&enc->enc_cache_lock); switch (cmd) { case ENCIOC_GETNELM: error = copyout(&cache->nelms, addr, sizeof (cache->nelms)); break; case ENCIOC_GETELMMAP: for (uelm = addr, i = 0; i != cache->nelms; i++) { encioc_element_t kelm; kelm.elm_idx = i; kelm.elm_subenc_id = cache->elm_map[i].subenclosure; kelm.elm_type = cache->elm_map[i].enctype; error = copyout(&kelm, &uelm[i], sizeof(kelm)); if (error) break; } break; case ENCIOC_GETENCSTAT: cam_periph_lock(periph); error = enc->enc_vec.get_enc_status(enc, 1); if (error) { cam_periph_unlock(periph); break; } tmp = cache->enc_status; cam_periph_unlock(periph); error = copyout(&tmp, addr, sizeof(tmp)); cache->enc_status = tmp; break; case ENCIOC_SETENCSTAT: error = copyin(addr, &tmp, sizeof(tmp)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.set_enc_status(enc, tmp, 1); cam_periph_unlock(periph); break; case ENCIOC_GETSTRING: case ENCIOC_SETSTRING: case ENCIOC_GETENCNAME: case ENCIOC_GETENCID: if (enc->enc_vec.handle_string == NULL) { error = EINVAL; break; } error = copyin(addr, &sstr, sizeof(sstr)); if (error) break; cam_periph_lock(periph); error = enc->enc_vec.handle_string(enc, &sstr, cmd); cam_periph_unlock(periph); break; case ENCIOC_GETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.get_elm_status(enc, &elms, 1); cam_periph_unlock(periph); if (error) break; error = copyout(&elms, addr, sizeof(elms)); break; case ENCIOC_GETELMDESC: error = 
copyin(addr, &elmd, sizeof(elmd)); if (error) break; if (elmd.elm_idx >= cache->nelms) { error = EINVAL; break; } if (enc->enc_vec.get_elm_desc != NULL) { error = enc->enc_vec.get_elm_desc(enc, &elmd); if (error) break; } else elmd.elm_desc_len = 0; error = copyout(&elmd, addr, sizeof(elmd)); break; case ENCIOC_GETELMDEVNAMES: if (enc->enc_vec.get_elm_devnames == NULL) { error = EINVAL; break; } error = copyin(addr, &elmdn, sizeof(elmdn)); if (error) break; if (elmdn.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = (*enc->enc_vec.get_elm_devnames)(enc, &elmdn); cam_periph_unlock(periph); if (error) break; error = copyout(&elmdn, addr, sizeof(elmdn)); break; case ENCIOC_SETELMSTAT: error = copyin(addr, &elms, sizeof(elms)); if (error) break; if (elms.elm_idx >= cache->nelms) { error = EINVAL; break; } cam_periph_lock(periph); error = enc->enc_vec.set_elm_status(enc, &elms, 1); cam_periph_unlock(periph); break; case ENCIOC_INIT: cam_periph_lock(periph); error = enc->enc_vec.init_enc(enc); cam_periph_unlock(periph); break; default: cam_periph_lock(periph); error = cam_periph_ioctl(periph, cmd, arg_addr, enc_error); cam_periph_unlock(periph); break; } sx_sunlock(&enc->enc_cache_lock); return (error); } int enc_runcmd(struct enc_softc *enc, char *cdb, int cdbl, char *dptr, int *dlenp) { int error, dlen, tdlen; ccb_flags ddf; union ccb *ccb; CAM_DEBUG(enc->periph->path, CAM_DEBUG_TRACE, ("entering enc_runcmd\n")); if (dptr) { if ((dlen = *dlenp) < 0) { dlen = -dlen; ddf = CAM_DIR_OUT; } else { ddf = CAM_DIR_IN; } } else { dlen = 0; ddf = CAM_DIR_NONE; } if (cdbl > IOCDBLEN) { cdbl = IOCDBLEN; } ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); if (enc->enc_type == ENC_SEMB_SES || enc->enc_type == ENC_SEMB_SAFT) { tdlen = min(dlen, 1020); tdlen = (tdlen + 3) & ~3; cam_fill_ataio(&ccb->ataio, 0, NULL, ddf, 0, dptr, tdlen, 30 * 1000); if (cdb[0] == RECEIVE_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x02, tdlen / 
4); else if (cdb[0] == SEND_DIAGNOSTIC) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x82, tdlen / 4); else if (cdb[0] == READ_BUFFER) ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, cdb[2], 0x00, tdlen / 4); else ata_28bit_cmd(&ccb->ataio, ATA_SEP_ATTN, dlen > 0 ? dptr[0] : 0, 0x80, tdlen / 4); } else { tdlen = dlen; cam_fill_csio(&ccb->csio, 0, NULL, ddf, MSG_SIMPLE_Q_TAG, dptr, dlen, sizeof (struct scsi_sense_data), cdbl, 60 * 1000); bcopy(cdb, ccb->csio.cdb_io.cdb_bytes, cdbl); } error = cam_periph_runccb(ccb, enc_error, ENC_CFLAGS, ENC_FLAGS, NULL); if (error) { if (dptr) { *dlenp = dlen; } } else { if (dptr) { if (ccb->ccb_h.func_code == XPT_ATA_IO) *dlenp = ccb->ataio.resid; else *dlenp = ccb->csio.resid; *dlenp += tdlen - dlen; } } xpt_release_ccb(ccb); CAM_DEBUG(enc->periph->path, CAM_DEBUG_SUBTRACE, ("exiting enc_runcmd: *dlenp = %d\n", *dlenp)); return (error); } void enc_log(struct enc_softc *enc, const char *fmt, ...) { va_list ap; printf("%s%d: ", enc->periph->periph_name, enc->periph->unit_number); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } /* * The code after this point runs on many platforms, * so forgive the slightly awkward and nonconforming * appearance. */ /* * Is this a device that supports enclosure services? * * It's a pretty simple ruleset- if it is device type * 0x0D (13), it's an ENCLOSURE device. 
*/ #define SAFTE_START 44 #define SAFTE_END 50 #define SAFTE_LEN SAFTE_END-SAFTE_START static enctyp enc_type(struct ccb_getdev *cgd) { int buflen; unsigned char *iqd; if (cgd->protocol == PROTO_SEMB) { iqd = (unsigned char *)&cgd->ident_data; if (STRNCMP(iqd + 43, "S-E-S", 5) == 0) return (ENC_SEMB_SES); else if (STRNCMP(iqd + 43, "SAF-TE", 6) == 0) return (ENC_SEMB_SAFT); return (ENC_NONE); } else if (cgd->protocol != PROTO_SCSI) return (ENC_NONE); iqd = (unsigned char *)&cgd->inq_data; buflen = min(sizeof(cgd->inq_data), SID_ADDITIONAL_LENGTH(&cgd->inq_data)); if ((iqd[0] & 0x1f) == T_ENCLOSURE) { if ((iqd[2] & 0x7) > 2) { return (ENC_SES); } else { return (ENC_SES_SCSI2); } return (ENC_NONE); } #ifdef SES_ENABLE_PASSTHROUGH if ((iqd[6] & 0x40) && (iqd[2] & 0x7) >= 2) { /* * PassThrough Device. */ return (ENC_SES_PASSTHROUGH); } #endif /* * The comparison is short for a reason- * some vendors were chopping it short. */ if (buflen < SAFTE_END - 2) { return (ENC_NONE); } if (STRNCMP((char *)&iqd[SAFTE_START], "SAF-TE", SAFTE_LEN - 2) == 0) { return (ENC_SAFT); } return (ENC_NONE); } /*================== Enclosure Monitoring/Processing Daemon ==================*/ /** * \brief Queue an update request for a given action, if needed. * * \param enc SES softc to queue the request for. * \param action Action requested. */ void enc_update_request(enc_softc_t *enc, uint32_t action) { if ((enc->pending_actions & (0x1 << action)) == 0) { enc->pending_actions |= (0x1 << action); ENC_DLOG(enc, "%s: queing requested action %d\n", __func__, action); if (enc->current_action == ENC_UPDATE_NONE) wakeup(enc->enc_daemon); } else { ENC_DLOG(enc, "%s: ignoring requested action %d - " "Already queued\n", __func__, action); } } /** * \brief Invoke the handler of the highest priority pending * state in the SES state machine. * * \param enc The SES instance invoking the state machine. 
*/ static void enc_fsm_step(enc_softc_t *enc) { union ccb *ccb; uint8_t *buf; struct enc_fsm_state *cur_state; int error; uint32_t xfer_len; ENC_DLOG(enc, "%s enter %p\n", __func__, enc); enc->current_action = ffs(enc->pending_actions) - 1; enc->pending_actions &= ~(0x1 << enc->current_action); cur_state = &enc->enc_fsm_states[enc->current_action]; buf = NULL; if (cur_state->buf_size != 0) { cam_periph_unlock(enc->periph); buf = malloc(cur_state->buf_size, M_SCSIENC, M_WAITOK|M_ZERO); cam_periph_lock(enc->periph); } error = 0; ccb = NULL; if (cur_state->fill != NULL) { ccb = cam_periph_getccb(enc->periph, CAM_PRIORITY_NORMAL); error = cur_state->fill(enc, cur_state, ccb, buf); if (error != 0) goto done; error = cam_periph_runccb(ccb, cur_state->error, ENC_CFLAGS, ENC_FLAGS|SF_QUIET_IR, NULL); } if (ccb != NULL) { if (ccb->ccb_h.func_code == XPT_ATA_IO) xfer_len = ccb->ataio.dxfer_len - ccb->ataio.resid; else xfer_len = ccb->csio.dxfer_len - ccb->csio.resid; } else xfer_len = 0; cam_periph_unlock(enc->periph); cur_state->done(enc, cur_state, ccb, &buf, error, xfer_len); cam_periph_lock(enc->periph); done: ENC_DLOG(enc, "%s exit - result %d\n", __func__, error); ENC_FREE_AND_NULL(buf); if (ccb != NULL) xpt_release_ccb(ccb); } /** * \invariant Called with cam_periph mutex held. */ static void enc_status_updater(void *arg) { enc_softc_t *enc; enc = arg; if (enc->enc_vec.poll_status != NULL) enc->enc_vec.poll_status(enc); } static void enc_daemon(void *arg) { enc_softc_t *enc; enc = arg; cam_periph_lock(enc->periph); while ((enc->enc_flags & ENC_FLAG_SHUTDOWN) == 0) { if (enc->pending_actions == 0) { struct intr_config_hook *hook; /* * Reset callout and msleep, or * issue timed task completion * status command. */ enc->current_action = ENC_UPDATE_NONE; /* * We've been through our state machine at least * once. Allow the transition to userland. 
*/ hook = &enc->enc_boot_hold_ch; if (hook->ich_func != NULL) { config_intrhook_disestablish(hook); hook->ich_func = NULL; } callout_reset(&enc->status_updater, 60*hz, enc_status_updater, enc); cam_periph_sleep(enc->periph, enc->enc_daemon, PUSER, "idle", 0); } else { enc_fsm_step(enc); } } enc->enc_daemon = NULL; cam_periph_unlock(enc->periph); cam_periph_release(enc->periph); kproc_exit(0); } static int enc_kproc_init(enc_softc_t *enc) { int result; callout_init_mtx(&enc->status_updater, cam_periph_mtx(enc->periph), 0); if (cam_periph_acquire(enc->periph) != CAM_REQ_CMP) return (ENXIO); result = kproc_create(enc_daemon, enc, &enc->enc_daemon, /*flags*/0, /*stackpgs*/0, "enc_daemon%d", enc->periph->unit_number); if (result == 0) { /* Do an initial load of all page data. */ cam_periph_lock(enc->periph); enc->enc_vec.poll_status(enc); cam_periph_unlock(enc->periph); } else cam_periph_release(enc->periph); return (result); } /** * \brief Interrupt configuration hook callback associated with * enc_boot_hold_ch. * * Since interrupts are always functional at the time of enclosure * configuration, there is nothing to be done when the callback occurs. * This hook is only registered to hold up boot processing while initial * eclosure processing occurs. * * \param arg The enclosure softc, but currently unused in this callback. */ static void enc_nop_confighook_cb(void *arg __unused) { } static cam_status enc_ctor(struct cam_periph *periph, void *arg) { cam_status status = CAM_REQ_CMP_ERR; int err; enc_softc_t *enc; struct ccb_getdev *cgd; char *tname; + struct make_dev_args args; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("enc_ctor: no getdev CCB, can't register device\n"); goto out; } enc = ENC_MALLOCZ(sizeof(*enc)); if (enc == NULL) { printf("enc_ctor: Unable to probe new device. 
" "Unable to allocate enc\n"); goto out; } enc->periph = periph; enc->current_action = ENC_UPDATE_INVALID; enc->enc_type = enc_type(cgd); sx_init(&enc->enc_cache_lock, "enccache"); switch (enc->enc_type) { case ENC_SES: case ENC_SES_SCSI2: case ENC_SES_PASSTHROUGH: case ENC_SEMB_SES: err = ses_softc_init(enc); break; case ENC_SAFT: case ENC_SEMB_SAFT: err = safte_softc_init(enc); break; case ENC_NONE: default: ENC_FREE(enc); return (CAM_REQ_CMP_ERR); } if (err) { xpt_print(periph->path, "error %d initializing\n", err); goto out; } /* * Hold off userland until we have made at least one pass * through our state machine so that physical path data is * present. */ if (enc->enc_vec.poll_status != NULL) { enc->enc_boot_hold_ch.ich_func = enc_nop_confighook_cb; enc->enc_boot_hold_ch.ich_arg = enc; config_intrhook_establish(&enc->enc_boot_hold_ch); } /* * The softc field is set only once the enc is fully initialized * so that we can rely on this field to detect partially * initialized periph objects in the AC_FOUND_DEVICE handler. */ periph->softc = enc; cam_periph_unlock(periph); if (enc->enc_vec.poll_status != NULL) { err = enc_kproc_init(enc); if (err) { xpt_print(periph->path, "error %d starting enc_daemon\n", err); goto out; } } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. 
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } - enc->enc_dev = make_dev(&enc_cdevsw, periph->unit_number, - UID_ROOT, GID_OPERATOR, 0600, "%s%d", - periph->periph_name, periph->unit_number); - + make_dev_args_init(&args); + args.mda_devsw = &enc_cdevsw; + args.mda_unit = periph->unit_number; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = periph; + err = make_dev_s(&args, &enc->enc_dev, "%s%d", periph->periph_name, + periph->unit_number); cam_periph_lock(periph); - enc->enc_dev->si_drv1 = periph; + if (err != 0) { + cam_periph_release_locked(periph); + return (CAM_REQ_CMP_ERR); + } enc->enc_flags |= ENC_FLAG_INITIALIZED; /* * Add an async callback so that we get notified if this * device goes away. */ xpt_register_async(AC_LOST_DEVICE, enc_async, periph, periph->path); switch (enc->enc_type) { default: case ENC_NONE: tname = "No ENC device"; break; case ENC_SES_SCSI2: tname = "SCSI-2 ENC Device"; break; case ENC_SES: tname = "SCSI-3 ENC Device"; break; case ENC_SES_PASSTHROUGH: tname = "ENC Passthrough Device"; break; case ENC_SAFT: tname = "SAF-TE Compliant Device"; break; case ENC_SEMB_SES: tname = "SEMB SES Device"; break; case ENC_SEMB_SAFT: tname = "SEMB SAF-TE Device"; break; } xpt_announce_periph(periph, tname); status = CAM_REQ_CMP; out: if (status != CAM_REQ_CMP) enc_dtor(periph); return (status); } Index: head/sys/cam/scsi/scsi_pass.c =================================================================== --- head/sys/cam/scsi/scsi_pass.c (revision 293349) +++ head/sys/cam/scsi/scsi_pass.c (revision 293350) @@ -1,2225 +1,2225 @@ /*- * Copyright (c) 1997, 1998, 2000 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_kdtrace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { PASS_FLAG_OPEN = 0x01, PASS_FLAG_LOCKED = 0x02, PASS_FLAG_INVALID = 0x04, PASS_FLAG_INITIAL_PHYSPATH = 0x08, PASS_FLAG_ZONE_INPROG = 0x10, PASS_FLAG_ZONE_VALID = 0x20, PASS_FLAG_UNMAPPED_CAPABLE = 0x40, PASS_FLAG_ABANDONED_REF_SET = 0x80 } pass_flags; typedef enum { PASS_STATE_NORMAL } pass_state; typedef enum { PASS_CCB_BUFFER_IO, PASS_CCB_QUEUED_IO } pass_ccb_types; #define ccb_type ppriv_field0 #define ccb_ioreq ppriv_ptr1 /* * The maximum number of memory segments we preallocate. */ #define PASS_MAX_SEGS 16 typedef enum { PASS_IO_NONE = 0x00, PASS_IO_USER_SEG_MALLOC = 0x01, PASS_IO_KERN_SEG_MALLOC = 0x02, PASS_IO_ABANDONED = 0x04 } pass_io_flags; struct pass_io_req { union ccb ccb; union ccb *alloced_ccb; union ccb *user_ccb_ptr; camq_entry user_periph_links; ccb_ppriv_area user_periph_priv; struct cam_periph_map_info mapinfo; pass_io_flags flags; ccb_flags data_flags; int num_user_segs; bus_dma_segment_t user_segs[PASS_MAX_SEGS]; int num_kern_segs; bus_dma_segment_t kern_segs[PASS_MAX_SEGS]; bus_dma_segment_t *user_segptr; bus_dma_segment_t *kern_segptr; int num_bufs; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint8_t *user_bufs[CAM_PERIPH_MAXMAPS]; uint8_t *kern_bufs[CAM_PERIPH_MAXMAPS]; struct bintime start_time; TAILQ_ENTRY(pass_io_req) links; }; struct pass_softc { pass_state state; pass_flags flags; u_int8_t pd_type; union ccb saved_ccb; int open_count; u_int maxio; struct devstat *device_stats; struct cdev *dev; struct cdev *alias_dev; struct task add_physpath_task; struct task shutdown_kqueue_task; struct selinfo read_select; TAILQ_HEAD(, pass_io_req) incoming_queue; 
TAILQ_HEAD(, pass_io_req) active_queue; TAILQ_HEAD(, pass_io_req) abandoned_queue; TAILQ_HEAD(, pass_io_req) done_queue; struct cam_periph *periph; char zone_name[12]; char io_zone_name[12]; uma_zone_t pass_zone; uma_zone_t pass_io_zone; size_t io_zone_size; }; static d_open_t passopen; static d_close_t passclose; static d_ioctl_t passioctl; static d_ioctl_t passdoioctl; static d_poll_t passpoll; static d_kqfilter_t passkqfilter; static void passreadfiltdetach(struct knote *kn); static int passreadfilt(struct knote *kn, long hint); static periph_init_t passinit; static periph_ctor_t passregister; static periph_oninv_t passoninvalidate; static periph_dtor_t passcleanup; static periph_start_t passstart; static void pass_shutdown_kqueue(void *context, int pending); static void pass_add_physpath(void *context, int pending); static void passasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void passdone(struct cam_periph *periph, union ccb *done_ccb); static int passcreatezone(struct cam_periph *periph); static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req); static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction); static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req); static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req); static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb); static struct periph_driver passdriver = { passinit, "pass", TAILQ_HEAD_INITIALIZER(passdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pass, passdriver); static struct cdevsw pass_cdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = passopen, .d_close = passclose, .d_ioctl = passioctl, .d_poll = passpoll, .d_kqfilter = passkqfilter, .d_name = "pass", }; static struct filterops passread_filtops = { .f_isfd = 
1,
	.f_detach = passreadfiltdetach,
	.f_event = passreadfilt
};

static MALLOC_DEFINE(M_SCSIPASS, "scsi_pass", "scsi passthrough buffers");

/*
 * Driver-wide init: register for "new device found" async events so a
 * pass(4) instance gets attached to every CAM device that appears.
 */
static void
passinit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, passasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("pass: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	}
}

/*
 * The device node is gone: discard every queued/completed I/O we can no
 * longer report to the user, and move still-active I/O to the abandoned
 * queue so passdone() can free it as the SIM completes it.
 * Called with the periph lock held (from passdevgonecb()).
 */
static void
passrejectios(struct cam_periph *periph)
{
	struct pass_io_req *io_req, *io_req2;
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * The user can no longer get status for I/O on the done queue, so
	 * clean up all outstanding I/O on the done queue.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->done_queue, io_req, links);
		passiocleanup(softc, io_req);
		uma_zfree(softc->pass_zone, io_req);
	}

	/*
	 * The underlying device is gone, so we can't issue these I/Os.
	 * The devfs node has been shut down, so we can't return status to
	 * the user.  Free any I/O left on the incoming queue.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->incoming_queue, io_req, links);
		passiocleanup(softc, io_req);
		uma_zfree(softc->pass_zone, io_req);
	}

	/*
	 * Normally we would put I/Os on the abandoned queue and acquire a
	 * reference when we saw the final close.  But, the device went
	 * away and devfs may have moved everything off to deadfs by the
	 * time the I/O done callback is called; as a result, we won't see
	 * any more closes.  So, if we have any active I/Os, we need to put
	 * them on the abandoned queue.  When the abandoned queue is empty,
	 * we'll release the remaining reference (see below) to the peripheral.
	 */
	TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) {
		TAILQ_REMOVE(&softc->active_queue, io_req, links);
		io_req->flags |= PASS_IO_ABANDONED;
		TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links);
	}

	/*
	 * If we put any I/O on the abandoned queue, acquire a reference.
	 * It is released in passdone() when the abandoned queue drains.
	 */
	if ((!TAILQ_EMPTY(&softc->abandoned_queue))
	 && ((softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0)) {
		cam_periph_doacquire(periph);
		softc->flags |= PASS_FLAG_ABANDONED_REF_SET;
	}
}

/*
 * devfs "device gone" callback, scheduled by passoninvalidate() via
 * destroy_dev_sched_cb().  After this runs, devfs will deliver no more
 * open/close calls, so drop every reference held on devfs' behalf.
 */
static void
passdevgonecb(void *arg)
{
	struct cam_periph *periph;
	struct mtx *mtx;
	struct pass_softc *softc;
	int i;

	periph = (struct cam_periph *)arg;
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	softc = (struct pass_softc *)periph->softc;
	KASSERT(softc->open_count >= 0, ("Negative open count %d",
		softc->open_count));

	/*
	 * When we get this callback, we will get no more close calls from
	 * devfs.  So if we have any dangling opens, we need to release the
	 * reference held for that particular context.
	 */
	for (i = 0; i < softc->open_count; i++)
		cam_periph_release_locked(periph);

	softc->open_count = 0;

	/*
	 * Release the reference held for the device node, it is gone now.
	 * Accordingly, inform all queued I/Os of their fate.
	 */
	cam_periph_release_locked(periph);
	passrejectios(periph);

	/*
	 * We reference the SIM lock directly here, instead of using
	 * cam_periph_unlock().  The reason is that the final call to
	 * cam_periph_release_locked() above could result in the periph
	 * getting freed.  If that is the case, dereferencing the periph
	 * with a cam_periph_unlock() call would cause a page fault.
	 */
	mtx_unlock(mtx);

	/*
	 * We have to remove our kqueue context from a thread because it
	 * may sleep.  It would be nice if we could get a callback from
	 * kqueue when it is done cleaning up resources.
	 */
	taskqueue_enqueue(taskqueue_thread, &softc->shutdown_kqueue_task);
}

/*
 * Invalidation hook: the underlying device is going away.  Stop async
 * notifications and ask devfs to destroy our node, with passdevgonecb()
 * run once devfs has finished.
 */
static void
passoninvalidate(struct cam_periph *periph)
{
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
*/
	xpt_register_async(0, passasync, periph, periph->path);

	softc->flags |= PASS_FLAG_INVALID;

	/*
	 * Tell devfs this device has gone away, and ask for a callback
	 * when it has cleaned up its state.
	 */
	destroy_dev_sched_cb(softc->dev, passdevgonecb, periph);
}

/*
 * Final destructor, run when the last periph reference is released.
 * Every I/O queue must already be empty; the KASSERTs below enforce
 * that invariant.  Frees the softc.
 */
static void
passcleanup(struct cam_periph *periph)
{
	struct pass_softc *softc;

	softc = (struct pass_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(TAILQ_EMPTY(&softc->active_queue),
	    ("%s called when there are commands on the active queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->abandoned_queue),
	    ("%s called when there are commands on the abandoned queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->incoming_queue),
	    ("%s called when there are commands on the incoming queue!\n",
	    __func__));
	KASSERT(TAILQ_EMPTY(&softc->done_queue),
	    ("%s called when there are commands on the done queue!\n",
	    __func__));

	devstat_remove_entry(softc->device_stats);

	cam_periph_unlock(periph);

	/*
	 * We call taskqueue_drain() for the physpath task to make sure it
	 * is complete.  We drop the lock because this can potentially
	 * sleep.  XXX KDM that is bad.  Need a way to get a callback when
	 * a taskqueue is drained.
	 *
	 * Note that we don't drain the kqueue shutdown task queue.  This
	 * is because we hold a reference on the periph for kqueue, and
	 * release that reference from the kqueue shutdown task queue.  So
	 * we cannot come into this routine unless we've released that
	 * reference.  Also, because that could be the last reference, we
	 * could be called from the cam_periph_release() call in
	 * pass_shutdown_kqueue().  In that case, the taskqueue_drain()
	 * would deadlock.  It would be preferable if we had a way to
	 * get a callback when a taskqueue is done.
	 */
	taskqueue_drain(taskqueue_thread, &softc->add_physpath_task);

	cam_periph_lock(periph);

	free(softc, M_DEVBUF);
}

/*
 * Taskqueue handler that tears down the kqueue knote list.  Must run
 * from a taskqueue context because knlist teardown may sleep.
 */
static void
pass_shutdown_kqueue(void *context, int pending)
{
	struct cam_periph *periph;
	struct pass_softc *softc;

	periph = context;
	softc = periph->softc;

	knlist_clear(&softc->read_select.si_note, /*is_locked*/ 0);
	knlist_destroy(&softc->read_select.si_note);

	/*
	 * Release the reference we held for kqueue.
	 */
	cam_periph_release(periph);
}

/*
 * Taskqueue handler: look up the device's GEOM::physpath attribute and,
 * if present, create a devfs alias for it.  One periph reference is
 * held per pending invocation and released here.
 */
static void
pass_add_physpath(void *context, int pending)
{
	struct cam_periph *periph;
	struct pass_softc *softc;
	struct mtx *mtx;
	char *physpath;

	/*
	 * If we have one, create a devfs alias for our
	 * physical path.
	 */
	periph = context;
	softc = periph->softc;
	physpath = malloc(MAXPATHLEN, M_DEVBUF, M_WAITOK);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);

	if (periph->flags & CAM_PERIPH_INVALID)
		goto out;

	if (xpt_getattr(physpath, MAXPATHLEN,
			"GEOM::physpath", periph->path) == 0
	 && strlen(physpath) != 0) {

		/* Drop the lock: make_dev_physpath_alias() may sleep. */
		mtx_unlock(mtx);
		make_dev_physpath_alias(MAKEDEV_WAITOK, &softc->alias_dev,
					softc->dev, softc->alias_dev, physpath);
		mtx_lock(mtx);
	}

out:
	/*
	 * Now that we've made our alias, we no longer have to have a
	 * reference to the device.
	 */
	if ((softc->flags & PASS_FLAG_INITIAL_PHYSPATH) == 0)
		softc->flags |= PASS_FLAG_INITIAL_PHYSPATH;

	/*
	 * We always acquire a reference to the periph before queueing this
	 * task queue function, so it won't go away before we run.
	 */
	while (pending-- > 0)
		cam_periph_release_locked(periph);
	mtx_unlock(mtx);

	free(physpath, M_DEVBUF);
}

/*
 * Global + per-periph async event handler: attaches new pass(4)
 * instances on AC_FOUND_DEVICE and refreshes the physpath alias on
 * AC_ADVINFO_CHANGED.
 */
static void
passasync(void *callback_arg, u_int32_t code,
	  struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;

	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
*/ status = cam_periph_alloc(passregister, passoninvalidate, passcleanup, passstart, "pass", CAM_PERIPH_BIO, path, passasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("passasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? entry->status_text : "Unknown"); } break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct pass_softc *softc; cam_status status; softc = (struct pass_softc *)periph->softc; /* * Acquire a reference to the periph before we * start the taskqueue, so that we don't run into * a situation where the periph goes away before * the task queue has a chance to run. */ status = cam_periph_acquire(periph); if (status != CAM_REQ_CMP) break; taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status passregister(struct cam_periph *periph, void *arg) { struct pass_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; - int no_tags; + struct make_dev_args args; + int error, no_tags; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("%s: no getdev CCB, can't register device\n", __func__); return(CAM_REQ_CMP_ERR); } softc = (struct pass_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT); if (softc == NULL) { printf("%s: Unable to probe new device. 
" "Unable to allocate softc\n", __func__); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); softc->state = PASS_STATE_NORMAL; if (cgd->protocol == PROTO_SCSI || cgd->protocol == PROTO_ATAPI) softc->pd_type = SID_TYPE(&cgd->inq_data); else if (cgd->protocol == PROTO_SATAPM) softc->pd_type = T_ENCLOSURE; else softc->pd_type = T_DIRECT; periph->softc = softc; softc->periph = periph; TAILQ_INIT(&softc->incoming_queue); TAILQ_INIT(&softc->active_queue); TAILQ_INIT(&softc->abandoned_queue); TAILQ_INIT(&softc->done_queue); snprintf(softc->zone_name, sizeof(softc->zone_name), "%s%d", periph->periph_name, periph->unit_number); snprintf(softc->io_zone_name, sizeof(softc->io_zone_name), "%s%dIO", periph->periph_name, periph->unit_number); softc->io_zone_size = MAXPHYS; knlist_init_mtx(&softc->read_select.si_note, cam_periph_mtx(periph)); bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ if (cpi.hba_misc & PIM_UNMAPPED) softc->flags |= PASS_FLAG_UNMAPPED_CAPABLE; /* * We pass in 0 for a blocksize, since we don't * know what the blocksize of this device is, if * it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("pass", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Initialize the taskqueue handler for shutting down kqueue. */ TASK_INIT(&softc->shutdown_kqueue_task, /*priority*/ 0, pass_shutdown_kqueue, periph); /* * Acquire a reference to the periph that we can release once we've * cleaned up the kqueue. 
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ - softc->dev = make_dev(&pass_cdevsw, periph->unit_number, - UID_ROOT, GID_OPERATOR, 0600, "%s%d", - periph->periph_name, periph->unit_number); + make_dev_args_init(&args); + args.mda_devsw = &pass_cdevsw; + args.mda_unit = periph->unit_number; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = periph; + error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, + periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + cam_periph_release_locked(periph); + return (CAM_REQ_CMP_ERR); + } /* * Hold a reference to the periph before we create the physical * path alias so it can't go away. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } cam_periph_lock(periph); - softc->dev->si_drv1 = periph; TASK_INIT(&softc->add_physpath_task, /*priority*/0, pass_add_physpath, periph); /* * See if physical path information is already available. */ taskqueue_enqueue(taskqueue_thread, &softc->add_physpath_task); /* * Add an async callback so that we get notified if * this device goes away or its physical path * (stored in the advanced info data of the EDT) has * changed. 
*/ xpt_register_async(AC_LOST_DEVICE | AC_ADVINFO_CHANGED, passasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static int passopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; if (softc->flags & PASS_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(error); } /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EPERM); } /* * We don't allow nonblocking access. 
*/ if ((flags & O_NONBLOCK) != 0) { xpt_print(periph->path, "can't do nonblocking access\n"); cam_periph_release_locked(periph); cam_periph_unlock(periph); return(EINVAL); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int passclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; if (softc->open_count == 0) { struct pass_io_req *io_req, *io_req2; int need_unlock; need_unlock = 0; TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } TAILQ_FOREACH_SAFE(io_req, &softc->incoming_queue, links, io_req2) { TAILQ_REMOVE(&softc->incoming_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); } /* * If there are any active I/Os, we need to forcibly acquire a * reference to the peripheral so that we don't go away * before they complete. We'll release the reference when * the abandoned queue is empty. */ io_req = TAILQ_FIRST(&softc->active_queue); if ((io_req != NULL) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET) == 0) { cam_periph_doacquire(periph); softc->flags |= PASS_FLAG_ABANDONED_REF_SET; } /* * Since the I/O in the active queue is not under our * control, just set a flag so that we can clean it up when * it completes and put it on the abandoned queue. This * will prevent our sending spurious completions in the * event that the device is opened again before these I/Os * complete. 
*/ TAILQ_FOREACH_SAFE(io_req, &softc->active_queue, links, io_req2) { TAILQ_REMOVE(&softc->active_queue, io_req, links); io_req->flags |= PASS_IO_ABANDONED; TAILQ_INSERT_TAIL(&softc->abandoned_queue, io_req, links); } } cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. */ mtx_unlock(mtx); return (0); } static void passstart(struct cam_periph *periph, union ccb *start_ccb) { struct pass_softc *softc; softc = (struct pass_softc *)periph->softc; switch (softc->state) { case PASS_STATE_NORMAL: { struct pass_io_req *io_req; /* * Check for any queued I/O requests that require an * allocated slot. */ io_req = TAILQ_FIRST(&softc->incoming_queue); if (io_req == NULL) { xpt_release_ccb(start_ccb); break; } TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); /* * Merge the user's CCB into the allocated CCB. */ xpt_merge_ccb(start_ccb, &io_req->ccb); start_ccb->ccb_h.ccb_type = PASS_CCB_QUEUED_IO; start_ccb->ccb_h.ccb_ioreq = io_req; start_ccb->ccb_h.cbfcnp = passdone; io_req->alloced_ccb = start_ccb; binuptime(&io_req->start_time); devstat_start_transaction(softc->device_stats, &io_req->start_time); xpt_action(start_ccb); /* * If we have any more I/O waiting, schedule ourselves again. 
*/ if (!TAILQ_EMPTY(&softc->incoming_queue)) xpt_schedule(periph, CAM_PRIORITY_NORMAL); break; } default: break; } } static void passdone(struct cam_periph *periph, union ccb *done_ccb) { struct pass_softc *softc; struct ccb_scsiio *csio; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case PASS_CCB_QUEUED_IO: { struct pass_io_req *io_req; io_req = done_ccb->ccb_h.ccb_ioreq; #if 0 xpt_print(periph->path, "%s: called for user CCB %p\n", __func__, io_req->user_ccb_ptr); #endif if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (done_ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) && ((io_req->flags & PASS_IO_ABANDONED) == 0)) { int error; error = passerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } } /* * Copy the allocated CCB contents back to the malloced CCB * so we can give status back to the user when he requests it. */ bcopy(done_ccb, &io_req->ccb, sizeof(*done_ccb)); /* * Log data/transaction completion with devstat(9). */ switch (done_ccb->ccb_h.func_code) { case XPT_SCSI_IO: devstat_end_transaction(softc->device_stats, done_ccb->csio.dxfer_len - done_ccb->csio.resid, done_ccb->csio.tag_action & 0x3, ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_ATA_IO: devstat_end_transaction(softc->device_stats, done_ccb->ataio.dxfer_len - done_ccb->ataio.resid, done_ccb->ataio.tag_action & 0x3, ((done_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (done_ccb->ccb_h.flags & CAM_DIR_OUT) ? DEVSTAT_WRITE : DEVSTAT_READ, NULL, &io_req->start_time); break; case XPT_SMP_IO: /* * XXX KDM this isn't quite right, but there isn't * currently an easy way to represent a bidirectional * transfer in devstat. 
The only way to do it * and have the byte counts come out right would * mean that we would have to record two * transactions, one for the request and one for the * response. For now, so that we report something, * just treat the entire thing as a read. */ devstat_end_transaction(softc->device_stats, done_ccb->smpio.smp_request_len + done_ccb->smpio.smp_response_len, DEVSTAT_TAG_SIMPLE, DEVSTAT_READ, NULL, &io_req->start_time); break; default: devstat_end_transaction(softc->device_stats, 0, DEVSTAT_TAG_NONE, DEVSTAT_NO_DATA, NULL, &io_req->start_time); break; } /* * In the normal case, take the completed I/O off of the * active queue and put it on the done queue. Notitfy the * user that we have a completed I/O. */ if ((io_req->flags & PASS_IO_ABANDONED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); selwakeuppri(&softc->read_select, PRIBIO); KNOTE_LOCKED(&softc->read_select.si_note, 0); } else { /* * In the case of an abandoned I/O (final close * without fetching the I/O), take it off of the * abandoned queue and free it. */ TAILQ_REMOVE(&softc->abandoned_queue, io_req, links); passiocleanup(softc, io_req); uma_zfree(softc->pass_zone, io_req); /* * Release the done_ccb here, since we may wind up * freeing the peripheral when we decrement the * reference count below. */ xpt_release_ccb(done_ccb); /* * If the abandoned queue is empty, we can release * our reference to the periph since we won't have * any more completions coming. */ if ((TAILQ_EMPTY(&softc->abandoned_queue)) && (softc->flags & PASS_FLAG_ABANDONED_REF_SET)) { softc->flags &= ~PASS_FLAG_ABANDONED_REF_SET; cam_periph_release_locked(periph); } /* * We have already released the CCB, so we can * return. 
*/ return; } break; } } xpt_release_ccb(done_ccb); } static int passcreatezone(struct cam_periph *periph) { struct pass_softc *softc; int error; error = 0; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); KASSERT(((softc->flags & PASS_FLAG_ZONE_VALID) == 0), ("%s called when the pass(4) zone is valid!\n", __func__)); KASSERT((softc->pass_zone == NULL), ("%s called when the pass(4) zone is allocated!\n", __func__)); if ((softc->flags & PASS_FLAG_ZONE_INPROG) == 0) { /* * We're the first context through, so we need to create * the pass(4) UMA zone for I/O requests. */ softc->flags |= PASS_FLAG_ZONE_INPROG; /* * uma_zcreate() does a blocking (M_WAITOK) allocation, * so we cannot hold a mutex while we call it. */ cam_periph_unlock(periph); softc->pass_zone = uma_zcreate(softc->zone_name, sizeof(struct pass_io_req), NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); softc->pass_io_zone = uma_zcreate(softc->io_zone_name, softc->io_zone_size, NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0); cam_periph_lock(periph); if ((softc->pass_zone == NULL) || (softc->pass_io_zone == NULL)) { if (softc->pass_zone == NULL) xpt_print(periph->path, "unable to allocate " "IO Req UMA zone\n"); else xpt_print(periph->path, "unable to allocate " "IO UMA zone\n"); softc->flags &= ~PASS_FLAG_ZONE_INPROG; goto bailout; } /* * Set the flags appropriately and notify any other waiters. */ softc->flags &= PASS_FLAG_ZONE_INPROG; softc->flags |= PASS_FLAG_ZONE_VALID; wakeup(&softc->pass_zone); } else { /* * In this case, the UMA zone has not yet been created, but * another context is in the process of creating it. We * need to sleep until the creation is either done or has * failed. */ while ((softc->flags & PASS_FLAG_ZONE_INPROG) && ((softc->flags & PASS_FLAG_ZONE_VALID) == 0)) { error = msleep(&softc->pass_zone, cam_periph_mtx(periph), PRIBIO, "paszon", 0); if (error != 0) goto bailout; } /* * If the zone creation failed, no luck for the user. 
*/ if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0){ error = ENOMEM; goto bailout; } } bailout: return (error); } static void passiocleanup(struct pass_softc *softc, struct pass_io_req *io_req) { union ccb *ccb; u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; int i, numbufs; ccb = &io_req->ccb; switch (ccb->ccb_h.func_code) { case XPT_DEV_MATCH: numbufs = min(io_req->num_bufs, 2); if (numbufs == 1) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; } break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: data_ptrs[0] = &ccb->csio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_ATA_IO: data_ptrs[0] = &ccb->ataio.data_ptr; numbufs = min(io_req->num_bufs, 1); break; case XPT_SMP_IO: numbufs = min(io_req->num_bufs, 2); data_ptrs[0] = &ccb->smpio.smp_request; data_ptrs[1] = &ccb->smpio.smp_response; break; case XPT_DEV_ADVINFO: numbufs = min(io_req->num_bufs, 1); data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; break; default: /* allow ourselves to be swapped once again */ return; break; /* NOTREACHED */ } if (io_req->flags & PASS_IO_USER_SEG_MALLOC) { free(io_req->user_segptr, M_SCSIPASS); io_req->user_segptr = NULL; } /* * We only want to free memory we malloced. 
*/ if (io_req->data_flags == CAM_DATA_VADDR) { for (i = 0; i < io_req->num_bufs; i++) { if (io_req->kern_bufs[i] == NULL) continue; free(io_req->kern_bufs[i], M_SCSIPASS); io_req->kern_bufs[i] = NULL; } } else if (io_req->data_flags == CAM_DATA_SG) { for (i = 0; i < io_req->num_kern_segs; i++) { if ((uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr == NULL) continue; uma_zfree(softc->pass_io_zone, (uint8_t *)(uintptr_t) io_req->kern_segptr[i].ds_addr); io_req->kern_segptr[i].ds_addr = 0; } } if (io_req->flags & PASS_IO_KERN_SEG_MALLOC) { free(io_req->kern_segptr, M_SCSIPASS); io_req->kern_segptr = NULL; } if (io_req->data_flags != CAM_DATA_PADDR) { for (i = 0; i < numbufs; i++) { /* * Restore the user's buffer pointers to their * previous values. */ if (io_req->user_bufs[i] != NULL) *data_ptrs[i] = io_req->user_bufs[i]; } } } static int passcopysglist(struct cam_periph *periph, struct pass_io_req *io_req, ccb_flags direction) { bus_size_t kern_watermark, user_watermark, len_copied, len_to_copy; bus_dma_segment_t *user_sglist, *kern_sglist; int i, j, error; error = 0; kern_watermark = 0; user_watermark = 0; len_to_copy = 0; len_copied = 0; user_sglist = io_req->user_segptr; kern_sglist = io_req->kern_segptr; for (i = 0, j = 0; i < io_req->num_user_segs && j < io_req->num_kern_segs;) { uint8_t *user_ptr, *kern_ptr; len_to_copy = min(user_sglist[i].ds_len -user_watermark, kern_sglist[j].ds_len - kern_watermark); user_ptr = (uint8_t *)(uintptr_t)user_sglist[i].ds_addr; user_ptr = user_ptr + user_watermark; kern_ptr = (uint8_t *)(uintptr_t)kern_sglist[j].ds_addr; kern_ptr = kern_ptr + kern_watermark; user_watermark += len_to_copy; kern_watermark += len_to_copy; if (!useracc(user_ptr, len_to_copy, (direction == CAM_DIR_IN) ? 
VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list element %p len %zu\n", __func__, user_ptr, len_to_copy); error = EFAULT; goto bailout; } if (direction == CAM_DIR_IN) { error = copyout(kern_ptr, user_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyout of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, kern_ptr, user_ptr, error); goto bailout; } } else { error = copyin(user_ptr, kern_ptr, len_to_copy); if (error != 0) { xpt_print(periph->path, "%s: copyin of %u " "bytes from %p to %p failed with " "error %d\n", __func__, len_to_copy, user_ptr, kern_ptr, error); goto bailout; } } len_copied += len_to_copy; if (user_sglist[i].ds_len == user_watermark) { i++; user_watermark = 0; } if (kern_sglist[j].ds_len == kern_watermark) { j++; kern_watermark = 0; } } bailout: return (error); } static int passmemsetup(struct cam_periph *periph, struct pass_io_req *io_req) { union ccb *ccb; struct pass_softc *softc; int numbufs, i; uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; uint32_t lengths[CAM_PERIPH_MAXMAPS]; uint32_t dirs[CAM_PERIPH_MAXMAPS]; uint32_t num_segs; uint16_t *seg_cnt_ptr; size_t maxmap; int error; cam_periph_assert(periph, MA_NOTOWNED); softc = periph->softc; error = 0; ccb = &io_req->ccb; maxmap = 0; num_segs = 0; seg_cnt_ptr = NULL; switch(ccb->ccb_h.func_code) { case XPT_DEV_MATCH: if (ccb->cdm.match_buf_len == 0) { printf("%s: invalid match buffer length 0\n", __func__); return(EINVAL); } if (ccb->cdm.pattern_buf_len > 0) { data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; lengths[0] = ccb->cdm.pattern_buf_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; lengths[1] = ccb->cdm.match_buf_len; dirs[1] = CAM_DIR_IN; numbufs = 2; } else { data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; lengths[0] = ccb->cdm.match_buf_len; dirs[0] = CAM_DIR_IN; numbufs = 1; } io_req->data_flags = CAM_DATA_VADDR; break; case XPT_SCSI_IO: case XPT_CONT_TARGET_IO: if 
((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * The user shouldn't be able to supply a bio. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) return (EINVAL); io_req->data_flags = ccb->ccb_h.flags & CAM_DATA_MASK; data_ptrs[0] = &ccb->csio.data_ptr; lengths[0] = ccb->csio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; num_segs = ccb->csio.sglist_cnt; seg_cnt_ptr = &ccb->csio.sglist_cnt; numbufs = 1; maxmap = softc->maxio; break; case XPT_ATA_IO: if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) return(0); /* * We only support a single virtual address for ATA I/O. */ if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) return (EINVAL); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->ataio.data_ptr; lengths[0] = ccb->ataio.dxfer_len; dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; numbufs = 1; maxmap = softc->maxio; break; case XPT_SMP_IO: io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = &ccb->smpio.smp_request; lengths[0] = ccb->smpio.smp_request_len; dirs[0] = CAM_DIR_OUT; data_ptrs[1] = &ccb->smpio.smp_response; lengths[1] = ccb->smpio.smp_response_len; dirs[1] = CAM_DIR_IN; numbufs = 2; maxmap = softc->maxio; break; case XPT_DEV_ADVINFO: if (ccb->cdai.bufsiz == 0) return (0); io_req->data_flags = CAM_DATA_VADDR; data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; lengths[0] = ccb->cdai.bufsiz; dirs[0] = CAM_DIR_IN; numbufs = 1; break; default: return(EINVAL); break; /* NOTREACHED */ } io_req->num_bufs = numbufs; /* * If there is a maximum, check to make sure that the user's * request fits within the limit. In general, we should only have * a maximum length for requests that go to hardware. Otherwise it * is whatever we're able to malloc. 
*/ for (i = 0; i < numbufs; i++) { io_req->user_bufs[i] = *data_ptrs[i]; io_req->dirs[i] = dirs[i]; io_req->lengths[i] = lengths[i]; if (maxmap == 0) continue; if (lengths[i] <= maxmap) continue; xpt_print(periph->path, "%s: data length %u > max allowed %u " "bytes\n", __func__, lengths[i], maxmap); error = EINVAL; goto bailout; } switch (io_req->data_flags) { case CAM_DATA_VADDR: /* Map or copy the buffer into kernel address space */ for (i = 0; i < numbufs; i++) { uint8_t *tmp_buf; /* * If for some reason no length is specified, we * don't need to allocate anything. */ if (io_req->lengths[i] == 0) continue; /* * Make sure that the user's buffer is accessible * to that process. */ if (!useracc(io_req->user_bufs[i], io_req->lengths[i], (io_req->dirs[i] == CAM_DIR_IN) ? VM_PROT_WRITE : VM_PROT_READ)) { xpt_print(periph->path, "%s: user address %p " "length %u is not accessible\n", __func__, io_req->user_bufs[i], io_req->lengths[i]); error = EFAULT; goto bailout; } tmp_buf = malloc(lengths[i], M_SCSIPASS, M_WAITOK | M_ZERO); io_req->kern_bufs[i] = tmp_buf; *data_ptrs[i] = tmp_buf; #if 0 xpt_print(periph->path, "%s: malloced %p len %u, user " "buffer %p, operation: %s\n", __func__, tmp_buf, lengths[i], io_req->user_bufs[i], (dirs[i] == CAM_DIR_IN) ? "read" : "write"); #endif /* * We only need to copy in if the user is writing. */ if (dirs[i] != CAM_DIR_OUT) continue; error = copyin(io_req->user_bufs[i], io_req->kern_bufs[i], lengths[i]); if (error != 0) { xpt_print(periph->path, "%s: copy of user " "buffer from %p to %p failed with " "error %d\n", __func__, io_req->user_bufs[i], io_req->kern_bufs[i], error); goto bailout; } } break; case CAM_DATA_PADDR: /* Pass down the pointer as-is */ break; case CAM_DATA_SG: { size_t sg_length, size_to_go, alloc_size; uint32_t num_segs_needed; /* * Copy the user S/G list in, and then copy in the * individual segments. */ /* * We shouldn't see this, but check just in case. 
*/ if (numbufs != 1) { xpt_print(periph->path, "%s: cannot currently handle " "more than one S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG flag set, " "but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. */ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* * We allocate buffers in io_zone_size increments for an * S/G list. This will generally be MAXPHYS. */ if (lengths[0] <= softc->io_zone_size) num_segs_needed = 1; else { num_segs_needed = lengths[0] / softc->io_zone_size; if ((lengths[0] % softc->io_zone_size) != 0) num_segs_needed++; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = num_segs_needed; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; /* * If we have enough segments allocated by default to handle * the length of the user's S/G list, */ if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; if (!useracc(*data_ptrs[0], sg_length, VM_PROT_READ)) { xpt_print(periph->path, "%s: unable to access user " "S/G list at %p\n", __func__, *data_ptrs[0]); error = EFAULT; goto bailout; } error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } if (num_segs_needed > PASS_MAX_SEGS) { io_req->kern_segptr = 
malloc(sizeof(bus_dma_segment_t) * num_segs_needed, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_KERN_SEG_MALLOC; } else { io_req->kern_segptr = io_req->kern_segs; } /* * Allocate the kernel S/G list. */ for (size_to_go = lengths[0], i = 0; size_to_go > 0 && i < num_segs_needed; i++, size_to_go -= alloc_size) { uint8_t *kern_ptr; alloc_size = min(size_to_go, softc->io_zone_size); kern_ptr = uma_zalloc(softc->pass_io_zone, M_WAITOK); io_req->kern_segptr[i].ds_addr = (bus_addr_t)(uintptr_t)kern_ptr; io_req->kern_segptr[i].ds_len = alloc_size; } if (size_to_go > 0) { printf("%s: size_to_go = %zu, software error!\n", __func__, size_to_go); error = EINVAL; goto bailout; } *data_ptrs[0] = (uint8_t *)io_req->kern_segptr; *seg_cnt_ptr = io_req->num_kern_segs; /* * We only need to copy data here if the user is writing. */ if (dirs[0] == CAM_DIR_OUT) error = passcopysglist(periph, io_req, dirs[0]); break; } case CAM_DATA_SG_PADDR: { size_t sg_length; /* * We shouldn't see this, but check just in case. */ if (numbufs != 1) { printf("%s: cannot currently handle more than one " "S/G list per CCB\n", __func__); error = EINVAL; goto bailout; } /* * We have to have at least one segment. */ if (num_segs == 0) { xpt_print(periph->path, "%s: CAM_DATA_SG_PADDR flag " "set, but sglist_cnt=0!\n", __func__); error = EINVAL; goto bailout; } /* * Make sure the user specified the total length and didn't * just leave it to us to decode the S/G list. 
*/ if (lengths[0] == 0) { xpt_print(periph->path, "%s: no dxfer_len specified, " "but CAM_DATA_SG flag is set!\n", __func__); error = EINVAL; goto bailout; } /* Figure out the size of the S/G list */ sg_length = num_segs * sizeof(bus_dma_segment_t); io_req->num_user_segs = num_segs; io_req->num_kern_segs = io_req->num_user_segs; /* Save the user's S/G list pointer for later restoration */ io_req->user_bufs[0] = *data_ptrs[0]; if (num_segs > PASS_MAX_SEGS) { io_req->user_segptr = malloc(sizeof(bus_dma_segment_t) * num_segs, M_SCSIPASS, M_WAITOK | M_ZERO); io_req->flags |= PASS_IO_USER_SEG_MALLOC; } else io_req->user_segptr = io_req->user_segs; io_req->kern_segptr = io_req->user_segptr; error = copyin(*data_ptrs[0], io_req->user_segptr, sg_length); if (error != 0) { xpt_print(periph->path, "%s: copy of user S/G list " "from %p to %p failed with error %d\n", __func__, *data_ptrs[0], io_req->user_segptr, error); goto bailout; } break; } default: case CAM_DATA_BIO: /* * A user shouldn't be attaching a bio to the CCB. It * isn't a user-accessible structure. */ error = EINVAL; break; } bailout: if (error != 0) passiocleanup(softc, io_req); return (error); } static int passmemdone(struct cam_periph *periph, struct pass_io_req *io_req) { struct pass_softc *softc; union ccb *ccb; int error; int i; error = 0; softc = (struct pass_softc *)periph->softc; ccb = &io_req->ccb; switch (io_req->data_flags) { case CAM_DATA_VADDR: /* * Copy back to the user buffer if this was a read. */ for (i = 0; i < io_req->num_bufs; i++) { if (io_req->dirs[i] != CAM_DIR_IN) continue; error = copyout(io_req->kern_bufs[i], io_req->user_bufs[i], io_req->lengths[i]); if (error != 0) { xpt_print(periph->path, "Unable to copy %u " "bytes from %p to user address %p\n", io_req->lengths[i], io_req->kern_bufs[i], io_req->user_bufs[i]); goto bailout; } } break; case CAM_DATA_PADDR: /* Do nothing. 
The pointer is a physical address already */ break; case CAM_DATA_SG: /* * Copy back to the user buffer if this was a read. * Restore the user's S/G list buffer pointer. */ if (io_req->dirs[0] == CAM_DIR_IN) error = passcopysglist(periph, io_req, io_req->dirs[0]); break; case CAM_DATA_SG_PADDR: /* * Restore the user's S/G list buffer pointer. No need to * copy. */ break; default: case CAM_DATA_BIO: error = EINVAL; break; } bailout: /* * Reset the user's pointers to their original values and free * allocated memory. */ passiocleanup(softc, io_req); return (error); } static int passioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = passdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, passdoioctl); } return (error); } static int passdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int error; uint32_t priority; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return(ENXIO); - cam_periph_lock(periph); softc = (struct pass_softc *)periph->softc; error = 0; switch (cmd) { case CAMIOCOMMAND: { union ccb *inccb; union ccb *ccb; int ccb_malloced; inccb = (union ccb *)addr; /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. */ if (inccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", inccb->ccb_h.func_code); error = ENODEV; break; } /* Compatibility for RL/priority-unaware code. */ priority = inccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Non-immediate CCBs need a CCB from the per-device pool * of CCBs, which is scheduled by the transport layer. * Immediate CCBs and user-supplied CCBs should just be * malloced. 
*/ if ((inccb->ccb_h.func_code & XPT_FC_QUEUED) && ((inccb->ccb_h.func_code & XPT_FC_USER_CCB) == 0)) { ccb = cam_periph_getccb(periph, priority); ccb_malloced = 0; } else { ccb = xpt_alloc_ccb_nowait(); if (ccb != NULL) xpt_setup_ccb(&ccb->ccb_h, periph->path, priority); ccb_malloced = 1; } if (ccb == NULL) { xpt_print(periph->path, "unable to allocate CCB\n"); error = ENOMEM; break; } error = passsendccb(periph, ccb, inccb); if (ccb_malloced) xpt_free_ccb(ccb); else xpt_release_ccb(ccb); break; } case CAMIOQUEUE: { struct pass_io_req *io_req; union ccb **user_ccb, *ccb; xpt_opcode fc; if ((softc->flags & PASS_FLAG_ZONE_VALID) == 0) { error = passcreatezone(periph); if (error != 0) goto bailout; } /* * We're going to do a blocking allocation for this I/O * request, so we have to drop the lock. */ cam_periph_unlock(periph); io_req = uma_zalloc(softc->pass_zone, M_WAITOK | M_ZERO); ccb = &io_req->ccb; user_ccb = (union ccb **)addr; /* * Unlike the CAMIOCOMMAND ioctl above, we only have a * pointer to the user's CCB, so we have to copy the whole * thing in to a buffer we have allocated (above) instead * of allowing the ioctl code to malloc a buffer and copy * it in. * * This is an advantage for this asynchronous interface, * since we don't want the memory to get freed while the * CCB is outstanding. */ #if 0 xpt_print(periph->path, "Copying user CCB %p to " "kernel address %p\n", *user_ccb, ccb); #endif error = copyin(*user_ccb, ccb, sizeof(*ccb)); if (error != 0) { xpt_print(periph->path, "Copy of user CCB %p to " "kernel address %p failed with error %d\n", *user_ccb, ccb, error); uma_zfree(softc->pass_zone, io_req); cam_periph_lock(periph); break; } /* * Some CCB types, like scan bus and scan lun can only go * through the transport layer device. 
*/ if (ccb->ccb_h.func_code & XPT_FC_XPT_ONLY) { xpt_print(periph->path, "CCB function code %#x is " "restricted to the XPT device\n", ccb->ccb_h.func_code); uma_zfree(softc->pass_zone, io_req); cam_periph_lock(periph); error = ENODEV; break; } /* * Save the user's CCB pointer as well as his linked list * pointers and peripheral private area so that we can * restore these later. */ io_req->user_ccb_ptr = *user_ccb; io_req->user_periph_links = ccb->ccb_h.periph_links; io_req->user_periph_priv = ccb->ccb_h.periph_priv; /* * Now that we've saved the user's values, we can set our * own peripheral private entry. */ ccb->ccb_h.ccb_ioreq = io_req; /* Compatibility for RL/priority-unaware code. */ priority = ccb->ccb_h.pinfo.priority; if (priority <= CAM_PRIORITY_OOB) priority += CAM_PRIORITY_OOB + 1; /* * Setup fields in the CCB like the path and the priority. * The path in particular cannot be done in userland, since * it is a pointer to a kernel data structure. */ xpt_setup_ccb_flags(&ccb->ccb_h, periph->path, priority, ccb->ccb_h.flags); /* * Setup our done routine. There is no way for the user to * have a valid pointer here. */ ccb->ccb_h.cbfcnp = passdone; fc = ccb->ccb_h.func_code; /* * If this function code has memory that can be mapped in * or out, we need to call passmemsetup(). */ if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO)) { error = passmemsetup(periph, io_req); if (error != 0) { uma_zfree(softc->pass_zone, io_req); cam_periph_lock(periph); break; } } else io_req->mapinfo.num_bufs_used = 0; cam_periph_lock(periph); /* * Everything goes on the incoming queue initially. */ TAILQ_INSERT_TAIL(&softc->incoming_queue, io_req, links); /* * If the CCB is queued, and is not a user CCB, then * we need to allocate a slot for it. Call xpt_schedule() * so that our start routine will get called when a CCB is * available. 
*/ if ((fc & XPT_FC_QUEUED) && ((fc & XPT_FC_USER_CCB) == 0)) { xpt_schedule(periph, priority); break; } /* * At this point, the CCB in question is either an * immediate CCB (like XPT_DEV_ADVINFO) or it is a user CCB * and therefore should be malloced, not allocated via a slot. * Remove the CCB from the incoming queue and add it to the * active queue. */ TAILQ_REMOVE(&softc->incoming_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->active_queue, io_req, links); xpt_action(ccb); /* * If this is not a queued CCB (i.e. it is an immediate CCB), * then it is already done. We need to put it on the done * queue for the user to fetch. */ if ((fc & XPT_FC_QUEUED) == 0) { TAILQ_REMOVE(&softc->active_queue, io_req, links); TAILQ_INSERT_TAIL(&softc->done_queue, io_req, links); } break; } case CAMIOGET: { union ccb **user_ccb; struct pass_io_req *io_req; int old_error; user_ccb = (union ccb **)addr; old_error = 0; io_req = TAILQ_FIRST(&softc->done_queue); if (io_req == NULL) { error = ENOENT; break; } /* * Remove the I/O from the done queue. */ TAILQ_REMOVE(&softc->done_queue, io_req, links); /* * We have to drop the lock during the copyout because the * copyout can result in VM faults that require sleeping. */ cam_periph_unlock(periph); /* * Do any needed copies (e.g. for reads) and revert the * pointers in the CCB back to the user's pointers. */ error = passmemdone(periph, io_req); old_error = error; io_req->ccb.ccb_h.periph_links = io_req->user_periph_links; io_req->ccb.ccb_h.periph_priv = io_req->user_periph_priv; #if 0 xpt_print(periph->path, "Copying to user CCB %p from " "kernel address %p\n", *user_ccb, &io_req->ccb); #endif error = copyout(&io_req->ccb, *user_ccb, sizeof(union ccb)); if (error != 0) { xpt_print(periph->path, "Copy to user CCB %p from " "kernel address %p failed with error %d\n", *user_ccb, &io_req->ccb, error); } /* * Prefer the first error we got back, and make sure we * don't overwrite bad status with good. 
*/ if (old_error != 0) error = old_error; cam_periph_lock(periph); /* * At this point, if there was an error, we could potentially * re-queue the I/O and try again. But why? The error * would almost certainly happen again. We might as well * not leak memory. */ uma_zfree(softc->pass_zone, io_req); break; } default: error = cam_periph_ioctl(periph, cmd, addr, passerror); break; } bailout: cam_periph_unlock(periph); return(error); } static int passpoll(struct cdev *dev, int poll_events, struct thread *td) { struct cam_periph *periph; struct pass_softc *softc; int revents; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - softc = (struct pass_softc *)periph->softc; revents = poll_events & (POLLOUT | POLLWRNORM); if ((poll_events & (POLLIN | POLLRDNORM)) != 0) { cam_periph_lock(periph); if (!TAILQ_EMPTY(&softc->done_queue)) { revents |= poll_events & (POLLIN | POLLRDNORM); } cam_periph_unlock(periph); if (revents == 0) selrecord(td, &softc->read_select); } return (revents); } static int passkqfilter(struct cdev *dev, struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - softc = (struct pass_softc *)periph->softc; kn->kn_hook = (caddr_t)periph; kn->kn_fop = &passread_filtops; knlist_add(&softc->read_select.si_note, kn, 0); return (0); } static void passreadfiltdetach(struct knote *kn) { struct cam_periph *periph; struct pass_softc *softc; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; knlist_remove(&softc->read_select.si_note, kn, 0); } static int passreadfilt(struct knote *kn, long hint) { struct cam_periph *periph; struct pass_softc *softc; int retval; periph = (struct cam_periph *)kn->kn_hook; softc = (struct pass_softc *)periph->softc; cam_periph_assert(periph, MA_OWNED); if (TAILQ_EMPTY(&softc->done_queue)) retval = 0; else retval = 1; return (retval); } /* * Generally, "ccb" should be 
the CCB supplied by the kernel. "inccb" * should be the CCB that is copied in from the user. */ static int passsendccb(struct cam_periph *periph, union ccb *ccb, union ccb *inccb) { struct pass_softc *softc; struct cam_periph_map_info mapinfo; xpt_opcode fc; int error; softc = (struct pass_softc *)periph->softc; /* * There are some fields in the CCB header that need to be * preserved, the rest we get from the user. */ xpt_merge_ccb(ccb, inccb); /* */ ccb->ccb_h.cbfcnp = passdone; /* * Let cam_periph_mapmem do a sanity check on the data pointer format. * Even if no data transfer is needed, it's a cheap check and it * simplifies the code. */ fc = ccb->ccb_h.func_code; if ((fc == XPT_SCSI_IO) || (fc == XPT_ATA_IO) || (fc == XPT_SMP_IO) || (fc == XPT_DEV_MATCH) || (fc == XPT_DEV_ADVINFO)) { bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. */ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); /* * cam_periph_mapmem returned an error, we can't continue. * Return the error to the user. */ if (error) return(error); } else /* Ensure that the unmap call later on is a no-op. */ mapinfo.num_bufs_used = 0; /* * If the user wants us to perform any error recovery, then honor * that request. Otherwise, it's up to the user to perform any * error recovery. */ cam_periph_runccb(ccb, passerror, /* cam_flags */ CAM_RETRY_SELTO, /* sense_flags */ ((ccb->ccb_h.flags & CAM_PASS_ERR_RECOVER) ? 
SF_RETRY_UA : SF_NO_RECOVERY) | SF_NO_PRINT, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); ccb->ccb_h.cbfcnp = NULL; ccb->ccb_h.periph_priv = inccb->ccb_h.periph_priv; bcopy(ccb, inccb, sizeof(union ccb)); return(0); } static int passerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct cam_periph *periph; struct pass_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pass_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } Index: head/sys/cam/scsi/scsi_pt.c =================================================================== --- head/sys/cam/scsi/scsi_pt.c (revision 293349) +++ head/sys/cam/scsi/scsi_pt.c (revision 293350) @@ -1,632 +1,639 @@ /*- * Implementation of SCSI Processor Target Peripheral driver for CAM. * * Copyright (c) 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_pt.h" typedef enum { PT_STATE_PROBE, PT_STATE_NORMAL } pt_state; typedef enum { PT_FLAG_NONE = 0x00, PT_FLAG_OPEN = 0x01, PT_FLAG_DEVICE_INVALID = 0x02, PT_FLAG_RETRY_UA = 0x04 } pt_flags; typedef enum { PT_CCB_BUFFER_IO = 0x01, PT_CCB_RETRY_UA = 0x04, PT_CCB_BUFFER_IO_UA = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA } pt_ccb_state; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct pt_softc { struct bio_queue_head bio_queue; struct devstat *device_stats; LIST_HEAD(, ccb_hdr) pending_ccbs; pt_state state; pt_flags flags; union ccb saved_ccb; int io_timeout; struct cdev *dev; }; static d_open_t ptopen; static d_close_t ptclose; static d_strategy_t ptstrategy; static periph_init_t ptinit; static void ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static periph_ctor_t ptctor; static periph_oninv_t ptoninvalidate; static periph_dtor_t ptdtor; static periph_start_t ptstart; static void ptdone(struct cam_periph *periph, union ccb *done_ccb); static d_ioctl_t ptioctl; static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), 
u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout); static struct periph_driver ptdriver = { ptinit, "pt", TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(pt, ptdriver); static struct cdevsw pt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ptopen, .d_close = ptclose, .d_read = physread, .d_write = physwrite, .d_ioctl = ptioctl, .d_strategy = ptstrategy, .d_name = "pt", }; #ifndef SCSI_PT_DEFAULT_TIMEOUT #define SCSI_PT_DEFAULT_TIMEOUT 60 #endif static int ptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); if (softc->flags & PT_FLAG_DEVICE_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return(ENXIO); } if ((softc->flags & PT_FLAG_OPEN) == 0) softc->flags |= PT_FLAG_OPEN; else { error = EBUSY; cam_periph_release(periph); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptopen: dev=%s\n", devtoname(dev))); cam_periph_unlock(periph); return (error); } static int ptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); softc->flags &= ~PT_FLAG_OPEN; cam_periph_release_locked(periph); cam_periph_unlock(periph); return (0); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. 
*/ static void ptstrategy(struct bio *bp) { struct cam_periph *periph; struct pt_softc *softc; periph = (struct cam_periph *)bp->bio_dev->si_drv1; bp->bio_resid = bp->bio_bcount; if (periph == NULL) { biofinish(bp, NULL, ENXIO); return; } cam_periph_lock(periph); softc = (struct pt_softc *)periph->softc; /* * If the device has been made invalid, error out */ if ((softc->flags & PT_FLAG_DEVICE_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } /* * Place it in the queue of disk activities for this disk */ bioq_insert_tail(&softc->bio_queue, bp); /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static void ptinit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, ptasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("pt: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static cam_status ptctor(struct cam_periph *periph, void *arg) { struct pt_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; + struct make_dev_args args; + int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("ptregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct pt_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); if (softc == NULL) { printf("daregister: Unable to probe new device. 
" "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } bzero(softc, sizeof(*softc)); LIST_INIT(&softc->pending_ccbs); softc->state = PT_STATE_NORMAL; bioq_init(&softc->bio_queue); softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000; periph->softc = softc; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); cam_periph_unlock(periph); + + make_dev_args_init(&args); + args.mda_devsw = &pt_cdevsw; + args.mda_unit = periph->unit_number; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = periph; + error = make_dev_s(&args, &softc->dev, "%s%d", periph->periph_name, + periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + return (CAM_REQ_CMP_ERR); + } + softc->device_stats = devstat_new_entry("pt", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_OTHER); - softc->dev = make_dev(&pt_cdevsw, periph->unit_number, UID_ROOT, - GID_OPERATOR, 0600, "%s%d", periph->periph_name, - periph->unit_number); cam_periph_lock(periph); - softc->dev->si_drv1 = periph; /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE, ptasync, periph, periph->path); /* Tell the user we've attached to the device */ xpt_announce_periph(periph, NULL); return(CAM_REQ_CMP); } static void ptoninvalidate(struct cam_periph *periph) { struct pt_softc *softc; softc = (struct pt_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, ptasync, periph, periph->path); softc->flags |= PT_FLAG_DEVICE_INVALID; /* * Return all queued I/O with ENXIO. 
* XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); } static void ptdtor(struct cam_periph *periph) { struct pt_softc *softc; softc = (struct pt_softc *)periph->softc; devstat_remove_entry(softc->device_stats); cam_periph_unlock(periph); destroy_dev(softc->dev); cam_periph_lock(periph); free(softc, M_DEVBUF); } static void ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_PROCESSOR) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor, ptstart, "pt", CAM_PERIPH_BIO, path, ptasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("ptasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_SENT_BDR: case AC_BUS_RESET: { struct pt_softc *softc; struct ccb_hdr *ccbh; softc = (struct pt_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. */ softc->flags |= PT_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= PT_CCB_RETRY_UA; } /* FALLTHROUGH */ default: cam_periph_async(periph, code, path, arg); break; } } static void ptstart(struct cam_periph *periph, union ccb *start_ccb) { struct pt_softc *softc; struct bio *bp; softc = (struct pt_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptstart\n")); /* * See if there is a buf with work for us to do.. 
*/ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); } else { bioq_remove(&softc->bio_queue, bp); devstat_start_transaction_bio(softc->device_stats, bp); scsi_send_receive(&start_ccb->csio, /*retries*/4, ptdone, MSG_SIMPLE_Q_TAG, bp->bio_cmd == BIO_READ, /*byte2*/0, bp->bio_bcount, bp->bio_data, /*sense_len*/SSD_FULL_SIZE, /*timeout*/softc->io_timeout); start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO_UA; /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } } } static void ptdone(struct cam_periph *periph, union ccb *done_ccb) { struct pt_softc *softc; struct ccb_scsiio *csio; softc = (struct pt_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("ptdone\n")); csio = &done_ccb->csio; switch (csio->ccb_h.ccb_state) { case PT_CCB_BUFFER_IO: case PT_CCB_BUFFER_IO_UA: { struct bio *bp; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = pterror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheuled, so * just return. */ return; } if (error != 0) { if (error == ENXIO) { /* * Catastrophic error. Mark our device * as invalid. */ xpt_print(periph->path, "Invalidating device\n"); softc->flags |= PT_FLAG_DEVICE_INVALID; } /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. 
*/ bioq_flush(&softc->bio_queue, NULL, EIO); bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) { /* Short transfer ??? */ bp->bio_flags |= BIO_ERROR; } } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else { bp->bio_resid = csio->resid; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } /* * Block out any asynchronous callbacks * while we touch the pending ccb list. */ LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); biofinish(bp, softc->device_stats, 0); break; } } xpt_release_ccb(done_ccb); } static int pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct pt_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct pt_softc *)periph->softc; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static int ptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct cam_periph *periph; struct pt_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return(ENXIO); - softc = (struct pt_softc *)periph->softc; cam_periph_lock(periph); switch(cmd) { case PTIOCGETTIMEOUT: if (softc->io_timeout >= 1000) *(int *)addr = softc->io_timeout / 1000; else *(int *)addr = 0; break; case PTIOCSETTIMEOUT: if (*(int *)addr < 1) { error = EINVAL; break; } softc->io_timeout = *(int *)addr * 1000; break; default: error = cam_periph_ioctl(periph, cmd, addr, pterror); break; } cam_periph_unlock(periph); return(error); } void scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int tag_action, int readop, u_int byte2, u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len, u_int32_t timeout) { struct scsi_send_receive *scsi_cmd; scsi_cmd = 
(struct scsi_send_receive *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = readop ? RECEIVE : SEND; scsi_cmd->byte2 = byte2; scsi_ulto3b(xfer_len, scsi_cmd->xfer_len); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT, tag_action, data_ptr, xfer_len, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_sa.c =================================================================== --- head/sys/cam/scsi/scsi_sa.c (revision 293349) +++ head/sys/cam/scsi/scsi_sa.c (revision 293350) @@ -1,5870 +1,5885 @@ /*- * Implementation of SCSI Sequential Access Peripheral driver for CAM. * * Copyright (c) 1999, 2000 Matthew Jacob * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #ifdef _KERNEL #include #include #endif #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #endif #include #include #ifndef _KERNEL #include #include #endif #include #include #include #include #include #include #include #include #ifdef _KERNEL #include #ifndef SA_IO_TIMEOUT #define SA_IO_TIMEOUT 32 #endif #ifndef SA_SPACE_TIMEOUT #define SA_SPACE_TIMEOUT 1 * 60 #endif #ifndef SA_REWIND_TIMEOUT #define SA_REWIND_TIMEOUT 2 * 60 #endif #ifndef SA_ERASE_TIMEOUT #define SA_ERASE_TIMEOUT 4 * 60 #endif #ifndef SA_REP_DENSITY_TIMEOUT #define SA_REP_DENSITY_TIMEOUT 90 #endif #define SCSIOP_TIMEOUT (60 * 1000) /* not an option */ #define IO_TIMEOUT (SA_IO_TIMEOUT * 60 * 1000) #define REWIND_TIMEOUT (SA_REWIND_TIMEOUT * 60 * 1000) #define ERASE_TIMEOUT (SA_ERASE_TIMEOUT * 60 * 1000) #define SPACE_TIMEOUT (SA_SPACE_TIMEOUT * 60 * 1000) #define REP_DENSITY_TIMEOUT (SA_REP_DENSITY_TIMEOUT * 60 * 1000) /* * Additional options that can be set for config: SA_1FM_AT_EOT */ #ifndef UNUSED_PARAMETER #define UNUSED_PARAMETER(x) x = x #endif #define QFRLS(ccb) \ if (((ccb)->ccb_h.status & CAM_DEV_QFRZN) != 0) \ cam_release_devq((ccb)->ccb_h.path, 0, 0, 0, FALSE) /* * Driver states */ static MALLOC_DEFINE(M_SCSISA, "SCSI sa", "SCSI sequential access buffers"); typedef enum { SA_STATE_NORMAL, SA_STATE_ABNORMAL } sa_state; #define ccb_pflags ppriv_field0 #define ccb_bp 
ppriv_ptr1 /* bits in ccb_pflags */ #define SA_POSITION_UPDATED 0x1 typedef enum { SA_FLAG_OPEN = 0x0001, SA_FLAG_FIXED = 0x0002, SA_FLAG_TAPE_LOCKED = 0x0004, SA_FLAG_TAPE_MOUNTED = 0x0008, SA_FLAG_TAPE_WP = 0x0010, SA_FLAG_TAPE_WRITTEN = 0x0020, SA_FLAG_EOM_PENDING = 0x0040, SA_FLAG_EIO_PENDING = 0x0080, SA_FLAG_EOF_PENDING = 0x0100, SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING| SA_FLAG_EOF_PENDING), SA_FLAG_INVALID = 0x0200, SA_FLAG_COMP_ENABLED = 0x0400, SA_FLAG_COMP_SUPP = 0x0800, SA_FLAG_COMP_UNSUPP = 0x1000, SA_FLAG_TAPE_FROZEN = 0x2000, SA_FLAG_PROTECT_SUPP = 0x4000, SA_FLAG_COMPRESSION = (SA_FLAG_COMP_SUPP|SA_FLAG_COMP_ENABLED| SA_FLAG_COMP_UNSUPP), SA_FLAG_SCTX_INIT = 0x8000 } sa_flags; typedef enum { SA_MODE_REWIND = 0x00, SA_MODE_NOREWIND = 0x01, SA_MODE_OFFLINE = 0x02 } sa_mode; typedef enum { SA_PARAM_NONE = 0x000, SA_PARAM_BLOCKSIZE = 0x001, SA_PARAM_DENSITY = 0x002, SA_PARAM_COMPRESSION = 0x004, SA_PARAM_BUFF_MODE = 0x008, SA_PARAM_NUMBLOCKS = 0x010, SA_PARAM_WP = 0x020, SA_PARAM_SPEED = 0x040, SA_PARAM_DENSITY_EXT = 0x080, SA_PARAM_LBP = 0x100, SA_PARAM_ALL = 0x1ff } sa_params; typedef enum { SA_QUIRK_NONE = 0x000, SA_QUIRK_NOCOMP = 0x001, /* Can't deal with compression at all*/ SA_QUIRK_FIXED = 0x002, /* Force fixed mode */ SA_QUIRK_VARIABLE = 0x004, /* Force variable mode */ SA_QUIRK_2FM = 0x008, /* Needs Two File Marks at EOD */ SA_QUIRK_1FM = 0x010, /* No more than 1 File Mark at EOD */ SA_QUIRK_NODREAD = 0x020, /* Don't try and dummy read density */ SA_QUIRK_NO_MODESEL = 0x040, /* Don't do mode select at all */ SA_QUIRK_NO_CPAGE = 0x080, /* Don't use DEVICE COMPRESSION page */ SA_QUIRK_NO_LONG_POS = 0x100 /* No long position information */ } sa_quirks; #define SA_QUIRK_BIT_STRING \ "\020" \ "\001NOCOMP" \ "\002FIXED" \ "\003VARIABLE" \ "\0042FM" \ "\0051FM" \ "\006NODREAD" \ "\007NO_MODESEL" \ "\010NO_CPAGE" \ "\011NO_LONG_POS" #define SAMODE(z) (dev2unit(z) & 0x3) #define SA_IS_CTRL(z) (dev2unit(z) & (1 << 4)) #define 
SA_NOT_CTLDEV 0 #define SA_CTLDEV 1 #define SA_ATYPE_R 0 #define SA_ATYPE_NR 1 #define SA_ATYPE_ER 2 #define SA_NUM_ATYPES 3 #define SAMINOR(ctl, access) \ ((ctl << 4) | (access & 0x3)) struct sa_devs { struct cdev *ctl_dev; struct cdev *r_dev; struct cdev *nr_dev; struct cdev *er_dev; }; #define SASBADDBASE(sb, indent, data, xfmt, name, type, xsize, desc) \ sbuf_printf(sb, "%*s<%s type=\"%s\" size=\"%zd\" " \ "fmt=\"%s\" desc=\"%s\">" #xfmt "\n", indent, "", \ #name, #type, xsize, #xfmt, desc ? desc : "", data, #name); #define SASBADDINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ NULL) #define SASBADDINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, int, sizeof(data), \ desc) #define SASBADDUINT(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ NULL) #define SASBADDUINTDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, uint, sizeof(data), \ desc) #define SASBADDFIXEDSTR(sb, indent, data, fmt, name) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ NULL) #define SASBADDFIXEDSTRDESC(sb, indent, data, fmt, name, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, sizeof(data), \ desc) #define SASBADDVARSTR(sb, indent, data, fmt, name, maxlen) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, NULL) #define SASBADDVARSTRDESC(sb, indent, data, fmt, name, maxlen, desc) \ SASBADDBASE(sb, indent, data, fmt, name, str, maxlen, desc) #define SASBADDNODE(sb, indent, name) { \ sbuf_printf(sb, "%*s<%s type=\"%s\">\n", indent, "", #name, \ "node"); \ indent += 2; \ } #define SASBADDNODENUM(sb, indent, name, num) { \ sbuf_printf(sb, "%*s<%s type=\"%s\" num=\"%d\">\n", indent, "", \ #name, "node", num); \ indent += 2; \ } #define SASBENDNODE(sb, indent, name) { \ indent -= 2; \ sbuf_printf(sb, "%*s\n", indent, "", #name); \ } #define SA_DENSITY_TYPES 4 struct sa_prot_state { int initialized; uint32_t 
prot_method; uint32_t pi_length; uint32_t lbp_w; uint32_t lbp_r; uint32_t rbdp; }; struct sa_prot_info { struct sa_prot_state cur_prot_state; struct sa_prot_state pending_prot_state; }; /* * A table mapping protection parameters to their types and values. */ struct sa_prot_map { char *name; mt_param_set_type param_type; off_t offset; uint32_t min_val; uint32_t max_val; uint32_t *value; } sa_prot_table[] = { { "prot_method", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, prot_method), /*min_val*/ 0, /*max_val*/ 255, NULL }, { "pi_length", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, pi_length), /*min_val*/ 0, /*max_val*/ SA_CTRL_DP_PI_LENGTH_MASK, NULL }, { "lbp_w", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_w), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "lbp_r", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, lbp_r), /*min_val*/ 0, /*max_val*/ 1, NULL }, { "rbdp", MT_PARAM_SET_UNSIGNED, __offsetof(struct sa_prot_state, rbdp), /*min_val*/ 0, /*max_val*/ 1, NULL } }; #define SA_NUM_PROT_ENTS sizeof(sa_prot_table)/sizeof(sa_prot_table[0]) #define SA_PROT_ENABLED(softc) ((softc->flags & SA_FLAG_PROTECT_SUPP) \ && (softc->prot_info.cur_prot_state.initialized != 0) \ && (softc->prot_info.cur_prot_state.prot_method != 0)) #define SA_PROT_LEN(softc) softc->prot_info.cur_prot_state.pi_length struct sa_softc { sa_state state; sa_flags flags; sa_quirks quirks; u_int si_flags; struct cam_periph *periph; struct bio_queue_head bio_queue; int queue_count; struct devstat *device_stats; struct sa_devs devs; int open_count; int num_devs_to_destroy; int blk_gran; int blk_mask; int blk_shift; u_int32_t max_blk; u_int32_t min_blk; u_int32_t maxio; u_int32_t cpi_maxio; int allow_io_split; u_int32_t comp_algorithm; u_int32_t saved_comp_algorithm; u_int32_t media_blksize; u_int32_t last_media_blksize; u_int32_t media_numblks; u_int8_t media_density; u_int8_t speed; u_int8_t scsi_rev; u_int8_t dsreg; /* mtio mt_dsreg, redux */ int buffer_mode; int 
filemarks; union ccb saved_ccb; int last_resid_was_io; uint8_t density_type_bits[SA_DENSITY_TYPES]; int density_info_valid[SA_DENSITY_TYPES]; uint8_t density_info[SA_DENSITY_TYPES][SRDS_MAX_LENGTH]; struct sa_prot_info prot_info; int sili; int eot_warn; /* * Current position information. -1 means that the given value is * unknown. fileno and blkno are always calculated. blkno is * relative to the previous file mark. rep_fileno and rep_blkno * are as reported by the drive, if it supports the long form * report for the READ POSITION command. rep_blkno is relative to * the beginning of the partition. * * bop means that the drive is at the beginning of the partition. * eop means that the drive is between early warning and end of * partition, inside the current partition. * bpew means that the position is in a PEWZ (Programmable Early * Warning Zone) */ daddr_t partition; /* Absolute from BOT */ daddr_t fileno; /* Relative to beginning of partition */ daddr_t blkno; /* Relative to last file mark */ daddr_t rep_blkno; /* Relative to beginning of partition */ daddr_t rep_fileno; /* Relative to beginning of partition */ int bop; /* Beginning of Partition */ int eop; /* End of Partition */ int bpew; /* Beyond Programmable Early Warning */ /* * Latched Error Info */ struct { struct scsi_sense_data _last_io_sense; u_int64_t _last_io_resid; u_int8_t _last_io_cdb[CAM_MAX_CDBLEN]; struct scsi_sense_data _last_ctl_sense; u_int64_t _last_ctl_resid; u_int8_t _last_ctl_cdb[CAM_MAX_CDBLEN]; #define last_io_sense errinfo._last_io_sense #define last_io_resid errinfo._last_io_resid #define last_io_cdb errinfo._last_io_cdb #define last_ctl_sense errinfo._last_ctl_sense #define last_ctl_resid errinfo._last_ctl_resid #define last_ctl_cdb errinfo._last_ctl_cdb } errinfo; /* * Misc other flags/state */ u_int32_t : 29, open_rdonly : 1, /* open read-only */ open_pending_mount : 1, /* open pending mount */ ctrl_mode : 1; /* control device open */ struct task sysctl_task; struct sysctl_ctx_list 
sysctl_ctx; struct sysctl_oid *sysctl_tree; }; struct sa_quirk_entry { struct scsi_inquiry_pattern inq_pat; /* matching pattern */ sa_quirks quirks; /* specific quirk type */ u_int32_t prefblk; /* preferred blocksize when in fixed mode */ }; static struct sa_quirk_entry sa_quirk_table[] = { { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "OnStream", "ADR*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_NODREAD | SA_QUIRK_1FM|SA_QUIRK_NO_MODESEL, 32768 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 06408*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python 25601*", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "Python*", "*"}, SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 150*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525 25462", "-011"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM|SA_QUIRK_NODREAD, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE", "VIPER 2525*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 }, #if 0 { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C15*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_NO_CPAGE, 0, }, #endif { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "C56*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T20*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "T4000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "HP", "HP-88780*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", "*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "M4 DATA", "123107 SCSI*", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { /* jreynold@primenet.com */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT8000N*", "*"}, SA_QUIRK_1FM, 0 }, { /* mike@sentex.net */ { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "Seagate", "STT20000*", "*"}, 
SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "SEAGATE", "DAT 06241-XXX", "*"}, SA_QUIRK_VARIABLE|SA_QUIRK_2FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3600", "U07:"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 3800", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4100", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " TDC 4200", "*"}, SA_QUIRK_NOCOMP|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG", " SLR*", "*"}, SA_QUIRK_1FM, 0 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "5525ES*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 512 }, { { T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "WANGTEK", "51000*", "*"}, SA_QUIRK_FIXED|SA_QUIRK_1FM, 1024 } }; static d_open_t saopen; static d_close_t saclose; static d_strategy_t sastrategy; static d_ioctl_t saioctl; static periph_init_t sainit; static periph_ctor_t saregister; static periph_oninv_t saoninvalidate; static periph_dtor_t sacleanup; static periph_start_t sastart; static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void sadone(struct cam_periph *periph, union ccb *start_ccb); static int saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static int samarkswanted(struct cam_periph *); static int sacheckeod(struct cam_periph *periph); static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *comp_page, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable); static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot); static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t 
blocksize, u_int8_t density, u_int32_t comp_algorithm, u_int32_t sense_flags); static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params); static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params); static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb); static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents); static struct sa_prot_map *safindprotent(char *name, struct sa_prot_map *table, int table_ents); static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params); static struct sa_param_ent *safindparament(struct mtparamset *ps); static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy); static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g); static int saparamget(struct sa_softc *softc, struct sbuf *sb); static void saprevent(struct cam_periph *periph, int action); static int sarewind(struct cam_periph *periph); static int saspace(struct cam_periph *periph, int count, scsi_space_code code); static void sadevgonecb(void *arg); static void sasetupdev(struct sa_softc *softc, struct cdev *dev); static int samount(struct cam_periph *, int, struct cdev *); static int saretension(struct cam_periph *periph); static int sareservereleaseunit(struct cam_periph *periph, int reserve); static int saloadunload(struct cam_periph *periph, int load); static int saerase(struct cam_periph *periph, int longerase); static int sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed); static int sagetpos(struct cam_periph *periph); static int sardpos(struct cam_periph *periph, int, u_int32_t *); static int sasetpos(struct cam_periph *periph, int, struct mtlocate *); static void safilldenstypesb(struct sbuf *sb, int *indent, uint8_t *buf, int buf_len, int is_density); static void safilldensitysb(struct sa_softc *softc, int 
*indent, struct sbuf *sb); #ifndef SA_DEFAULT_IO_SPLIT #define SA_DEFAULT_IO_SPLIT 0 #endif static int sa_allow_io_split = SA_DEFAULT_IO_SPLIT; /* * Tunable to allow the user to set a global allow_io_split value. Note * that this WILL GO AWAY in FreeBSD 11.0. Silently splitting the I/O up * is bad behavior, because it hides the true tape block size from the * application. */ static SYSCTL_NODE(_kern_cam, OID_AUTO, sa, CTLFLAG_RD, 0, "CAM Sequential Access Tape Driver"); SYSCTL_INT(_kern_cam_sa, OID_AUTO, allow_io_split, CTLFLAG_RDTUN, &sa_allow_io_split, 0, "Default I/O split value"); static struct periph_driver sadriver = { sainit, "sa", TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(sa, sadriver); /* For 2.2-stable support */ #ifndef D_TAPE #define D_TAPE 0 #endif static struct cdevsw sa_cdevsw = { .d_version = D_VERSION, .d_open = saopen, .d_close = saclose, .d_read = physread, .d_write = physwrite, .d_ioctl = saioctl, .d_strategy = sastrategy, .d_name = "sa", .d_flags = D_TAPE | D_TRACKCLOSE, }; static int saopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int error; periph = (struct cam_periph *)dev->si_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saopen(%s): softc=0x%x\n", devtoname(dev), softc->flags)); if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 1; softc->open_count++; cam_periph_unlock(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } if (softc->flags & SA_FLAG_OPEN) { error = EBUSY; } else if (softc->flags & SA_FLAG_INVALID) { error = ENXIO; } else { /* * Preserve whether this is a read_only open. 
*/ softc->open_rdonly = (flags & O_RDWR) == O_RDONLY; /* * The function samount ensures media is loaded and ready. * It also does a device RESERVE if the tape isn't yet mounted. * * If the mount fails and this was a non-blocking open, * make this a 'open_pending_mount' action. */ error = samount(periph, flags, dev); if (error && (flags & O_NONBLOCK)) { softc->flags |= SA_FLAG_OPEN; softc->open_pending_mount = 1; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } } if (error) { cam_periph_unhold(periph); cam_periph_unlock(periph); cam_periph_release(periph); return (error); } saprevent(periph, PR_PREVENT); softc->flags |= SA_FLAG_OPEN; softc->open_count++; cam_periph_unhold(periph); cam_periph_unlock(periph); return (error); } static int saclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; int mode, error, writing, tmp, i; int closedbits = SA_FLAG_OPEN; mode = SAMODE(dev); periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE|CAM_DEBUG_INFO, ("saclose(%s): softc=0x%x\n", devtoname(dev), softc->flags)); softc->open_rdonly = 0; if (SA_IS_CTRL(dev)) { softc->ctrl_mode = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if (softc->open_pending_mount) { softc->flags &= ~SA_FLAG_OPEN; softc->open_pending_mount = 0; softc->open_count--; cam_periph_unlock(periph); cam_periph_release(periph); return (0); } if ((error = cam_periph_hold(periph, PRIBIO)) != 0) { cam_periph_unlock(periph); return (error); } /* * Were we writing the tape? */ writing = (softc->flags & SA_FLAG_TAPE_WRITTEN) != 0; /* * See whether or not we need to write filemarks. If this * fails, we probably have to assume we've lost tape * position. 
*/ error = sacheckeod(periph); if (error) { xpt_print(periph->path, "failed to write terminating filemark(s)\n"); softc->flags |= SA_FLAG_TAPE_FROZEN; } /* * Whatever we end up doing, allow users to eject tapes from here on. */ saprevent(periph, PR_ALLOW); /* * Decide how to end... */ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { closedbits |= SA_FLAG_TAPE_FROZEN; } else switch (mode) { case SA_MODE_OFFLINE: /* * An 'offline' close is an unconditional release of * frozen && mount conditions, irrespective of whether * these operations succeeded. The reason for this is * to allow at least some kind of programmatic way * around our state getting all fouled up. If somebody * issues an 'offline' command, that will be allowed * to clear state. */ (void) sarewind(periph); (void) saloadunload(periph, FALSE); closedbits |= SA_FLAG_TAPE_MOUNTED|SA_FLAG_TAPE_FROZEN; break; case SA_MODE_REWIND: /* * If the rewind fails, return an error- if anyone cares, * but not overwriting any previous error. * * We don't clear the notion of mounted here, but we do * clear the notion of frozen if we successfully rewound. */ tmp = sarewind(periph); if (tmp) { if (error != 0) error = tmp; } else { closedbits |= SA_FLAG_TAPE_FROZEN; } break; case SA_MODE_NOREWIND: /* * If we're not rewinding/unloading the tape, find out * whether we need to back up over one of two filemarks * we wrote (if we wrote two filemarks) so that appends * from this point on will be sane. 
*/ if (error == 0 && writing && (softc->quirks & SA_QUIRK_2FM)) { tmp = saspace(periph, -1, SS_FILEMARKS); if (tmp) { xpt_print(periph->path, "unable to backspace " "over one of double filemarks at end of " "tape\n"); xpt_print(periph->path, "it is possible that " "this device needs a SA_QUIRK_1FM quirk set" "for it\n"); softc->flags |= SA_FLAG_TAPE_FROZEN; } } break; default: xpt_print(periph->path, "unknown mode 0x%x in saclose\n", mode); /* NOTREACHED */ break; } /* * We wish to note here that there are no more filemarks to be written. */ softc->filemarks = 0; softc->flags &= ~SA_FLAG_TAPE_WRITTEN; /* * And we are no longer open for business. */ softc->flags &= ~closedbits; softc->open_count--; /* * Invalidate any density information that depends on having tape * media in the drive. */ for (i = 0; i < SA_DENSITY_TYPES; i++) { if (softc->density_type_bits[i] & SRDS_MEDIA) softc->density_info_valid[i] = 0; } /* * Inform users if tape state if frozen.... */ if (softc->flags & SA_FLAG_TAPE_FROZEN) { xpt_print(periph->path, "tape is now frozen- use an OFFLINE, " "REWIND or MTEOM command to clear this state.\n"); } /* release the device if it is no longer mounted */ if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) sareservereleaseunit(periph, FALSE); cam_periph_unhold(periph); cam_periph_unlock(periph); cam_periph_release(periph); return (error); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. 
*/ static void sastrategy(struct bio *bp) { struct cam_periph *periph; struct sa_softc *softc; bp->bio_resid = bp->bio_bcount; if (SA_IS_CTRL(bp->bio_dev)) { biofinish(bp, NULL, EINVAL); return; } periph = (struct cam_periph *)bp->bio_dev->si_drv1; - if (periph == NULL) { - biofinish(bp, NULL, ENXIO); - return; - } cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; if (softc->flags & SA_FLAG_INVALID) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } if (softc->flags & SA_FLAG_TAPE_FROZEN) { cam_periph_unlock(periph); biofinish(bp, NULL, EPERM); return; } /* * This should actually never occur as the write(2) * system call traps attempts to write to a read-only * file descriptor. */ if (bp->bio_cmd == BIO_WRITE && softc->open_rdonly) { cam_periph_unlock(periph); biofinish(bp, NULL, EBADF); return; } if (softc->open_pending_mount) { int error = samount(periph, 0, bp->bio_dev); if (error) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } saprevent(periph, PR_PREVENT); softc->open_pending_mount = 0; } /* * If it's a null transfer, return immediately */ if (bp->bio_bcount == 0) { cam_periph_unlock(periph); biodone(bp); return; } /* valid request? */ if (softc->flags & SA_FLAG_FIXED) { /* * Fixed block device. The byte count must * be a multiple of our block size. */ if (((softc->blk_mask != ~0) && ((bp->bio_bcount & softc->blk_mask) != 0)) || ((softc->blk_mask == ~0) && ((bp->bio_bcount % softc->min_blk) != 0))) { xpt_print(periph->path, "Invalid request. Fixed block " "device requests must be a multiple of %d bytes\n", softc->min_blk); cam_periph_unlock(periph); biofinish(bp, NULL, EINVAL); return; } } else if ((bp->bio_bcount > softc->max_blk) || (bp->bio_bcount < softc->min_blk) || (bp->bio_bcount & softc->blk_mask) != 0) { xpt_print_path(periph->path); printf("Invalid request. 
Variable block " "device requests must be "); if (softc->blk_mask != 0) { printf("a multiple of %d ", (0x1 << softc->blk_gran)); } printf("between %d and %d bytes\n", softc->min_blk, softc->max_blk); cam_periph_unlock(periph); biofinish(bp, NULL, EINVAL); return; } /* * Place it at the end of the queue. */ bioq_insert_tail(&softc->bio_queue, bp); softc->queue_count++; #if 0 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queuing a %ld %s byte %s\n", bp->bio_bcount, (softc->flags & SA_FLAG_FIXED)? "fixed" : "variable", (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif if (softc->queue_count > 1) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastrategy: queue count now %d\n", softc->queue_count)); } /* * Schedule ourselves for performing the work. */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); cam_periph_unlock(periph); return; } static int sasetsili(struct cam_periph *periph, struct mtparamset *ps, int num_params) { uint32_t sili_blocksize; struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "sili is a signed parameter"); goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid sili value %jd", (intmax_t)ps->value.value_signed); goto bailout_error; } /* * We only set the SILI flag in variable block * mode. You'll get a check condition in fixed * block mode if things don't line up in any case. 
*/ if (softc->flags & SA_FLAG_FIXED) { snprintf(ps->error_str, sizeof(ps->error_str), "can't set sili bit in fixed block mode"); goto bailout_error; } if (softc->sili == ps->value.value_signed) goto bailout; if (ps->value.value_signed == 1) sili_blocksize = 4; else sili_blocksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, sili_blocksize, 0, 0, SF_QUIET_IR); if (error != 0) { snprintf(ps->error_str, sizeof(ps->error_str), "sasetparams() returned error %d", error); goto bailout_error; } softc->sili = ps->value.value_signed; bailout: ps->status = MT_PARAM_STATUS_OK; return (error); bailout_error: ps->status = MT_PARAM_STATUS_ERROR; if (error == 0) error = EINVAL; return (error); } static int saseteotwarn(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; int error; error = 0; softc = (struct sa_softc *)periph->softc; if (ps->value_type != MT_PARAM_SET_SIGNED) { snprintf(ps->error_str, sizeof(ps->error_str), "eot_warn is a signed parameter"); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } if ((ps->value.value_signed < 0) || (ps->value.value_signed > 1)) { snprintf(ps->error_str, sizeof(ps->error_str), "invalid eot_warn value %jd\n", (intmax_t)ps->value.value_signed); ps->status = MT_PARAM_STATUS_ERROR; goto bailout; } softc->eot_warn = ps->value.value_signed; ps->status = MT_PARAM_STATUS_OK; bailout: if (ps->status != MT_PARAM_STATUS_OK) error = EINVAL; return (error); } static void safillprot(struct sa_softc *softc, int *indent, struct sbuf *sb) { int tmpint; SASBADDNODE(sb, *indent, protection); if (softc->flags & SA_FLAG_PROTECT_SUPP) tmpint = 1; else tmpint = 0; SASBADDINTDESC(sb, *indent, tmpint, %d, protection_supported, "Set to 1 if protection information is supported"); if ((tmpint != 0) && (softc->prot_info.cur_prot_state.initialized != 0)) { struct sa_prot_state *prot; prot = &softc->prot_info.cur_prot_state; SASBADDUINTDESC(sb, *indent, prot->prot_method, %u, prot_method, "Current Protection Method"); 
SASBADDUINTDESC(sb, *indent, prot->pi_length, %u, pi_length, "Length of Protection Information"); SASBADDUINTDESC(sb, *indent, prot->lbp_w, %u, lbp_w, "Check Protection on Writes"); SASBADDUINTDESC(sb, *indent, prot->lbp_r, %u, lbp_r, "Check and Include Protection on Reads"); SASBADDUINTDESC(sb, *indent, prot->rbdp, %u, rbdp, "Transfer Protection Information for RECOVER " "BUFFERED DATA command"); } SASBENDNODE(sb, *indent, protection); } static void sapopulateprots(struct sa_prot_state *cur_state, struct sa_prot_map *new_table, int table_ents) { int i; bcopy(sa_prot_table, new_table, min(table_ents * sizeof(*new_table), sizeof(sa_prot_table))); table_ents = min(table_ents, SA_NUM_PROT_ENTS); for (i = 0; i < table_ents; i++) new_table[i].value = (uint32_t *)((uint8_t *)cur_state + new_table[i].offset); return; } static struct sa_prot_map * safindprotent(char *name, struct sa_prot_map *table, int table_ents) { char *prot_name = "protection."; int i, prot_len; prot_len = strlen(prot_name); /* * This shouldn't happen, but we check just in case. */ if (strncmp(name, prot_name, prot_len) != 0) goto bailout; for (i = 0; i < table_ents; i++) { if (strcmp(&name[prot_len], table[i].name) != 0) continue; return (&table[i]); } bailout: return (NULL); } static int sasetprotents(struct cam_periph *periph, struct mtparamset *ps, int num_params) { struct sa_softc *softc; struct sa_prot_map prot_ents[SA_NUM_PROT_ENTS]; struct sa_prot_state new_state; int error; int i; softc = (struct sa_softc *)periph->softc; error = 0; /* * Make sure that this tape drive supports protection information. * Otherwise we can't set anything. 
*/ if ((softc->flags & SA_FLAG_PROTECT_SUPP) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information is not supported for this device"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * We can't operate with physio(9) splitting enabled, because there * is no way to insure (especially in variable block mode) that * what the user writes (with a checksum block at the end) will * make it into the sa(4) driver intact. */ if ((softc->si_flags & SI_NOSPLIT) == 0) { snprintf(ps[0].error_str, sizeof(ps[0].error_str), "Protection information cannot be enabled with I/O " "splitting"); ps[0].status = MT_PARAM_STATUS_ERROR; goto bailout; } /* * Take the current cached protection state and use that as the * basis for our new entries. */ bcopy(&softc->prot_info.cur_prot_state, &new_state, sizeof(new_state)); /* * Populate the table mapping property names to pointers into the * state structure. */ sapopulateprots(&new_state, prot_ents, SA_NUM_PROT_ENTS); /* * For each parameter the user passed in, make sure the name, type * and value are valid. 
*/ for (i = 0; i < num_params; i++) { struct sa_prot_map *ent; ent = safindprotent(ps[i].value_name, prot_ents, SA_NUM_PROT_ENTS); if (ent == NULL) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Invalid protection entry name %s", ps[i].value_name); error = EINVAL; goto bailout; } if (ent->param_type != ps[i].value_type) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Supplied type %d does not match actual type %d", ps[i].value_type, ent->param_type); error = EINVAL; goto bailout; } if ((ps[i].value.value_unsigned < ent->min_val) || (ps[i].value.value_unsigned > ent->max_val)) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Value %ju is outside valid range %u - %u", (uintmax_t)ps[i].value.value_unsigned, ent->min_val, ent->max_val); error = EINVAL; goto bailout; } *(ent->value) = ps[i].value.value_unsigned; } /* * Actually send the protection settings to the drive. */ error = sasetprot(periph, &new_state); if (error != 0) { for (i = 0; i < num_params; i++) { ps[i].status = MT_PARAM_STATUS_ERROR; snprintf(ps[i].error_str, sizeof(ps[i].error_str), "Unable to set parameter, see dmesg(8)"); } goto bailout; } /* * Let the user know that his settings were stored successfully. */ for (i = 0; i < num_params; i++) ps[i].status = MT_PARAM_STATUS_OK; bailout: return (error); } /* * Entry handlers generally only handle a single entry. Node handlers will * handle a contiguous range of parameters to set in a single call. 
*/ typedef enum { SA_PARAM_TYPE_ENTRY, SA_PARAM_TYPE_NODE } sa_param_type; struct sa_param_ent { char *name; sa_param_type param_type; int (*set_func)(struct cam_periph *periph, struct mtparamset *ps, int num_params); } sa_param_table[] = { {"sili", SA_PARAM_TYPE_ENTRY, sasetsili }, {"eot_warn", SA_PARAM_TYPE_ENTRY, saseteotwarn }, {"protection.", SA_PARAM_TYPE_NODE, sasetprotents } }; static struct sa_param_ent * safindparament(struct mtparamset *ps) { unsigned int i; for (i = 0; i < sizeof(sa_param_table) /sizeof(sa_param_table[0]); i++){ /* * For entries, we compare all of the characters. For * nodes, we only compare the first N characters. The node * handler will decode the rest. */ if (sa_param_table[i].param_type == SA_PARAM_TYPE_ENTRY) { if (strcmp(ps->value_name, sa_param_table[i].name) != 0) continue; } else { if (strncmp(ps->value_name, sa_param_table[i].name, strlen(sa_param_table[i].name)) != 0) continue; } return (&sa_param_table[i]); } return (NULL); } /* * Go through a list of parameters, coalescing contiguous parameters with * the same parent node into a single call to a set_func. */ static int saparamsetlist(struct cam_periph *periph, struct mtsetlist *list, int need_copy) { int i, contig_ents; int error; struct mtparamset *params, *first; struct sa_param_ent *first_ent; error = 0; params = NULL; if (list->num_params == 0) /* Nothing to do */ goto bailout; /* * Verify that the user has the correct structure size. */ if ((list->num_params * sizeof(struct mtparamset)) != list->param_len) { xpt_print(periph->path, "%s: length of params %d != " "sizeof(struct mtparamset) %zd * num_params %d\n", __func__, list->param_len, sizeof(struct mtparamset), list->num_params); error = EINVAL; goto bailout; } if (need_copy != 0) { /* * XXX KDM will dropping the lock cause an issue here? 
*/ cam_periph_unlock(periph); params = malloc(list->param_len, M_SCSISA, M_WAITOK | M_ZERO); error = copyin(list->params, params, list->param_len); cam_periph_lock(periph); if (error != 0) goto bailout; } else { params = list->params; } contig_ents = 0; first = NULL; first_ent = NULL; for (i = 0; i < list->num_params; i++) { struct sa_param_ent *ent; ent = safindparament(¶ms[i]); if (ent == NULL) { snprintf(params[i].error_str, sizeof(params[i].error_str), "%s: cannot find parameter %s", __func__, params[i].value_name); params[i].status = MT_PARAM_STATUS_ERROR; break; } if (first != NULL) { if (first_ent == ent) { /* * We're still in a contiguous list of * parameters that can be handled by one * node handler. */ contig_ents++; continue; } else { error = first_ent->set_func(periph, first, contig_ents); first = NULL; first_ent = NULL; contig_ents = 0; if (error != 0) { error = 0; break; } } } if (ent->param_type == SA_PARAM_TYPE_NODE) { first = ¶ms[i]; first_ent = ent; contig_ents = 1; } else { error = ent->set_func(periph, ¶ms[i], 1); if (error != 0) { error = 0; break; } } } if (first != NULL) first_ent->set_func(periph, first, contig_ents); bailout: if (need_copy != 0) { if (error != EFAULT) { cam_periph_unlock(periph); copyout(params, list->params, list->param_len); cam_periph_lock(periph); } free(params, M_SCSISA); } return (error); } static int sagetparams_common(struct cdev *dev, struct cam_periph *periph) { struct sa_softc *softc; u_int8_t write_protect; int comp_enabled, comp_supported, error; softc = (struct sa_softc *)periph->softc; if (softc->open_pending_mount) return (0); /* The control device may issue getparams() if there are no opens. 
*/ if (SA_IS_CTRL(dev) && (softc->flags & SA_FLAG_OPEN) != 0) return (0); error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error) return (error); if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; else softc->flags &= ~SA_FLAG_TAPE_WP; softc->flags &= ~SA_FLAG_COMPRESSION; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; return (0); } #define PENDING_MOUNT_CHECK(softc, periph, dev) \ if (softc->open_pending_mount) { \ error = samount(periph, 0, dev); \ if (error) { \ break; \ } \ saprevent(periph, PR_PREVENT); \ softc->open_pending_mount = 0; \ } static int saioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct cam_periph *periph; struct sa_softc *softc; scsi_space_code spaceop; int didlockperiph = 0; int mode; int error = 0; mode = SAMODE(dev); error = 0; /* shut up gcc */ spaceop = 0; /* shut up gcc */ periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - cam_periph_lock(periph); softc = (struct sa_softc *)periph->softc; /* * Check for control mode accesses. We allow MTIOCGET and * MTIOCERRSTAT (but need to be the only one open in order * to clear latched status), and MTSETBSIZE, MTSETDNSTY * and MTCOMP (but need to be the only one accessing this * device to run those). */ if (SA_IS_CTRL(dev)) { switch (cmd) { case MTIOCGETEOTMODEL: case MTIOCGET: case MTIOCEXTGET: case MTIOCPARAMGET: case MTIOCRBLIM: break; case MTIOCERRSTAT: /* * If the periph isn't already locked, lock it * so our MTIOCERRSTAT can reset latched error stats. 
* * If the periph is already locked, skip it because * we're just getting status and it'll be up to the * other thread that has this device open to do * an MTIOCERRSTAT that would clear latched status. */ if ((periph->flags & CAM_PERIPH_LOCKED) == 0) { error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; } break; case MTIOCTOP: { struct mtop *mt = (struct mtop *) arg; /* * Check to make sure it's an OP we can perform * with no media inserted. */ switch (mt->mt_op) { case MTSETBSIZ: case MTSETDNSTY: case MTCOMP: mt = NULL; /* FALLTHROUGH */ default: break; } if (mt != NULL) { break; } /* FALLTHROUGH */ } case MTIOCSETEOTMODEL: /* * We need to acquire the peripheral here rather * than at open time because we are sharing writable * access to data structures. */ error = cam_periph_hold(periph, PRIBIO|PCATCH); if (error != 0) { cam_periph_unlock(periph); return (error); } didlockperiph = 1; break; default: cam_periph_unlock(periph); return (EINVAL); } } /* * Find the device that the user is talking about */ switch (cmd) { case MTIOCGET: { struct mtget *g = (struct mtget *)arg; error = sagetparams_common(dev, periph); if (error) break; bzero(g, sizeof(struct mtget)); g->mt_type = MT_ISAR; if (softc->flags & SA_FLAG_COMP_UNSUPP) { g->mt_comp = MT_COMP_UNSUPP; g->mt_comp0 = MT_COMP_UNSUPP; g->mt_comp1 = MT_COMP_UNSUPP; g->mt_comp2 = MT_COMP_UNSUPP; g->mt_comp3 = MT_COMP_UNSUPP; } else { if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) { g->mt_comp = MT_COMP_DISABLED; } else { g->mt_comp = softc->comp_algorithm; } g->mt_comp0 = softc->comp_algorithm; g->mt_comp1 = softc->comp_algorithm; g->mt_comp2 = softc->comp_algorithm; g->mt_comp3 = softc->comp_algorithm; } g->mt_density = softc->media_density; g->mt_density0 = softc->media_density; g->mt_density1 = softc->media_density; g->mt_density2 = softc->media_density; g->mt_density3 = softc->media_density; g->mt_blksiz = softc->media_blksize; 
g->mt_blksiz0 = softc->media_blksize; g->mt_blksiz1 = softc->media_blksize; g->mt_blksiz2 = softc->media_blksize; g->mt_blksiz3 = softc->media_blksize; g->mt_fileno = softc->fileno; g->mt_blkno = softc->blkno; g->mt_dsreg = (short) softc->dsreg; /* * Yes, we know that this is likely to overflow */ if (softc->last_resid_was_io) { if ((g->mt_resid = (short) softc->last_io_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_io_resid = 0; } } } else { if ((g->mt_resid = (short)softc->last_ctl_resid) != 0) { if (SA_IS_CTRL(dev) == 0 || didlockperiph) { softc->last_ctl_resid = 0; } } } error = 0; break; } case MTIOCEXTGET: case MTIOCPARAMGET: { struct mtextget *g = (struct mtextget *)arg; char *tmpstr2; struct sbuf *sb; /* * Report drive status using an XML format. */ /* * XXX KDM will dropping the lock cause any problems here? */ cam_periph_unlock(periph); sb = sbuf_new(NULL, NULL, g->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Unable to allocate %d bytes for status info", g->alloc_len); cam_periph_lock(periph); goto extget_bailout; } cam_periph_lock(periph); if (cmd == MTIOCEXTGET) error = saextget(dev, periph, sb, g); else error = saparamget(softc, sb); if (error != 0) goto extget_bailout; error = sbuf_finish(sb); if (error == ENOMEM) { g->status = MT_EXT_GET_NEED_MORE_SPACE; error = 0; } else if (error != 0) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %d returned from sbuf_finish()", error); } else g->status = MT_EXT_GET_OK; error = 0; tmpstr2 = sbuf_data(sb); g->fill_len = strlen(tmpstr2) + 1; cam_periph_unlock(periph); error = copyout(tmpstr2, g->status_xml, g->fill_len); cam_periph_lock(periph); extget_bailout: sbuf_delete(sb); break; } case MTIOCPARAMSET: { struct mtsetlist list; struct mtparamset *ps = (struct mtparamset *)arg; bzero(&list, sizeof(list)); list.num_params = 1; list.param_len = sizeof(*ps); list.params = ps; 
error = saparamsetlist(periph, &list, /*need_copy*/ 0); break; } case MTIOCSETLIST: { struct mtsetlist *list = (struct mtsetlist *)arg; error = saparamsetlist(periph, list, /*need_copy*/ 1); break; } case MTIOCERRSTAT: { struct scsi_tape_errors *sep = &((union mterrstat *)arg)->scsi_errstat; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: MTIOCERRSTAT\n")); bzero(sep, sizeof(*sep)); sep->io_resid = softc->last_io_resid; bcopy((caddr_t) &softc->last_io_sense, sep->io_sense, sizeof (sep->io_sense)); bcopy((caddr_t) &softc->last_io_cdb, sep->io_cdb, sizeof (sep->io_cdb)); sep->ctl_resid = softc->last_ctl_resid; bcopy((caddr_t) &softc->last_ctl_sense, sep->ctl_sense, sizeof (sep->ctl_sense)); bcopy((caddr_t) &softc->last_ctl_cdb, sep->ctl_cdb, sizeof (sep->ctl_cdb)); if ((SA_IS_CTRL(dev) == 0 && !softc->open_pending_mount) || didlockperiph) bzero((caddr_t) &softc->errinfo, sizeof (softc->errinfo)); error = 0; break; } case MTIOCTOP: { struct mtop *mt; int count; PENDING_MOUNT_CHECK(softc, periph, dev); mt = (struct mtop *)arg; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("saioctl: op=0x%x count=0x%x\n", mt->mt_op, mt->mt_count)); count = mt->mt_count; switch (mt->mt_op) { case MTWEOF: /* write an end-of-file marker */ /* * We don't need to clear the SA_FLAG_TAPE_WRITTEN * flag because by keeping track of filemarks * we have last written we know whether or not * we need to write more when we close the device. 
*/ error = sawritefilemarks(periph, count, FALSE, FALSE); break; case MTWEOFI: /* write an end-of-file marker without waiting */ error = sawritefilemarks(periph, count, FALSE, TRUE); break; case MTWSS: /* write a setmark */ error = sawritefilemarks(periph, count, TRUE, FALSE); break; case MTBSR: /* backward space record */ case MTFSR: /* forward space record */ case MTBSF: /* backward space file */ case MTFSF: /* forward space file */ case MTBSS: /* backward space setmark */ case MTFSS: /* forward space setmark */ case MTEOD: /* space to end of recorded medium */ { int nmarks; spaceop = SS_FILEMARKS; nmarks = softc->filemarks; error = sacheckeod(periph); if (error) { xpt_print(periph->path, "EOD check prior to spacing failed\n"); softc->flags |= SA_FLAG_EIO_PENDING; break; } nmarks -= softc->filemarks; switch(mt->mt_op) { case MTBSR: count = -count; /* FALLTHROUGH */ case MTFSR: spaceop = SS_BLOCKS; break; case MTBSF: count = -count; /* FALLTHROUGH */ case MTFSF: break; case MTBSS: count = -count; /* FALLTHROUGH */ case MTFSS: spaceop = SS_SETMARKS; break; case MTEOD: spaceop = SS_EOD; count = 0; nmarks = 0; break; default: error = EINVAL; break; } if (error) break; nmarks = softc->filemarks; /* * XXX: Why are we checking again? */ error = sacheckeod(periph); if (error) break; nmarks -= softc->filemarks; error = saspace(periph, count - nmarks, spaceop); /* * At this point, clear that we've written the tape * and that we've written any filemarks. We really * don't know what the applications wishes to do next- * the sacheckeod's will make sure we terminated the * tape correctly if we'd been writing, but the next * action the user application takes will set again * whether we need to write filemarks. 
*/ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->filemarks = 0; break; } case MTREW: /* rewind */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); error = sarewind(periph); /* see above */ softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTERASE: /* erase */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saerase(periph, count); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTRETENS: /* re-tension tape */ PENDING_MOUNT_CHECK(softc, periph, dev); error = saretension(periph); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; break; case MTOFFL: /* rewind and put the drive offline */ PENDING_MOUNT_CHECK(softc, periph, dev); (void) sacheckeod(periph); /* see above */ softc->flags &= ~SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; error = sarewind(periph); /* clear the frozen flag anyway */ softc->flags &= ~SA_FLAG_TAPE_FROZEN; /* * Be sure to allow media removal before ejecting. 
*/ saprevent(periph, PR_ALLOW); if (error == 0) { error = saloadunload(periph, FALSE); if (error == 0) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; } } break; case MTLOAD: error = saloadunload(periph, TRUE); break; case MTNOP: /* no operation, sets status only */ case MTCACHE: /* enable controller cache */ case MTNOCACHE: /* disable controller cache */ error = 0; break; case MTSETBSIZ: /* Set block size for device */ PENDING_MOUNT_CHECK(softc, periph, dev); if ((softc->sili != 0) && (count != 0)) { xpt_print(periph->path, "Can't enter fixed " "block mode with SILI enabled\n"); error = EINVAL; break; } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count, 0, 0, 0); if (error == 0) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = count; if (count) { softc->flags |= SA_FLAG_FIXED; if (powerof2(count)) { softc->blk_shift = ffs(count) - 1; softc->blk_mask = count - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; } else { softc->flags &= ~SA_FLAG_FIXED; if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } /* * Make the user's desire 'persistent'. */ softc->quirks |= SA_QUIRK_VARIABLE; softc->quirks &= ~SA_QUIRK_FIXED; } } break; case MTSETDNSTY: /* Set density for device and mode */ PENDING_MOUNT_CHECK(softc, periph, dev); if (count > UCHAR_MAX) { error = EINVAL; break; } else { error = sasetparams(periph, SA_PARAM_DENSITY, 0, count, 0, 0); } break; case MTCOMP: /* enable compression */ PENDING_MOUNT_CHECK(softc, periph, dev); /* * Some devices don't support compression, and * don't like it if you ask them for the * compression page. 
*/ if ((softc->quirks & SA_QUIRK_NOCOMP) || (softc->flags & SA_FLAG_COMP_UNSUPP)) { error = ENODEV; break; } error = sasetparams(periph, SA_PARAM_COMPRESSION, 0, 0, count, SF_NO_PRINT); break; default: error = EINVAL; } break; } case MTIOCIEOT: case MTIOCEEOT: error = 0; break; case MTIOCRDSPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 0, (u_int32_t *) arg); break; case MTIOCRDHPOS: PENDING_MOUNT_CHECK(softc, periph, dev); error = sardpos(periph, 1, (u_int32_t *) arg); break; case MTIOCSLOCATE: case MTIOCHLOCATE: { struct mtlocate locate_info; int hard; bzero(&locate_info, sizeof(locate_info)); locate_info.logical_id = *((uint32_t *)arg); if (cmd == MTIOCSLOCATE) hard = 0; else hard = 1; PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, hard, &locate_info); break; } case MTIOCEXTLOCATE: PENDING_MOUNT_CHECK(softc, periph, dev); error = sasetpos(periph, /*hard*/ 0, (struct mtlocate *)arg); softc->flags &= ~(SA_FLAG_TAPE_WRITTEN|SA_FLAG_TAPE_FROZEN); softc->flags &= ~SA_FLAG_ERR_PENDING; softc->filemarks = 0; break; case MTIOCGETEOTMODEL: error = 0; if (softc->quirks & SA_QUIRK_1FM) mode = 1; else mode = 2; *((u_int32_t *) arg) = mode; break; case MTIOCSETEOTMODEL: error = 0; switch (*((u_int32_t *) arg)) { case 1: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_1FM; break; case 2: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: error = EINVAL; break; } break; case MTIOCRBLIM: { struct mtrblim *rblim; rblim = (struct mtrblim *)arg; rblim->granularity = softc->blk_gran; rblim->min_block_length = softc->min_blk; rblim->max_block_length = softc->max_blk; break; } default: error = cam_periph_ioctl(periph, cmd, arg, saerror); break; } /* * Check to see if we cleared a frozen state */ if (error == 0 && (softc->flags & SA_FLAG_TAPE_FROZEN)) { switch(cmd) { case MTIOCRDSPOS: case MTIOCRDHPOS: case MTIOCSLOCATE: case MTIOCHLOCATE: /* * XXX KDM look at this. 
*/ softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->flags &= ~SA_FLAG_TAPE_FROZEN; xpt_print(periph->path, "tape state now unfrozen.\n"); break; default: break; } } if (didlockperiph) { cam_periph_unhold(periph); } cam_periph_unlock(periph); return (error); } static void sainit(void) { cam_status status; /* * Install a global async callback. */ status = xpt_register_async(AC_FOUND_DEVICE, saasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sa: Failed to attach master async callback " "due to status 0x%x!\n", status); } } static void sadevgonecb(void *arg) { struct cam_periph *periph; struct mtx *mtx; struct sa_softc *softc; periph = (struct cam_periph *)arg; softc = (struct sa_softc *)periph->softc; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc->num_devs_to_destroy--; if (softc->num_devs_to_destroy == 0) { int i; /* * When we have gotten all of our callbacks, we will get * no more close calls from devfs. So if we have any * dangling opens, we need to release the reference held * for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for devfs, all of our * instances are gone now. */ cam_periph_release_locked(periph); } /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void saoninvalidate(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, saasync, periph, periph->path); softc->flags |= SA_FLAG_INVALID; /* * Return all queued I/O with ENXIO. 
* XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ bioq_flush(&softc->bio_queue, NULL, ENXIO); softc->queue_count = 0; /* * Tell devfs that all of our devices have gone away, and ask for a * callback when it has cleaned up its state. */ destroy_dev_sched_cb(softc->devs.ctl_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.r_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.nr_dev, sadevgonecb, periph); destroy_dev_sched_cb(softc->devs.er_dev, sadevgonecb, periph); } static void sacleanup(struct cam_periph *periph) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; cam_periph_unlock(periph); if ((softc->flags & SA_FLAG_SCTX_INIT) != 0 && sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); cam_periph_lock(periph); devstat_remove_entry(softc->device_stats); free(softc, M_SCSISA); } static void saasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_SEQUENTIAL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. 
*/ status = cam_periph_alloc(saregister, saoninvalidate, sacleanup, sastart, "sa", CAM_PERIPH_BIO, path, saasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("saasync: Unable to probe new device " "due to status 0x%x\n", status); break; } default: cam_periph_async(periph, code, path, arg); break; } } static void sasetupdev(struct sa_softc *softc, struct cdev *dev) { - dev->si_drv1 = softc->periph; + dev->si_iosize_max = softc->maxio; dev->si_flags |= softc->si_flags; /* * Keep a count of how many non-alias devices we have created, * so we can make sure we clean them all up on shutdown. Aliases * are cleaned up when we destroy the device they're an alias for. */ if ((dev->si_flags & SI_ALIAS) == 0) softc->num_devs_to_destroy++; } static void sasysctlinit(void *context, int pending) { struct cam_periph *periph; struct sa_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; /* * If the periph is invalid, no need to setup the sysctls. 
*/ if (periph->flags & CAM_PERIPH_INVALID) goto bailout; softc = (struct sa_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM SA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%u", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= SA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_sa), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (softc->sysctl_tree == NULL) goto bailout; SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "allow_io_split", CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &softc->allow_io_split, 0, "Allow Splitting I/O"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "maxio", CTLFLAG_RD, &softc->maxio, 0, "Maximum I/O size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "cpi_maxio", CTLFLAG_RD, &softc->cpi_maxio, 0, "Maximum Controller I/O size"); bailout: /* * Release the reference that was held when this task was enqueued. */ cam_periph_release(periph); } static cam_status saregister(struct cam_periph *periph, void *arg) { struct sa_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; + struct make_dev_args args; caddr_t match; char tmpstr[80]; + int error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("saregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = (struct sa_softc *) malloc(sizeof (*softc), M_SCSISA, M_NOWAIT | M_ZERO); if (softc == NULL) { printf("saregister: Unable to probe new device. 
" "Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->scsi_rev = SID_ANSI_REV(&cgd->inq_data); softc->state = SA_STATE_NORMAL; softc->fileno = (daddr_t) -1; softc->blkno = (daddr_t) -1; softc->rep_fileno = (daddr_t) -1; softc->rep_blkno = (daddr_t) -1; softc->partition = (daddr_t) -1; softc->bop = -1; softc->eop = -1; softc->bpew = -1; bioq_init(&softc->bio_queue); softc->periph = periph; periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)sa_quirk_table, sizeof(sa_quirk_table)/sizeof(*sa_quirk_table), sizeof(*sa_quirk_table), scsi_inquiry_match); if (match != NULL) { softc->quirks = ((struct sa_quirk_entry *)match)->quirks; softc->last_media_blksize = ((struct sa_quirk_entry *)match)->prefblk; } else softc->quirks = SA_QUIRK_NONE; /* * Long format data for READ POSITION was introduced in SSC, which * was after SCSI-2. (Roughly equivalent to SCSI-3.) If the drive * reports that it is SCSI-2 or older, it is unlikely to support * long position data, but it might. Some drives from that era * claim to be SCSI-2, but do support long position information. * So, instead of immediately disabling long position information * for SCSI-2 devices, we'll try one pass through sagetpos(), and * then disable long position information if we get an error. 
*/ if (cgd->inq_data.version <= SCSI_REV_CCS) softc->quirks |= SA_QUIRK_NO_LONG_POS; if (cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) { struct ccb_dev_advinfo cdai; struct scsi_vpd_extended_inquiry_data ext_inq; bzero(&ext_inq, sizeof(ext_inq)); xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.flags = CDAI_FLAG_NONE; cdai.buftype = CDAI_TYPE_EXT_INQ; cdai.bufsiz = sizeof(ext_inq); cdai.buf = (uint8_t *)&ext_inq; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if ((cdai.ccb_h.status == CAM_REQ_CMP) && (ext_inq.flags1 & SVPD_EID_SA_SPT_LBP)) softc->flags |= SA_FLAG_PROTECT_SUPP; } bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); /* * The SA driver supports a blocksize, but we don't know the * blocksize until we media is inserted. So, set a flag to * indicate that the blocksize is unavailable right now. */ cam_periph_unlock(periph); softc->device_stats = devstat_new_entry("sa", periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_TAPE); /* * Load the default value that is either compiled in, or loaded * in the global kern.cam.sa.allow_io_split tunable. */ softc->allow_io_split = sa_allow_io_split; /* * Load a per-instance tunable, if it exists. NOTE that this * tunable WILL GO AWAY in FreeBSD 11.0. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.sa.%u.allow_io_split", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->allow_io_split); /* * If maxio isn't set, we fall back to DFLTPHYS. Otherwise we take * the smaller of cpi.maxio or MAXPHYS. 
*/ if (cpi.maxio == 0) softc->maxio = DFLTPHYS; else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; else softc->maxio = cpi.maxio; /* * Record the controller's maximum I/O size so we can report it to * the user later. */ softc->cpi_maxio = cpi.maxio; /* * By default we tell physio that we do not want our I/O split. * The user needs to have a 1:1 mapping between the size of his * write to a tape character device and the size of the write * that actually goes down to the drive. */ if (softc->allow_io_split == 0) softc->si_flags = SI_NOSPLIT; else softc->si_flags = 0; TASK_INIT(&softc->sysctl_task, 0, sasysctlinit, periph); /* * If the SIM supports unmapped I/O, let physio know that we can * handle unmapped buffers. */ if (cpi.hba_misc & PIM_UNMAPPED) softc->si_flags |= SI_UNMAPPED; /* * Acquire a reference to the periph before we create the devfs * instances for it. We'll release this reference once the devfs * instances have been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } - softc->devs.ctl_dev = make_dev(&sa_cdevsw, SAMINOR(SA_CTLDEV, - SA_ATYPE_R), UID_ROOT, GID_OPERATOR, - 0660, "%s%d.ctl", periph->periph_name, periph->unit_number); + make_dev_args_init(&args); + args.mda_devsw = &sa_cdevsw; + args.mda_si_drv1 = softc->periph; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0660; + + args.mda_unit = SAMINOR(SA_CTLDEV, SA_ATYPE_R); + error = make_dev_s(&args, &softc->devs.ctl_dev, "%s%d.ctl", + periph->periph_name, periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + return (CAM_REQ_CMP_ERR); + } sasetupdev(softc, softc->devs.ctl_dev); - softc->devs.r_dev = make_dev(&sa_cdevsw, SAMINOR(SA_NOT_CTLDEV, - SA_ATYPE_R), UID_ROOT, GID_OPERATOR, - 0660, "%s%d", periph->periph_name, periph->unit_number); + args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_R); + error = 
make_dev_s(&args, &softc->devs.r_dev, "%s%d", + periph->periph_name, periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + return (CAM_REQ_CMP_ERR); + } sasetupdev(softc, softc->devs.r_dev); - softc->devs.nr_dev = make_dev(&sa_cdevsw, SAMINOR(SA_NOT_CTLDEV, - SA_ATYPE_NR), UID_ROOT, GID_OPERATOR, - 0660, "n%s%d", periph->periph_name, periph->unit_number); + args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_NR); + error = make_dev_s(&args, &softc->devs.nr_dev, "n%s%d", + periph->periph_name, periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + return (CAM_REQ_CMP_ERR); + } sasetupdev(softc, softc->devs.nr_dev); - softc->devs.er_dev = make_dev(&sa_cdevsw, SAMINOR(SA_NOT_CTLDEV, - SA_ATYPE_ER), UID_ROOT, GID_OPERATOR, - 0660, "e%s%d", periph->periph_name, periph->unit_number); - sasetupdev(softc, softc->devs.er_dev); + args.mda_unit = SAMINOR(SA_NOT_CTLDEV, SA_ATYPE_ER); + error = make_dev_s(&args, &softc->devs.er_dev, "e%s%d", + periph->periph_name, periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + return (CAM_REQ_CMP_ERR); + } + sasetupdev(softc, softc->devs.er_dev); cam_periph_lock(periph); softc->density_type_bits[0] = 0; softc->density_type_bits[1] = SRDS_MEDIA; softc->density_type_bits[2] = SRDS_MEDIUM_TYPE; softc->density_type_bits[3] = SRDS_MEDIUM_TYPE | SRDS_MEDIA; /* * Bump the peripheral refcount for the sysctl thread, in case we * get invalidated before the thread has a chance to run. */ cam_periph_acquire(periph); taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add an async callback so that we get * notified if this device goes away. 
*/ xpt_register_async(AC_LOST_DEVICE, saasync, periph, periph->path); xpt_announce_periph(periph, NULL); xpt_announce_quirks(periph, softc->quirks, SA_QUIRK_BIT_STRING); return (CAM_REQ_CMP); } static void sastart(struct cam_periph *periph, union ccb *start_ccb) { struct sa_softc *softc; softc = (struct sa_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("sastart\n")); switch (softc->state) { case SA_STATE_NORMAL: { /* Pull a buffer from the queue and get going on it */ struct bio *bp; /* * See if there is a buf with work for us to do.. */ bp = bioq_first(&softc->bio_queue); if (bp == NULL) { xpt_release_ccb(start_ccb); } else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) { struct bio *done_bp; again: softc->queue_count--; bioq_remove(&softc->bio_queue, bp); bp->bio_resid = bp->bio_bcount; done_bp = bp; if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) { /* * We have two different behaviors for * writes when we hit either Early Warning * or the PEWZ (Programmable Early Warning * Zone). The default behavior is that * for all writes that are currently * queued after the write where we saw the * early warning, we will return the write * with the residual equal to the count. * i.e. tell the application that 0 bytes * were written. * * The alternate behavior, which is enabled * when eot_warn is set, is that in * addition to setting the residual equal * to the count, we will set the error * to ENOSPC. * * In either case, once queued writes are * cleared out, we clear the error flag * (see below) and the application is free to * attempt to write more. */ if (softc->eot_warn != 0) { bp->bio_flags |= BIO_ERROR; bp->bio_error = ENOSPC; } else bp->bio_error = 0; } else if ((softc->flags & SA_FLAG_EOF_PENDING) != 0) { /* * This can only happen if we're reading * in fixed length mode. In this case, * we dump the rest of the list the * same way. 
*/ bp->bio_error = 0; if (bioq_first(&softc->bio_queue) != NULL) { biodone(done_bp); goto again; } } else if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } bp = bioq_first(&softc->bio_queue); /* * Only if we have no other buffers queued up * do we clear the pending error flag. */ if (bp == NULL) softc->flags &= ~SA_FLAG_ERR_PENDING; CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("sastart- ERR_PENDING now 0x%x, bp is %sNULL, " "%d more buffers queued up\n", (softc->flags & SA_FLAG_ERR_PENDING), (bp != NULL)? "not " : " ", softc->queue_count)); xpt_release_ccb(start_ccb); biodone(done_bp); } else { u_int32_t length; bioq_remove(&softc->bio_queue, bp); softc->queue_count--; length = bp->bio_bcount; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->blk_shift != 0) { length = length >> softc->blk_shift; } else if (softc->media_blksize != 0) { length = length / softc->media_blksize; } else { bp->bio_error = EIO; xpt_print(periph->path, "zero blocksize" " for FIXED length writes?\n"); biodone(bp); break; } #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d fixed record %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } else { #if 0 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, ("issuing a %d variable byte %s\n", length, (bp->bio_cmd == BIO_READ)? "read" : "write")); #endif } devstat_start_transaction_bio(softc->device_stats, bp); /* * Some people have theorized that we should * suppress illegal length indication if we are * running in variable block mode so that we don't * have to request sense every time our requested * block size is larger than the written block. * The residual information from the ccb allows * us to identify this situation anyway. The only * problem with this is that we will not get * information about blocks that are larger than * our read buffer unless we set the block size * in the mode page to something other than 0. * * I believe that this is a non-issue. 
If user apps * don't adjust their read size to match our record * size, that's just life. Anyway, the typical usage * would be to issue, e.g., 64KB reads and occasionally * have to do deal with 512 byte or 1KB intermediate * records. * * That said, though, we now support setting the * SILI bit on reads, and we set the blocksize to 4 * bytes when we do that. This gives us * compatibility with software that wants this, * although the only real difference between that * and not setting the SILI bit on reads is that we * won't get a check condition on reads where our * request size is larger than the block on tape. * That probably only makes a real difference in * non-packetized SCSI, where you have to go back * to the drive to request sense and thus incur * more latency. */ softc->dsreg = (bp->bio_cmd == BIO_READ)? MTIO_DSREG_RD : MTIO_DSREG_WR; scsi_sa_read_write(&start_ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, (bp->bio_cmd == BIO_READ ? SCSI_RW_READ : SCSI_RW_WRITE) | ((bp->bio_flags & BIO_UNMAPPED) != 0 ? SCSI_RW_BIO : 0), softc->sili, (softc->flags & SA_FLAG_FIXED) != 0, length, (bp->bio_flags & BIO_UNMAPPED) != 0 ? 
(void *)bp : bp->bio_data, bp->bio_bcount, SSD_FULL_SIZE, IO_TIMEOUT); start_ccb->ccb_h.ccb_pflags &= ~SA_POSITION_UPDATED; start_ccb->ccb_h.ccb_bp = bp; bp = bioq_first(&softc->bio_queue); xpt_action(start_ccb); } if (bp != NULL) { /* Have more work to do, so ensure we stay scheduled */ xpt_schedule(periph, CAM_PRIORITY_NORMAL); } break; } case SA_STATE_ABNORMAL: default: panic("state 0x%x in sastart", softc->state); break; } } static void sadone(struct cam_periph *periph, union ccb *done_ccb) { struct sa_softc *softc; struct ccb_scsiio *csio; struct bio *bp; int error; softc = (struct sa_softc *)periph->softc; csio = &done_ccb->csio; softc->dsreg = MTIO_DSREG_REST; bp = (struct bio *)done_ccb->ccb_h.ccb_bp; error = 0; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if ((error = saerror(done_ccb, 0, 0)) == ERESTART) { /* * A retry was scheduled, so just return. */ return; } } if (error == EIO) { /* * Catastrophic error. Mark the tape as frozen * (we no longer know tape position). * * Return all queued I/O with EIO, and unfreeze * our queue so that future transactions that * attempt to fix this problem can get to the * device. * */ softc->flags |= SA_FLAG_TAPE_FROZEN; bioq_flush(&softc->bio_queue, NULL, EIO); } if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_error = error; bp->bio_flags |= BIO_ERROR; /* * In the error case, position is updated in saerror. 
*/ } else { bp->bio_resid = csio->resid; bp->bio_error = 0; if (csio->resid != 0) { bp->bio_flags |= BIO_ERROR; } if (bp->bio_cmd == BIO_WRITE) { softc->flags |= SA_FLAG_TAPE_WRITTEN; softc->filemarks = 0; } if (!(csio->ccb_h.ccb_pflags & SA_POSITION_UPDATED) && (softc->blkno != (daddr_t) -1)) { if ((softc->flags & SA_FLAG_FIXED) != 0) { u_int32_t l; if (softc->blk_shift != 0) { l = bp->bio_bcount >> softc->blk_shift; } else { l = bp->bio_bcount / softc->media_blksize; } softc->blkno += (daddr_t) l; } else { softc->blkno++; } } } /* * If we had an error (immediate or pending), * release the device queue now. */ if (error || (softc->flags & SA_FLAG_ERR_PENDING)) cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); if (error || bp->bio_resid) { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("error %d resid %ld count %ld\n", error, bp->bio_resid, bp->bio_bcount)); } biofinish(bp, softc->device_stats, 0); xpt_release_ccb(done_ccb); } /* * Mount the tape (make sure it's ready for I/O). */ static int samount(struct cam_periph *periph, int oflags, struct cdev *dev) { struct sa_softc *softc; union ccb *ccb; int error; /* * oflags can be checked for 'kind' of open (read-only check) - later * dev can be checked for a control-mode or compression open - later */ UNUSED_PARAMETER(oflags); UNUSED_PARAMETER(dev); softc = (struct sa_softc *)periph->softc; /* * This should determine if something has happend since the last * open/mount that would invalidate the mount. We do *not* want * to retry this command- we just want the status. But we only * do this if we're mounted already- if we're not mounted, * we don't care about the unit read state and can instead use * this opportunity to attempt to reserve the tape unit. 
*/ if (softc->flags & SA_FLAG_TAPE_MOUNTED) { ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error == ENXIO) { softc->flags &= ~SA_FLAG_TAPE_MOUNTED; scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } else if (error) { /* * We don't need to freeze the tape because we * will now attempt to rewind/load it. */ softc->flags &= ~SA_FLAG_TAPE_MOUNTED; if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { xpt_print(periph->path, "error %d on TUR in samount\n", error); } } } else { error = sareservereleaseunit(periph, TRUE); if (error) { return (error); } ccb = cam_periph_getccb(periph, 1); scsi_test_unit_ready(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, IO_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { struct scsi_read_block_limits_data *rblim = NULL; int comp_enabled, comp_supported; u_int8_t write_protect, guessing = 0; /* * Clear out old state. */ softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN| SA_FLAG_ERR_PENDING|SA_FLAG_COMPRESSION); softc->filemarks = 0; /* * *Very* first off, make sure we're loaded to BOT. 
*/ scsi_load_unload(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, FALSE, FALSE, 1, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); /* * In case this doesn't work, do a REWIND instead */ if (error) { scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); } if (error) { xpt_release_ccb(ccb); goto exit; } /* * Do a dummy test read to force access to the * media so that the drive will really know what's * there. We actually don't really care what the * blocksize on tape is and don't expect to really * read a full record. */ rblim = (struct scsi_read_block_limits_data *) malloc(8192, M_SCSISA, M_NOWAIT); if (rblim == NULL) { xpt_print(periph->path, "no memory for test read\n"); xpt_release_ccb(ccb); error = ENOMEM; goto exit; } if ((softc->quirks & SA_QUIRK_NODREAD) == 0) { scsi_sa_read_write(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, 1, FALSE, 0, 8192, (void *) rblim, 8192, SSD_FULL_SIZE, IO_TIMEOUT); (void) cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); scsi_rewind(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, SSD_FULL_SIZE, REWIND_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); if (error) { xpt_print(periph->path, "unable to rewind after test read\n"); xpt_release_ccb(ccb); goto exit; } } /* * Next off, determine block limits. */ scsi_read_block_limits(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, rblim, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, CAM_RETRY_SELTO, SF_NO_PRINT | SF_RETRY_UA, softc->device_stats); xpt_release_ccb(ccb); if (error != 0) { /* * If it's less than SCSI-2, READ BLOCK LIMITS is not * a MANDATORY command. Anyway- it doesn't matter- * we can proceed anyway. 
*/ softc->blk_gran = 0; softc->max_blk = ~0; softc->min_blk = 0; } else { if (softc->scsi_rev >= SCSI_REV_SPC) { softc->blk_gran = RBL_GRAN(rblim); } else { softc->blk_gran = 0; } /* * We take max_blk == min_blk to mean a default to * fixed mode- but note that whatever we get out of * sagetparams below will actually determine whether * we are actually *in* fixed mode. */ softc->max_blk = scsi_3btoul(rblim->maximum); softc->min_blk = scsi_2btoul(rblim->minimum); } /* * Next, perform a mode sense to determine * current density, blocksize, compression etc. */ error = sagetparams(periph, SA_PARAM_ALL, &softc->media_blksize, &softc->media_density, &softc->media_numblks, &softc->buffer_mode, &write_protect, &softc->speed, &comp_supported, &comp_enabled, &softc->comp_algorithm, NULL, NULL, 0, 0); if (error != 0) { /* * We could work a little harder here. We could * adjust our attempts to get information. It * might be an ancient tape drive. If someone * nudges us, we'll do that. */ goto exit; } /* * If no quirk has determined that this is a device that is * preferred to be in fixed or variable mode, now is the time * to find out. */ if ((softc->quirks & (SA_QUIRK_FIXED|SA_QUIRK_VARIABLE)) == 0) { guessing = 1; /* * This could be expensive to find out. Luckily we * only need to do this once. If we start out in * 'default' mode, try and set ourselves to one * of the densities that would determine a wad * of other stuff. Go from highest to lowest. 
*/ if (softc->media_density == SCSI_DEFAULT_DENSITY) { int i; static u_int8_t ctry[] = { SCSI_DENSITY_HALFINCH_PE, SCSI_DENSITY_HALFINCH_6250C, SCSI_DENSITY_HALFINCH_6250, SCSI_DENSITY_HALFINCH_1600, SCSI_DENSITY_HALFINCH_800, SCSI_DENSITY_QIC_4GB, SCSI_DENSITY_QIC_2GB, SCSI_DENSITY_QIC_525_320, SCSI_DENSITY_QIC_150, SCSI_DENSITY_QIC_120, SCSI_DENSITY_QIC_24, SCSI_DENSITY_QIC_11_9TRK, SCSI_DENSITY_QIC_11_4TRK, SCSI_DENSITY_QIC_1320, SCSI_DENSITY_QIC_3080, 0 }; for (i = 0; ctry[i]; i++) { error = sasetparams(periph, SA_PARAM_DENSITY, 0, ctry[i], 0, SF_NO_PRINT); if (error == 0) { softc->media_density = ctry[i]; break; } } } switch (softc->media_density) { case SCSI_DENSITY_QIC_11_4TRK: case SCSI_DENSITY_QIC_11_9TRK: case SCSI_DENSITY_QIC_24: case SCSI_DENSITY_QIC_120: case SCSI_DENSITY_QIC_150: case SCSI_DENSITY_QIC_525_320: case SCSI_DENSITY_QIC_1320: case SCSI_DENSITY_QIC_3080: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 512; break; case SCSI_DENSITY_QIC_4GB: case SCSI_DENSITY_QIC_2GB: softc->quirks &= ~SA_QUIRK_2FM; softc->quirks |= SA_QUIRK_FIXED|SA_QUIRK_1FM; softc->last_media_blksize = 1024; break; default: softc->last_media_blksize = softc->media_blksize; softc->quirks |= SA_QUIRK_VARIABLE; break; } } /* * If no quirk has determined that this is a device that needs * to have 2 Filemarks at EOD, now is the time to find out. */ if ((softc->quirks & SA_QUIRK_2FM) == 0) { switch (softc->media_density) { case SCSI_DENSITY_HALFINCH_800: case SCSI_DENSITY_HALFINCH_1600: case SCSI_DENSITY_HALFINCH_6250: case SCSI_DENSITY_HALFINCH_6250C: case SCSI_DENSITY_HALFINCH_PE: softc->quirks &= ~SA_QUIRK_1FM; softc->quirks |= SA_QUIRK_2FM; break; default: break; } } /* * Now validate that some info we got makes sense. 
*/ if ((softc->max_blk < softc->media_blksize) || (softc->min_blk > softc->media_blksize && softc->media_blksize)) { xpt_print(periph->path, "BLOCK LIMITS (%d..%d) could not match current " "block settings (%d)- adjusting\n", softc->min_blk, softc->max_blk, softc->media_blksize); softc->max_blk = softc->min_blk = softc->media_blksize; } /* * Now put ourselves into the right frame of mind based * upon quirks... */ tryagain: /* * If we want to be in FIXED mode and our current blocksize * is not equal to our last blocksize (if nonzero), try and * set ourselves to this last blocksize (as the 'preferred' * block size). The initial quirkmatch at registry sets the * initial 'last' blocksize. If, for whatever reason, this * 'last' blocksize is zero, set the blocksize to 512, * or min_blk if that's larger. */ if ((softc->quirks & SA_QUIRK_FIXED) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0 && (softc->media_blksize != softc->last_media_blksize)) { softc->media_blksize = softc->last_media_blksize; if (softc->media_blksize == 0) { softc->media_blksize = 512; if (softc->media_blksize < softc->min_blk) { softc->media_blksize = softc->min_blk; } } error = sasetparams(periph, SA_PARAM_BLOCKSIZE, softc->media_blksize, 0, 0, SF_NO_PRINT); if (error) { xpt_print(periph->path, "unable to set fixed blocksize to %d\n", softc->media_blksize); goto exit; } } if ((softc->quirks & SA_QUIRK_VARIABLE) && (softc->media_blksize != 0)) { softc->last_media_blksize = softc->media_blksize; softc->media_blksize = 0; error = sasetparams(periph, SA_PARAM_BLOCKSIZE, 0, 0, 0, SF_NO_PRINT); if (error) { /* * If this fails and we were guessing, just * assume that we got it wrong and go try * fixed block mode. Don't even check against * density code at this point. 
*/ if (guessing) { softc->quirks &= ~SA_QUIRK_VARIABLE; softc->quirks |= SA_QUIRK_FIXED; if (softc->last_media_blksize == 0) softc->last_media_blksize = 512; goto tryagain; } xpt_print(periph->path, "unable to set variable blocksize\n"); goto exit; } } /* * Now that we have the current block size, * set up some parameters for sastart's usage. */ if (softc->media_blksize) { softc->flags |= SA_FLAG_FIXED; if (powerof2(softc->media_blksize)) { softc->blk_shift = ffs(softc->media_blksize) - 1; softc->blk_mask = softc->media_blksize - 1; } else { softc->blk_mask = ~0; softc->blk_shift = 0; } } else { /* * The SCSI-3 spec allows 0 to mean "unspecified". * The SCSI-1 spec allows 0 to mean 'infinite'. * * Either works here. */ if (softc->max_blk == 0) { softc->max_blk = ~0; } softc->blk_shift = 0; if (softc->blk_gran != 0) { softc->blk_mask = softc->blk_gran - 1; } else { softc->blk_mask = 0; } } if (write_protect) softc->flags |= SA_FLAG_TAPE_WP; if (comp_supported) { if (softc->saved_comp_algorithm == 0) softc->saved_comp_algorithm = softc->comp_algorithm; softc->flags |= SA_FLAG_COMP_SUPP; if (comp_enabled) softc->flags |= SA_FLAG_COMP_ENABLED; } else softc->flags |= SA_FLAG_COMP_UNSUPP; if ((softc->buffer_mode == SMH_SA_BUF_MODE_NOBUF) && (softc->quirks & SA_QUIRK_NO_MODESEL) == 0) { error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0, SF_NO_PRINT); if (error == 0) { softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF; } else { xpt_print(periph->path, "unable to set buffered mode\n"); } error = 0; /* not an error */ } if (error == 0) { softc->flags |= SA_FLAG_TAPE_MOUNTED; } exit: if (rblim != NULL) free(rblim, M_SCSISA); if (error != 0) { softc->dsreg = MTIO_DSREG_NIL; } else { softc->fileno = softc->blkno = 0; softc->rep_fileno = softc->rep_blkno = -1; softc->partition = 0; softc->dsreg = MTIO_DSREG_REST; } #ifdef SA_1FM_AT_EOD if ((softc->quirks & SA_QUIRK_2FM) == 0) softc->quirks |= SA_QUIRK_1FM; #else if ((softc->quirks & SA_QUIRK_1FM) == 0) softc->quirks |= 
SA_QUIRK_2FM;
#endif
	} else
		xpt_release_ccb(ccb);

	/*
	 * If we return an error, we're not mounted any more,
	 * so release any device reservation.
	 */
	if (error != 0) {
		(void) sareservereleaseunit(periph, FALSE);
	} else {
		/*
		 * Clear I/O residual.
		 */
		softc->last_io_resid = 0;
		softc->last_ctl_resid = 0;
	}
	return (error);
}

/*
 * How many filemarks do we need to write if we were to terminate the
 * tape session right now? Note that this can be a negative number
 * (more filemarks have already been written than this session requires,
 * since markswanted is reduced by softc->filemarks below).
 */
static int
samarkswanted(struct cam_periph *periph)
{
	int	markswanted;
	struct sa_softc *softc;

	softc = (struct sa_softc *)periph->softc;
	markswanted = 0;
	/* Only a written tape needs terminating filemarks at all. */
	if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) {
		markswanted++;
		/* Some devices/densities want two filemarks at EOD. */
		if (softc->quirks & SA_QUIRK_2FM)
			markswanted++;
	}
	/* Credit filemarks already written this session. */
	markswanted -= softc->filemarks;
	return (markswanted);
}

/*
 * Write out any filemarks still owed for the current session; a zero or
 * negative deficit means nothing to do.  Returns 0 or an errno from
 * sawritefilemarks().
 */
static int
sacheckeod(struct cam_periph *periph)
{
	int	error;
	int	markswanted;

	markswanted = samarkswanted(periph);

	if (markswanted > 0) {
		error = sawritefilemarks(periph, markswanted, FALSE, FALSE);
	} else {
		error = 0;
	}
	return (error);
}

/*
 * CAM error callback for the sa(4) driver: decode sense data, latch up
 * residuals, and translate tape-specific conditions (EOM, filemark,
 * incorrect-length indicator) into driver state and errno values.
 */
static int
saerror(union ccb *ccb, u_int32_t cflgs, u_int32_t sflgs)
{
	static const char *toobig =
	    "%d-byte tape record bigger than supplied buffer\n";
	struct	cam_periph *periph;
	struct	sa_softc *softc;
	struct	ccb_scsiio *csio;
	struct	scsi_sense_data *sense;
	uint64_t resid = 0;
	int64_t	info = 0;
	cam_status status;
	int	error_code, sense_key, asc, ascq, error, aqvalid, stream_valid;
	int	sense_len;
	uint8_t	stream_bits;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct sa_softc *)periph->softc;
	csio = &ccb->csio;
	sense = &csio->sense_data;
	/* Amount of sense data actually returned (length minus residual). */
	sense_len = csio->sense_len - csio->sense_resid;
	scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc,
	    &ascq, /*show_errors*/ 1);
	/* -1 from the extraction means the ASC/ASCQ fields were absent. */
	if (asc != -1 && ascq != -1)
		aqvalid = 1;
	else
		aqvalid = 0;
	/* Stream bits (EOM/FILEMARK/ILI) are only valid if this succeeds. */
	if (scsi_get_stream_info(sense, sense_len, NULL, &stream_bits) == 0)
		stream_valid = 1;
	else
		stream_valid = 0;
	error = 0;

	status = csio->ccb_h.status & CAM_STATUS_MASK;

	/*
	 * Calculate/latch up, any residuals...
We do this in a funny 2-step * so we can print stuff here if we have CAM_DEBUG enabled for this * unit. */ if (status == CAM_SCSI_STATUS_ERROR) { if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &resid, &info) == 0) { if ((softc->flags & SA_FLAG_FIXED) != 0) resid *= softc->media_blksize; } else { resid = csio->dxfer_len; info = resid; if ((softc->flags & SA_FLAG_FIXED) != 0) { if (softc->media_blksize) info /= softc->media_blksize; } } if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { bcopy((caddr_t) sense, (caddr_t) &softc->last_io_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_io_cdb, (int) csio->cdb_len); softc->last_io_resid = resid; softc->last_resid_was_io = 1; } else { bcopy((caddr_t) sense, (caddr_t) &softc->last_ctl_sense, sizeof (struct scsi_sense_data)); bcopy(csio->cdb_io.cdb_bytes, softc->last_ctl_cdb, (int) csio->cdb_len); softc->last_ctl_resid = resid; softc->last_resid_was_io = 0; } CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("CDB[0]=0x%x Key 0x%x " "ASC/ASCQ 0x%x/0x%x CAM STATUS 0x%x flags 0x%x resid %jd " "dxfer_len %d\n", csio->cdb_io.cdb_bytes[0] & 0xff, sense_key, asc, ascq, status, (stream_valid) ? stream_bits : 0, (intmax_t)resid, csio->dxfer_len)); } else { CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Cam Status 0x%x\n", status)); } switch (status) { case CAM_REQ_CMP: return (0); case CAM_SCSI_STATUS_ERROR: /* * If a read/write command, we handle it here. */ if (csio->cdb_io.cdb_bytes[0] == SA_READ || csio->cdb_io.cdb_bytes[0] == SA_WRITE) { break; } /* * If this was just EOM/EOP, Filemark, Setmark or ILI detected * on a non read/write command, we assume it's not an error * and propagate the residule and return. */ if ((aqvalid && asc == 0 && ascq > 0 && ascq <= 5) || (aqvalid == 0 && sense_key == SSD_KEY_NO_SENSE)) { csio->resid = resid; QFRLS(ccb); return (0); } /* * Otherwise, we let the common code handle this. 
*/ return (cam_periph_error(ccb, cflgs, sflgs, &softc->saved_ccb)); /* * XXX: To Be Fixed * We cannot depend upon CAM honoring retry counts for these. */ case CAM_SCSI_BUS_RESET: case CAM_BDR_SENT: if (ccb->ccb_h.retry_count <= 0) { return (EIO); } /* FALLTHROUGH */ default: return (cam_periph_error(ccb, cflgs, sflgs, &softc->saved_ccb)); } /* * Handle filemark, end of tape, mismatched record sizes.... * From this point out, we're only handling read/write cases. * Handle writes && reads differently. */ if (csio->cdb_io.cdb_bytes[0] == SA_WRITE) { if (sense_key == SSD_KEY_VOLUME_OVERFLOW) { csio->resid = resid; error = ENOSPC; } else if ((stream_valid != 0) && (stream_bits & SSD_EOM)) { softc->flags |= SA_FLAG_EOM_PENDING; /* * Grotesque as it seems, the few times * I've actually seen a non-zero resid, * the tape drive actually lied and had * written all the data!. */ csio->resid = 0; } } else { csio->resid = resid; if (sense_key == SSD_KEY_BLANK_CHECK) { if (softc->quirks & SA_QUIRK_1FM) { error = 0; softc->flags |= SA_FLAG_EOM_PENDING; } else { error = EIO; } } else if ((stream_valid != 0) && (stream_bits & SSD_FILEMARK)){ if (softc->flags & SA_FLAG_FIXED) { error = -1; softc->flags |= SA_FLAG_EOF_PENDING; } /* * Unconditionally, if we detected a filemark on a read, * mark that we've run moved a file ahead. */ if (softc->fileno != (daddr_t) -1) { softc->fileno++; softc->blkno = 0; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } /* * Incorrect Length usually applies to read, but can apply to writes. */ if (error == 0 && (stream_valid != 0) && (stream_bits & SSD_ILI)) { if (info < 0) { xpt_print(csio->ccb_h.path, toobig, csio->dxfer_len - info); csio->resid = csio->dxfer_len; error = EIO; } else { csio->resid = resid; if (softc->flags & SA_FLAG_FIXED) { softc->flags |= SA_FLAG_EIO_PENDING; } /* * Bump the block number if we hadn't seen a filemark. * Do this independent of errors (we've moved anyway). 
*/ if ((stream_valid == 0) || (stream_bits & SSD_FILEMARK) == 0) { if (softc->blkno != (daddr_t) -1) { softc->blkno++; csio->ccb_h.ccb_pflags |= SA_POSITION_UPDATED; } } } } if (error <= 0) { /* * Unfreeze the queue if frozen as we're not returning anything * to our waiters that would indicate an I/O error has occurred * (yet). */ QFRLS(ccb); error = 0; } return (error); } static int sagetparams(struct cam_periph *periph, sa_params params_to_get, u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, sa_comp_t *tcs, struct scsi_control_data_prot_subpage *prot_page, int dp_size, int prot_changeable) { union ccb *ccb; void *mode_buffer; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; int mode_buffer_len; struct sa_softc *softc; u_int8_t cpage; int error; cam_status status; softc = (struct sa_softc *)periph->softc; ccb = cam_periph_getccb(periph, 1); if (softc->quirks & SA_QUIRK_NO_CPAGE) cpage = SA_DEVICE_CONFIGURATION_PAGE; else cpage = SA_DATA_COMPRESSION_PAGE; retry: mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_get & SA_PARAM_COMPRESSION) { if (softc->quirks & SA_QUIRK_NOCOMP) { *comp_supported = FALSE; params_to_get &= ~SA_PARAM_COMPRESSION; } else mode_buffer_len += sizeof (sa_comp_t); } /* XXX Fix M_NOWAIT */ mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { xpt_release_ccb(ccb); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; /* it is safe to retry this */ scsi_mode_sense(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, (params_to_get & SA_PARAM_COMPRESSION) ? 
cpage : SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; if (error == EINVAL && (params_to_get & SA_PARAM_COMPRESSION) != 0) { /* * Hmm. Let's see if we can try another page... * If we've already done that, give up on compression * for this device and remember this for the future * and attempt the request without asking for compression * info. */ if (cpage == SA_DATA_COMPRESSION_PAGE) { cpage = SA_DEVICE_CONFIGURATION_PAGE; goto retry; } softc->quirks |= SA_QUIRK_NOCOMP; free(mode_buffer, M_SCSISA); goto retry; } else if (status == CAM_SCSI_STATUS_ERROR) { /* Tell the user about the fatal error. */ scsi_sense_print(&ccb->csio); goto sagetparamsexit; } /* * If the user only wants the compression information, and * the device doesn't send back the block descriptor, it's * no big deal. If the user wants more than just * compression, though, and the device doesn't pass back the * block descriptor, we need to send another mode sense to * get the block descriptor. */ if ((mode_hdr->blk_desc_len == 0) && (params_to_get & SA_PARAM_COMPRESSION) && (params_to_get & ~(SA_PARAM_COMPRESSION))) { /* * Decrease the mode buffer length by the size of * the compression page, to make sure the data * there doesn't get overwritten. */ mode_buffer_len -= sizeof (sa_comp_t); /* * Now move the compression page that we presumably * got back down the memory chunk a little bit so * it doesn't get spammed. */ bcopy(&mode_hdr[0], &mode_hdr[1], sizeof (sa_comp_t)); bzero(&mode_hdr[0], sizeof (mode_hdr[0])); /* * Now, we issue another mode sense and just ask * for the block descriptor, etc. 
*/ scsi_mode_sense(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE, SMS_PAGE_CTRL_CURRENT, SMS_VENDOR_SPECIFIC_PAGE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) goto sagetparamsexit; } if (params_to_get & SA_PARAM_BLOCKSIZE) *blocksize = scsi_3btoul(mode_blk->blklen); if (params_to_get & SA_PARAM_NUMBLOCKS) *numblocks = scsi_3btoul(mode_blk->nblocks); if (params_to_get & SA_PARAM_BUFF_MODE) *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK; if (params_to_get & SA_PARAM_DENSITY) *density = mode_blk->density; if (params_to_get & SA_PARAM_WP) *write_protect = (mode_hdr->dev_spec & SMH_SA_WP)? TRUE : FALSE; if (params_to_get & SA_PARAM_SPEED) *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK; if (params_to_get & SA_PARAM_COMPRESSION) { sa_comp_t *ntcs = (sa_comp_t *) &mode_blk[1]; if (cpage == SA_DATA_COMPRESSION_PAGE) { struct scsi_data_compression_page *cp = &ntcs->dcomp; *comp_supported = (cp->dce_and_dcc & SA_DCP_DCC)? TRUE : FALSE; *comp_enabled = (cp->dce_and_dcc & SA_DCP_DCE)? TRUE : FALSE; *comp_algorithm = scsi_4btoul(cp->comp_algorithm); } else { struct scsi_dev_conf_page *cp = &ntcs->dconf; /* * We don't really know whether this device supports * Data Compression if the algorithm field is * zero. Just say we do. */ *comp_supported = TRUE; *comp_enabled = (cp->sel_comp_alg != SA_COMP_NONE)? 
TRUE : FALSE; *comp_algorithm = cp->sel_comp_alg; } if (tcs != NULL) bcopy(ntcs, tcs, sizeof (sa_comp_t)); } if ((params_to_get & SA_PARAM_DENSITY_EXT) && (softc->scsi_rev >= SCSI_REV_SPC)) { int i; for (i = 0; i < SA_DENSITY_TYPES; i++) { scsi_report_density_support(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*media*/ softc->density_type_bits[i] & SRDS_MEDIA, /*medium_type*/ softc->density_type_bits[i] & SRDS_MEDIUM_TYPE, /*data_ptr*/ softc->density_info[i], /*length*/ sizeof(softc->density_info[i]), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ REP_DENSITY_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); status = ccb->ccb_h.status & CAM_STATUS_MASK; /* * Some tape drives won't support this command at * all, but hopefully we'll minimize that with the * check for SPC or greater support above. If they * don't support the default report (neither the * MEDIA or MEDIUM_TYPE bits set), then there is * really no point in continuing on to look for * other reports. */ if ((error != 0) || (status != CAM_REQ_CMP)) { error = 0; softc->density_info_valid[i] = 0; if (softc->density_type_bits[i] == 0) break; else continue; } softc->density_info_valid[i] = ccb->csio.dxfer_len - ccb->csio.resid; } } /* * Get logical block protection parameters if the drive supports it. */ if ((params_to_get & SA_PARAM_LBP) && (softc->flags & SA_FLAG_PROTECT_SUPP)) { struct scsi_mode_header_10 *mode10_hdr; struct scsi_control_data_prot_subpage *dp_page; struct scsi_mode_sense_10 *cdb; struct sa_prot_state *prot; int dp_len, returned_len; if (dp_size == 0) dp_size = sizeof(*dp_page); dp_len = sizeof(*mode10_hdr) + dp_size; mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto sagetparamsexit; } scsi_mode_sense_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*dbd*/ TRUE, /*page_code*/ (prot_changeable == 0) ? 
SMS_PAGE_CTRL_CURRENT : SMS_PAGE_CTRL_CHANGEABLE, /*page*/ SMS_CONTROL_MODE_PAGE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); /* * XXX KDM we need to be able to set the subpage in the * fill function. */ cdb = (struct scsi_mode_sense_10 *)ccb->csio.cdb_io.cdb_bytes; cdb->subpage = SA_CTRL_DP_SUBPAGE_CODE; error = cam_periph_runccb(ccb, saerror, 0, SF_NO_PRINT, softc->device_stats); if (error != 0) { free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if (status != CAM_REQ_CMP) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } /* * The returned data length at least has to be long enough * for us to look at length in the mode page header. */ returned_len = ccb->csio.dxfer_len - ccb->csio.resid; if (returned_len < sizeof(mode10_hdr->data_length)) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } returned_len = min(returned_len, sizeof(mode10_hdr->data_length) + scsi_2btoul(mode10_hdr->data_length)); dp_page = (struct scsi_control_data_prot_subpage *) &mode10_hdr[1]; /* * We also have to have enough data to include the prot_bits * in the subpage. */ if (returned_len < (sizeof(*mode10_hdr) + __offsetof(struct scsi_control_data_prot_subpage, prot_bits) + sizeof(dp_page->prot_bits))) { error = EINVAL; free(mode10_hdr, M_SCSISA); goto sagetparamsexit; } prot = &softc->prot_info.cur_prot_state; prot->prot_method = dp_page->prot_method; prot->pi_length = dp_page->pi_length & SA_CTRL_DP_PI_LENGTH_MASK; prot->lbp_w = (dp_page->prot_bits & SA_CTRL_DP_LBP_W) ? 1 :0; prot->lbp_r = (dp_page->prot_bits & SA_CTRL_DP_LBP_R) ? 1 :0; prot->rbdp = (dp_page->prot_bits & SA_CTRL_DP_RBDP) ? 
1 :0; prot->initialized = 1; if (prot_page != NULL) bcopy(dp_page, prot_page, min(sizeof(*prot_page), sizeof(*dp_page))); free(mode10_hdr, M_SCSISA); } if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Mode Sense Data="); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } sagetparamsexit: xpt_release_ccb(ccb); free(mode_buffer, M_SCSISA); return (error); } /* * Set protection information to the pending protection information stored * in the softc. */ static int sasetprot(struct cam_periph *periph, struct sa_prot_state *new_prot) { struct sa_softc *softc; struct scsi_control_data_prot_subpage *dp_page, *dp_changeable; struct scsi_mode_header_10 *mode10_hdr, *mode10_changeable; union ccb *ccb; uint8_t current_speed; size_t dp_size, dp_page_length; int dp_len, buff_mode; int error; softc = (struct sa_softc *)periph->softc; mode10_hdr = NULL; mode10_changeable = NULL; ccb = NULL; /* * Start off with the size set to the actual length of the page * that we have defined. */ dp_size = sizeof(*dp_changeable); dp_page_length = dp_size - __offsetof(struct scsi_control_data_prot_subpage, prot_method); retry_length: dp_len = sizeof(*mode10_changeable) + dp_size; mode10_changeable = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_changeable == NULL) { error = ENOMEM; goto bailout; } dp_changeable = (struct scsi_control_data_prot_subpage *)&mode10_changeable[1]; /* * First get the data protection page changeable parameters mask. * We need to know which parameters the drive supports changing. * We also need to know what the drive claims that its page length * is. The reason is that IBM drives in particular are very picky * about the page length. They want it (the length set in the * page structure itself) to be 28 bytes, and they want the * parameter list length specified in the mode select header to be * 40 bytes. 
So, to work with IBM drives as well as any other tape * drive, find out what the drive claims the page length is, and * make sure that we match that. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, ¤t_speed, NULL, NULL, NULL, NULL, dp_changeable, dp_size, /*prot_changeable*/ 1); if (error != 0) goto bailout; if (scsi_2btoul(dp_changeable->length) > dp_page_length) { dp_page_length = scsi_2btoul(dp_changeable->length); dp_size = dp_page_length + __offsetof(struct scsi_control_data_prot_subpage, prot_method); free(mode10_changeable, M_SCSISA); mode10_changeable = NULL; goto retry_length; } mode10_hdr = malloc(dp_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode10_hdr == NULL) { error = ENOMEM; goto bailout; } dp_page = (struct scsi_control_data_prot_subpage *)&mode10_hdr[1]; /* * Now grab the actual current settings in the page. */ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, ¤t_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, /*prot_changeable*/ 0); if (error != 0) goto bailout; /* These two fields need to be 0 for MODE SELECT */ scsi_ulto2b(0, mode10_hdr->data_length); mode10_hdr->medium_type = 0; /* We are not including a block descriptor */ scsi_ulto2b(0, mode10_hdr->blk_desc_len); mode10_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode10_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } /* * For each field, make sure that the drive allows changing it * before bringing in the user's setting. 
*/ if (dp_changeable->prot_method != 0) dp_page->prot_method = new_prot->prot_method; if (dp_changeable->pi_length & SA_CTRL_DP_PI_LENGTH_MASK) { dp_page->pi_length &= ~SA_CTRL_DP_PI_LENGTH_MASK; dp_page->pi_length |= (new_prot->pi_length & SA_CTRL_DP_PI_LENGTH_MASK); } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_W) { if (new_prot->lbp_w) dp_page->prot_bits |= SA_CTRL_DP_LBP_W; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_W; } if (dp_changeable->prot_bits & SA_CTRL_DP_LBP_R) { if (new_prot->lbp_r) dp_page->prot_bits |= SA_CTRL_DP_LBP_R; else dp_page->prot_bits &= ~SA_CTRL_DP_LBP_R; } if (dp_changeable->prot_bits & SA_CTRL_DP_RBDP) { if (new_prot->rbdp) dp_page->prot_bits |= SA_CTRL_DP_RBDP; else dp_page->prot_bits &= ~SA_CTRL_DP_RBDP; } ccb = cam_periph_getccb(periph, 1); scsi_mode_select_len(&ccb->csio, /*retries*/ 5, /*cbfcnp*/ sadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*scsi_page_fmt*/ TRUE, /*save_pages*/ FALSE, /*param_buf*/ (uint8_t *)mode10_hdr, /*param_len*/ dp_len, /*minimum_cmd_size*/ 10, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); if (error != 0) goto bailout; if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = EINVAL; goto bailout; } /* * The operation was successful. We could just copy the settings * the user requested, but just in case the drive ignored some of * our settings, let's ask for status again. 
*/ error = sagetparams(periph, SA_PARAM_SPEED | SA_PARAM_LBP, NULL, NULL, NULL, &buff_mode, NULL, ¤t_speed, NULL, NULL, NULL, NULL, dp_page, dp_size, 0); bailout: if (ccb != NULL) xpt_release_ccb(ccb); free(mode10_hdr, M_SCSISA); free(mode10_changeable, M_SCSISA); return (error); } /* * The purpose of this function is to set one of four different parameters * for a tape drive: * - blocksize * - density * - compression / compression algorithm * - buffering mode * * The assumption is that this will be called from saioctl(), and therefore * from a process context. Thus the waiting malloc calls below. If that * assumption ever changes, the malloc calls should be changed to be * NOWAIT mallocs. * * Any or all of the four parameters may be set when this function is * called. It should handle setting more than one parameter at once. */ static int sasetparams(struct cam_periph *periph, sa_params params_to_set, u_int32_t blocksize, u_int8_t density, u_int32_t calg, u_int32_t sense_flags) { struct sa_softc *softc; u_int32_t current_blocksize; u_int32_t current_calg; u_int8_t current_density; u_int8_t current_speed; int comp_enabled, comp_supported; void *mode_buffer; int mode_buffer_len; struct scsi_mode_header_6 *mode_hdr; struct scsi_mode_blk_desc *mode_blk; sa_comp_t *ccomp, *cpage; int buff_mode; union ccb *ccb = NULL; int error; softc = (struct sa_softc *)periph->softc; ccomp = malloc(sizeof (sa_comp_t), M_SCSISA, M_NOWAIT); if (ccomp == NULL) return (ENOMEM); /* * Since it doesn't make sense to set the number of blocks, or * write protection, we won't try to get the current value. We * always want to get the blocksize, so we can set it back to the * proper value. 
*/ error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE | SA_PARAM_SPEED, ¤t_blocksize, ¤t_density, NULL, &buff_mode, NULL, ¤t_speed, &comp_supported, &comp_enabled, ¤t_calg, ccomp, NULL, 0, 0); if (error != 0) { free(ccomp, M_SCSISA); return (error); } mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); if (params_to_set & SA_PARAM_COMPRESSION) mode_buffer_len += sizeof (sa_comp_t); mode_buffer = malloc(mode_buffer_len, M_SCSISA, M_NOWAIT | M_ZERO); if (mode_buffer == NULL) { free(ccomp, M_SCSISA); return (ENOMEM); } mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; ccb = cam_periph_getccb(periph, 1); retry: if (params_to_set & SA_PARAM_COMPRESSION) { if (mode_blk) { cpage = (sa_comp_t *)&mode_blk[1]; } else { cpage = (sa_comp_t *)&mode_hdr[1]; } bcopy(ccomp, cpage, sizeof (sa_comp_t)); cpage->hdr.pagecode &= ~0x80; } else cpage = NULL; /* * If the caller wants us to set the blocksize, use the one they * pass in. Otherwise, use the blocksize we got back from the * mode select above. */ if (mode_blk) { if (params_to_set & SA_PARAM_BLOCKSIZE) scsi_ulto3b(blocksize, mode_blk->blklen); else scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } } /* * For mode selects, these two fields must be zero. 
*/ mode_hdr->data_length = 0; mode_hdr->medium_type = 0; /* set the speed to the current value */ mode_hdr->dev_spec = current_speed; /* if set, set single-initiator buffering mode */ if (softc->buffer_mode == SMH_SA_BUF_MODE_SIBUF) { mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; } if (mode_blk) mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc); else mode_hdr->blk_desc_len = 0; /* * First, if the user wants us to set the compression algorithm or * just turn compression on, check to make sure that this drive * supports compression. */ if (params_to_set & SA_PARAM_COMPRESSION) { /* * If the compression algorithm is 0, disable compression. * If the compression algorithm is non-zero, enable * compression and set the compression type to the * specified compression algorithm, unless the algorithm is * MT_COMP_ENABLE. In that case, we look at the * compression algorithm that is currently set and if it is * non-zero, we leave it as-is. If it is zero, and we have * saved a compression algorithm from a time when * compression was enabled before, set the compression to * the saved value. */ switch (ccomp->hdr.pagecode & ~0x80) { case SA_DEVICE_CONFIGURATION_PAGE: { struct scsi_dev_conf_page *dcp = &cpage->dconf; if (calg == 0) { dcp->sel_comp_alg = SA_COMP_NONE; break; } if (calg != MT_COMP_ENABLE) { dcp->sel_comp_alg = calg; } else if (dcp->sel_comp_alg == SA_COMP_NONE && softc->saved_comp_algorithm != 0) { dcp->sel_comp_alg = softc->saved_comp_algorithm; } break; } case SA_DATA_COMPRESSION_PAGE: if (ccomp->dcomp.dce_and_dcc & SA_DCP_DCC) { struct scsi_data_compression_page *dcp = &cpage->dcomp; if (calg == 0) { /* * Disable compression, but leave the * decompression and the capability bit * alone. */ dcp->dce_and_dcc = SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; break; } /* enable compression && decompression */ dcp->dce_and_dcc = SA_DCP_DCE | SA_DCP_DCC; dcp->dde_and_red |= SA_DCP_DDE; /* * If there, use compression algorithm from caller. 
* Otherwise, if there's a saved compression algorithm * and there is no current algorithm, use the saved * algorithm. Else parrot back what we got and hope * for the best. */ if (calg != MT_COMP_ENABLE) { scsi_ulto4b(calg, dcp->comp_algorithm); scsi_ulto4b(calg, dcp->decomp_algorithm); } else if (scsi_4btoul(dcp->comp_algorithm) == 0 && softc->saved_comp_algorithm != 0) { scsi_ulto4b(softc->saved_comp_algorithm, dcp->comp_algorithm); scsi_ulto4b(softc->saved_comp_algorithm, dcp->decomp_algorithm); } break; } /* * Compression does not appear to be supported- * at least via the DATA COMPRESSION page. It * would be too much to ask us to believe that * the page itself is supported, but incorrectly * reports an ability to manipulate data compression, * so we'll assume that this device doesn't support * compression. We can just fall through for that. */ /* FALLTHROUGH */ default: /* * The drive doesn't seem to support compression, * so turn off the set compression bit. */ params_to_set &= ~SA_PARAM_COMPRESSION; xpt_print(periph->path, "device does not seem to support compression\n"); /* * If that was the only thing the user wanted us to set, * clean up allocated resources and return with * 'operation not supported'. */ if (params_to_set == SA_PARAM_NONE) { free(mode_buffer, M_SCSISA); xpt_release_ccb(ccb); return (ENODEV); } /* * That wasn't the only thing the user wanted us to set. * So, decrease the stated mode buffer length by the * size of the compression mode page. */ mode_buffer_len -= sizeof(sa_comp_t); } } /* It is safe to retry this operation */ scsi_mode_select(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, (params_to_set & SA_PARAM_COMPRESSION)? 
TRUE : FALSE, FALSE, mode_buffer, mode_buffer_len, SSD_FULL_SIZE, SCSIOP_TIMEOUT); error = cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) { int idx; char *xyz = mode_buffer; xpt_print_path(periph->path); printf("Err%d, Mode Select Data=", error); for (idx = 0; idx < mode_buffer_len; idx++) printf(" 0x%02x", xyz[idx] & 0xff); printf("\n"); } if (error) { /* * If we can, try without setting density/blocksize. */ if (mode_blk) { if ((params_to_set & (SA_PARAM_DENSITY|SA_PARAM_BLOCKSIZE)) == 0) { mode_blk = NULL; goto retry; } } else { mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; cpage = (sa_comp_t *)&mode_blk[1]; } /* * If we were setting the blocksize, and that failed, we * want to set it to its original value. If we weren't * setting the blocksize, we don't want to change it. */ scsi_ulto3b(current_blocksize, mode_blk->blklen); /* * Set density if requested, else preserve old density. * SCSI_SAME_DENSITY only applies to SCSI-2 or better * devices, else density we've latched up in our softc. */ if (params_to_set & SA_PARAM_DENSITY) { mode_blk->density = current_density; } else if (softc->scsi_rev > SCSI_REV_CCS) { mode_blk->density = SCSI_SAME_DENSITY; } else { mode_blk->density = softc->media_density; } if (params_to_set & SA_PARAM_COMPRESSION) bcopy(ccomp, cpage, sizeof (sa_comp_t)); /* * The retry count is the only CCB field that might have been * changed that we care about, so reset it back to 1. */ ccb->ccb_h.retry_count = 1; cam_periph_runccb(ccb, saerror, 0, sense_flags, softc->device_stats); } xpt_release_ccb(ccb); if (ccomp != NULL) free(ccomp, M_SCSISA); if (params_to_set & SA_PARAM_COMPRESSION) { if (error) { softc->flags &= ~SA_FLAG_COMP_ENABLED; /* * Even if we get an error setting compression, * do not say that we don't support it. We could * have been wrong, or it may be media specific. 
* softc->flags &= ~SA_FLAG_COMP_SUPP; */ softc->saved_comp_algorithm = softc->comp_algorithm; softc->comp_algorithm = 0; } else { softc->flags |= SA_FLAG_COMP_ENABLED; softc->comp_algorithm = calg; } } free(mode_buffer, M_SCSISA); return (error); } static int saextget(struct cdev *dev, struct cam_periph *periph, struct sbuf *sb, struct mtextget *g) { int indent, error; char tmpstr[80]; struct sa_softc *softc; int tmpint; uint32_t maxio_tmp; struct ccb_getdev cgd; softc = (struct sa_softc *)periph->softc; error = 0; error = sagetparams_common(dev, periph); if (error) goto extget_bailout; if (!SA_IS_CTRL(dev) && !softc->open_pending_mount) sagetpos(periph); indent = 0; SASBADDNODE(sb, indent, mtextget); /* * Basic CAM peripheral information. */ SASBADDVARSTR(sb, indent, periph->periph_name, %s, periph_name, strlen(periph->periph_name) + 1); SASBADDUINT(sb, indent, periph->unit_number, %u, unit_number); xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if ((cgd.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { g->status = MT_EXT_GET_ERROR; snprintf(g->error_str, sizeof(g->error_str), "Error %#x returned for XPT_GDEV_TYPE CCB", cgd.ccb_h.status); goto extget_bailout; } cam_strvis(tmpstr, cgd.inq_data.vendor, sizeof(cgd.inq_data.vendor), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, vendor, sizeof(cgd.inq_data.vendor) + 1, "SCSI Vendor ID"); cam_strvis(tmpstr, cgd.inq_data.product, sizeof(cgd.inq_data.product), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, product, sizeof(cgd.inq_data.product) + 1, "SCSI Product ID"); cam_strvis(tmpstr, cgd.inq_data.revision, sizeof(cgd.inq_data.revision), sizeof(tmpstr)); SASBADDVARSTRDESC(sb, indent, tmpstr, %s, revision, sizeof(cgd.inq_data.revision) + 1, "SCSI Revision"); if (cgd.serial_num_len > 0) { char *tmpstr2; size_t ts2_len; int ts2_malloc; ts2_len = 0; if (cgd.serial_num_len > sizeof(tmpstr)) { ts2_len = 
cgd.serial_num_len + 1;
			ts2_malloc = 1;
			tmpstr2 = malloc(ts2_len, M_SCSISA,
			    M_WAITOK | M_ZERO);
		} else {
			ts2_len = sizeof(tmpstr);
			ts2_malloc = 0;
			tmpstr2 = tmpstr;
		}

		cam_strvis(tmpstr2, cgd.serial_num, cgd.serial_num_len,
		    ts2_len);
		SASBADDVARSTRDESC(sb, indent, tmpstr2, %s, serial_num,
		    (ssize_t)cgd.serial_num_len + 1, "Serial Number");
		if (ts2_malloc != 0)
			free(tmpstr2, M_SCSISA);
	} else {
		/*
		 * We return a serial_num element in any case, but it will
		 * be empty if the device has no serial number.
		 */
		tmpstr[0] = '\0';
		SASBADDVARSTRDESC(sb, indent, tmpstr, %s, serial_num,
		    (ssize_t)0, "Serial Number");
	}

	SASBADDUINTDESC(sb, indent, softc->maxio, %u, maxio,
	    "Maximum I/O size allowed by driver and controller");
	SASBADDUINTDESC(sb, indent, softc->cpi_maxio, %u, cpi_maxio,
	    "Maximum I/O size reported by controller");
	SASBADDUINTDESC(sb, indent, softc->max_blk, %u, max_blk,
	    "Maximum block size supported by tape drive and media");
	SASBADDUINTDESC(sb, indent, softc->min_blk, %u, min_blk,
	    "Minimum block size supported by tape drive and media");
	SASBADDUINTDESC(sb, indent, softc->blk_gran, %u, blk_gran,
	    "Block granularity supported by tape drive and media");

	/* Effective limit is the smaller of drive/media and HBA limits. */
	maxio_tmp = min(softc->max_blk, softc->maxio);

	SASBADDUINTDESC(sb, indent, maxio_tmp, %u, max_effective_iosize,
	    "Maximum possible I/O size");

	SASBADDINTDESC(sb, indent, softc->flags & SA_FLAG_FIXED ? 1 : 0, %d,
	    fixed_mode, "Set to 1 for fixed block mode, 0 for variable block");

	/*
	 * XXX KDM include SIM, bus, target, LUN?
	 */
	if (softc->flags & SA_FLAG_COMP_UNSUPP)
		tmpint = 0;
	else
		tmpint = 1;
	SASBADDINTDESC(sb, indent, tmpint, %d, compression_supported,
	    "Set to 1 if compression is supported, 0 if not");
	if (softc->flags & SA_FLAG_COMP_ENABLED)
		tmpint = 1;
	else
		tmpint = 0;
	SASBADDINTDESC(sb, indent, tmpint, %d, compression_enabled,
	    "Set to 1 if compression is enabled, 0 if not");
	SASBADDUINTDESC(sb, indent, softc->comp_algorithm, %u,
	    compression_algorithm, "Numeric compression algorithm");

	safillprot(softc, &indent, sb);

	SASBADDUINTDESC(sb, indent, softc->media_blksize, %u,
	    media_blocksize, "Block size reported by drive or set by user");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->fileno, %jd,
	    calculated_fileno, "Calculated file number, -1 if unknown");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->blkno, %jd,
	    calculated_rel_blkno, "Calculated block number relative to file, "
	    "set to -1 if unknown");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_fileno, %jd,
	    reported_fileno, "File number reported by drive, -1 if unknown");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->rep_blkno, %jd,
	    reported_blkno, "Block number relative to BOP/BOT reported by "
	    "drive, -1 if unknown");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->partition, %jd,
	    partition, "Current partition number, 0 is the default");
	SASBADDINTDESC(sb, indent, softc->bop, %d, bop,
	    "Set to 1 if drive is at the beginning of partition/tape, 0 if "
	    "not, -1 if unknown");
	SASBADDINTDESC(sb, indent, softc->eop, %d, eop,
	    "Set to 1 if drive is past early warning, 0 if not, -1 if unknown");
	SASBADDINTDESC(sb, indent, softc->bpew, %d, bpew,
	    "Set to 1 if drive is past programmable early warning, 0 if not, "
	    "-1 if unknown");
	SASBADDINTDESC(sb, indent, (intmax_t)softc->last_io_resid, %jd,
	    residual, "Residual for the last I/O");
	/*
	 * XXX KDM should we send a string with the current driver
	 * status already decoded instead of a numeric value?
	 */
	SASBADDINTDESC(sb, indent, softc->dsreg, %d, dsreg,
	    "Current state of the driver");

	safilldensitysb(softc, &indent, sb);

	SASBENDNODE(sb, indent, mtextget);

extget_bailout:
	return (error);
}

/*
 * Emit the driver parameter (mtparamget) XML node into the supplied sbuf.
 */
static int
saparamget(struct sa_softc *softc, struct sbuf *sb)
{
	int indent;

	indent = 0;
	SASBADDNODE(sb, indent, mtparamget);
	SASBADDINTDESC(sb, indent, softc->sili, %d, sili,
	    "Suppress an error on underlength variable reads");
	SASBADDINTDESC(sb, indent, softc->eot_warn, %d, eot_warn,
	    "Return an error to warn that end of tape is approaching");
	safillprot(softc, &indent, sb);
	SASBENDNODE(sb, indent, mtparamget);

	return (0);
}

/*
 * Issue PREVENT/ALLOW MEDIUM REMOVAL and track the resulting lock state
 * in softc (SA_FLAG_TAPE_LOCKED).  No-op if already in the desired state.
 */
static void
saprevent(struct cam_periph *periph, int action)
{
	struct sa_softc *softc;
	union ccb *ccb;
	int error, sf;

	softc = (struct sa_softc *)periph->softc;
	if ((action == PR_ALLOW)
	 && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0)
		return;
	if ((action == PR_PREVENT)
	 && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0)
		return;

	/*
	 * We can be quiet about illegal requests.
	 */
	if (CAM_DEBUGGED(periph->path, CAM_DEBUG_INFO)) {
		sf = 0;
	} else
		sf = SF_QUIET_IR;

	ccb = cam_periph_getccb(periph, 1);

	/* It is safe to retry this operation */
	scsi_prevent(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, action,
	    SSD_FULL_SIZE, SCSIOP_TIMEOUT);

	error = cam_periph_runccb(ccb, saerror, 0, sf, softc->device_stats);
	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~SA_FLAG_TAPE_LOCKED;
		else
			softc->flags |= SA_FLAG_TAPE_LOCKED;
	}

	xpt_release_ccb(ccb);
}

/*
 * Rewind the tape.  On success the calculated position counters are reset
 * to the beginning of the media; on failure they become unknown (-1).
 */
static int
sarewind(struct cam_periph *periph)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error;

	softc = (struct sa_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, 1);

	/* It is safe to retry this operation */
	scsi_rewind(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG, FALSE,
	    SSD_FULL_SIZE, REWIND_TIMEOUT);

	softc->dsreg = MTIO_DSREG_REW;
	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;

	xpt_release_ccb(ccb);
	if (error == 0) {
		softc->partition = softc->fileno = softc->blkno =
(daddr_t) 0;
		softc->rep_fileno = softc->rep_blkno = (daddr_t) 0;
	} else {
		softc->fileno = softc->blkno = (daddr_t) -1;
		softc->partition = (daddr_t) -1;
		softc->rep_fileno = softc->rep_blkno = (daddr_t) -1;
	}
	return (error);
}

/*
 * Space by blocks, filemarks, setmarks or to end-of-data, then update the
 * calculated position from the command residual.  Not retryable.
 */
static int
saspace(struct cam_periph *periph, int count, scsi_space_code code)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error;

	softc = (struct sa_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, 1);

	/* This cannot be retried */
	scsi_space(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG, code,
	    count, SSD_FULL_SIZE, SPACE_TIMEOUT);

	/*
	 * Clear residual because we will be using it.
	 */
	softc->last_ctl_resid = 0;

	softc->dsreg = (count < 0) ? MTIO_DSREG_REV : MTIO_DSREG_FWD;
	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;

	xpt_release_ccb(ccb);

	/*
	 * If a spacing operation has failed, we need to invalidate
	 * this mount.
	 *
	 * If the spacing operation was setmarks or to end of recorded data,
	 * we no longer know our relative position.
	 *
	 * If the spacing operations was spacing files in reverse, we
	 * take account of the residual, but still check against less
	 * than zero- if we've gone negative, we must have hit BOT.
	 *
	 * If the spacing operations was spacing records in reverse and
	 * we have a residual, we've either hit BOT or hit a filemark.
	 * In the former case, we know our new record number (0).  In
	 * the latter case, we have absolutely no idea what the real
	 * record number is- we've stopped between the end of the last
	 * record in the previous file and the filemark that stopped
	 * our spacing backwards.
	 */
	if (error) {
		softc->fileno = softc->blkno = (daddr_t) -1;
		softc->rep_blkno = softc->partition = (daddr_t) -1;
		softc->rep_fileno = (daddr_t) -1;
	} else if (code == SS_SETMARKS || code == SS_EOD) {
		softc->fileno = softc->blkno = (daddr_t) -1;
	} else if (code == SS_FILEMARKS && softc->fileno != (daddr_t) -1) {
		softc->fileno += (count - softc->last_ctl_resid);
		if (softc->fileno < 0)	/* we must of hit BOT */
			softc->fileno = 0;
		softc->blkno = 0;
	} else if (code == SS_BLOCKS && softc->blkno != (daddr_t) -1) {
		softc->blkno += (count - softc->last_ctl_resid);
		if (count < 0) {
			if (softc->last_ctl_resid || softc->blkno < 0) {
				if (softc->fileno == 0) {
					softc->blkno = 0;
				} else {
					softc->blkno = (daddr_t) -1;
				}
			}
		}
	}
	if (error == 0)
		sagetpos(periph);

	return (error);
}

/*
 * Write filemarks (or setmarks), accounting for the residual to learn how
 * many actually made it to the media, and update calculated position.
 */
static int
sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks, int immed)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error, nwm = 0;

	softc = (struct sa_softc *)periph->softc;
	if (softc->open_rdonly)
		return (EBADF);

	ccb = cam_periph_getccb(periph, 1);
	/*
	 * Clear residual because we will be using it.
	 */
	softc->last_ctl_resid = 0;

	softc->dsreg = MTIO_DSREG_FMK;
	/* this *must* not be retried */
	scsi_write_filemarks(&ccb->csio, 0, sadone, MSG_SIMPLE_Q_TAG,
	    immed, setmarks, nmarks, SSD_FULL_SIZE, IO_TIMEOUT);
	softc->dsreg = MTIO_DSREG_REST;

	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);

	if (error == 0 && nmarks) {
		struct sa_softc *softc = (struct sa_softc *)periph->softc;

		/* Marks actually written = requested minus residual. */
		nwm = nmarks - softc->last_ctl_resid;
		softc->filemarks += nwm;
	}

	xpt_release_ccb(ccb);

	/*
	 * Update relative positions (if we're doing that).
	 */
	if (error) {
		softc->fileno = softc->blkno = softc->partition = (daddr_t) -1;
	} else if (softc->fileno != (daddr_t) -1) {
		softc->fileno += nwm;
		softc->blkno = 0;
	}

	/*
	 * Ask the tape drive for position information.
*/
	sagetpos(periph);

	/*
	 * If we got valid position information, since we just wrote a file
	 * mark, we know we're at the file mark and block 0 after that
	 * filemark.
	 */
	if (softc->rep_fileno != (daddr_t) -1) {
		softc->fileno = softc->rep_fileno;
		softc->blkno = 0;
	}

	return (error);
}

/*
 * Query the drive's position via READ POSITION (long form) and cache the
 * reported file/block/partition numbers and BOP/EOP/BPEW flags in softc.
 * Drives that reject the long form get SA_QUIRK_NO_LONG_POS latched so we
 * don't keep asking.
 */
static int
sagetpos(struct cam_periph *periph)
{
	union ccb *ccb;
	struct scsi_tape_position_long_data long_pos;
	struct sa_softc *softc = (struct sa_softc *)periph->softc;
	int error;

	if (softc->quirks & SA_QUIRK_NO_LONG_POS) {
		softc->rep_fileno = (daddr_t) -1;
		softc->rep_blkno = (daddr_t) -1;
		softc->bop = softc->eop = softc->bpew = -1;
		return (EOPNOTSUPP);
	}

	bzero(&long_pos, sizeof(long_pos));

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
	scsi_read_position_10(&ccb->csio,
			      /*retries*/ 1,
			      /*cbfcnp*/ sadone,
			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
			      /*service_action*/ SA_RPOS_LONG_FORM,
			      /*data_ptr*/ (uint8_t *)&long_pos,
			      /*length*/ sizeof(long_pos),
			      /*sense_len*/ SSD_FULL_SIZE,
			      /*timeout*/ SCSIOP_TIMEOUT);

	softc->dsreg = MTIO_DSREG_RBSY;
	error = cam_periph_runccb(ccb, saerror, 0, SF_QUIET_IR,
	    softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;

	if (error == 0) {
		if (long_pos.flags & SA_RPOS_LONG_MPU) {
			/*
			 * If the drive doesn't know what file mark it is
			 * on, our calculated filemark isn't going to be
			 * accurate either.
			 */
			softc->fileno = (daddr_t) -1;
			softc->rep_fileno = (daddr_t) -1;
		} else {
			softc->fileno = softc->rep_fileno =
			    scsi_8btou64(long_pos.logical_file_num);
		}

		if (long_pos.flags & SA_RPOS_LONG_LONU) {
			softc->partition = (daddr_t) -1;
			softc->rep_blkno = (daddr_t) -1;
			/*
			 * If the tape drive doesn't know its block
			 * position, we can't claim to know it either.
			 */
			softc->blkno = (daddr_t) -1;
		} else {
			softc->partition = scsi_4btoul(long_pos.partition);
			softc->rep_blkno =
			    scsi_8btou64(long_pos.logical_object_num);
		}

		if (long_pos.flags & SA_RPOS_LONG_BOP)
			softc->bop = 1;
		else
			softc->bop = 0;

		if (long_pos.flags & SA_RPOS_LONG_EOP)
			softc->eop = 1;
		else
			softc->eop = 0;

		if (long_pos.flags & SA_RPOS_LONG_BPEW)
			softc->bpew = 1;
		else
			softc->bpew = 0;
	} else if (error == EINVAL) {
		/*
		 * If this drive returned an invalid-request type error,
		 * then it likely doesn't support the long form report.
		 */
		softc->quirks |= SA_QUIRK_NO_LONG_POS;
	}

	if (error != 0) {
		softc->rep_fileno = softc->rep_blkno = (daddr_t) -1;
		softc->partition = (daddr_t) -1;
		softc->bop = softc->eop = softc->bpew = -1;
	}

	xpt_release_ccb(ccb);

	return (error);
}

/*
 * Report the current block position via READ POSITION (short form) into
 * *blkptr; 'hard' selects hardware vs. logical block position.
 */
static int
sardpos(struct cam_periph *periph, int hard, u_int32_t *blkptr)
{
	struct scsi_tape_position_data loc;
	union ccb *ccb;
	struct sa_softc *softc = (struct sa_softc *)periph->softc;
	int error;

	/*
	 * We try and flush any buffered writes here if we were writing
	 * and we're trying to get hardware block position.  It eats
	 * up performance substantially, but I'm wary of drive firmware.
	 *
	 * I think that *logical* block position is probably okay-
	 * but hardware block position might have to wait for data
	 * to hit media to be valid.  Caveat Emptor.
*/
	if (hard && (softc->flags & SA_FLAG_TAPE_WRITTEN)) {
		/* Flush buffered writes with a zero-count WRITE FILEMARKS. */
		error = sawritefilemarks(periph, 0, 0, 0);
		if (error && error != EACCES)
			return (error);
	}

	ccb = cam_periph_getccb(periph, 1);

	scsi_read_position(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG,
	    hard, &loc, SSD_FULL_SIZE, SCSIOP_TIMEOUT);

	softc->dsreg = MTIO_DSREG_RBSY;
	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;

	if (error == 0) {
		if (loc.flags & SA_RPOS_UNCERTAIN) {
			error = EINVAL;		/* nothing is certain */
		} else {
			*blkptr = scsi_4btoul(loc.firstblk);
		}
	}

	xpt_release_ccb(ccb);
	return (error);
}

/*
 * Position the tape per the mtlocate request, choosing between LOCATE(10)
 * and LOCATE(16) as needed, then refresh position state from the drive.
 */
static int
sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int locate16;
	int immed, cp;
	int error;

	/*
	 * We used to try and flush any buffered writes here.
	 * Now we push this onto user applications to either
	 * flush the pending writes themselves (via a zero count
	 * WRITE FILEMARKS command) or they can trust their tape
	 * drive to do this correctly for them.
	 */

	softc = (struct sa_softc *)periph->softc;
	ccb = cam_periph_getccb(periph, 1);

	cp = locate_info->flags & MT_LOCATE_FLAG_CHANGE_PART ? 1 : 0;
	immed = locate_info->flags & MT_LOCATE_FLAG_IMMED ? 1 : 0;

	/*
	 * Determine whether we have to use LOCATE or LOCATE16.  The hard
	 * bit is only possible with LOCATE, but the new ioctls do not
	 * allow setting that bit.  So we can't get into the situation of
	 * having the hard bit set with a block address that is larger than
	 * 32-bits.
	 */
	if (hard != 0)
		locate16 = 0;
	else if ((locate_info->dest_type != MT_LOCATE_DEST_OBJECT)
	      || (locate_info->block_address_mode != MT_LOCATE_BAM_IMPLICIT)
	      || (locate_info->logical_id > SA_SPOS_MAX_BLK))
		locate16 = 1;
	else
		locate16 = 0;

	if (locate16 != 0) {
		scsi_locate_16(&ccb->csio,
			       /*retries*/ 1,
			       /*cbfcnp*/ sadone,
			       /*tag_action*/ MSG_SIMPLE_Q_TAG,
			       /*immed*/ immed,
			       /*cp*/ cp,
			       /*dest_type*/ locate_info->dest_type,
			       /*bam*/ locate_info->block_address_mode,
			       /*partition*/ locate_info->partition,
			       /*logical_id*/ locate_info->logical_id,
			       /*sense_len*/ SSD_FULL_SIZE,
			       /*timeout*/ SPACE_TIMEOUT);
	} else {
		uint32_t blk_pointer;

		blk_pointer = locate_info->logical_id;

		scsi_locate_10(&ccb->csio,
			       /*retries*/ 1,
			       /*cbfcnp*/ sadone,
			       /*tag_action*/ MSG_SIMPLE_Q_TAG,
			       /*immed*/ immed,
			       /*cp*/ cp,
			       /*hard*/ hard,
			       /*partition*/ locate_info->partition,
			       /*block_address*/ locate_info->logical_id,
			       /*sense_len*/ SSD_FULL_SIZE,
			       /*timeout*/ SPACE_TIMEOUT);
	}

	softc->dsreg = MTIO_DSREG_POS;
	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;
	xpt_release_ccb(ccb);

	/*
	 * We assume the calculated file and block numbers are unknown
	 * unless we have enough information to populate them.
	 */
	softc->fileno = softc->blkno = (daddr_t) -1;

	/*
	 * If the user requested changing the partition and the request
	 * succeeded, note the partition.
	 */
	if ((error == 0)
	 && (cp != 0))
		softc->partition = locate_info->partition;
	else
		softc->partition = (daddr_t) -1;

	if (error == 0) {
		switch (locate_info->dest_type) {
		case MT_LOCATE_DEST_FILE:
			/*
			 * This is the only case where we can reliably
			 * calculate the file and block numbers.
			 */
			softc->fileno = locate_info->logical_id;
			softc->blkno = 0;
			break;
		case MT_LOCATE_DEST_OBJECT:
		case MT_LOCATE_DEST_SET:
		case MT_LOCATE_DEST_EOD:
		default:
			break;
		}
	}

	/*
	 * Ask the drive for current position information.
	 */
	sagetpos(periph);

	return (error);
}

/*
 * Run a retension pass over the media via scsi_load_unload().
 */
static int
saretension(struct cam_periph *periph)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error;

	softc = (struct sa_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, 1);

	/* It is safe to retry this operation */
	scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE,
	    FALSE, TRUE, TRUE, SSD_FULL_SIZE, ERASE_TIMEOUT);

	softc->dsreg = MTIO_DSREG_TEN;
	error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;

	xpt_release_ccb(ccb);
	if (error == 0) {
		softc->partition = softc->fileno = softc->blkno = (daddr_t) 0;
		sagetpos(periph);
	} else
		softc->partition = softc->fileno = softc->blkno = (daddr_t) -1;
	return (error);
}

/*
 * Issue RESERVE or RELEASE UNIT.  Drives that reject the command with
 * Illegal Request simply don't support it; that is not treated as an error.
 */
static int
sareservereleaseunit(struct cam_periph *periph, int reserve)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error;

	softc = (struct sa_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, 1);

	/* It is safe to retry this operation */
	scsi_reserve_release_unit(&ccb->csio, 2, sadone, MSG_SIMPLE_Q_TAG,
	    FALSE, 0, SSD_FULL_SIZE, SCSIOP_TIMEOUT, reserve);
	softc->dsreg = MTIO_DSREG_RBSY;
	error = cam_periph_runccb(ccb, saerror, 0,
	    SF_RETRY_UA | SF_NO_PRINT, softc->device_stats);
	softc->dsreg = MTIO_DSREG_REST;
	xpt_release_ccb(ccb);

	/*
	 * If the error was Illegal Request, then the device doesn't support
	 * RESERVE/RELEASE. This is not an error.
	 */
	if (error == EINVAL) {
		error = 0;
	}

	return (error);
}

/*
 * Load or unload the media and update the calculated position accordingly.
 */
static int
saloadunload(struct cam_periph *periph, int load)
{
	union ccb *ccb;
	struct sa_softc *softc;
	int error;

	softc = (struct sa_softc *)periph->softc;

	ccb = cam_periph_getccb(periph, 1);

	/* It is safe to retry this operation */
	scsi_load_unload(&ccb->csio, 5, sadone, MSG_SIMPLE_Q_TAG, FALSE,
	    FALSE, FALSE, load, SSD_FULL_SIZE, REWIND_TIMEOUT);

	softc->dsreg = (load) ?
MTIO_DSREG_LD : MTIO_DSREG_UNL; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); if (error || load == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) -1; softc->rep_fileno = softc->rep_blkno = (daddr_t) -1; } else if (error == 0) { softc->partition = softc->fileno = softc->blkno = (daddr_t) 0; sagetpos(periph); } return (error); } static int saerase(struct cam_periph *periph, int longerase) { union ccb *ccb; struct sa_softc *softc; int error; softc = (struct sa_softc *)periph->softc; if (softc->open_rdonly) return (EBADF); ccb = cam_periph_getccb(periph, 1); scsi_erase(&ccb->csio, 1, sadone, MSG_SIMPLE_Q_TAG, FALSE, longerase, SSD_FULL_SIZE, ERASE_TIMEOUT); softc->dsreg = MTIO_DSREG_ZER; error = cam_periph_runccb(ccb, saerror, 0, 0, softc->device_stats); softc->dsreg = MTIO_DSREG_REST; xpt_release_ccb(ccb); return (error); } /* * Fill an sbuf with density data in XML format. This particular macro * works for multi-byte integer fields. * * Note that 1 byte fields aren't supported here. The reason is that the * compiler does not evaluate the sizeof(), and assumes that any of the * sizes are possible for a given field. So passing in a multi-byte * field will result in a warning that the assignment makes an integer * from a pointer without a cast, if there is an assignment in the 1 byte * case. 
*/
/*
 * NOTE: this macro may only be expanded inside a loop -- on a short
 * descriptor it consumes the remaining bytes and executes "continue"
 * in the caller's scope.  It also mutates the caller's desc_remain,
 * len_to_go and cur_offset bookkeeping variables.
 */
#define	SAFILLDENSSB(dens_data, sb, indent, field, desc_remain,		\
	    len_to_go, cur_offset, desc){				\
	size_t cur_field_len;						\
									\
	cur_field_len = sizeof(dens_data->field);			\
	if (desc_remain < cur_field_len) {				\
		len_to_go -= desc_remain;				\
		cur_offset += desc_remain;				\
		continue;						\
	}								\
	len_to_go -= cur_field_len;					\
	cur_offset += cur_field_len;					\
	desc_remain -= cur_field_len;					\
									\
	switch (sizeof(dens_data->field)) {				\
	case 1:								\
		KASSERT(1 == 0, ("Programmer error, invalid 1 byte "	\
		    "field width for SAFILLDENSFIELD"));		\
		break;							\
	case 2:								\
		SASBADDUINTDESC(sb, indent,				\
		    scsi_2btoul(dens_data->field), %u, field, desc);	\
		break;							\
	case 3:								\
		SASBADDUINTDESC(sb, indent,				\
		    scsi_3btoul(dens_data->field), %u, field, desc);	\
		break;							\
	case 4:								\
		SASBADDUINTDESC(sb, indent,				\
		    scsi_4btoul(dens_data->field), %u, field, desc);	\
		break;							\
	case 8:								\
		SASBADDUINTDESC(sb, indent,				\
		    (uintmax_t)scsi_8btou64(dens_data->field), %ju,	\
		    field, desc);					\
		break;							\
	default:							\
		break;							\
	}								\
};

/*
 * Fill an sbuf with density data in XML format. This particular macro
 * works for strings.  Like SAFILLDENSSB it must be expanded inside a
 * loop ("continue" on short descriptors) and updates the caller's
 * bookkeeping variables.
 */
#define	SAFILLDENSSBSTR(dens_data, sb, indent, field, desc_remain,	\
	    len_to_go, cur_offset, desc){				\
	size_t cur_field_len;						\
	char tmpstr[32];						\
									\
	cur_field_len = sizeof(dens_data->field);			\
	if (desc_remain < cur_field_len) {				\
		len_to_go -= desc_remain;				\
		cur_offset += desc_remain;				\
		continue;						\
	}								\
	len_to_go -= cur_field_len;					\
	cur_offset += cur_field_len;					\
	desc_remain -= cur_field_len;					\
									\
	cam_strvis(tmpstr, dens_data->field,				\
	    sizeof(dens_data->field), sizeof(tmpstr));			\
	SASBADDVARSTRDESC(sb, indent, tmpstr, %s, field,		\
	    strlen(tmpstr) + 1, desc);					\
};

/*
 * Fill an sbuf with density data descriptors.
 *
 * buf holds a REPORT DENSITY SUPPORT payload of buf_len bytes;
 * is_density selects between density descriptors and medium type
 * descriptors (the two report formats differ in layout).  Each
 * descriptor is emitted as a <density_entry> XML node, truncated
 * fields being skipped via the continue in the SAFILLDENSSB* macros.
 */
static void
safilldenstypesb(struct sbuf *sb, int *indent, uint8_t *buf, int buf_len,
    int is_density)
{
	struct scsi_density_hdr *hdr;
	uint32_t hdr_len;
	int len_to_go, cur_offset;
	int length_offset;
	int num_reports, need_close;

	/*
	 * We need at least the header length.  Note that this isn't an
	 * error, not all tape drives will have every data type.
	 */
	if (buf_len < sizeof(*hdr))
		goto bailout;

	hdr = (struct scsi_density_hdr *)buf;
	hdr_len = scsi_2btoul(hdr->length);
	len_to_go = min(buf_len - sizeof(*hdr), hdr_len);
	if (is_density) {
		length_offset = __offsetof(struct scsi_density_data,
		    bits_per_mm);
	} else {
		length_offset = __offsetof(struct scsi_medium_type_data,
		    num_density_codes);
	}
	cur_offset = sizeof(*hdr);

	num_reports = 0;
	need_close = 0;

	while (len_to_go > length_offset) {
		struct scsi_density_data *dens_data;
		struct scsi_medium_type_data *type_data;
		int desc_remain;
		size_t cur_field_len;

		dens_data = NULL;
		type_data = NULL;
		if (is_density) {
			dens_data =(struct scsi_density_data *)&buf[cur_offset];
			/* SDD_DLV set means the descriptor carries its own length. */
			if (dens_data->byte2 & SDD_DLV)
				desc_remain = scsi_2btoul(dens_data->length);
			else
				desc_remain = SDD_DEFAULT_LENGTH -
				    length_offset;
		} else {
			type_data = (struct scsi_medium_type_data *)
			    &buf[cur_offset];
			desc_remain = scsi_2btoul(type_data->length);
		}

		len_to_go -= length_offset;
		/* Never trust a descriptor length beyond the buffer. */
		desc_remain = min(desc_remain, len_to_go);
		cur_offset += length_offset;

		if (need_close != 0) {
			SASBENDNODE(sb, *indent, density_entry);
		}

		SASBADDNODENUM(sb, *indent, density_entry, num_reports);
		num_reports++;
		need_close = 1;

		if (is_density) {
			SASBADDUINTDESC(sb, *indent,
			    dens_data->primary_density_code, %u,
			    primary_density_code, "Primary Density Code");
			SASBADDUINTDESC(sb, *indent,
			    dens_data->secondary_density_code, %u,
			    secondary_density_code, "Secondary Density Code");
			SASBADDUINTDESC(sb, *indent,
			    dens_data->byte2 & ~SDD_DLV, %#x, density_flags,
			    "Density Flags");
			SAFILLDENSSB(dens_data, sb, *indent, bits_per_mm,
			    desc_remain, len_to_go, cur_offset, "Bits per mm");
			SAFILLDENSSB(dens_data, sb, *indent, media_width,
			    desc_remain, len_to_go, cur_offset, "Media width");
			SAFILLDENSSB(dens_data, sb, *indent, tracks,
			    desc_remain, len_to_go, cur_offset,
			    "Number of Tracks");
			SAFILLDENSSB(dens_data, sb, *indent, capacity,
			    desc_remain, len_to_go, cur_offset, "Capacity");
			SAFILLDENSSBSTR(dens_data, sb, *indent, assigning_org,
			    desc_remain, len_to_go, cur_offset,
			    "Assigning Organization");
			SAFILLDENSSBSTR(dens_data, sb, *indent, density_name,
			    desc_remain, len_to_go, cur_offset,
			    "Density Name");
			SAFILLDENSSBSTR(dens_data, sb, *indent, description,
			    desc_remain, len_to_go, cur_offset, "Description");
		} else {
			int i;

			SASBADDUINTDESC(sb, *indent, type_data->medium_type,
			    %u, medium_type, "Medium Type");
			/*
			 * The density code count is a 1 byte field the
			 * macros cannot handle; account for it by hand.
			 */
			cur_field_len =
			    __offsetof(struct scsi_medium_type_data,
			    media_width) -
			    __offsetof(struct scsi_medium_type_data,
			    num_density_codes);
			if (desc_remain < cur_field_len) {
				len_to_go -= desc_remain;
				cur_offset += desc_remain;
				continue;
			}
			len_to_go -= cur_field_len;
			cur_offset += cur_field_len;
			desc_remain -= cur_field_len;
			SASBADDINTDESC(sb, *indent,
			    type_data->num_density_codes, %d,
			    num_density_codes, "Number of Density Codes");
			SASBADDNODE(sb, *indent, density_code_list);
			for (i = 0; i < type_data->num_density_codes; i++) {
				SASBADDUINTDESC(sb, *indent,
				    type_data->primary_density_codes[i], %u,
				    density_code, "Density Code");
			}
			SASBENDNODE(sb, *indent, density_code_list);
			SAFILLDENSSB(type_data, sb, *indent, media_width,
			    desc_remain, len_to_go, cur_offset,
			    "Media width");
			SAFILLDENSSB(type_data, sb, *indent, medium_length,
			    desc_remain, len_to_go, cur_offset,
			    "Medium length");
			/*
			 * Account for the two reserved bytes.
			 */
			cur_field_len = sizeof(type_data->reserved2);
			if (desc_remain < cur_field_len) {
				len_to_go -= desc_remain;
				cur_offset += desc_remain;
				continue;
			}
			len_to_go -= cur_field_len;
			cur_offset += cur_field_len;
			desc_remain -= cur_field_len;

			SAFILLDENSSBSTR(type_data, sb, *indent, assigning_org,
			    desc_remain, len_to_go, cur_offset,
			    "Assigning Organization");
			SAFILLDENSSBSTR(type_data, sb, *indent,
			    medium_type_name, desc_remain, len_to_go,
			    cur_offset, "Medium type name");
			SAFILLDENSSBSTR(type_data, sb, *indent, description,
			    desc_remain, len_to_go, cur_offset,
			    "Description");
		}
	}
	if (need_close != 0) {
		SASBENDNODE(sb, *indent, density_entry);
	}

bailout:
	return;
}

/*
 * Fill an sbuf with density data information.  Walks the cached
 * REPORT DENSITY SUPPORT responses in the softc and emits one
 * <density_report> node per valid cached report inside <mtdensity>.
 */
static void
safilldensitysb(struct sa_softc *softc, int *indent, struct sbuf *sb)
{
	int i, is_density;

	SASBADDNODE(sb, *indent, mtdensity);
	SASBADDUINTDESC(sb, *indent, softc->media_density, %u, media_density,
	    "Current Medium Density");
	is_density = 0;
	for (i = 0; i < SA_DENSITY_TYPES; i++) {
		int tmpint;

		if (softc->density_info_valid[i] == 0)
			continue;

		SASBADDNODE(sb, *indent, density_report);
		if (softc->density_type_bits[i] & SRDS_MEDIUM_TYPE) {
			tmpint = 1;
			is_density = 0;
		} else {
			tmpint = 0;
			is_density = 1;
		}
		SASBADDINTDESC(sb, *indent, tmpint, %d, medium_type_report,
		    "Medium type report");

		if (softc->density_type_bits[i] & SRDS_MEDIA)
			tmpint = 1;
		else
			tmpint = 0;
		SASBADDINTDESC(sb, *indent, tmpint, %d, media_report,
		    "Media report");

		safilldenstypesb(sb, indent, softc->density_info[i],
		    softc->density_info_valid[i], is_density);
		SASBENDNODE(sb, *indent, density_report);
	}
	SASBENDNODE(sb, *indent, mtdensity);
}

#endif /* _KERNEL */

/*
 * Read tape block limits command.
*/
void
scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries,
		   void (*cbfcnp)(struct cam_periph *, union ccb *),
		   u_int8_t tag_action,
		   struct scsi_read_block_limits_data *rlimit_buf,
		   u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_read_block_limits *scsi_cmd;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action,
	    (u_int8_t *)rlimit_buf, sizeof(*rlimit_buf), sense_len,
	    sizeof(*scsi_cmd), timeout);

	scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = READ_BLOCK_LIMITS;
}

/*
 * Build a sequential-access READ(6)/WRITE(6) CDB.  The direction comes
 * from readop (SCSI_RW_DIRMASK); SLI is honored for reads only, and the
 * fixed bit selects fixed-block mode.  SCSI_RW_BIO in readop marks the
 * data pointer as a struct bio rather than a flat buffer.
 */
void
scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries,
		   void (*cbfcnp)(struct cam_periph *, union ccb *),
		   u_int8_t tag_action, int readop, int sli,
		   int fixed, u_int32_t length, u_int8_t *data_ptr,
		   u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_sa_rw *scsi_cmd;
	int read;

	read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ;

	scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = read ? SA_READ : SA_WRITE;
	scsi_cmd->sli_fixed = 0;
	if (sli && read)
		scsi_cmd->sli_fixed |= SAR_SLI;
	if (fixed)
		scsi_cmd->sli_fixed |= SARW_FIXED;
	scsi_ulto3b(length, scsi_cmd->length);
	scsi_cmd->control = 0;

	cam_fill_csio(csio, retries, cbfcnp,
	    (read ? CAM_DIR_IN : CAM_DIR_OUT) |
	    ((readop & SCSI_RW_BIO) != 0 ? CAM_DATA_BIO : 0),
	    tag_action, data_ptr, dxfer_len, sense_len,
	    sizeof(*scsi_cmd), timeout);
}

/*
 * Build a LOAD UNLOAD CDB; the immediate/eot/reten/load flags map
 * directly onto the corresponding CDB bits.
 */
void
scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, int immediate, int eot,
		 int reten, int load, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_load_unload *scsi_cmd;

	scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = LOAD_UNLOAD;
	if (immediate)
		scsi_cmd->immediate = SLU_IMMED;
	if (eot)
		scsi_cmd->eot_reten_load |= SLU_EOT;
	if (reten)
		scsi_cmd->eot_reten_load |= SLU_RETEN;
	if (load)
		scsi_cmd->eot_reten_load |= SLU_LOAD;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * Build a REWIND CDB, optionally with the immediate bit set.
 */
void
scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries,
	    void (*cbfcnp)(struct cam_periph *, union ccb *),
	    u_int8_t tag_action, int immediate, u_int8_t sense_len,
	    u_int32_t timeout)
{
	struct scsi_rewind *scsi_cmd;

	scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = REWIND;
	if (immediate)
		scsi_cmd->immediate = SREW_IMMED;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * Build a SPACE CDB; code selects what to space over (blocks,
 * filemarks, etc.) and count how many (3-byte field).
 */
void
scsi_space(struct ccb_scsiio *csio, u_int32_t retries,
	   void (*cbfcnp)(struct cam_periph *, union ccb *),
	   u_int8_t tag_action, scsi_space_code code,
	   u_int32_t count, u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_space *scsi_cmd;

	scsi_cmd = (struct scsi_space *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = SPACE;
	scsi_cmd->code = code;
	scsi_ulto3b(count, scsi_cmd->count);
	scsi_cmd->control = 0;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * Build a WRITE FILEMARKS CDB; setmark writes setmarks instead of
 * filemarks when the drive supports them.
 */
void
scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries,
		     void (*cbfcnp)(struct cam_periph *, union ccb *),
		     u_int8_t tag_action, int immediate, int setmark,
		     u_int32_t num_marks, u_int8_t sense_len,
		     u_int32_t timeout)
{
	struct scsi_write_filemarks *scsi_cmd;

	scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = WRITE_FILEMARKS;
	if (immediate)
		scsi_cmd->byte2 |= SWFMRK_IMMED;
	if (setmark)
		scsi_cmd->byte2 |= SWFMRK_WSMK;

	scsi_ulto3b(num_marks, scsi_cmd->num_marks);

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * The reserve and release unit commands differ only by their opcodes.
 */
void
scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries,
			  void (*cbfcnp)(struct cam_periph *, union ccb *),
			  u_int8_t tag_action, int third_party,
			  int third_party_id, u_int8_t sense_len,
			  u_int32_t timeout, int reserve)
{
	struct scsi_reserve_release_unit *scsi_cmd;

	scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	if (reserve)
		scsi_cmd->opcode = RESERVE_UNIT;
	else
		scsi_cmd->opcode = RELEASE_UNIT;

	if (third_party) {
		scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY;
		scsi_cmd->lun_thirdparty |=
			((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK);
	}

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * Build an ERASE CDB; long_erase requests erasure to end of medium.
 */
void
scsi_erase(struct ccb_scsiio *csio, u_int32_t retries,
	   void (*cbfcnp)(struct cam_periph *, union ccb *),
	   u_int8_t tag_action, int immediate, int long_erase,
	   u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_erase *scsi_cmd;

	scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = ERASE;
	if (immediate)
		scsi_cmd->lun_imm_long |= SE_IMMED;
	if (long_erase)
		scsi_cmd->lun_imm_long |= SE_LONG;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    NULL, 0, sense_len, sizeof(*scsi_cmd), timeout);
}

/*
 * Read Tape Position command.
*/
void
scsi_read_position(struct ccb_scsiio *csio, u_int32_t retries,
		   void (*cbfcnp)(struct cam_periph *, union ccb *),
		   u_int8_t tag_action, int hardsoft,
		   struct scsi_tape_position_data *sbp,
		   u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_tape_read_position *scmd;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action,
	    (u_int8_t *)sbp, sizeof (*sbp), sense_len, sizeof(*scmd), timeout);
	scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes;
	bzero(scmd, sizeof(*scmd));
	scmd->opcode = READ_POSITION;
	/* hardsoft selects hardware vs. logical block position reporting. */
	scmd->byte1 = hardsoft;
}

/*
 * Read Tape Position command.  10-byte form with an explicit service
 * action (short, long or extended report).
 */
void
scsi_read_position_10(struct ccb_scsiio *csio, u_int32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      u_int8_t tag_action, int service_action,
		      u_int8_t *data_ptr, u_int32_t length,
		      u_int32_t sense_len, u_int32_t timeout)
{
	struct scsi_tape_read_position *scmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/data_ptr,
		      /*dxfer_len*/length,
		      sense_len,
		      sizeof(*scmd),
		      timeout);


	scmd = (struct scsi_tape_read_position *)&csio->cdb_io.cdb_bytes;
	bzero(scmd, sizeof(*scmd));
	scmd->opcode = READ_POSITION;
	scmd->byte1 = service_action;
	/*
	 * The length is only currently set (as of SSC4r03) if the extended
	 * form is specified.  The other forms have fixed lengths.
	 */
	if (service_action == SA_RPOS_EXTENDED_FORM)
		scsi_ulto2b(length, scmd->length);
}

/*
 * Set Tape Position command.
*/
void
scsi_set_position(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, int hardsoft, u_int32_t blkno,
		 u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_tape_locate *scmd;

	cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action,
	    (u_int8_t *)NULL, 0, sense_len, sizeof(*scmd), timeout);
	scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes;
	bzero(scmd, sizeof(*scmd));
	scmd->opcode = LOCATE;
	if (hardsoft)
		scmd->byte1 |= SA_SPOS_BT;
	scsi_ulto4b(blkno, scmd->blkaddr);
}

/*
 * XXX KDM figure out how to make a compatibility function.
 */
void
scsi_locate_10(struct ccb_scsiio *csio, u_int32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *),
	       u_int8_t tag_action, int immed, int cp, int hard,
	       int64_t partition, u_int32_t block_address,
	       int sense_len, u_int32_t timeout)
{
	struct scsi_tape_locate *scmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      CAM_DIR_NONE,
		      tag_action,
		      /*data_ptr*/ NULL,
		      /*dxfer_len*/ 0,
		      sense_len,
		      sizeof(*scmd),
		      timeout);
	scmd = (struct scsi_tape_locate *)&csio->cdb_io.cdb_bytes;
	bzero(scmd, sizeof(*scmd));
	scmd->opcode = LOCATE;
	if (immed)
		scmd->byte1 |= SA_SPOS_IMMED;
	if (cp)
		scmd->byte1 |= SA_SPOS_CP;
	if (hard)
		scmd->byte1 |= SA_SPOS_BT;
	scsi_ulto4b(block_address, scmd->blkaddr);
	scmd->partition = partition;
}

/*
 * Build a LOCATE(16) CDB; dest_type selects what logical_id addresses
 * (block, filemark, etc.), bam selects the block address mode.
 */
void
scsi_locate_16(struct ccb_scsiio *csio, u_int32_t retries,
	       void (*cbfcnp)(struct cam_periph *, union ccb *),
	       u_int8_t tag_action, int immed, int cp, u_int8_t dest_type,
	       int bam, int64_t partition, u_int64_t logical_id,
	       int sense_len, u_int32_t timeout)
{
	struct scsi_locate_16 *scsi_cmd;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_NONE,
		      tag_action,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);

	scsi_cmd = (struct scsi_locate_16 *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = LOCATE_16;
	if (immed)
		scsi_cmd->byte1 |= SA_LC_IMMEDIATE;
	if (cp)
		scsi_cmd->byte1 |= SA_LC_CP;
	scsi_cmd->byte1 |= (dest_type << SA_LC_DEST_TYPE_SHIFT);
	scsi_cmd->byte2 |= bam;
	scsi_cmd->partition = partition;
	scsi_u64to8b(logical_id, scsi_cmd->logical_id);
}

/*
 * Build a REPORT DENSITY SUPPORT CDB; the media and medium_type flags
 * select which of the four report variants the drive returns.
 */
void
scsi_report_density_support(struct ccb_scsiio *csio, u_int32_t retries,
			    void (*cbfcnp)(struct cam_periph *, union ccb *),
			    u_int8_t tag_action, int media, int medium_type,
			    u_int8_t *data_ptr, u_int32_t length,
			    u_int32_t sense_len, u_int32_t timeout)
{
	struct scsi_report_density_support *scsi_cmd;

	scsi_cmd =(struct scsi_report_density_support *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = REPORT_DENSITY_SUPPORT;
	if (media != 0)
		scsi_cmd->byte1 |= SRDS_MEDIA;
	if (medium_type != 0)
		scsi_cmd->byte1 |= SRDS_MEDIUM_TYPE;

	scsi_ulto2b(length, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_IN,
		      tag_action,
		      /*data_ptr*/data_ptr,
		      /*dxfer_len*/length,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a SET CAPACITY CDB; proportion scales the usable capacity of
 * the medium.
 */
void
scsi_set_capacity(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  u_int8_t tag_action, int byte1, u_int32_t proportion,
		  u_int32_t sense_len, u_int32_t timeout)
{
	struct scsi_set_capacity *scsi_cmd;

	scsi_cmd = (struct scsi_set_capacity *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = SET_CAPACITY;

	scsi_cmd->byte1 = byte1;
	scsi_ulto2b(proportion, scsi_cmd->cap_proportion);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_NONE,
		      tag_action,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

/*
 * Build a FORMAT MEDIUM CDB; the transfer direction depends on whether
 * a parameter list is supplied (dxfer_len > 0).
 */
void
scsi_format_medium(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  u_int8_t tag_action, int byte1, int byte2,
		  u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t sense_len,
		  u_int32_t timeout)
{
	struct scsi_format_medium *scsi_cmd;

	scsi_cmd = (struct scsi_format_medium*)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));

	scsi_cmd->opcode = FORMAT_MEDIUM;

	scsi_cmd->byte1 = byte1;
	scsi_cmd->byte2 = byte2;

	scsi_ulto2b(dxfer_len,
scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/(dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_allow_overwrite(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int allow_overwrite, int partition, u_int64_t logical_id, u_int32_t sense_len, u_int32_t timeout) { struct scsi_allow_overwrite *scsi_cmd; scsi_cmd = (struct scsi_allow_overwrite *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = ALLOW_OVERWRITE; scsi_cmd->allow_overwrite = allow_overwrite; scsi_cmd->partition = partition; scsi_u64to8b(logical_id, scsi_cmd->logical_id); cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/ NULL, /*dxfer_len*/ 0, sense_len, sizeof(*scsi_cmd), timeout); } Index: head/sys/cam/scsi/scsi_sg.c =================================================================== --- head/sys/cam/scsi/scsi_sg.c (revision 293349) +++ head/sys/cam/scsi/scsi_sg.c (revision 293350) @@ -1,1015 +1,1018 @@ /*- * Copyright (c) 2007 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * scsi_sg peripheral driver. This driver is meant to implement the Linux * SG passthrough interface for SCSI. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include typedef enum { SG_FLAG_LOCKED = 0x01, SG_FLAG_INVALID = 0x02 } sg_flags; typedef enum { SG_STATE_NORMAL } sg_state; typedef enum { SG_RDWR_FREE, SG_RDWR_INPROG, SG_RDWR_DONE } sg_rdwr_state; typedef enum { SG_CCB_RDWR_IO } sg_ccb_types; #define ccb_type ppriv_field0 #define ccb_rdwr ppriv_ptr1 struct sg_rdwr { TAILQ_ENTRY(sg_rdwr) rdwr_link; int tag; int state; int buf_len; char *buf; union ccb *ccb; union { struct sg_header hdr; struct sg_io_hdr io_hdr; } hdr; }; struct sg_softc { sg_state state; sg_flags flags; int open_count; u_int maxio; struct devstat *device_stats; TAILQ_HEAD(, sg_rdwr) rdwr_done; struct cdev *dev; int sg_timeout; int sg_user_timeout; uint8_t pd_type; union ccb saved_ccb; }; static d_open_t sgopen; static d_close_t sgclose; static d_ioctl_t sgioctl; static d_write_t sgwrite; static d_read_t sgread; static periph_init_t sginit; static periph_ctor_t sgregister; static periph_oninv_t sgoninvalidate; static periph_dtor_t sgcleanup; static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg); static void sgdone(struct cam_periph *periph, union ccb 
*done_ccb); static int sgsendccb(struct cam_periph *periph, union ccb *ccb); static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb); static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags); static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat); static int scsi_group_len(u_char cmd); static struct periph_driver sgdriver = { sginit, "sg", TAILQ_HEAD_INITIALIZER(sgdriver.units), /* gen */ 0 }; PERIPHDRIVER_DECLARE(sg, sgdriver); static struct cdevsw sg_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT | D_TRACKCLOSE, .d_open = sgopen, .d_close = sgclose, .d_ioctl = sgioctl, .d_write = sgwrite, .d_read = sgread, .d_name = "sg", }; static int sg_version = 30125; static void sginit(void) { cam_status status; /* * Install a global async callback. This callback will receive aync * callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, sgasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("sg: Failed to attach master async callbac " "due to status 0x%x!\n", status); } } static void sgdevgonecb(void *arg) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; int i; periph = (struct cam_periph *)arg; mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = (struct sg_softc *)periph->softc; KASSERT(softc->open_count >= 0, ("Negative open count %d", softc->open_count)); /* * When we get this callback, we will get no more close calls from * devfs. So if we have any dangling opens, we need to release the * reference held for that particular context. */ for (i = 0; i < softc->open_count; i++) cam_periph_release_locked(periph); softc->open_count = 0; /* * Release the reference held for the device node, it is gone now. */ cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the final call to * cam_periph_release_locked() above could result in the periph * getting freed. 
If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. */ mtx_unlock(mtx); } static void sgoninvalidate(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; /* * Deregister any async callbacks. */ xpt_register_async(0, sgasync, periph, periph->path); softc->flags |= SG_FLAG_INVALID; /* * Tell devfs this device has gone away, and ask for a callback * when it has cleaned up its state. */ destroy_dev_sched_cb(softc->dev, sgdevgonecb, periph); /* * XXX Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ } static void sgcleanup(struct cam_periph *periph) { struct sg_softc *softc; softc = (struct sg_softc *)periph->softc; devstat_remove_entry(softc->device_stats); free(softc, M_DEVBUF); } static void sgasync(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; /* * Allocate a peripheral instance for this device and * start the probe process. */ status = cam_periph_alloc(sgregister, sgoninvalidate, sgcleanup, NULL, "sg", CAM_PERIPH_BIO, path, sgasync, AC_FOUND_DEVICE, cgd); if ((status != CAM_REQ_CMP) && (status != CAM_REQ_INPROG)) { const struct cam_status_entry *entry; entry = cam_fetch_status_entry(status); printf("sgasync: Unable to attach new device " "due to status %#x: %s\n", status, entry ? 
entry->status_text : "Unknown"); } break; } default: cam_periph_async(periph, code, path, arg); break; } } static cam_status sgregister(struct cam_periph *periph, void *arg) { struct sg_softc *softc; struct ccb_getdev *cgd; struct ccb_pathinq cpi; - int no_tags; + struct make_dev_args args; + int no_tags, error; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("sgregister: no getdev CCB, can't register device\n"); return (CAM_REQ_CMP_ERR); } softc = malloc(sizeof(*softc), M_DEVBUF, M_ZERO | M_NOWAIT); if (softc == NULL) { printf("sgregister: Unable to allocate softc\n"); return (CAM_REQ_CMP_ERR); } softc->state = SG_STATE_NORMAL; softc->pd_type = SID_TYPE(&cgd->inq_data); softc->sg_timeout = SG_DEFAULT_TIMEOUT / SG_DEFAULT_HZ * hz; softc->sg_user_timeout = SG_DEFAULT_TIMEOUT; TAILQ_INIT(&softc->rdwr_done); periph->softc = softc; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; /* real value */ /* * We pass in 0 for all blocksize, since we don't know what the * blocksize of the device is, if it even has a blocksize. */ cam_periph_unlock(periph); no_tags = (cgd->inq_data.flags & SID_CmdQue) == 0; softc->device_stats = devstat_new_entry("sg", periph->unit_number, 0, DEVSTAT_NO_BLOCKSIZE | (no_tags ? DEVSTAT_NO_ORDERED_TAGS : 0), softc->pd_type | XPORT_DEVSTAT_TYPE(cpi.transport) | DEVSTAT_TYPE_PASS, DEVSTAT_PRIORITY_PASS); /* * Acquire a reference to the periph before we create the devfs * instance for it. We'll release this reference once the devfs * instance has been freed. 
*/ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } /* Register the device */ - softc->dev = make_dev(&sg_cdevsw, periph->unit_number, - UID_ROOT, GID_OPERATOR, 0600, "%s%d", - periph->periph_name, periph->unit_number); + make_dev_args_init(&args); + args.mda_devsw = &sg_cdevsw; + args.mda_unit = periph->unit_number; + args.mda_uid = UID_ROOT; + args.mda_gid = GID_OPERATOR; + args.mda_mode = 0600; + args.mda_si_drv1 = periph; + error = make_dev_s(&args, &softc->dev, "%s%d", + periph->periph_name, periph->unit_number); + if (error != 0) { + cam_periph_lock(periph); + cam_periph_release_locked(periph); + return (CAM_REQ_CMP_ERR); + } if (periph->unit_number < 26) { (void)make_dev_alias(softc->dev, "sg%c", periph->unit_number + 'a'); } else { (void)make_dev_alias(softc->dev, "sg%c%c", ((periph->unit_number / 26) - 1) + 'a', (periph->unit_number % 26) + 'a'); } cam_periph_lock(periph); - softc->dev->si_drv1 = periph; /* * Add as async callback so that we get * notified if this device goes away. */ xpt_register_async(AC_LOST_DEVICE, sgasync, periph, periph->path); if (bootverbose) xpt_announce_periph(periph, NULL); return (CAM_REQ_CMP); } static void sgdone(struct cam_periph *periph, union ccb *done_ccb) { struct sg_softc *softc; struct ccb_scsiio *csio; softc = (struct sg_softc *)periph->softc; csio = &done_ccb->csio; switch (csio->ccb_h.ccb_type) { case SG_CCB_RDWR_IO: { struct sg_rdwr *rdwr; int state; devstat_end_transaction(softc->device_stats, csio->dxfer_len, csio->tag_action & 0xf, ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ? DEVSTAT_NO_DATA : (csio->ccb_h.flags & CAM_DIR_OUT) ? 
DEVSTAT_WRITE : DEVSTAT_READ, NULL, NULL); rdwr = done_ccb->ccb_h.ccb_rdwr; state = rdwr->state; rdwr->state = SG_RDWR_DONE; wakeup(rdwr); break; } default: panic("unknown sg CCB type"); } } static int sgopen(struct cdev *dev, int flags, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; int error = 0; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - if (cam_periph_acquire(periph) != CAM_REQ_CMP) return (ENXIO); /* * Don't allow access when we're running at a high securelevel. */ error = securelevel_gt(td->td_ucred, 1); if (error) { cam_periph_release(periph); return (error); } cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; if (softc->flags & SG_FLAG_INVALID) { cam_periph_release_locked(periph); cam_periph_unlock(periph); return (ENXIO); } softc->open_count++; cam_periph_unlock(periph); return (error); } static int sgclose(struct cdev *dev, int flag, int fmt, struct thread *td) { struct cam_periph *periph; struct sg_softc *softc; struct mtx *mtx; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); mtx = cam_periph_mtx(periph); mtx_lock(mtx); softc = periph->softc; softc->open_count--; cam_periph_release_locked(periph); /* * We reference the lock directly here, instead of using * cam_periph_unlock(). The reason is that the call to * cam_periph_release_locked() above could result in the periph * getting freed. If that is the case, dereferencing the periph * with a cam_periph_unlock() call would cause a page fault. * * cam_periph_release() avoids this problem using the same method, * but we're manually acquiring and dropping the lock here to * protect the open count and avoid another lock acquisition and * release. 
*/ mtx_unlock(mtx); return (0); } static int sgioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union ccb *ccb; struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *softc; struct sg_io_hdr *req; int dir, error; periph = (struct cam_periph *)dev->si_drv1; - if (periph == NULL) - return (ENXIO); - cam_periph_lock(periph); softc = (struct sg_softc *)periph->softc; error = 0; switch (cmd) { case SG_GET_VERSION_NUM: { int *version = (int *)arg; *version = sg_version; break; } case SG_SET_TIMEOUT: { u_int user_timeout = *(u_int *)arg; softc->sg_user_timeout = user_timeout; softc->sg_timeout = user_timeout / SG_DEFAULT_HZ * hz; break; } case SG_GET_TIMEOUT: /* * The value is returned directly to the syscall. */ td->td_retval[0] = softc->sg_user_timeout; error = 0; break; case SG_IO: req = (struct sg_io_hdr *)arg; if (req->cmd_len > IOCDBLEN) { error = EINVAL; break; } if (req->iovec_count != 0) { error = EOPNOTSUPP; break; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); csio = &ccb->csio; error = copyin(req->cmdp, &csio->cdb_io.cdb_bytes, req->cmd_len); if (error) { xpt_release_ccb(ccb); break; } switch(req->dxfer_direction) { case SG_DXFER_TO_DEV: dir = CAM_DIR_OUT; break; case SG_DXFER_FROM_DEV: dir = CAM_DIR_IN; break; case SG_DXFER_TO_FROM_DEV: dir = CAM_DIR_IN | CAM_DIR_OUT; break; case SG_DXFER_NONE: default: dir = CAM_DIR_NONE; break; } cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, req->dxferp, req->dxfer_len, req->mx_sb_len, req->cmd_len, req->timeout); error = sgsendccb(periph, ccb); if (error) { req->host_status = DID_ERROR; req->driver_status = DRIVER_INVALID; xpt_release_ccb(ccb); break; } req->status = csio->scsi_status; req->masked_status = (csio->scsi_status >> 1) & 0x7f; sg_scsiio_status(csio, &req->host_status, &req->driver_status); req->resid = csio->resid; req->duration = csio->ccb_h.timeout; req->info = 0; if ((csio->ccb_h.status & CAM_AUTOSNS_VALID) && (req->sbp 
!= NULL)) { req->sb_len_wr = req->mx_sb_len - csio->sense_resid; error = copyout(&csio->sense_data, req->sbp, req->sb_len_wr); } xpt_release_ccb(ccb); break; case SG_GET_RESERVED_SIZE: { int *size = (int *)arg; *size = DFLTPHYS; break; } case SG_GET_SCSI_ID: { struct sg_scsi_id *id = (struct sg_scsi_id *)arg; id->host_no = cam_sim_path(xpt_path_sim(periph->path)); id->channel = xpt_path_path_id(periph->path); id->scsi_id = xpt_path_target_id(periph->path); id->lun = xpt_path_lun_id(periph->path); id->scsi_type = softc->pd_type; id->h_cmd_per_lun = 1; id->d_queue_depth = 1; id->unused[0] = 0; id->unused[1] = 0; break; } case SG_GET_SG_TABLESIZE: { int *size = (int *)arg; *size = 0; break; } case SG_EMULATED_HOST: case SG_SET_TRANSFORM: case SG_GET_TRANSFORM: case SG_GET_NUM_WAITING: case SG_SCSI_RESET: case SG_GET_REQUEST_TABLE: case SG_SET_KEEP_ORPHAN: case SG_GET_KEEP_ORPHAN: case SG_GET_ACCESS_COUNT: case SG_SET_FORCE_LOW_DMA: case SG_GET_LOW_DMA: case SG_SET_FORCE_PACK_ID: case SG_GET_PACK_ID: case SG_SET_RESERVED_SIZE: case SG_GET_COMMAND_Q: case SG_SET_COMMAND_Q: case SG_SET_DEBUG: case SG_NEXT_CMD_LEN: default: #ifdef CAMDEBUG printf("sgioctl: rejecting cmd 0x%lx\n", cmd); #endif error = ENODEV; break; } cam_periph_unlock(periph); return (error); } static int sgwrite(struct cdev *dev, struct uio *uio, int ioflag) { union ccb *ccb; struct cam_periph *periph; struct ccb_scsiio *csio; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_char cdb_cmd; char *buf; int error = 0, cdb_len, buf_len, dir; periph = dev->si_drv1; rdwr = malloc(sizeof(*rdwr), M_DEVBUF, M_WAITOK | M_ZERO); hdr = &rdwr->hdr.hdr; /* Copy in the header block and sanity check it */ if (uio->uio_resid < sizeof(*hdr)) { error = EINVAL; goto out_hdr; } error = uiomove(hdr, sizeof(*hdr), uio); if (error) goto out_hdr; /* XXX: We don't support SG 3.x read/write API. 
*/ if (hdr->reply_len < 0) { error = ENODEV; goto out_hdr; } ccb = xpt_alloc_ccb(); if (ccb == NULL) { error = ENOMEM; goto out_hdr; } csio = &ccb->csio; /* * Copy in the CDB block. The designers of the interface didn't * bother to provide a size for this in the header, so we have to * figure it out ourselves. */ if (uio->uio_resid < 1) goto out_ccb; error = uiomove(&cdb_cmd, 1, uio); if (error) goto out_ccb; if (hdr->twelve_byte) cdb_len = 12; else cdb_len = scsi_group_len(cdb_cmd); /* * We've already read the first byte of the CDB and advanced the uio * pointer. Just read the rest. */ csio->cdb_io.cdb_bytes[0] = cdb_cmd; error = uiomove(&csio->cdb_io.cdb_bytes[1], cdb_len - 1, uio); if (error) goto out_ccb; /* * Now set up the data block. Again, the designers didn't bother * to make this reliable. */ buf_len = uio->uio_resid; if (buf_len != 0) { buf = malloc(buf_len, M_DEVBUF, M_WAITOK | M_ZERO); error = uiomove(buf, buf_len, uio); if (error) goto out_buf; dir = CAM_DIR_OUT; } else if (hdr->reply_len != 0) { buf = malloc(hdr->reply_len, M_DEVBUF, M_WAITOK | M_ZERO); buf_len = hdr->reply_len; dir = CAM_DIR_IN; } else { buf = NULL; buf_len = 0; dir = CAM_DIR_NONE; } cam_periph_lock(periph); sc = periph->softc; xpt_setup_ccb(&ccb->ccb_h, periph->path, CAM_PRIORITY_NORMAL); cam_fill_csio(csio, /*retries*/1, sgdone, dir|CAM_DEV_QFRZDIS, MSG_SIMPLE_Q_TAG, buf, buf_len, SG_MAX_SENSE, cdb_len, sc->sg_timeout); /* * Send off the command and hope that it works. This path does not * go through sgstart because the I/O is supposed to be asynchronous. 
*/ rdwr->buf = buf; rdwr->buf_len = buf_len; rdwr->tag = hdr->pack_id; rdwr->ccb = ccb; rdwr->state = SG_RDWR_INPROG; ccb->ccb_h.ccb_rdwr = rdwr; ccb->ccb_h.ccb_type = SG_CCB_RDWR_IO; TAILQ_INSERT_TAIL(&sc->rdwr_done, rdwr, rdwr_link); error = sgsendrdwr(periph, ccb); cam_periph_unlock(periph); return (error); out_buf: free(buf, M_DEVBUF); out_ccb: xpt_free_ccb(ccb); out_hdr: free(rdwr, M_DEVBUF); return (error); } static int sgread(struct cdev *dev, struct uio *uio, int ioflag) { struct ccb_scsiio *csio; struct cam_periph *periph; struct sg_softc *sc; struct sg_header *hdr; struct sg_rdwr *rdwr; u_short hstat, dstat; int error, pack_len, reply_len, pack_id; periph = dev->si_drv1; /* XXX The pack len field needs to be updated and written out instead * of discarded. Not sure how to do that. */ uio->uio_rw = UIO_WRITE; if ((error = uiomove(&pack_len, 4, uio)) != 0) return (error); if ((error = uiomove(&reply_len, 4, uio)) != 0) return (error); if ((error = uiomove(&pack_id, 4, uio)) != 0) return (error); uio->uio_rw = UIO_READ; cam_periph_lock(periph); sc = periph->softc; search: TAILQ_FOREACH(rdwr, &sc->rdwr_done, rdwr_link) { if (rdwr->tag == pack_id) break; } if ((rdwr == NULL) || (rdwr->state != SG_RDWR_DONE)) { if (cam_periph_sleep(periph, rdwr, PCATCH, "sgread", 0) == ERESTART) return (EAGAIN); goto search; } TAILQ_REMOVE(&sc->rdwr_done, rdwr, rdwr_link); cam_periph_unlock(periph); hdr = &rdwr->hdr.hdr; csio = &rdwr->ccb->csio; sg_scsiio_status(csio, &hstat, &dstat); hdr->host_status = hstat; hdr->driver_status = dstat; hdr->target_status = csio->scsi_status >> 1; switch (hstat) { case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: case DID_ERROR: default: hdr->result = EIO; break; } if (dstat == DRIVER_SENSE) { bcopy(&csio->sense_data, hdr->sense_buffer, 
min(csio->sense_len, SG_MAX_SENSE)); #ifdef CAMDEBUG scsi_sense_print(csio); #endif } error = uiomove(&hdr->result, sizeof(*hdr) - offsetof(struct sg_header, result), uio); if ((error == 0) && (hdr->result == 0)) error = uiomove(rdwr->buf, rdwr->buf_len, uio); cam_periph_lock(periph); xpt_free_ccb(rdwr->ccb); cam_periph_unlock(periph); free(rdwr->buf, M_DEVBUF); free(rdwr, M_DEVBUF); return (error); } static int sgsendccb(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; struct cam_periph_map_info mapinfo; int error; softc = periph->softc; bzero(&mapinfo, sizeof(mapinfo)); /* * cam_periph_mapmem calls into proc and vm functions that can * sleep as well as trigger I/O, so we can't hold the lock. * Dropping it here is reasonably safe. * The only CCB opcode that is possible here is XPT_SCSI_IO, no * need for additional checks. */ cam_periph_unlock(periph); error = cam_periph_mapmem(ccb, &mapinfo, softc->maxio); cam_periph_lock(periph); if (error) return (error); error = cam_periph_runccb(ccb, sgerror, CAM_RETRY_SELTO, SF_RETRY_UA, softc->device_stats); cam_periph_unmapmem(ccb, &mapinfo); return (error); } static int sgsendrdwr(struct cam_periph *periph, union ccb *ccb) { struct sg_softc *softc; softc = periph->softc; devstat_start_transaction(softc->device_stats, NULL); xpt_action(ccb); return (0); } static int sgerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags) { struct cam_periph *periph; struct sg_softc *softc; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct sg_softc *)periph->softc; return (cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void sg_scsiio_status(struct ccb_scsiio *csio, u_short *hoststat, u_short *drvstat) { int status; status = csio->ccb_h.status; switch (status & CAM_STATUS_MASK) { case CAM_REQ_CMP: *hoststat = DID_OK; *drvstat = 0; break; case CAM_REQ_CMP_ERR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_REQ_ABORTED: *hoststat = DID_ABORT; *drvstat = 0; break; case 
CAM_REQ_INVALID: *hoststat = DID_ERROR; *drvstat = DRIVER_INVALID; break; case CAM_DEV_NOT_THERE: *hoststat = DID_BAD_TARGET; *drvstat = 0; break; case CAM_SEL_TIMEOUT: *hoststat = DID_NO_CONNECT; *drvstat = 0; break; case CAM_CMD_TIMEOUT: *hoststat = DID_TIME_OUT; *drvstat = 0; break; case CAM_SCSI_STATUS_ERROR: *hoststat = DID_ERROR; *drvstat = 0; break; case CAM_SCSI_BUS_RESET: *hoststat = DID_RESET; *drvstat = 0; break; case CAM_UNCOR_PARITY: *hoststat = DID_PARITY; *drvstat = 0; break; case CAM_SCSI_BUSY: *hoststat = DID_BUS_BUSY; *drvstat = 0; break; default: *hoststat = DID_ERROR; *drvstat = DRIVER_ERROR; } if (status & CAM_AUTOSNS_VALID) *drvstat = DRIVER_SENSE; } static int scsi_group_len(u_char cmd) { int len[] = {6, 10, 10, 12, 12, 12, 10, 10}; int group; group = (cmd >> 5) & 0x7; return (len[group]); }