Index: stable/10/sys/cam/ctl/ctl.c
===================================================================
--- stable/10/sys/cam/ctl/ctl.c	(revision 279001)
+++ stable/10/sys/cam/ctl/ctl.c	(revision 279002)
@@ -1,14277 +1,14392 @@
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id$
 */
/*
 * CAM Target Layer, a SCSI device emulation subsystem.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#define _CTL_C

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

struct ctl_softc *control_softc = NULL;

/*
 * Size and alignment macros needed for Copan-specific HA hardware.  These
 * can go away when the HA code is re-written, and uses busdma for any
 * hardware.
 */
#define CTL_ALIGN_8B(target, source, type)				\
	if (((uint32_t)source & 0x7) != 0)				\
		target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\
	else								\
		target = (type)source;

#define CTL_SIZE_8B(target, size)					\
	if ((size & 0x7) != 0)						\
		target = size + (0x8 - (size & 0x7));			\
	else								\
		target = size;

#define CTL_ALIGN_8B_MARGIN	16

/*
 * Template mode pages.
 */

/*
 * Note that these are default values only.  The actual values will be
 * filled in when the user does a mode sense.
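 *
 * The _changeable variants below act as bit masks: a bit set there marks
 * a field an initiator is allowed to alter via MODE SELECT.  As a rough
 * sketch of the usual mask arithmetic (hypothetical names, not this
 * file's API):
 *
 *	new_byte = (cur_byte & ~chg_mask) | (req_byte & chg_mask);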
*/ const static struct copan_debugconf_subpage debugconf_page_default = { DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ DBGCNF_SUBPAGE_CODE, /* subpage */ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ DBGCNF_VERSION, /* page_version */ {CTL_TIME_IO_DEFAULT_SECS>>8, CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ }; const static struct copan_debugconf_subpage debugconf_page_changeable = { DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ DBGCNF_SUBPAGE_CODE, /* subpage */ {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 0, /* page_version */ {0xff,0xff}, /* ctl_time_io_secs */ }; const static struct scsi_da_rw_recovery_page rw_er_page_default = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/SMS_RWER_LBPERE, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, /*byte3*/0, /*read_retry_count*/0, /*correction_span*/0, /*head_offset_count*/0, /*data_strobe_offset_cnt*/0, /*byte8*/0, /*write_retry_count*/0, /*reserved2*/0, /*recovery_time_limit*/{0, 0}, }; const static struct scsi_format_page format_page_default = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ SFP_HSEC, /*reserved*/ {0, 0, 0} }; const static struct scsi_format_page format_page_changeable = { /*page_code*/SMS_FORMAT_DEVICE_PAGE, /*page_length*/sizeof(struct scsi_format_page) - 2, /*tracks_per_zone*/ {0, 0}, /*alt_sectors_per_zone*/ {0, 0}, /*alt_tracks_per_zone*/ {0, 0}, /*alt_tracks_per_lun*/ {0, 0}, /*sectors_per_track*/ {0, 0}, /*bytes_per_sector*/ {0, 0}, /*interleave*/ {0, 0}, /*track_skew*/ {0, 0}, /*cylinder_skew*/ {0, 0}, /*flags*/ 0, /*reserved*/ {0, 0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_default = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ CTL_DEFAULT_HEADS, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ SRDP_RPL_DISABLED, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, CTL_DEFAULT_ROTATION_RATE & 0xff}, /*reserved2*/ {0, 0} }; const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { /*page_code*/SMS_RIGID_DISK_PAGE, /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, /*cylinders*/ {0, 0, 0}, /*heads*/ 0, /*start_write_precomp*/ {0, 0, 0}, /*start_reduced_current*/ {0, 0, 0}, /*step_rate*/ {0, 0}, /*landing_zone_cylinder*/ {0, 0, 0}, /*rpl*/ 0, /*rotational_offset*/ 0, /*reserved1*/ 0, /*rotation_rate*/ {0, 0}, /*reserved2*/ {0, 0} }; const static struct scsi_caching_page caching_page_default = { /*page_code*/SMS_CACHING_PAGE, 
/*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_DISC | SCP_WCE, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0xff, 0xff}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0xff, 0xff}, /*max_pf_ceiling*/ {0xff, 0xff}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_caching_page caching_page_changeable = { /*page_code*/SMS_CACHING_PAGE, /*page_length*/sizeof(struct scsi_caching_page) - 2, /*flags1*/ SCP_WCE | SCP_RCD, /*ret_priority*/ 0, /*disable_pf_transfer_len*/ {0, 0}, /*min_prefetch*/ {0, 0}, /*max_prefetch*/ {0, 0}, /*max_pf_ceiling*/ {0, 0}, /*flags2*/ 0, /*cache_segments*/ 0, /*cache_seg_size*/ {0, 0}, /*reserved*/ 0, /*non_cache_seg_size*/ {0, 0, 0} }; const static struct scsi_control_page control_page_default = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/0, /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, /*eca_and_aen*/0, /*flags4*/SCP_TAS, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_control_page control_page_changeable = { /*page_code*/SMS_CONTROL_MODE_PAGE, /*page_length*/sizeof(struct scsi_control_page) - 2, /*rlec*/SCP_DSENSE, /*queue_flags*/SCP_QUEUE_ALG_MASK, /*eca_and_aen*/SCP_SWP, /*flags4*/0, /*aen_holdoff_period*/{0, 0}, /*busy_timeout_period*/{0, 0}, /*extended_selftest_completion_time*/{0, 0} }; const static struct scsi_info_exceptions_page ie_page_default = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/SIEP_FLAGS_DEXCPT, /*mrie*/0, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 0} }; const static struct scsi_info_exceptions_page ie_page_changeable = { /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, /*info_flags*/0, /*mrie*/0, /*interval_timer*/{0, 0, 0, 0}, /*report_count*/{0, 0, 0, 0} }; #define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0x01, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0x02, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf1, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0xf2, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, /*subpage_code*/0x02, /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, /*flags*/0, /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /*descr*/{}}, {{/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}}, {/*flags*/0, /*resource*/0, /*reserved*/{0, 0}, /*count*/{0, 0, 0, 0}} } }; /* * XXX KDM move these into the softc. 
 */
static int rcv_sync_msg;
static uint8_t ctl_pause_rtr;

SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer");
static int worker_threads = -1;
TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN,
    &worker_threads, 1, "Number of worker threads");
static int ctl_debug = CTL_DEBUG_NONE;
TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN,
    &ctl_debug, 0, "Enabled debug flags");

/*
 * Supported pages (0x00), Serial number (0x80), Device ID (0x83),
 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0),
 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2)
 */
#define SCSI_EVPD_NUM_SUPPORTED_PAGES	10

static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event,
				  int param);
static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest);
static int ctl_init(void);
void ctl_shutdown(void);
static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td);
static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td);
static void ctl_ioctl_online(void *arg);
static void ctl_ioctl_offline(void *arg);
static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id);
static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio);
static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio);
static int ctl_ioctl_submit_wait(union ctl_io *io);
static void ctl_ioctl_datamove(union ctl_io *io);
static void ctl_ioctl_done(union ctl_io *io);
static void ctl_ioctl_hard_startstop_callback(void *arg,
					      struct cfi_metatask *metatask);
static void ctl_ioctl_bbrread_callback(void *arg,struct cfi_metatask *metatask);
static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
			      struct ctl_ooa *ooa_hdr,
			      struct ctl_ooa_entry *kern_entries);
static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
		     struct thread *td);
-static uint32_t ctl_map_lun(struct ctl_softc *softc, int port_num, uint32_t lun);
-static uint32_t ctl_map_lun_back(struct ctl_softc *softc, int port_num, uint32_t lun);
static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun,
			 struct ctl_be_lun *be_lun, struct ctl_id target_id);
static int ctl_free_lun(struct ctl_lun *lun);
static void ctl_create_lun(struct ctl_be_lun *be_lun);
+static struct ctl_port * ctl_io_port(struct ctl_io_hdr *io_hdr);
/**
static void ctl_failover_change_pages(struct ctl_softc *softc,
				      struct ctl_scsiio *ctsio, int master);
**/

static int ctl_do_mode_select(union ctl_io *io);
static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun,
			   uint64_t res_key, uint64_t sa_res_key,
			   uint8_t type, uint32_t residx,
			   struct ctl_scsiio *ctsio,
			   struct scsi_per_res_out *cdb,
			   struct scsi_per_res_out_parms* param);
static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg);
static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg);
static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len);
static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len);
static int
ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len); static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); static int ctl_inquiry_std(struct ctl_scsiio *ctsio); static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq); static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io); static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *starting_io); static int ctl_check_blocked(struct ctl_lun *lun); static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio); //static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); static void ctl_failover(void); static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type); static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, struct ctl_scsiio *ctsio); static int ctl_scsiio(struct ctl_scsiio *ctsio); static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, ctl_ua_type ua_type); static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type); static int ctl_abort_task(union ctl_io *io); static int ctl_abort_task_set(union ctl_io *io); static int ctl_i_t_nexus_reset(union ctl_io *io); static void ctl_run_task(union ctl_io *io); #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg); static void ctl_done_timer_wakeup(void *arg); #endif /* CTL_IO_DELAY */ static void ctl_send_datamove_done(union ctl_io *io, int have_lock); static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_dm_write_cb(union ctl_io *io); static void ctl_datamove_remote_write(union ctl_io *io); static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); static int ctl_datamove_remote_sgl_setup(union ctl_io *io); static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback); static void ctl_datamove_remote_read(union ctl_io *io); static void ctl_datamove_remote(union ctl_io *io); static int ctl_process_done(union ctl_io *io); static void ctl_lun_thread(void *arg); static void ctl_thresh_thread(void *arg); static void ctl_work_thread(void *arg); static void ctl_enqueue_incoming(union ctl_io *io); static void ctl_enqueue_rtr(union ctl_io *io); static void ctl_enqueue_done(union ctl_io *io); static void ctl_enqueue_isc(union ctl_io *io); static const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); static const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio); static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry); /* * Load the serialization table. This isn't very pretty, but is probably * the easiest way to do it. */ #include "ctl_ser_table.c" /* * We only need to define open, close and ioctl routines for this driver. 
*/ static struct cdevsw ctl_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ctl_open, .d_close = ctl_close, .d_ioctl = ctl_ioctl, .d_name = "ctl", }; MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests"); static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t ctl_moduledata = { "ctl", ctl_module_event_handler, NULL }; DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); MODULE_VERSION(ctl, 1); static struct ctl_frontend ioctl_frontend = { .name = "ioctl", }; static void ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.original_sc == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.original_sc->scsiio; ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctsio->io_hdr.status = msg_info->hdr.status; ctsio->scsi_status = msg_info->scsi.scsi_status; ctsio->sense_len = msg_info->scsi.sense_len; ctsio->sense_residual = msg_info->scsi.sense_residual; ctsio->residual = msg_info->scsi.residual; memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, sizeof(ctsio->sense_data)); memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); ctl_enqueue_isc((union ctl_io *)ctsio); } static void ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, union ctl_ha_msg *msg_info) { struct ctl_scsiio *ctsio; if (msg_info->hdr.serializing_sc == NULL) { printf("%s: serializing_sc == NULL!\n", __func__); /* XXX KDM now what? */ return; } ctsio = &msg_info->hdr.serializing_sc->scsiio; #if 0 /* * Attempt to catch the situation where an I/O has * been freed, and we're using it again. */ if (ctsio->io_hdr.io_type == 0xff) { union ctl_io *tmp_io; tmp_io = (union ctl_io *)ctsio; printf("%s: %p use after free!\n", __func__, ctsio); printf("%s: type %d msg %d cdb %x iptl: " "%d:%d:%d:%d tag 0x%04x " "flag %#x status %x\n", __func__, tmp_io->io_hdr.io_type, tmp_io->io_hdr.msg_type, tmp_io->scsiio.cdb[0], tmp_io->io_hdr.nexus.initid.id, tmp_io->io_hdr.nexus.targ_port, tmp_io->io_hdr.nexus.targ_target.id, tmp_io->io_hdr.nexus.targ_lun, (tmp_io->io_hdr.io_type == CTL_IO_TASK) ? tmp_io->taskio.tag_num : tmp_io->scsiio.tag_num, tmp_io->io_hdr.flags, tmp_io->io_hdr.status); } #endif ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; ctl_enqueue_isc((union ctl_io *)ctsio); } /* * ISC (Inter Shelf Communication) event handler. Events from the HA * subsystem come in here. 
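 *
 * In outline: CTL_HA_EVT_MSG_RECV pulls a union ctl_ha_msg off
 * CTL_HA_CHAN_CTL and dispatches on msg_info.hdr.msg_type
 * (CTL_MSG_SERIALIZE, CTL_MSG_DATAMOVE, CTL_MSG_R2R, CTL_MSG_FINISH_IO,
 * CTL_MSG_BAD_JUJU, ...); CTL_HA_EVT_MSG_SENT and CTL_HA_EVT_DISCONNECT
 * are only logged.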
*/ static void ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) { struct ctl_softc *softc; union ctl_io *io; struct ctl_prio *presio; ctl_ha_status isc_status; softc = control_softc; io = NULL; #if 0 printf("CTL: Isc Msg event %d\n", event); #endif if (event == CTL_HA_EVT_MSG_RECV) { union ctl_ha_msg msg_info; isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), /*wait*/ 0); #if 0 printf("CTL: msg_type %d\n", msg_info.msg_type); #endif if (isc_status != 0) { printf("Error receiving message, status = %d\n", isc_status); return; } switch (msg_info.hdr.msg_type) { case CTL_MSG_SERIALIZE: #if 0 printf("Serialize\n"); #endif io = ctl_alloc_io_nowait(softc->othersc_pool); if (io == NULL) { printf("ctl_isc_event_handler: can't allocate " "ctl_io!\n"); /* Bad Juju */ /* Need to set busy and send msg back */ msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; msg_info.hdr.status = CTL_SCSI_ERROR; msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; msg_info.scsi.sense_len = 0; if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ } goto bailout; } ctl_zero_io(io); // populate ctsio from msg_info io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.msg_type = CTL_MSG_SERIALIZE; io->io_hdr.original_sc = msg_info.hdr.original_sc; #if 0 printf("pOrig %x\n", (int)msg_info.original_sc); #endif io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | CTL_FLAG_IO_ACTIVE; /* * If we're in serialization-only mode, we don't * want to go through full done processing. Thus * the COPY flag. * * XXX KDM add another flag that is more specific. */ if (softc->ha_mode == CTL_HA_MODE_SER_ONLY) io->io_hdr.flags |= CTL_FLAG_INT_COPY; io->io_hdr.nexus = msg_info.hdr.nexus; #if 0 printf("targ %d, port %d, iid %d, lun %d\n", io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_lun); #endif io->scsiio.tag_num = msg_info.scsi.tag_num; io->scsiio.tag_type = msg_info.scsi.tag_type; memcpy(io->scsiio.cdb, msg_info.scsi.cdb, CTL_MAX_CDBLEN); if (softc->ha_mode == CTL_HA_MODE_XFER) { const struct ctl_cmd_entry *entry; entry = ctl_get_cmd_entry(&io->scsiio, NULL); io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; io->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; } ctl_enqueue_isc(io); break; /* Performed on the Originating SC, XFER mode only */ case CTL_MSG_DATAMOVE: { struct ctl_sg_entry *sgl; int i, j; io = msg_info.hdr.original_sc; if (io == NULL) { printf("%s: original_sc == NULL!\n", __func__); /* XXX KDM do something here */ break; } io->io_hdr.msg_type = CTL_MSG_DATAMOVE; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* * Keep track of this, we need to send it back over * when the datamove is complete. */ io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; if (msg_info.dt.sg_sequence == 0) { /* * XXX KDM we use the preallocated S/G list * here, but we'll need to change this to * dynamic allocation if we need larger S/G * lists. */ if (msg_info.dt.kern_sg_entries > sizeof(io->io_hdr.remote_sglist) / sizeof(io->io_hdr.remote_sglist[0])) { printf("%s: number of S/G entries " "needed %u > allocated num %zd\n", __func__, msg_info.dt.kern_sg_entries, sizeof(io->io_hdr.remote_sglist)/ sizeof(io->io_hdr.remote_sglist[0])); /* * XXX KDM send a message back to * the other side to shut down the * DMA. The error will come back * through via the normal channel. 
				 */
				break;
			}
			sgl = io->io_hdr.remote_sglist;
			memset(sgl, 0, sizeof(io->io_hdr.remote_sglist));

			io->scsiio.kern_data_ptr = (uint8_t *)sgl;

			io->scsiio.kern_sg_entries =
				msg_info.dt.kern_sg_entries;
			io->scsiio.rem_sg_entries =
				msg_info.dt.kern_sg_entries;
			io->scsiio.kern_data_len =
				msg_info.dt.kern_data_len;
			io->scsiio.kern_total_len =
				msg_info.dt.kern_total_len;
			io->scsiio.kern_data_resid =
				msg_info.dt.kern_data_resid;
			io->scsiio.kern_rel_offset =
				msg_info.dt.kern_rel_offset;
			/*
			 * Clear out per-DMA flags.
			 */
			io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK;
			/*
			 * Add per-DMA flags that are set for this
			 * particular DMA request.
			 */
			io->io_hdr.flags |= msg_info.dt.flags &
					    CTL_FLAG_RDMA_MASK;
		} else
			sgl = (struct ctl_sg_entry *)
				io->scsiio.kern_data_ptr;

		for (i = msg_info.dt.sent_sg_entries, j = 0;
		     i < (msg_info.dt.sent_sg_entries +
		     msg_info.dt.cur_sg_entries); i++, j++) {
			sgl[i].addr = msg_info.dt.sg_list[j].addr;
			sgl[i].len = msg_info.dt.sg_list[j].len;

#if 0
			printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n",
			       __func__,
			       msg_info.dt.sg_list[j].addr,
			       msg_info.dt.sg_list[j].len,
			       sgl[i].addr, sgl[i].len, j, i);
#endif
		}
#if 0
		memcpy(&sgl[msg_info.dt.sent_sg_entries],
		       msg_info.dt.sg_list,
		       sizeof(*sgl) * msg_info.dt.cur_sg_entries);
#endif

		/*
		 * If this is the last piece of the I/O, we've got
		 * the full S/G list.  Queue processing in the thread.
		 * Otherwise wait for the next piece.
		 */
		if (msg_info.dt.sg_last != 0)
			ctl_enqueue_isc(io);
		break;
	}
	/* Performed on the Serializing (primary) SC, XFER mode only */
	case CTL_MSG_DATAMOVE_DONE: {
		if (msg_info.hdr.serializing_sc == NULL) {
			printf("%s: serializing_sc == NULL!\n", __func__);
			/* XXX KDM now what? */
			break;
		}
		/*
		 * We grab the sense information here in case
		 * there was a failure, so we can return status
		 * back to the initiator.
		 */
		io = msg_info.hdr.serializing_sc;
		io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
		io->io_hdr.status = msg_info.hdr.status;
		io->scsiio.scsi_status = msg_info.scsi.scsi_status;
		io->scsiio.sense_len = msg_info.scsi.sense_len;
		io->scsiio.sense_residual = msg_info.scsi.sense_residual;
		io->io_hdr.port_status = msg_info.scsi.fetd_status;
		io->scsiio.residual = msg_info.scsi.residual;
		memcpy(&io->scsiio.sense_data, &msg_info.scsi.sense_data,
		       sizeof(io->scsiio.sense_data));
		ctl_enqueue_isc(io);
		break;
	}

	/* Performed on the Originating SC, SER_ONLY mode */
	case CTL_MSG_R2R:
		io = msg_info.hdr.original_sc;
		if (io == NULL) {
			printf("%s: Major Bummer\n", __func__);
			return;
		} else {
#if 0
			printf("pOrig %x\n", (int)ctsio);
#endif
		}
		io->io_hdr.msg_type = CTL_MSG_R2R;
		io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc;
		ctl_enqueue_isc(io);
		break;

	/*
	 * Performed on the Serializing (i.e. primary) SC in SER_ONLY
	 * mode.
	 * Performed on the Originating (i.e. secondary) SC in XFER
	 * mode.
	 */
	case CTL_MSG_FINISH_IO:
		if (softc->ha_mode == CTL_HA_MODE_XFER)
			ctl_isc_handler_finish_xfer(softc, &msg_info);
		else
			ctl_isc_handler_finish_ser_only(softc, &msg_info);
		break;

	/* Performed on the Originating SC */
	case CTL_MSG_BAD_JUJU:
		io = msg_info.hdr.original_sc;
		if (io == NULL) {
			printf("%s: Bad JUJU!, original_sc is NULL!\n",
			       __func__);
			break;
		}
		ctl_copy_sense_data(&msg_info, io);
		/*
		 * IO should have already been cleaned up on other
		 * SC so clear this flag so we won't send a message
		 * back to finish the IO there.
		 */
*/ io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; /* io = msg_info.hdr.serializing_sc; */ io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; ctl_enqueue_isc(io); break; /* Handle resets sent from the other side */ case CTL_MSG_MANAGE_TASKS: { struct ctl_taskio *taskio; taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( softc->othersc_pool); if (taskio == NULL) { printf("ctl_isc_event_handler: can't allocate " "ctl_io!\n"); /* Bad Juju */ /* should I just call the proper reset func here??? */ goto bailout; } ctl_zero_io((union ctl_io *)taskio); taskio->io_hdr.io_type = CTL_IO_TASK; taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; taskio->io_hdr.nexus = msg_info.hdr.nexus; taskio->task_action = msg_info.task.task_action; taskio->tag_num = msg_info.task.tag_num; taskio->tag_type = msg_info.task.tag_type; #ifdef CTL_TIME_IO taskio->io_hdr.start_time = time_uptime; getbintime(&taskio->io_hdr.start_bt); #if 0 cs_prof_gettime(&taskio->io_hdr.start_ticks); #endif #endif /* CTL_TIME_IO */ ctl_run_task((union ctl_io *)taskio); break; } /* Persistent Reserve action which needs attention */ case CTL_MSG_PERS_ACTION: presio = (struct ctl_prio *)ctl_alloc_io_nowait( softc->othersc_pool); if (presio == NULL) { printf("ctl_isc_event_handler: can't allocate " "ctl_io!\n"); /* Bad Juju */ /* Need to set busy and send msg back */ goto bailout; } ctl_zero_io((union ctl_io *)presio); presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; presio->pr_msg = msg_info.pr; ctl_enqueue_isc((union ctl_io *)presio); break; case CTL_MSG_SYNC_FE: rcv_sync_msg = 1; break; default: printf("How did I get here?\n"); } } else if (event == CTL_HA_EVT_MSG_SENT) { if (param != CTL_HA_STATUS_SUCCESS) { printf("Bad status from ctl_ha_msg_send status %d\n", param); } return; } else if (event == CTL_HA_EVT_DISCONNECT) { printf("CTL: Got a disconnect from Isc\n"); return; } else { printf("ctl_isc_event_handler: Unknown event %d\n", event); return; } bailout: return; } static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) { struct scsi_sense_data *sense; sense = &dest->scsiio.sense_data; bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); dest->scsiio.scsi_status = src->scsi.scsi_status; dest->scsiio.sense_len = src->scsi.sense_len; dest->io_hdr.status = src->hdr.status; } static void ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { ctl_ua_type *pu; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; } static void ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = 0; i < CTL_MAX_PORTS; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] |= ua; } } } static void ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) { ctl_ua_type *pu; mtx_assert(&lun->lun_lock, MA_OWNED); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu == NULL) return; pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; } static void ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) { int i, j; mtx_assert(&lun->lun_lock, MA_OWNED); for (i = 0; i < CTL_MAX_PORTS; i++) { if (lun->pending_ua[i] == NULL) continue; for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (i * CTL_MAX_INIT_PER_PORT + j == except) continue; lun->pending_ua[i][j] &= ~ua; } } } static int 
ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) { struct ctl_softc *softc = (struct ctl_softc *)arg1; struct ctl_lun *lun; int error, value; if (softc->flags & CTL_FLAG_ACTIVE_SHELF) value = 0; else value = 1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); mtx_lock(&softc->ctl_lock); if (value == 0) softc->flags |= CTL_FLAG_ACTIVE_SHELF; else softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_init(void) { struct ctl_softc *softc; void *other_pool; struct ctl_port *port; int i, error, retval; //int isc_retval; retval = 0; ctl_pause_rtr = 0; rcv_sync_msg = 0; control_softc = malloc(sizeof(*control_softc), M_DEVBUF, M_WAITOK | M_ZERO); softc = control_softc; softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "cam/ctl"); softc->dev->si_drv1 = softc; /* * By default, return a "bad LUN" peripheral qualifier for unknown * LUNs. The user can override this default using the tunable or * sysctl. See the comment in ctl_inquiry_std() for more details. */ softc->inquiry_pq_no_lun = 1; TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", &softc->inquiry_pq_no_lun); sysctl_ctx_init(&softc->sysctl_ctx); softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", CTLFLAG_RD, 0, "CAM Target Layer"); if (softc->sysctl_tree == NULL) { printf("%s: unable to allocate sysctl tree\n", __func__); destroy_dev(softc->dev); free(control_softc, M_DEVBUF); control_softc = NULL; return (ENOMEM); } SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "inquiry_pq_no_lun", CTLFLAG_RW, &softc->inquiry_pq_no_lun, 0, "Report no lun possible for invalid LUNs"); mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); softc->open_count = 0; /* * Default to actually sending a SYNCHRONIZE CACHE command down to * the drive. */ softc->flags = CTL_FLAG_REAL_SYNC; /* * In Copan's HA scheme, the "master" and "slave" roles are * figured out through the slot the controller is in. Although it * is an active/active system, someone has to be in charge. */ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, "HA head ID (0 - no HA)"); if (softc->ha_id == 0) { softc->flags |= CTL_FLAG_ACTIVE_SHELF; softc->is_single = 1; softc->port_offset = 0; } else softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; /* * XXX KDM need to figure out where we want to get our target ID * and WWID. Is it different on each port? 
*/ softc->target.id = 0; softc->target.wwid[0] = 0x12345678; softc->target.wwid[1] = 0x87654321; STAILQ_INIT(&softc->lun_list); STAILQ_INIT(&softc->pending_lun_queue); STAILQ_INIT(&softc->fe_list); STAILQ_INIT(&softc->port_list); STAILQ_INIT(&softc->be_list); ctl_tpc_init(softc); if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, &other_pool) != 0) { printf("ctl: can't allocate %d entry other SC pool, " "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); return (ENOMEM); } softc->othersc_pool = other_pool; if (worker_threads <= 0) worker_threads = max(1, mp_ncpus / 4); if (worker_threads > CTL_MAX_THREADS) worker_threads = CTL_MAX_THREADS; for (i = 0; i < worker_threads; i++) { struct ctl_thread *thr = &softc->threads[i]; mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); thr->ctl_softc = softc; STAILQ_INIT(&thr->incoming_queue); STAILQ_INIT(&thr->rtr_queue); STAILQ_INIT(&thr->done_queue); STAILQ_INIT(&thr->isc_queue); error = kproc_kthread_add(ctl_work_thread, thr, &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); if (error != 0) { printf("error creating CTL work thread!\n"); ctl_pool_free(other_pool); return (error); } } error = kproc_kthread_add(ctl_lun_thread, softc, &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); if (error != 0) { printf("error creating CTL lun thread!\n"); ctl_pool_free(other_pool); return (error); } error = kproc_kthread_add(ctl_thresh_thread, softc, &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); if (error != 0) { printf("error creating CTL threshold thread!\n"); ctl_pool_free(other_pool); return (error); } if (bootverbose) printf("ctl: CAM Target Layer loaded\n"); /* * Initialize the ioctl front end. */ ctl_frontend_register(&ioctl_frontend); port = &softc->ioctl_info.port; port->frontend = &ioctl_frontend; sprintf(softc->ioctl_info.port_name, "ioctl"); port->port_type = CTL_PORT_IOCTL; port->num_requested_ctl_io = 100; port->port_name = softc->ioctl_info.port_name; port->port_online = ctl_ioctl_online; port->port_offline = ctl_ioctl_offline; port->onoff_arg = &softc->ioctl_info; port->lun_enable = ctl_ioctl_lun_enable; port->lun_disable = ctl_ioctl_lun_disable; port->targ_lun_arg = &softc->ioctl_info; port->fe_datamove = ctl_ioctl_datamove; port->fe_done = ctl_ioctl_done; port->max_targets = 15; port->max_target_id = 15; if (ctl_port_register(&softc->ioctl_info.port) != 0) { printf("ctl: ioctl front end registration failed, will " "continue anyway\n"); } SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); #ifdef CTL_IO_DELAY if (sizeof(struct callout) > CTL_TIMER_BYTES) { printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", sizeof(struct callout), CTL_TIMER_BYTES); return (EINVAL); } #endif /* CTL_IO_DELAY */ return (0); } void ctl_shutdown(void) { struct ctl_softc *softc; struct ctl_lun *lun, *next_lun; softc = (struct ctl_softc *)control_softc; if (ctl_port_deregister(&softc->ioctl_info.port) != 0) printf("ctl: ioctl front end deregistration failed\n"); mtx_lock(&softc->ctl_lock); /* * Free up each LUN. 
*/ for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ next_lun = STAILQ_NEXT(lun, links); ctl_free_lun(lun); } mtx_unlock(&softc->ctl_lock); ctl_frontend_deregister(&ioctl_frontend); #if 0 ctl_shutdown_thread(softc->work_thread); mtx_destroy(&softc->queue_lock); #endif ctl_tpc_shutdown(softc); uma_zdestroy(softc->io_zone); mtx_destroy(&softc->ctl_lock); destroy_dev(softc->dev); sysctl_ctx_free(&softc->sysctl_ctx); free(control_softc, M_DEVBUF); control_softc = NULL; if (bootverbose) printf("ctl: CAM Target Layer unloaded\n"); } static int ctl_module_event_handler(module_t mod, int what, void *arg) { switch (what) { case MOD_LOAD: return (ctl_init()); case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } } /* * XXX KDM should we do some access checks here? Bump a reference count to * prevent a CTL module from being unloaded while someone has it open? */ static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } int ctl_port_enable(ctl_port_type port_type) { struct ctl_softc *softc = control_softc; struct ctl_port *port; if (softc->is_single == 0) { union ctl_ha_msg msg_info; int isc_retval; #if 0 printf("%s: HA mode, synchronizing frontend enable\n", __func__); #endif msg_info.hdr.msg_type = CTL_MSG_SYNC_FE; if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) { printf("Sync msg send error retval %d\n", isc_retval); } if (!rcv_sync_msg) { isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), 1); } #if 0 printf("CTL:Frontend Enable\n"); } else { printf("%s: single mode, skipping frontend synchronization\n", __func__); #endif } STAILQ_FOREACH(port, &softc->port_list, links) { if (port_type & port->port_type) { #if 0 printf("port %d\n", port->targ_port); #endif ctl_port_online(port); } } return (0); } int ctl_port_disable(ctl_port_type port_type) { struct ctl_softc *softc; struct ctl_port *port; softc = control_softc; STAILQ_FOREACH(port, &softc->port_list, links) { if (port_type & port->port_type) ctl_port_offline(port); } return (0); } /* * Returns 0 for success, 1 for failure. * Currently the only failure mode is if there aren't enough entries * allocated. So, in case of a failure, look at num_entries_dropped, * reallocate and try again. 
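 *
 * A minimal caller sketch (hypothetical code, not part of this file;
 * CTL_PORT_ALL and M_TEMP are assumed names here):
 *
 *	int alloced = 8, filled, dropped;
 *	struct ctl_port_entry *ents;
 *
 *	for (;;) {
 *		ents = malloc(alloced * sizeof(*ents), M_TEMP, M_WAITOK);
 *		if (ctl_port_list(ents, alloced, &filled, &dropped,
 *		    CTL_PORT_ALL, 0) == 0)
 *			break;
 *		free(ents, M_TEMP);
 *		alloced += dropped;
 *	}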
 */
int
ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced,
	      int *num_entries_filled, int *num_entries_dropped,
	      ctl_port_type port_type, int no_virtual)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	int entries_dropped, entries_filled;
	int retval;
	int i;

	softc = control_softc;

	retval = 0;
	entries_filled = 0;
	entries_dropped = 0;

	i = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		struct ctl_port_entry *entry;

		if ((port->port_type & port_type) == 0)
			continue;

		if ((no_virtual != 0)
		 && (port->virtual_port != 0))
			continue;

		if (entries_filled >= num_entries_alloced) {
			entries_dropped++;
			continue;
		}
		entry = &entries[i];

		entry->port_type = port->port_type;
		strlcpy(entry->port_name, port->port_name,
			sizeof(entry->port_name));
		entry->physical_port = port->physical_port;
		entry->virtual_port = port->virtual_port;
		entry->wwnn = port->wwnn;
		entry->wwpn = port->wwpn;

		i++;
		entries_filled++;
	}

	mtx_unlock(&softc->ctl_lock);

	if (entries_dropped > 0)
		retval = 1;

	*num_entries_dropped = entries_dropped;
	*num_entries_filled = entries_filled;

	return (retval);
}

static void
ctl_ioctl_online(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED;
}

static void
ctl_ioctl_offline(void *arg)
{
	struct ctl_ioctl_info *ioctl_info;

	ioctl_info = (struct ctl_ioctl_info *)arg;

	ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED;
}

/*
 * Remove an initiator by port number and initiator ID.
 * Returns 0 for success, -1 for failure.
 */
int
ctl_remove_initiator(struct ctl_port *port, int iid)
{
	struct ctl_softc *softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: initiator ID %u > maximum %u!\n",
		       __func__, iid, CTL_MAX_INIT_PER_PORT);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);
	port->wwpn_iid[iid].in_use--;
	port->wwpn_iid[iid].last_use = time_uptime;
	mtx_unlock(&softc->ctl_lock);

	return (0);
}

/*
 * Add an initiator to the initiator map.
 * Returns iid for success, < 0 for failure.
 */
int
ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name)
{
	struct ctl_softc *softc = control_softc;
	time_t best_time;
	int i, best;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	if (iid >= CTL_MAX_INIT_PER_PORT) {
		printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n",
		       __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT);
		free(name, M_CTL);
		return (-1);
	}

	mtx_lock(&softc->ctl_lock);

	if (iid < 0 && (wwpn != 0 || name != NULL)) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) {
				iid = i;
				break;
			}
			if (name != NULL && port->wwpn_iid[i].name != NULL &&
			    strcmp(name, port->wwpn_iid[i].name) == 0) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0 &&
			    port->wwpn_iid[i].wwpn == 0 &&
			    port->wwpn_iid[i].name == NULL) {
				iid = i;
				break;
			}
		}
	}

	if (iid < 0) {
		best = -1;
		best_time = INT32_MAX;
		for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) {
			if (port->wwpn_iid[i].in_use == 0) {
				if (port->wwpn_iid[i].last_use < best_time) {
					best = i;
					best_time =
					    port->wwpn_iid[i].last_use;
				}
			}
		}
		iid = best;
	}

	if (iid < 0) {
		mtx_unlock(&softc->ctl_lock);
		free(name, M_CTL);
		return (-2);
	}

	if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) {
		/*
		 * This is not an error yet.
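		 * It usually just means the same initiator showed up
		 * again, e.g. after a reset or relogin; one of the
		 * matches below fires and we simply reuse the slot.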
*/ if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { #if 0 printf("%s: port %d iid %u WWPN %#jx arrived" " again\n", __func__, port->targ_port, iid, (uintmax_t)wwpn); #endif goto take; } if (name != NULL && port->wwpn_iid[iid].name != NULL && strcmp(name, port->wwpn_iid[iid].name) == 0) { #if 0 printf("%s: port %d iid %u name '%s' arrived" " again\n", __func__, port->targ_port, iid, name); #endif goto take; } /* * This is an error, but what do we do about it? The * driver is telling us we have a new WWPN for this * initiator ID, so we pretty much need to use it. */ printf("%s: port %d iid %u WWPN %#jx '%s' arrived," " but WWPN %#jx '%s' is still at that address\n", __func__, port->targ_port, iid, wwpn, name, (uintmax_t)port->wwpn_iid[iid].wwpn, port->wwpn_iid[iid].name); /* * XXX KDM clear have_ca and ua_pending on each LUN for * this initiator. */ } take: free(port->wwpn_iid[iid].name, M_CTL); port->wwpn_iid[iid].name = name; port->wwpn_iid[iid].wwpn = wwpn; port->wwpn_iid[iid].in_use++; mtx_unlock(&softc->ctl_lock); return (iid); } static int ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) { int len; switch (port->port_type) { case CTL_PORT_FC: { struct scsi_transportid_fcp *id = (struct scsi_transportid_fcp *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_FC; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); return (sizeof(*id)); } case CTL_PORT_ISCSI: { struct scsi_transportid_iscsi_port *id = (struct scsi_transportid_iscsi_port *)buf; if (port->wwpn_iid[iid].name == NULL) return (0); memset(id, 0, 256); id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | SCSI_PROTO_ISCSI; len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; len = roundup2(min(len, 252), 4); scsi_ulto2b(len, id->additional_length); return (sizeof(*id) + len); } case CTL_PORT_SAS: { struct scsi_transportid_sas *id = (struct scsi_transportid_sas *)buf; if (port->wwpn_iid[iid].wwpn == 0) return (0); memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SAS; scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); return (sizeof(*id)); } default: { struct scsi_transportid_spi *id = (struct scsi_transportid_spi *)buf; memset(id, 0, sizeof(*id)); id->format_protocol = SCSI_PROTO_SPI; scsi_ulto2b(iid, id->scsi_addr); scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); return (sizeof(*id)); } } } static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) { return (0); } static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) { return (0); } /* * Data movement routine for the CTL ioctl frontend port. */ static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sglen, ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy, len_copied; int kern_watermark, ext_watermark; int ext_sglist_malloced; int i, j; ext_sglist_malloced = 0; ext_sg_start = 0; ext_offset = 0; CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); /* * If this flag is set, fake the data transfer. */ if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { ctsio->ext_data_filled = ctsio->ext_data_len; goto bailout; } /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. 
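 *
 * The copy loop below then walks both lists in lockstep with per-entry
 * "watermark" offsets: each pass copies min(ext remaining, kern
 * remaining) bytes and advances whichever entry (or both) it exhausted.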
*/ if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) { int len_seen; ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, M_WAITOK); ext_sglist_malloced = 1; if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); goto bailout; } ext_sg_entries = ctsio->ext_sg_entries; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; len_copied = 0; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = MIN(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; kern_watermark += len_to_copy; ext_watermark += len_to_copy; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes to user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", kern_ptr, ext_ptr)); if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); goto bailout; } } else { CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " "bytes from user\n", len_to_copy)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " "to %p\n", ext_ptr, kern_ptr)); if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/0); goto bailout; } } len_copied += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } ctsio->ext_data_filled += len_copied; CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " "kern_sg_entries: %d\n", ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " "kern_data_len = %d\n", ctsio->ext_data_len, ctsio->kern_data_len)); /* XXX KDM set residual?? */ bailout: if (ext_sglist_malloced != 0) free(ext_sglist, M_CTL); return (CTL_RETVAL_COMPLETE); } /* * Serialize a command that went down the "wrong" side, and so was sent to * this controller for execution. The logic is a little different than the * standard case in ctl_scsiio_precheck(). Errors in this case need to get * sent back to the other side, but in the success case, we execute the * command on this side (XFER mode) or tell the other side to execute it * (SER_ONLY mode). 
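 *
 * Roughly: CTL_ACTION_BLOCK queues the I/O on the blocked queue;
 * CTL_ACTION_PASS/SKIP either runs it here (XFER mode) or answers with
 * CTL_MSG_R2R (SER_ONLY mode); the OVERLAP and ERROR cases build a
 * CHECK CONDITION and bounce it back as CTL_MSG_BAD_JUJU.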
*/ static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) { struct ctl_softc *softc; union ctl_ha_msg msg_info; struct ctl_lun *lun; int retval = 0; uint32_t targ_lun; softc = control_softc; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; lun = softc->ctl_luns[targ_lun]; if (lun==NULL) { /* * Why isn't LUN defined? The other side wouldn't * send a cmd if the LUN is undefined. */ printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); /* "Logical unit not supported" */ ctl_set_sense_data(&msg_info.scsi.sense_data, lun, /*sense_format*/SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x25, /*ascq*/ 0x00, SSD_ELEM_NONE); msg_info.scsi.sense_len = SSD_FULL_SIZE; msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; msg_info.hdr.serializing_sc = NULL; msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { } return(1); } mtx_lock(&lun->lun_lock); TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, blocked_links); break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: if (softc->ha_mode == CTL_HA_MODE_XFER) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); } else { /* send msg back to other side */ msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; msg_info.hdr.msg_type = CTL_MSG_R2R; #if 0 printf("2. 
pOrig %x\n", (int)msg_info.hdr.original_sc);
#endif
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
			    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
			}
		}
		break;
	case CTL_ACTION_OVERLAP:
		/* OVERLAPPED COMMANDS ATTEMPTED */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4E,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_OVERLAP_TAG:
		/* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
				   /*asc*/ 0x4D,
				   /*ascq*/ ctsio->tag_num & 0xff,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer Overlap Tag\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	case CTL_ACTION_ERROR:
	default:
		/* "Internal target failure" */
		ctl_set_sense_data(&msg_info.scsi.sense_data,
				   lun,
				   /*sense_format*/SSD_TYPE_NONE,
				   /*current_error*/ 1,
				   /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				   /*asc*/ 0x44,
				   /*ascq*/ 0x00,
				   SSD_ELEM_NONE);

		msg_info.scsi.sense_len = SSD_FULL_SIZE;
		msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND;
		msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
		msg_info.hdr.original_sc = ctsio->io_hdr.original_sc;
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU;
#if 0
		printf("BAD JUJU:Major Bummer HW Error\n");
#endif
		TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links);
		retval = 1;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		    sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS) {
		}
		break;
	}
	mtx_unlock(&lun->lun_lock);
	return (retval);
}

static int
ctl_ioctl_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE)
		return (retval);

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here?
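			 * As written, a spurious wakeup in INPROG is
			 * harmless: params.state still equals last_state,
			 * so the next loop iteration just re-enters
			 * cv_wait().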
			 */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_ioctl_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

static void
ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_startstop_info *sd_info;

	sd_info = (struct ctl_fe_ioctl_startstop_info *)arg;

	sd_info->hs_info.status = metatask->status;
	sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns;
	sd_info->hs_info.luns_complete =
		metatask->taskinfo.startstop.luns_complete;
	sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed;

	cv_broadcast(&sd_info->sem);
}

static void
ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask)
{
	struct ctl_fe_ioctl_bbrread_info *fe_bbr_info;

	fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg;

	mtx_lock(fe_bbr_info->lock);
	fe_bbr_info->bbr_info->status = metatask->status;
	fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status;
	fe_bbr_info->wakeup_done = 1;
	mtx_unlock(fe_bbr_info->lock);

	cv_broadcast(&fe_bbr_info->sem);
}

/*
 * Returns 0 for success, errno for failure.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
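		 * (*cur_fill_num keeps counting regardless, so the
		 * caller still learns the real queue depth and can
		 * retry with a bigger buffer.)
		 */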
*/ if (*cur_fill_num >= ooa_hdr->alloc_num) continue; entry = &kern_entries[*cur_fill_num]; entry->tag_num = io->scsiio.tag_num; entry->lun_num = lun->lun; #ifdef CTL_TIME_IO entry->start_bt = io->io_hdr.start_bt; #endif bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len); entry->cdb_len = io->scsiio.cdb_len; if (io->io_hdr.flags & CTL_FLAG_BLOCKED) entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED; if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA; if (io->io_hdr.flags & CTL_FLAG_ABORT) entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT; if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) entry->cmd_flags |= CTL_OOACMD_FLAG_RTR; if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED) entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED; } mtx_unlock(&lun->lun_lock); return (retval); } static void * ctl_copyin_alloc(void *user_addr, int len, char *error_str, size_t error_str_len) { void *kptr; kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO); if (copyin(user_addr, kptr, len) != 0) { snprintf(error_str, error_str_len, "Error copying %d bytes " "from user address %p to kernel address %p", len, user_addr, kptr); free(kptr, M_CTL); return (NULL); } return (kptr); } static void ctl_free_args(int num_args, struct ctl_be_arg *args) { int i; if (args == NULL) return; for (i = 0; i < num_args; i++) { free(args[i].kname, M_CTL); free(args[i].kvalue, M_CTL); } free(args, M_CTL); } static struct ctl_be_arg * ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, char *error_str, size_t error_str_len) { struct ctl_be_arg *args; int i; args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), error_str, error_str_len); if (args == NULL) goto bailout; for (i = 0; i < num_args; i++) { args[i].kname = NULL; args[i].kvalue = NULL; } for (i = 0; i < num_args; i++) { uint8_t *tmpptr; args[i].kname = ctl_copyin_alloc(args[i].name, args[i].namelen, error_str, error_str_len); if (args[i].kname == NULL) goto bailout; if (args[i].kname[args[i].namelen - 1] != '\0') { snprintf(error_str, error_str_len, "Argument %d " "name is not NUL-terminated", i); goto bailout; } if (args[i].flags & CTL_BEARG_RD) { tmpptr = ctl_copyin_alloc(args[i].value, args[i].vallen, error_str, error_str_len); if (tmpptr == NULL) goto bailout; if ((args[i].flags & CTL_BEARG_ASCII) && (tmpptr[args[i].vallen - 1] != '\0')) { snprintf(error_str, error_str_len, "Argument " "%d value is not NUL-terminated", i); goto bailout; } args[i].kvalue = tmpptr; } else { args[i].kvalue = malloc(args[i].vallen, M_CTL, M_WAITOK | M_ZERO); } } return (args); bailout: ctl_free_args(num_args, args); return (NULL); } static void ctl_copyout_args(int num_args, struct ctl_be_arg *args) { int i; for (i = 0; i < num_args; i++) { if (args[i].flags & CTL_BEARG_WR) copyout(args[i].kvalue, args[i].value, args[i].vallen); } } /* * Escape characters that are illegal or not recommended in XML. 
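 *
 * For example, the string "a<b & c>d" is emitted as
 * "a&lt;b &amp; c&gt;d".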
*/ int ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) { char *end = str + size; int retval; retval = 0; for (; *str && str < end; str++) { switch (*str) { case '&': retval = sbuf_printf(sb, "&"); break; case '>': retval = sbuf_printf(sb, ">"); break; case '<': retval = sbuf_printf(sb, "<"); break; default: retval = sbuf_putc(sb, *str); break; } if (retval != 0) break; } return (retval); } static void ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) { struct scsi_vpd_id_descriptor *desc; int i; if (id == NULL || id->len < 4) return; desc = (struct scsi_vpd_id_descriptor *)id->data; switch (desc->id_type & SVPD_ID_TYPE_MASK) { case SVPD_ID_TYPE_T10: sbuf_printf(sb, "t10."); break; case SVPD_ID_TYPE_EUI64: sbuf_printf(sb, "eui."); break; case SVPD_ID_TYPE_NAA: sbuf_printf(sb, "naa."); break; case SVPD_ID_TYPE_SCSI_NAME: break; } switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { case SVPD_ID_CODESET_BINARY: for (i = 0; i < desc->length; i++) sbuf_printf(sb, "%02x", desc->identifier[i]); break; case SVPD_ID_CODESET_ASCII: sbuf_printf(sb, "%.*s", (int)desc->length, (char *)desc->identifier); break; case SVPD_ID_CODESET_UTF8: sbuf_printf(sb, "%s", (char *)desc->identifier); break; } } static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_softc *softc; int retval; softc = control_softc; retval = 0; switch (cmd) { case CTL_IO: { union ctl_io *io; void *pool_tmp; /* * If we haven't been "enabled", don't allow any SCSI I/O * to this FETD. */ if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) { retval = EPERM; break; } io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref); /* * Need to save the pool reference so it doesn't get * spammed by the user's ctl_io. */ pool_tmp = io->io_hdr.pool; memcpy(io, (void *)addr, sizeof(*io)); io->io_hdr.pool = pool_tmp; /* * No status yet, so make sure the status is set properly. */ io->io_hdr.status = CTL_STATUS_NONE; /* * The user sets the initiator ID, target and LUN IDs. */ io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; io->io_hdr.flags |= CTL_FLAG_USER_REQ; if ((io->io_hdr.io_type == CTL_IO_SCSI) && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; retval = ctl_ioctl_submit_wait(io); if (retval != 0) { ctl_free_io(io); break; } memcpy((void *)addr, io, sizeof(*io)); /* return this to our pool */ ctl_free_io(io); break; } case CTL_ENABLE_PORT: case CTL_DISABLE_PORT: case CTL_SET_PORT_WWNS: { struct ctl_port *port; struct ctl_port_entry *entry; entry = (struct ctl_port_entry *)addr; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { int action, done; action = 0; done = 0; if ((entry->port_type == CTL_PORT_NONE) && (entry->targ_port == port->targ_port)) { /* * If the user only wants to enable or * disable or set WWNs on a specific port, * do the operation and we're done. */ action = 1; done = 1; } else if (entry->port_type & port->port_type) { /* * Compare the user's type mask with the * particular frontend type to see if we * have a match. */ action = 1; done = 0; /* * Make sure the user isn't trying to set * WWNs on multiple ports at the same time. */ if (cmd == CTL_SET_PORT_WWNS) { printf("%s: Can't set WWNs on " "multiple ports\n", __func__); retval = EINVAL; break; } } if (action != 0) { /* * XXX KDM we have to drop the lock here, * because the online/offline operations * can potentially block. 
We need to * reference count the frontends so they * can't go away, */ mtx_unlock(&softc->ctl_lock); if (cmd == CTL_ENABLE_PORT) { struct ctl_lun *lun; STAILQ_FOREACH(lun, &softc->lun_list, links) { port->lun_enable(port->targ_lun_arg, lun->target, lun->lun); } ctl_port_online(port); } else if (cmd == CTL_DISABLE_PORT) { struct ctl_lun *lun; ctl_port_offline(port); STAILQ_FOREACH(lun, &softc->lun_list, links) { port->lun_disable( port->targ_lun_arg, lun->target, lun->lun); } } mtx_lock(&softc->ctl_lock); if (cmd == CTL_SET_PORT_WWNS) ctl_port_set_wwns(port, (entry->flags & CTL_PORT_WWNN_VALID) ? 1 : 0, entry->wwnn, (entry->flags & CTL_PORT_WWPN_VALID) ? 1 : 0, entry->wwpn); } if (done != 0) break; } mtx_unlock(&softc->ctl_lock); break; } case CTL_GET_PORT_LIST: { struct ctl_port *port; struct ctl_port_list *list; int i; list = (struct ctl_port_list *)addr; if (list->alloc_len != (list->alloc_num * sizeof(struct ctl_port_entry))) { printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " "alloc_num %u * sizeof(struct ctl_port_entry) " "%zu\n", __func__, list->alloc_len, list->alloc_num, sizeof(struct ctl_port_entry)); retval = EINVAL; break; } list->fill_len = 0; list->fill_num = 0; list->dropped_num = 0; i = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { struct ctl_port_entry entry, *list_entry; if (list->fill_num >= list->alloc_num) { list->dropped_num++; continue; } entry.port_type = port->port_type; strlcpy(entry.port_name, port->port_name, sizeof(entry.port_name)); entry.targ_port = port->targ_port; entry.physical_port = port->physical_port; entry.virtual_port = port->virtual_port; entry.wwnn = port->wwnn; entry.wwpn = port->wwpn; if (port->status & CTL_PORT_STATUS_ONLINE) entry.online = 1; else entry.online = 0; list_entry = &list->entries[i]; retval = copyout(&entry, list_entry, sizeof(entry)); if (retval != 0) { printf("%s: CTL_GET_PORT_LIST: copyout " "returned %d\n", __func__, retval); break; } i++; list->fill_num++; list->fill_len += sizeof(entry); } mtx_unlock(&softc->ctl_lock); /* * If this is non-zero, we had a copyout fault, so there's * probably no point in attempting to set the status inside * the structure. */ if (retval != 0) break; if (list->dropped_num > 0) list->status = CTL_PORT_LIST_NEED_MORE_SPACE; else list->status = CTL_PORT_LIST_OK; break; } case CTL_DUMP_OOA: { struct ctl_lun *lun; union ctl_io *io; char printbuf[128]; struct sbuf sb; mtx_lock(&softc->ctl_lock); printf("Dumping OOA queues:\n"); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); for (io = (union ctl_io *)TAILQ_FIRST( &lun->ooa_queue); io != NULL; io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, ooa_links)) { sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", (intmax_t)lun->lun, io->scsiio.tag_num, (io->io_hdr.flags & CTL_FLAG_BLOCKED) ? "" : " BLOCKED", (io->io_hdr.flags & CTL_FLAG_DMA_INPROG) ? " DMA" : "", (io->io_hdr.flags & CTL_FLAG_ABORT) ? " ABORT" : "", (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_finish(&sb); printf("%s\n", sbuf_data(&sb)); } mtx_unlock(&lun->lun_lock); } printf("OOA queues dump done\n"); mtx_unlock(&softc->ctl_lock); break; } case CTL_GET_OOA: { struct ctl_lun *lun; struct ctl_ooa *ooa_hdr; struct ctl_ooa_entry *entries; uint32_t cur_fill_num; ooa_hdr = (struct ctl_ooa *)addr; if ((ooa_hdr->alloc_len == 0) || (ooa_hdr->alloc_num == 0)) { printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " "must be non-zero\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num); retval = EINVAL; break; } if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * sizeof(struct ctl_ooa_entry))) { printf("%s: CTL_GET_OOA: alloc len %u must be alloc " "num %d * sizeof(struct ctl_ooa_entry) %zd\n", __func__, ooa_hdr->alloc_len, ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); retval = EINVAL; break; } entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); if (entries == NULL) { printf("%s: could not allocate %d bytes for OOA " "dump\n", __func__, ooa_hdr->alloc_len); retval = ENOMEM; break; } mtx_lock(&softc->ctl_lock); if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { mtx_unlock(&softc->ctl_lock); free(entries, M_CTL); printf("%s: CTL_GET_OOA: invalid LUN %ju\n", __func__, (uintmax_t)ooa_hdr->lun_num); retval = EINVAL; break; } cur_fill_num = 0; if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { STAILQ_FOREACH(lun, &softc->lun_list, links) { retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, ooa_hdr, entries); if (retval != 0) break; } if (retval != 0) { mtx_unlock(&softc->ctl_lock); free(entries, M_CTL); break; } } else { lun = softc->ctl_luns[ooa_hdr->lun_num]; retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, entries); } mtx_unlock(&softc->ctl_lock); ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); ooa_hdr->fill_len = ooa_hdr->fill_num * sizeof(struct ctl_ooa_entry); retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); if (retval != 0) { printf("%s: error copying out %d bytes for OOA dump\n", __func__, ooa_hdr->fill_len); } getbintime(&ooa_hdr->cur_bt); if (cur_fill_num > ooa_hdr->alloc_num) { ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; } else { ooa_hdr->dropped_num = 0; ooa_hdr->status = CTL_OOA_OK; } free(entries, M_CTL); break; } case CTL_CHECK_OOA: { union ctl_io *io; struct ctl_lun *lun; struct ctl_ooa_info *ooa_info; ooa_info = (struct ctl_ooa_info *)addr; if (ooa_info->lun_id >= CTL_MAX_LUNS) { ooa_info->status = CTL_OOA_INVALID_LUN; break; } mtx_lock(&softc->ctl_lock); lun = softc->ctl_luns[ooa_info->lun_id]; if (lun == NULL) { mtx_unlock(&softc->ctl_lock); ooa_info->status = CTL_OOA_INVALID_LUN; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); ooa_info->num_entries = 0; for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL; io = (union ctl_io *)TAILQ_NEXT( &io->io_hdr, ooa_links)) { ooa_info->num_entries++; } mtx_unlock(&lun->lun_lock); ooa_info->status = CTL_OOA_SUCCESS; break; } case CTL_HARD_START: case CTL_HARD_STOP: { struct ctl_fe_ioctl_startstop_info ss_info; struct cfi_metatask *metatask; struct mtx hs_mtx; mtx_init(&hs_mtx, "HS Mutex", NULL, MTX_DEF); cv_init(&ss_info.sem, "hard start/stop cv" ); metatask = cfi_alloc_metatask(/*can_wait*/ 1); if (metatask == NULL) { retval = ENOMEM; mtx_destroy(&hs_mtx); break; } if (cmd == CTL_HARD_START) metatask->tasktype = CFI_TASK_STARTUP; else metatask->tasktype = 
CFI_TASK_SHUTDOWN; metatask->callback = ctl_ioctl_hard_startstop_callback; metatask->callback_arg = &ss_info; cfi_action(metatask); /* Wait for the callback */ mtx_lock(&hs_mtx); cv_wait_sig(&ss_info.sem, &hs_mtx); mtx_unlock(&hs_mtx); /* * All information has been copied from the metatask by the * time cv_broadcast() is called, so we free the metatask here. */ cfi_free_metatask(metatask); memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); mtx_destroy(&hs_mtx); break; } case CTL_BBRREAD: { struct ctl_bbrread_info *bbr_info; struct ctl_fe_ioctl_bbrread_info fe_bbr_info; struct mtx bbr_mtx; struct cfi_metatask *metatask; bbr_info = (struct ctl_bbrread_info *)addr; bzero(&fe_bbr_info, sizeof(fe_bbr_info)); bzero(&bbr_mtx, sizeof(bbr_mtx)); mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); fe_bbr_info.bbr_info = bbr_info; fe_bbr_info.lock = &bbr_mtx; cv_init(&fe_bbr_info.sem, "BBR read cv"); metatask = cfi_alloc_metatask(/*can_wait*/ 1); if (metatask == NULL) { mtx_destroy(&bbr_mtx); cv_destroy(&fe_bbr_info.sem); retval = ENOMEM; break; } metatask->tasktype = CFI_TASK_BBRREAD; metatask->callback = ctl_ioctl_bbrread_callback; metatask->callback_arg = &fe_bbr_info; metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; metatask->taskinfo.bbrread.lba = bbr_info->lba; metatask->taskinfo.bbrread.len = bbr_info->len; cfi_action(metatask); mtx_lock(&bbr_mtx); while (fe_bbr_info.wakeup_done == 0) cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); mtx_unlock(&bbr_mtx); bbr_info->status = metatask->status; bbr_info->bbr_status = metatask->taskinfo.bbrread.status; bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; memcpy(&bbr_info->sense_data, &metatask->taskinfo.bbrread.sense_data, MIN(sizeof(bbr_info->sense_data), sizeof(metatask->taskinfo.bbrread.sense_data))); cfi_free_metatask(metatask); mtx_destroy(&bbr_mtx); cv_destroy(&fe_bbr_info.sem); break; } case CTL_DELAY_IO: { struct ctl_io_delay_info *delay_info; #ifdef CTL_IO_DELAY struct ctl_lun *lun; #endif /* CTL_IO_DELAY */ delay_info = (struct ctl_io_delay_info *)addr; #ifdef CTL_IO_DELAY mtx_lock(&softc->ctl_lock); if ((delay_info->lun_id >= CTL_MAX_LUNS) || (softc->ctl_luns[delay_info->lun_id] == NULL)) { delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; } else { lun = softc->ctl_luns[delay_info->lun_id]; mtx_lock(&lun->lun_lock); delay_info->status = CTL_DELAY_STATUS_OK; switch (delay_info->delay_type) { case CTL_DELAY_TYPE_CONT: break; case CTL_DELAY_TYPE_ONESHOT: break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_TYPE; break; } switch (delay_info->delay_loc) { case CTL_DELAY_LOC_DATAMOVE: lun->delay_info.datamove_type = delay_info->delay_type; lun->delay_info.datamove_delay = delay_info->delay_secs; break; case CTL_DELAY_LOC_DONE: lun->delay_info.done_type = delay_info->delay_type; lun->delay_info.done_delay = delay_info->delay_secs; break; default: delay_info->status = CTL_DELAY_STATUS_INVALID_LOC; break; } mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); #else delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; #endif /* CTL_IO_DELAY */ break; } case CTL_REALSYNC_SET: { int *syncstate; syncstate = (int *)addr; mtx_lock(&softc->ctl_lock); switch (*syncstate) { case 0: softc->flags &= ~CTL_FLAG_REAL_SYNC; break; case 1: softc->flags |= CTL_FLAG_REAL_SYNC; break; default: retval = EINVAL; break; } mtx_unlock(&softc->ctl_lock); break; } case CTL_REALSYNC_GET: { int *syncstate; syncstate = (int*)addr; mtx_lock(&softc->ctl_lock); if (softc->flags & CTL_FLAG_REAL_SYNC) *syncstate = 1; else *syncstate = 0; 
mtx_unlock(&softc->ctl_lock); break; } case CTL_SETSYNC: case CTL_GETSYNC: { struct ctl_sync_info *sync_info; struct ctl_lun *lun; sync_info = (struct ctl_sync_info *)addr; mtx_lock(&softc->ctl_lock); lun = softc->ctl_luns[sync_info->lun_id]; if (lun == NULL) { mtx_unlock(&softc->ctl_lock); sync_info->status = CTL_GS_SYNC_NO_LUN; } /* * Get or set the sync interval. We're not bounds checking * in the set case, hopefully the user won't do something * silly. */ mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (cmd == CTL_GETSYNC) sync_info->sync_interval = lun->sync_interval; else lun->sync_interval = sync_info->sync_interval; mtx_unlock(&lun->lun_lock); sync_info->status = CTL_GS_SYNC_OK; break; } case CTL_GETSTATS: { struct ctl_stats *stats; struct ctl_lun *lun; int i; stats = (struct ctl_stats *)addr; if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > stats->alloc_len) { stats->status = CTL_SS_NEED_MORE_SPACE; stats->num_luns = softc->num_luns; break; } /* * XXX KDM no locking here. If the LUN list changes, * things can blow up. */ for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; i++, lun = STAILQ_NEXT(lun, links)) { retval = copyout(&lun->stats, &stats->lun_stats[i], sizeof(lun->stats)); if (retval != 0) break; } stats->num_luns = softc->num_luns; stats->fill_len = sizeof(struct ctl_lun_io_stats) * softc->num_luns; stats->status = CTL_SS_OK; #ifdef CTL_TIME_IO stats->flags = CTL_STATS_FLAG_TIME_VALID; #else stats->flags = CTL_STATS_FLAG_NONE; #endif getnanouptime(&stats->timestamp); break; } case CTL_ERROR_INJECT: { struct ctl_error_desc *err_desc, *new_err_desc; struct ctl_lun *lun; err_desc = (struct ctl_error_desc *)addr; new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, M_WAITOK | M_ZERO); bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); mtx_lock(&softc->ctl_lock); lun = softc->ctl_luns[err_desc->lun_id]; if (lun == NULL) { mtx_unlock(&softc->ctl_lock); free(new_err_desc, M_CTL); printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", __func__, (uintmax_t)err_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * We could do some checking here to verify the validity * of the request, but given the complexity of error * injection requests, the checking logic would be fairly * complex. * * For now, if the request is invalid, it just won't get * executed and might get deleted. */ STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); /* * XXX KDM check to make sure the serial number is unique, * in case we somehow manage to wrap. That shouldn't * happen for a very long time, but it's the right thing to * do. 
*/ new_err_desc->serial = lun->error_serial; err_desc->serial = lun->error_serial; lun->error_serial++; mtx_unlock(&lun->lun_lock); break; } case CTL_ERROR_INJECT_DELETE: { struct ctl_error_desc *delete_desc, *desc, *desc2; struct ctl_lun *lun; int delete_done; delete_desc = (struct ctl_error_desc *)addr; delete_done = 0; mtx_lock(&softc->ctl_lock); lun = softc->ctl_luns[delete_desc->lun_id]; if (lun == NULL) { mtx_unlock(&softc->ctl_lock); printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", __func__, (uintmax_t)delete_desc->lun_id); retval = EINVAL; break; } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { if (desc->serial != delete_desc->serial) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); delete_done = 1; } mtx_unlock(&lun->lun_lock); if (delete_done == 0) { printf("%s: CTL_ERROR_INJECT_DELETE: can't find " "error serial %ju on LUN %u\n", __func__, delete_desc->serial, delete_desc->lun_id); retval = EINVAL; break; } break; } case CTL_DUMP_STRUCTS: { int i, j, k; struct ctl_port *port; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); printf("CTL Persistent Reservation information start:\n"); for (i = 0; i < CTL_MAX_LUNS; i++) { struct ctl_lun *lun; lun = softc->ctl_luns[i]; if ((lun == NULL) || ((lun->flags & CTL_LUN_DISABLED) != 0)) continue; for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { if (lun->pr_keys[j] == NULL) continue; for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ if (lun->pr_keys[j][k] == 0) continue; printf(" LUN %d port %d iid %d key " "%#jx\n", i, j, k, (uintmax_t)lun->pr_keys[j][k]); } } } printf("CTL Persistent Reservation information end\n"); printf("CTL Ports:\n"); STAILQ_FOREACH(port, &softc->port_list, links) { printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " "%#jx WWPN %#jx\n", port->targ_port, port->port_name, port->frontend->name, port->port_type, port->physical_port, port->virtual_port, (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 && port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL) continue; printf(" iid %u use %d WWPN %#jx '%s'\n", j, port->wwpn_iid[j].in_use, (uintmax_t)port->wwpn_iid[j].wwpn, port->wwpn_iid[j].name); } } printf("CTL Port information end\n"); mtx_unlock(&softc->ctl_lock); /* * XXX KDM calling this without a lock. We'd likely want * to drop the lock before calling the frontend's dump * routine anyway. 
*/ printf("CTL Frontends:\n"); STAILQ_FOREACH(fe, &softc->fe_list, links) { printf(" Frontend '%s'\n", fe->name); if (fe->fe_dump != NULL) fe->fe_dump(); } printf("CTL Frontend information end\n"); break; } case CTL_LUN_REQ: { struct ctl_lun_req *lun_req; struct ctl_backend_driver *backend; lun_req = (struct ctl_lun_req *)addr; backend = ctl_backend_find(lun_req->backend); if (backend == NULL) { lun_req->status = CTL_LUN_ERROR; snprintf(lun_req->error_str, sizeof(lun_req->error_str), "Backend \"%s\" not found.", lun_req->backend); break; } if (lun_req->num_be_args > 0) { lun_req->kern_be_args = ctl_copyin_args( lun_req->num_be_args, lun_req->be_args, lun_req->error_str, sizeof(lun_req->error_str)); if (lun_req->kern_be_args == NULL) { lun_req->status = CTL_LUN_ERROR; break; } } retval = backend->ioctl(dev, cmd, addr, flag, td); if (lun_req->num_be_args > 0) { ctl_copyout_args(lun_req->num_be_args, lun_req->kern_be_args); ctl_free_args(lun_req->num_be_args, lun_req->kern_be_args); } break; } case CTL_LUN_LIST: { struct sbuf *sb; struct ctl_lun *lun; struct ctl_lun_list *list; struct ctl_option *opt; list = (struct ctl_lun_list *)addr; /* * Allocate a fixed length sbuf here, based on the length * of the user's buffer. We could allocate an auto-extending * buffer, and then tell the user how much larger our * amount of data is than his buffer, but that presents * some problems: * * 1. The sbuf(9) routines use a blocking malloc, and so * we can't hold a lock while calling them with an * auto-extending buffer. * * 2. There is not currently a LUN reference counting * mechanism, outside of outstanding transactions on * the LUN's OOA queue. So a LUN could go away on us * while we're getting the LUN number, backend-specific * information, etc. Thus, given the way things * currently work, we need to hold the CTL lock while * grabbing LUN information. * * So, from the user's standpoint, the best thing to do is * allocate what he thinks is a reasonable buffer length, * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, * double the buffer length and try again. (And repeat * that until he succeeds.) */ sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); retval = sbuf_printf(sb, "\n", (uintmax_t)lun->lun); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", (lun->backend == NULL) ? "none" : lun->backend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", lun->be_lun->lun_type); if (retval != 0) break; if (lun->backend == NULL) { retval = sbuf_printf(sb, "\n"); if (retval != 0) break; continue; } retval = sbuf_printf(sb, "\t%ju\n", (lun->be_lun->maxlba > 0) ? 
lun->be_lun->maxlba + 1 : 0); if (retval != 0) break; retval = sbuf_printf(sb, "\t%u\n", lun->be_lun->blocksize); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->serial_num, sizeof(lun->be_lun->serial_num)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; retval = sbuf_printf(sb, "\t"); if (retval != 0) break; retval = ctl_sbuf_printf_esc(sb, lun->be_lun->device_id, sizeof(lun->be_lun->device_id)); if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; if (lun->backend->lun_info != NULL) { retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); if (retval != 0) break; } STAILQ_FOREACH(opt, &lun->be_lun->options, links) { retval = sbuf_printf(sb, "\t<%s>%s\n", opt->name, opt->value, opt->name); if (retval != 0) break; } retval = sbuf_printf(sb, "\n"); if (retval != 0) break; mtx_unlock(&lun->lun_lock); } if (lun != NULL) mtx_unlock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } case CTL_ISCSI: { struct ctl_iscsi *ci; struct ctl_frontend *fe; ci = (struct ctl_iscsi *)addr; fe = ctl_frontend_find("iscsi"); if (fe == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Frontend \"iscsi\" not found."); break; } retval = fe->ioctl(dev, cmd, addr, flag, td); break; } case CTL_PORT_REQ: { struct ctl_req *req; struct ctl_frontend *fe; req = (struct ctl_req *)addr; fe = ctl_frontend_find(req->driver); if (fe == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Frontend \"%s\" not found.", req->driver); break; } if (req->num_args > 0) { req->kern_args = ctl_copyin_args(req->num_args, req->args, req->error_str, sizeof(req->error_str)); if (req->kern_args == NULL) { req->status = CTL_LUN_ERROR; break; } } retval = fe->ioctl(dev, cmd, addr, flag, td); if (req->num_args > 0) { ctl_copyout_args(req->num_args, req->kern_args); ctl_free_args(req->num_args, req->kern_args); } break; } case CTL_PORT_LIST: { struct sbuf *sb; struct ctl_port *port; struct ctl_lun_list *list; struct ctl_option *opt; int j; + uint32_t plun; list = (struct ctl_lun_list *)addr; sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { list->status = CTL_LUN_LIST_ERROR; snprintf(list->error_str, sizeof(list->error_str), "Unable to allocate %d bytes for LUN list", list->alloc_len); break; } sbuf_printf(sb, "\n"); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { retval = sbuf_printf(sb, "\n", (uintmax_t)port->targ_port); /* * Bail out as soon as we see that we've overfilled * the buffer. */ if (retval != 0) break; retval = sbuf_printf(sb, "\t%s" "\n", port->frontend->name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->port_type); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", (port->status & CTL_PORT_STATUS_ONLINE) ? 
"YES" : "NO"); if (retval != 0) break; retval = sbuf_printf(sb, "\t%s\n", port->port_name); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->physical_port); if (retval != 0) break; retval = sbuf_printf(sb, "\t%d\n", port->virtual_port); if (retval != 0) break; if (port->target_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->target_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_devid != NULL) { sbuf_printf(sb, "\t"); ctl_id_sbuf(port->port_devid, sb); sbuf_printf(sb, "\n"); } if (port->port_info != NULL) { retval = port->port_info(port->onoff_arg, sb); if (retval != 0) break; } STAILQ_FOREACH(opt, &port->options, links) { retval = sbuf_printf(sb, "\t<%s>%s\n", opt->name, opt->value, opt->name); if (retval != 0) break; } + if (port->lun_map != NULL) { + sbuf_printf(sb, "\ton\n"); + for (j = 0; j < CTL_MAX_LUNS; j++) { + plun = ctl_lun_map_from_port(port, j); + if (plun >= CTL_MAX_LUNS) + continue; + sbuf_printf(sb, + "\t%u\n", + j, plun); + } + } + for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { if (port->wwpn_iid[j].in_use == 0 || (port->wwpn_iid[j].wwpn == 0 && port->wwpn_iid[j].name == NULL)) continue; if (port->wwpn_iid[j].name != NULL) retval = sbuf_printf(sb, "\t%s\n", j, port->wwpn_iid[j].name); else retval = sbuf_printf(sb, "\tnaa.%08jx\n", j, port->wwpn_iid[j].wwpn); if (retval != 0) break; } if (retval != 0) break; retval = sbuf_printf(sb, "\n"); if (retval != 0) break; } mtx_unlock(&softc->ctl_lock); if ((retval != 0) || ((retval = sbuf_printf(sb, "\n")) != 0)) { retval = 0; sbuf_delete(sb); list->status = CTL_LUN_LIST_NEED_MORE_SPACE; snprintf(list->error_str, sizeof(list->error_str), "Out of space, %d bytes is too small", list->alloc_len); break; } sbuf_finish(sb); retval = copyout(sbuf_data(sb), list->lun_xml, sbuf_len(sb) + 1); list->fill_len = sbuf_len(sb) + 1; list->status = CTL_LUN_LIST_OK; sbuf_delete(sb); break; } + case CTL_LUN_MAP: { + struct ctl_lun_map *lm = (struct ctl_lun_map *)addr; + struct ctl_port *port; + + mtx_lock(&softc->ctl_lock); + if (lm->port >= CTL_MAX_PORTS || + (port = softc->ctl_ports[lm->port]) == NULL) { + mtx_unlock(&softc->ctl_lock); + return (ENXIO); + } + if (lm->plun < CTL_MAX_LUNS) { + if (lm->lun == UINT32_MAX) + retval = ctl_lun_map_unset(port, lm->plun); + else if (lm->lun < CTL_MAX_LUNS && + softc->ctl_luns[lm->lun] != NULL) + retval = ctl_lun_map_set(port, lm->plun, lm->lun); + else { + mtx_unlock(&softc->ctl_lock); + return (ENXIO); + } + } else if (lm->plun == UINT32_MAX) { + if (lm->lun == UINT32_MAX) + retval = ctl_lun_map_deinit(port); + else + retval = ctl_lun_map_init(port); + } else { + mtx_unlock(&softc->ctl_lock); + return (ENXIO); + } + mtx_unlock(&softc->ctl_lock); + break; + } default: { /* XXX KDM should we fix this? */ #if 0 struct ctl_backend_driver *backend; unsigned int type; int found; found = 0; /* * We encode the backend type as the ioctl type for backend * ioctls. So parse it out here, and then search for a * backend of this type. 
*/ type = _IOC_TYPE(cmd); STAILQ_FOREACH(backend, &softc->be_list, links) { if (backend->type == type) { found = 1; break; } } if (found == 0) { printf("ctl: unknown ioctl command %#lx or backend " "%d\n", cmd, type); retval = EINVAL; break; } retval = backend->ioctl(dev, cmd, addr, flag, td); #endif retval = ENOTTY; break; } } return (retval); } uint32_t ctl_get_initindex(struct ctl_nexus *nexus) { if (nexus->targ_port < CTL_MAX_PORTS) return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); else return (nexus->initid.id + ((nexus->targ_port - CTL_MAX_PORTS) * CTL_MAX_INIT_PER_PORT)); } uint32_t ctl_get_resindex(struct ctl_nexus *nexus) { return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); } uint32_t ctl_port_idx(int port_num) { if (port_num < CTL_MAX_PORTS) return(port_num); else return(port_num - CTL_MAX_PORTS); } -static uint32_t -ctl_map_lun(struct ctl_softc *softc, int port_num, uint32_t lun_id) +int +ctl_lun_map_init(struct ctl_port *port) { - struct ctl_port *port; + uint32_t i; - port = softc->ctl_ports[ctl_port_idx(port_num)]; + if (port->lun_map == NULL) + port->lun_map = malloc(sizeof(uint32_t) * CTL_MAX_LUNS, + M_CTL, M_NOWAIT); + if (port->lun_map == NULL) + return (ENOMEM); + for (i = 0; i < CTL_MAX_LUNS; i++) + port->lun_map[i] = UINT32_MAX; + return (0); +} + +int +ctl_lun_map_deinit(struct ctl_port *port) +{ + + if (port->lun_map == NULL) + return (0); + free(port->lun_map, M_CTL); + port->lun_map = NULL; + return (0); +} + +int +ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun) +{ + int status; + + if (port->lun_map == NULL) { + status = ctl_lun_map_init(port); + if (status != 0) + return (status); + } + port->lun_map[plun] = glun; + return (0); +} + +int +ctl_lun_map_unset(struct ctl_port *port, uint32_t plun) +{ + + if (port->lun_map == NULL) + return (0); + port->lun_map[plun] = UINT32_MAX; + return (0); +} + +int +ctl_lun_map_unsetg(struct ctl_port *port, uint32_t glun) +{ + int i; + + if (port->lun_map == NULL) + return (0); + for (i = 0; i < CTL_MAX_LUNS; i++) { + if (port->lun_map[i] == glun) + port->lun_map[i] = UINT32_MAX; + } + return (0); +} + +uint32_t +ctl_lun_map_from_port(struct ctl_port *port, uint32_t lun_id) +{ + if (port == NULL) return (UINT32_MAX); - if (port->lun_map == NULL) + if (port->lun_map == NULL || lun_id >= CTL_MAX_LUNS) return (lun_id); - return (port->lun_map(port->targ_lun_arg, lun_id)); + return (port->lun_map[lun_id]); } -static uint32_t -ctl_map_lun_back(struct ctl_softc *softc, int port_num, uint32_t lun_id) +uint32_t +ctl_lun_map_to_port(struct ctl_port *port, uint32_t lun_id) { - struct ctl_port *port; uint32_t i; - port = softc->ctl_ports[ctl_port_idx(port_num)]; + if (port == NULL) + return (UINT32_MAX); if (port->lun_map == NULL) return (lun_id); for (i = 0; i < CTL_MAX_LUNS; i++) { - if (port->lun_map(port->targ_lun_arg, i) == lun_id) + if (port->lun_map[i] == lun_id) return (i); } return (UINT32_MAX); } +static struct ctl_port * +ctl_io_port(struct ctl_io_hdr *io_hdr) +{ + int port_num; + + port_num = io_hdr->nexus.targ_port; + return (control_softc->ctl_ports[ctl_port_idx(port_num)]); +} + /* * Note: This only works for bitmask sizes that are at least 32 bits, and * that are a power of 2. 
*/ int ctl_ffz(uint32_t *mask, uint32_t size) { uint32_t num_chunks, num_pieces; int i, j; num_chunks = (size >> 5); if (num_chunks == 0) num_chunks++; num_pieces = MIN((sizeof(uint32_t) * 8), size); for (i = 0; i < num_chunks; i++) { for (j = 0; j < num_pieces; j++) { if ((mask[i] & (1 << j)) == 0) return ((i << 5) + j); } } return (-1); } int ctl_set_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) != 0) return (-1); else mask[chunk] |= (1 << piece); return (0); } int ctl_clear_mask(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (-1); else mask[chunk] &= ~(1 << piece); return (0); } int ctl_is_set(uint32_t *mask, uint32_t bit) { uint32_t chunk, piece; chunk = bit >> 5; piece = bit % (sizeof(uint32_t) * 8); if ((mask[chunk] & (1 << piece)) == 0) return (0); else return (1); } static uint64_t ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return (0); return (t[residx % CTL_MAX_INIT_PER_PORT]); } static void ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; if (t == NULL) return; t[residx % CTL_MAX_INIT_PER_PORT] = 0; } static void ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) { uint64_t *p; u_int i; i = residx/CTL_MAX_INIT_PER_PORT; if (lun->pr_keys[i] != NULL) return; mtx_unlock(&lun->lun_lock); p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, M_WAITOK | M_ZERO); mtx_lock(&lun->lun_lock); if (lun->pr_keys[i] == NULL) lun->pr_keys[i] = p; else free(p, M_CTL); } static void ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) { uint64_t *t; t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; KASSERT(t != NULL, ("prkey %d is not allocated", residx)); t[residx % CTL_MAX_INIT_PER_PORT] = key; } /* * ctl_softc, pool_name, total_ctl_io are passed in. * npool is passed out. 
*/ int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, uint32_t total_ctl_io, void **npool) { #ifdef IO_POOLS struct ctl_io_pool *pool; pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, M_NOWAIT | M_ZERO); if (pool == NULL) return (ENOMEM); snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); pool->ctl_softc = ctl_softc; pool->zone = uma_zsecond_create(pool->name, NULL, NULL, NULL, NULL, ctl_softc->io_zone); /* uma_prealloc(pool->zone, total_ctl_io); */ *npool = pool; #else *npool = ctl_softc->io_zone; #endif return (0); } void ctl_pool_free(struct ctl_io_pool *pool) { if (pool == NULL) return; #ifdef IO_POOLS uma_zdestroy(pool->zone); free(pool, M_CTL); #endif } union ctl_io * ctl_alloc_io(void *pool_ref) { union ctl_io *io; #ifdef IO_POOLS struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; io = uma_zalloc(pool->zone, M_WAITOK); #else io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); #endif if (io != NULL) io->io_hdr.pool = pool_ref; return (io); } union ctl_io * ctl_alloc_io_nowait(void *pool_ref) { union ctl_io *io; #ifdef IO_POOLS struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; io = uma_zalloc(pool->zone, M_NOWAIT); #else io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT); #endif if (io != NULL) io->io_hdr.pool = pool_ref; return (io); } void ctl_free_io(union ctl_io *io) { #ifdef IO_POOLS struct ctl_io_pool *pool; #endif if (io == NULL) return; #ifdef IO_POOLS pool = (struct ctl_io_pool *)io->io_hdr.pool; uma_zfree(pool->zone, io); #else uma_zfree((uma_zone_t)io->io_hdr.pool, io); #endif } void ctl_zero_io(union ctl_io *io) { void *pool_ref; if (io == NULL) return; /* * May need to preserve linked list pointers at some point too. */ pool_ref = io->io_hdr.pool; memset(io, 0, sizeof(*io)); io->io_hdr.pool = pool_ref; } /* * This routine is currently used for internal copies of ctl_ios that need * to persist for some reason after we've already returned status to the * FETD. (Thus the flag set.) * * XXX XXX * Note that this makes a blind copy of all fields in the ctl_io, except * for the pool reference. This includes any memory that has been * allocated! That memory will no longer be valid after done has been * called, so this would be VERY DANGEROUS for command that actually does * any reads or writes. Right now (11/7/2005), this is only used for immediate * start and stop commands, which don't transfer any data, so this is not a * problem. If it is used for anything else, the caller would also need to * allocate data buffer space and this routine would need to be modified to * copy the data buffer(s) as well. */ void ctl_copy_io(union ctl_io *src, union ctl_io *dest) { void *pool_ref; if ((src == NULL) || (dest == NULL)) return; /* * May need to preserve linked list pointers at some point too. */ pool_ref = dest->io_hdr.pool; memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest))); dest->io_hdr.pool = pool_ref; /* * We need to know that this is an internal copy, and doesn't need * to get passed back to the FETD that allocated it. */ dest->io_hdr.flags |= CTL_FLAG_INT_COPY; } int ctl_expand_number(const char *buf, uint64_t *num) { char *endptr; uint64_t number; unsigned shift; number = strtoq(buf, &endptr, 0); switch (tolower((unsigned char)*endptr)) { case 'e': shift = 60; break; case 'p': shift = 50; break; case 't': shift = 40; break; case 'g': shift = 30; break; case 'm': shift = 20; break; case 'k': shift = 10; break; case 'b': case '\0': /* No unit. */ *num = number; return (0); default: /* Unrecognized unit. 
*/ return (-1); } if ((number << shift) >> shift != number) { /* Overflow */ return (-1); } *num = number << shift; return (0); } /* * This routine could be used in the future to load default and/or saved * mode page parameters for a particuar lun. */ static int ctl_init_page_index(struct ctl_lun *lun) { int i; struct ctl_page_index *page_index; const char *value; uint64_t ival; memcpy(&lun->mode_pages.index, page_index_template, sizeof(page_index_template)); for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { page_index = &lun->mode_pages.index[i]; /* * If this is a disk-only mode page, there's no point in * setting it up. For some pages, we have to have some * basic information about the disk in order to calculate the * mode page data. */ if ((lun->be_lun->lun_type != T_DIRECT) && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; switch (page_index->page_code & SMPH_PC_MASK) { case SMS_RW_ERROR_RECOVERY_PAGE: { if (page_index->subpage != SMS_SUBPAGE_PAGE_0) panic("subpage is incorrect!"); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE], &rw_er_page_changeable, sizeof(rw_er_page_changeable)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT], &rw_er_page_default, sizeof(rw_er_page_default)); memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED], &rw_er_page_default, sizeof(rw_er_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rw_er_page; break; } case SMS_FORMAT_DEVICE_PAGE: { struct scsi_format_page *format_page; if (page_index->subpage != SMS_SUBPAGE_PAGE_0) panic("subpage is incorrect!"); /* * Sectors per track are set above. Bytes per * sector need to be set here on a per-LUN basis. */ memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[ CTL_PAGE_CHANGEABLE], &format_page_changeable, sizeof(format_page_changeable)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT], &format_page_default, sizeof(format_page_default)); memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED], &format_page_default, sizeof(format_page_default)); format_page = &lun->mode_pages.format_page[ CTL_PAGE_CURRENT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_DEFAULT]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); format_page = &lun->mode_pages.format_page[ CTL_PAGE_SAVED]; scsi_ulto2b(lun->be_lun->blocksize, format_page->bytes_per_sector); page_index->page_data = (uint8_t *)lun->mode_pages.format_page; break; } case SMS_RIGID_DISK_PAGE: { struct scsi_rigid_disk_page *rigid_disk_page; uint32_t sectors_per_cylinder; uint64_t cylinders; #ifndef __XSCALE__ int shift; #endif /* !__XSCALE__ */ if (page_index->subpage != SMS_SUBPAGE_PAGE_0) panic("invalid subpage value %d", page_index->subpage); /* * Rotation rate and sectors per track are set * above. We calculate the cylinders here based on * capacity. Due to the number of heads and * sectors per track we're using, smaller arrays * may turn out to have 0 cylinders. Linux and * FreeBSD don't pay attention to these mode pages * to figure out capacity, but Solaris does. It * seems to deal with 0 cylinders just fine, and * works out a fake geometry based on the capacity. 
*/ memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT], &rigid_disk_page_default, sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[ CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable, sizeof(rigid_disk_page_changeable)); sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK * CTL_DEFAULT_HEADS; /* * The divide method here will be more accurate, * probably, but results in floating point being * used in the kernel on i386 (__udivdi3()). On the * XScale, though, __udivdi3() is implemented in * software. * * The shift method for cylinder calculation is * accurate if sectors_per_cylinder is a power of * 2. Otherwise it might be slightly off -- you * might have a bit of a truncation problem. */ #ifdef __XSCALE__ cylinders = (lun->be_lun->maxlba + 1) / sectors_per_cylinder; #else for (shift = 31; shift > 0; shift--) { if (sectors_per_cylinder & (1 << shift)) break; } cylinders = (lun->be_lun->maxlba + 1) >> shift; #endif /* * We've basically got 3 bytes, or 24 bits for the * cylinder size in the mode page. If we're over, * just round down to 2^24. */ if (cylinders > 0xffffff) cylinders = 0xffffff; rigid_disk_page = &lun->mode_pages.rigid_disk_page[ CTL_PAGE_DEFAULT]; scsi_ulto3b(cylinders, rigid_disk_page->cylinders); if ((value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) { scsi_ulto2b(strtol(value, NULL, 0), rigid_disk_page->rotation_rate); } memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED], &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT], sizeof(rigid_disk_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.rigid_disk_page; break; } case SMS_CACHING_PAGE: { struct scsi_caching_page *caching_page; if (page_index->subpage != SMS_SUBPAGE_PAGE_0) panic("invalid subpage value %d", page_index->subpage); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT], &caching_page_default, sizeof(caching_page_default)); memcpy(&lun->mode_pages.caching_page[ CTL_PAGE_CHANGEABLE], &caching_page_changeable, sizeof(caching_page_changeable)); memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED], &caching_page_default, sizeof(caching_page_default)); caching_page = &lun->mode_pages.caching_page[ CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "writecache"); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 &= ~SCP_WCE; value = ctl_get_opt(&lun->be_lun->options, "readcache"); if (value != NULL && strcmp(value, "off") == 0) caching_page->flags1 |= SCP_RCD; memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT], &lun->mode_pages.caching_page[CTL_PAGE_SAVED], sizeof(caching_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.caching_page; break; } case SMS_CONTROL_MODE_PAGE: { struct scsi_control_page *control_page; if (page_index->subpage != SMS_SUBPAGE_PAGE_0) panic("invalid subpage value %d", page_index->subpage); memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT], &control_page_default, sizeof(control_page_default)); memcpy(&lun->mode_pages.control_page[ CTL_PAGE_CHANGEABLE], &control_page_changeable, sizeof(control_page_changeable)); memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED], &control_page_default, sizeof(control_page_default)); control_page = &lun->mode_pages.control_page[ CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "reordering"); if (value != NULL && strcmp(value, "unrestricted") == 0) { control_page->queue_flags &= 
~SCP_QUEUE_ALG_MASK; control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED; } memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT], &lun->mode_pages.control_page[CTL_PAGE_SAVED], sizeof(control_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.control_page; break; } case SMS_INFO_EXCEPTIONS_PAGE: { switch (page_index->subpage) { case SMS_SUBPAGE_PAGE_0: memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[ CTL_PAGE_CHANGEABLE], &ie_page_changeable, sizeof(ie_page_changeable)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT], &ie_page_default, sizeof(ie_page_default)); memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED], &ie_page_default, sizeof(ie_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.ie_page; break; case 0x02: { struct ctl_logical_block_provisioning_page *page; memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT], &lbp_page_default, sizeof(lbp_page_default)); memcpy(&lun->mode_pages.lbp_page[ CTL_PAGE_CHANGEABLE], &lbp_page_changeable, sizeof(lbp_page_changeable)); memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED], &lbp_page_default, sizeof(lbp_page_default)); page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED]; value = ctl_get_opt(&lun->be_lun->options, "avail-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[0].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[0].count); } value = ctl_get_opt(&lun->be_lun->options, "used-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[1].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[1].count); } value = ctl_get_opt(&lun->be_lun->options, "pool-avail-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[2].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_DEC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[2].count); } value = ctl_get_opt(&lun->be_lun->options, "pool-used-threshold"); if (value != NULL && ctl_expand_number(value, &ival) == 0) { page->descr[3].flags |= SLBPPD_ENABLED | SLBPPD_ARMING_INC; if (lun->be_lun->blocksize) ival /= lun->be_lun->blocksize; else ival /= 512; scsi_ulto4b(ival >> CTL_LBP_EXPONENT, page->descr[3].count); } memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT], &lun->mode_pages.lbp_page[CTL_PAGE_SAVED], sizeof(lbp_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.lbp_page; }} break; } case SMS_VENDOR_SPECIFIC_PAGE:{ switch (page_index->subpage) { case DBGCNF_SUBPAGE_CODE: { struct copan_debugconf_subpage *current_page, *saved_page; memcpy(&lun->mode_pages.debugconf_subpage[ CTL_PAGE_CURRENT], &debugconf_page_default, sizeof(debugconf_page_default)); memcpy(&lun->mode_pages.debugconf_subpage[ CTL_PAGE_CHANGEABLE], &debugconf_page_changeable, sizeof(debugconf_page_changeable)); memcpy(&lun->mode_pages.debugconf_subpage[ CTL_PAGE_DEFAULT], &debugconf_page_default, sizeof(debugconf_page_default)); memcpy(&lun->mode_pages.debugconf_subpage[ CTL_PAGE_SAVED], &debugconf_page_default, sizeof(debugconf_page_default)); page_index->page_data = (uint8_t *)lun->mode_pages.debugconf_subpage; current_page = (struct copan_debugconf_subpage *) (page_index->page_data + (page_index->page_len 
* CTL_PAGE_CURRENT)); saved_page = (struct copan_debugconf_subpage *) (page_index->page_data + (page_index->page_len * CTL_PAGE_SAVED)); break; } default: panic("invalid subpage value %d", page_index->subpage); break; } break; } default: panic("invalid page value %d", page_index->page_code & SMPH_PC_MASK); break; } } return (CTL_RETVAL_COMPLETE); } static int ctl_init_log_page_index(struct ctl_lun *lun) { struct ctl_page_index *page_index; int i, j, k, prev; memcpy(&lun->log_pages.index, log_page_index_template, sizeof(log_page_index_template)); prev = -1; for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* * If this is a disk-only mode page, there's no point in * setting it up. For some pages, we have to have some * basic information about the disk in order to calculate the * mode page data. */ if ((lun->be_lun->lun_type != T_DIRECT) && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING && lun->backend->lun_attr == NULL) continue; if (page_index->page_code != prev) { lun->log_pages.pages_page[j] = page_index->page_code; prev = page_index->page_code; j++; } lun->log_pages.subpages_page[k*2] = page_index->page_code; lun->log_pages.subpages_page[k*2+1] = page_index->subpage; k++; } lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0]; lun->log_pages.index[0].page_len = j; lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0]; lun->log_pages.index[1].page_len = k * 2; lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0]; lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS; return (CTL_RETVAL_COMPLETE); } static int hex2bin(const char *str, uint8_t *buf, int buf_size) { int i; u_char c; memset(buf, 0, buf_size); while (isspace(str[0])) str++; if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) str += 2; buf_size *= 2; for (i = 0; str[i] != 0 && i < buf_size; i++) { c = str[i]; if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else break; if (c >= 16) break; if ((i & 1) == 0) buf[i / 2] |= (c << 4); else buf[i / 2] |= c; } return ((i + 1) / 2); } /* * LUN allocation. * * Requirements: * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he * wants us to allocate the LUN and he can block. * - ctl_softc is always set * - be_lun is set if the LUN has a backend (needed for disk LUNs) * * Returns 0 for success, non-zero (errno) for failure. */ static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, struct ctl_be_lun *const be_lun, struct ctl_id target_id) { struct ctl_lun *nlun, *lun; struct ctl_port *port; struct scsi_vpd_id_descriptor *desc; struct scsi_vpd_id_t10 *t10id; const char *eui, *naa, *scsiname, *vendor, *value; int lun_number, i, lun_malloced; int devidlen, idlen1, idlen2 = 0, len; if (be_lun == NULL) return (EINVAL); /* * We currently only support Direct Access or Processor LUN types. */ switch (be_lun->lun_type) { case T_DIRECT: break; case T_PROCESSOR: break; case T_SEQUENTIAL: case T_CHANGER: default: be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); break; } if (ctl_lun == NULL) { lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); lun_malloced = 1; } else { lun_malloced = 0; lun = ctl_lun; } memset(lun, 0, sizeof(*lun)); if (lun_malloced) lun->flags = CTL_LUN_MALLOCED; /* Generate LUN ID. 
*/ devidlen = max(CTL_DEVID_MIN_LEN, strnlen(be_lun->device_id, CTL_DEVID_LEN)); idlen1 = sizeof(*t10id) + devidlen; len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; scsiname = ctl_get_opt(&be_lun->options, "scsiname"); if (scsiname != NULL) { idlen2 = roundup2(strlen(scsiname) + 1, 4); len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; } eui = ctl_get_opt(&be_lun->options, "eui"); if (eui != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } naa = ctl_get_opt(&be_lun->options, "naa"); if (naa != NULL) { len += sizeof(struct scsi_vpd_id_descriptor) + 16; } lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; desc->proto_codeset = SVPD_ID_CODESET_ASCII; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; desc->length = idlen1; t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; memset(t10id->vendor, ' ', sizeof(t10id->vendor)); if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); } else { strncpy(t10id->vendor, vendor, min(sizeof(t10id->vendor), strlen(vendor))); } strncpy((char *)t10id->vendor_spec_id, (char *)be_lun->device_id, devidlen); if (scsiname != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen2; strlcpy(desc->identifier, scsiname, idlen2); } if (eui != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_EUI64; desc->length = hex2bin(eui, desc->identifier, 16); desc->length = desc->length > 12 ? 16 : (desc->length > 8 ? 12 : 8); len -= 16 - desc->length; } if (naa != NULL) { desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + desc->length); desc->proto_codeset = SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_NAA; desc->length = hex2bin(naa, desc->identifier, 16); desc->length = desc->length > 8 ? 16 : 8; len -= 16 - desc->length; } lun->lun_devid->len = len; mtx_lock(&ctl_softc->ctl_lock); /* * See if the caller requested a particular LUN number. If so, see * if it is available. Otherwise, allocate the first available LUN. */ if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { mtx_unlock(&ctl_softc->ctl_lock); if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { printf("ctl: requested LUN ID %d is higher " "than CTL_MAX_LUNS - 1 (%d)\n", be_lun->req_lun_id, CTL_MAX_LUNS - 1); } else { /* * XXX KDM return an error, or just assign * another LUN ID in this case?? 
*/ printf("ctl: requested LUN ID %d is already " "in use\n", be_lun->req_lun_id); } if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); return (ENOSPC); } lun_number = be_lun->req_lun_id; } else { lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); if (lun_number == -1) { mtx_unlock(&ctl_softc->ctl_lock); printf("ctl: can't allocate LUN on target %ju, out of " "LUNs\n", (uintmax_t)target_id.id); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); be_lun->lun_config_status(be_lun->be_lun, CTL_LUN_CONFIG_FAILURE); return (ENOSPC); } } ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); lun->target = target_id; lun->lun = lun_number; lun->be_lun = be_lun; /* * The processor LUN is always enabled. Disk LUNs come on line * disabled, and must be enabled by the backend. */ lun->flags |= CTL_LUN_DISABLED; lun->backend = be_lun->be; be_lun->ctl_lun = lun; be_lun->lun_id = lun_number; atomic_add_int(&be_lun->be->num_luns, 1); if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) lun->flags |= CTL_LUN_OFFLINE; if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) lun->flags |= CTL_LUN_STOPPED; if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) lun->flags |= CTL_LUN_INOPERABLE; if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) lun->flags |= CTL_LUN_PRIMARY_SC; value = ctl_get_opt(&be_lun->options, "readonly"); if (value != NULL && strcmp(value, "on") == 0) lun->flags |= CTL_LUN_READONLY; lun->serseq = CTL_LUN_SERSEQ_OFF; if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ) lun->serseq = CTL_LUN_SERSEQ_READ; value = ctl_get_opt(&be_lun->options, "serseq"); if (value != NULL && strcmp(value, "on") == 0) lun->serseq = CTL_LUN_SERSEQ_ON; else if (value != NULL && strcmp(value, "read") == 0) lun->serseq = CTL_LUN_SERSEQ_READ; else if (value != NULL && strcmp(value, "off") == 0) lun->serseq = CTL_LUN_SERSEQ_OFF; lun->ctl_softc = ctl_softc; TAILQ_INIT(&lun->ooa_queue); TAILQ_INIT(&lun->blocked_queue); STAILQ_INIT(&lun->error_list); ctl_tpc_lun_init(lun); /* * Initialize the mode and log page index. */ ctl_init_page_index(lun); ctl_init_log_page_index(lun); /* * Now, before we insert this lun on the lun list, set the lun * inventory changed UA for all other luns. */ STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); ctl_softc->ctl_luns[lun_number] = lun; ctl_softc->num_luns++; /* Setup statistics gathering */ lun->stats.device_type = be_lun->lun_type; lun->stats.lun_number = lun_number; if (lun->stats.device_type == T_DIRECT) lun->stats.blocksize = be_lun->blocksize; else lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; for (i = 0;i < CTL_MAX_PORTS;i++) lun->stats.ports[i].targ_port = i; mtx_unlock(&ctl_softc->ctl_lock); lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); /* * Run through each registered FETD and bring it online if it isn't * already. Enable the target ID if it hasn't been enabled, and * enable this particular LUN. */ STAILQ_FOREACH(port, &ctl_softc->port_list, links) { int retval; retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); if (retval != 0) { printf("ctl_alloc_lun: FETD %s port %d returned error " "%d for lun_enable on target %ju lun %d\n", port->port_name, port->targ_port, retval, (uintmax_t)target_id.id, lun_number); } else port->status |= CTL_PORT_STATUS_LUN_ONLINE; } return (0); } /* * Delete a LUN. 
* Assumptions: * - LUN has already been marked invalid and any pending I/O has been taken * care of. */ static int ctl_free_lun(struct ctl_lun *lun) { struct ctl_softc *softc; -#if 0 struct ctl_port *port; -#endif struct ctl_lun *nlun; int i; softc = lun->ctl_softc; mtx_assert(&softc->ctl_lock, MA_OWNED); + STAILQ_FOREACH(port, &softc->port_list, links) + ctl_lun_map_unsetg(port, lun->lun); + STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); ctl_clear_mask(softc->ctl_lun_mask, lun->lun); softc->ctl_luns[lun->lun] = NULL; if (!TAILQ_EMPTY(&lun->ooa_queue)) panic("Freeing a LUN %p with outstanding I/O!!\n", lun); softc->num_luns--; /* * XXX KDM this scheme only works for a single target/multiple LUN * setup. It needs to be revamped for a multiple target scheme. * * XXX KDM this results in port->lun_disable() getting called twice, * once when ctl_disable_lun() is called, and a second time here. * We really need to re-think the LUN disable semantics. There * should probably be several steps/levels to LUN removal: * - disable * - invalidate * - free * * Right now we only have a disable method when communicating to * the front end ports, at least for individual LUNs. */ #if 0 STAILQ_FOREACH(port, &softc->port_list, links) { int retval; retval = port->lun_disable(port->targ_lun_arg, lun->target, lun->lun); if (retval != 0) { printf("ctl_free_lun: FETD %s port %d returned error " "%d for lun_disable on target %ju lun %jd\n", port->port_name, port->targ_port, retval, (uintmax_t)lun->target.id, (intmax_t)lun->lun); } if (STAILQ_FIRST(&softc->lun_list) == NULL) { port->status &= ~CTL_PORT_STATUS_LUN_ONLINE; retval = port->targ_disable(port->targ_lun_arg,lun->target); if (retval != 0) { printf("ctl_free_lun: FETD %s port %d " "returned error %d for targ_disable on " "target %ju\n", port->port_name, port->targ_port, retval, (uintmax_t)lun->target.id); } else port->status &= ~CTL_PORT_STATUS_TARG_ONLINE; if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) continue; #if 0 port->port_offline(port->onoff_arg); port->status &= ~CTL_PORT_STATUS_ONLINE; #endif } } #endif /* * Tell the backend to free resources, if this LUN has a backend. */ atomic_subtract_int(&lun->be_lun->be->num_luns, 1); lun->be_lun->lun_shutdown(lun->be_lun->be_lun); ctl_tpc_lun_shutdown(lun); mtx_destroy(&lun->lun_lock); free(lun->lun_devid, M_CTL); for (i = 0; i < CTL_MAX_PORTS; i++) free(lun->pending_ua[i], M_CTL); for (i = 0; i < 2 * CTL_MAX_PORTS; i++) free(lun->pr_keys[i], M_CTL); free(lun->write_buffer, M_CTL); if (lun->flags & CTL_LUN_MALLOCED) free(lun, M_CTL); STAILQ_FOREACH(nlun, &softc->lun_list, links) { mtx_lock(&nlun->lun_lock); ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); mtx_unlock(&nlun->lun_lock); } return (0); } static void ctl_create_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc; softc = control_softc; /* * ctl_alloc_lun() should handle all potential failure cases. */ ctl_alloc_lun(softc, NULL, be_lun, softc->target); } int ctl_add_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc = control_softc; mtx_lock(&softc->ctl_lock); STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); mtx_unlock(&softc->ctl_lock); wakeup(&softc->pending_lun_queue); return (0); } int ctl_enable_lun(struct ctl_be_lun *be_lun) { struct ctl_softc *softc; struct ctl_port *port, *nport; struct ctl_lun *lun; int retval; lun = (struct ctl_lun *)be_lun->ctl_lun; softc = lun->ctl_softc; mtx_lock(&softc->ctl_lock); mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_DISABLED) == 0) { /* * eh? 
Why did we get called if the LUN is already
	 * enabled?
	 */
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags &= ~CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) {
		nport = STAILQ_NEXT(port, links);

		/*
		 * Drop the lock while we call the FETD's enable routine.
		 * This can lead to a callback into CTL (at least in the
		 * case of the internal initiator frontend).
		 */
		mtx_unlock(&softc->ctl_lock);
		retval = port->lun_enable(port->targ_lun_arg, lun->target,
					  lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("%s: FETD %s port %d returned error "
			       "%d for lun_enable on target %ju lun %jd\n",
			       __func__, port->port_name, port->targ_port,
			       retval, (uintmax_t)lun->target.id,
			       (intmax_t)lun->lun);
		}
#if 0
		 else {
			/* NOTE:  TODO:  why does lun enable affect port status? */
			port->status |= CTL_PORT_STATUS_LUN_ONLINE;
		}
#endif
	}

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

int
ctl_disable_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&softc->ctl_lock);
	mtx_lock(&lun->lun_lock);
	if (lun->flags & CTL_LUN_DISABLED) {
		mtx_unlock(&lun->lun_lock);
		mtx_unlock(&softc->ctl_lock);
		return (0);
	}
	lun->flags |= CTL_LUN_DISABLED;
	mtx_unlock(&lun->lun_lock);

	STAILQ_FOREACH(port, &softc->port_list, links) {
		mtx_unlock(&softc->ctl_lock);
		/*
		 * Drop the lock before we call the frontend's disable
		 * routine, to avoid lock order reversals.
		 *
		 * XXX KDM what happens if the frontend list changes while
		 * we're traversing it?  It's unlikely, but should be handled.
		 */
		retval = port->lun_disable(port->targ_lun_arg, lun->target,
					   lun->lun);
		mtx_lock(&softc->ctl_lock);
		if (retval != 0) {
			printf("ctl_disable_lun: FETD %s port %d returned "
			       "error %d for lun_disable on target %ju "
			       "lun %jd\n",
			       port->port_name, port->targ_port, retval,
			       (uintmax_t)lun->target.id, (intmax_t)lun->lun);
		}
	}

	mtx_unlock(&softc->ctl_lock);

	return (0);
}

int
ctl_start_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

int
ctl_stop_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_STOPPED;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

int
ctl_lun_offline(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags |= CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

int
ctl_lun_online(struct ctl_be_lun *be_lun)
{
	struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun;

	mtx_lock(&lun->lun_lock);
	lun->flags &= ~CTL_LUN_OFFLINE;
	mtx_unlock(&lun->lun_lock);
	return (0);
}

int
ctl_invalidate_lun(struct ctl_be_lun *be_lun)
{
	struct ctl_softc *softc;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)be_lun->ctl_lun;
	softc = lun->ctl_softc;

	mtx_lock(&lun->lun_lock);

	/*
	 * The LUN needs to be disabled before it can be marked invalid.
	 */
	if ((lun->flags & CTL_LUN_DISABLED) == 0) {
		mtx_unlock(&lun->lun_lock);
		return (-1);
	}
	/*
	 * Mark the LUN invalid.
	 */
	lun->flags |= CTL_LUN_INVALID;

	/*
	 * If there is nothing in the OOA queue, go ahead and free the LUN.
	 * If we have something in the OOA queue, we'll free it when the
	 * last I/O completes.
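	 *
	 * The shape of that scheme as a stand-alone sketch; struct
	 * example_lun, its fields, and example_free() are illustrative
	 * stand-ins (CTL itself keys off TAILQ_EMPTY(&lun->ooa_queue),
	 * not a counter):
	 */
#if 0
struct example_lun {
	int	flags;			/* EXAMPLE_INVALID, ... */
	int	outstanding_io;		/* I/Os still in flight */
};
#define	EXAMPLE_INVALID	0x01

static void	example_free(struct example_lun *);

static void
example_invalidate(struct example_lun *lun)
{
	lun->flags |= EXAMPLE_INVALID;
	if (lun->outstanding_io == 0)
		example_free(lun);	/* nothing in flight: free now */
	/* otherwise the completion path frees it after the last I/O */
}
#endif
	/*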
*/ if (TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); mtx_lock(&softc->ctl_lock); ctl_free_lun(lun); mtx_unlock(&softc->ctl_lock); } else mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_inoperable(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags |= CTL_LUN_INOPERABLE; mtx_unlock(&lun->lun_lock); return (0); } int ctl_lun_operable(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_INOPERABLE; mtx_unlock(&lun->lun_lock); return (0); } void ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) { struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; mtx_lock(&lun->lun_lock); ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); mtx_unlock(&lun->lun_lock); } /* * Backend "memory move is complete" callback for requests that never * make it down to say RAIDCore's configuration code. */ int ctl_config_move_done(union ctl_io *io) { int retval; CTL_DEBUG_PRINT(("ctl_config_move_done\n")); KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); if ((io->io_hdr.port_status != 0) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { /* * For hardware error sense keys, the sense key * specific value is defined to be a retry count, * but we use it to pass back an internal FETD * error code. XXX KDM Hopefully the FETD is only * using 16 bits for an error code, since that's * all the space we have in the sks field. */ ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ io->io_hdr.port_status); } if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { /* * XXX KDM just assuming a single pointer here, and not a * S/G list. If we start using S/G lists for config data, * we'll need to know how to clean them up here as well. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) free(io->scsiio.kern_data_ptr, M_CTL); ctl_done(io); retval = CTL_RETVAL_COMPLETE; } else { /* * XXX KDM now we need to continue data movement. Some * options: * - call ctl_scsiio() again? We don't do this for data * writes, because for those at least we know ahead of * time where the write will go and how long it is. For * config writes, though, that information is largely * contained within the write itself, thus we need to * parse out the data again. * * - Call some other function once the data is in? */ if (ctl_debug & CTL_DEBUG_CDB_DATA) ctl_data_print(io); /* * XXX KDM call ctl_scsiio() again for now, and check flag * bits to see whether we're allocated or not. */ retval = ctl_scsiio(&io->scsiio); } return (retval); } /* * This gets called by a backend driver when it is done with a * data_submit method. */ void ctl_data_submit_done(union ctl_io *io) { /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. 
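 *
 * The gate below reduces to one predicate; shown here as a hypothetical
 * helper (the flag and status names are the real ones used throughout
 * this file):
 */
#if 0
static int
example_should_continue(union ctl_io *io)
{
	/* 1. a continuation routine was supplied */
	if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) == 0)
		return (0);
	/* 2. the I/O has not been aborted */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		return (0);
	/* 3. no error status has been set yet */
	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	    (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)
		return (0);
	return (1);
}
#endif
/*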
*/ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } ctl_done(io); } /* * This gets called by a backend driver when it is done with a * configuration write. */ void ctl_config_write_done(union ctl_io *io) { uint8_t *buf; /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. * * If there is an error, though, we don't want to keep processing. * Instead, just send status back to the initiator. */ if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { io->scsiio.io_cont(io); return; } /* * Since a configuration write can be done for commands that actually * have data allocated, like write buffer, and commands that have * no data, like start/stop unit, we need to check here. */ if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); } void ctl_config_read_done(union ctl_io *io) { uint8_t *buf; /* * If there is some error -- we are done, skip data transfer. */ if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) buf = io->scsiio.kern_data_ptr; else buf = NULL; ctl_done(io); if (buf) free(buf, M_CTL); return; } /* * If the IO_CONT flag is set, we need to call the supplied * function to continue processing the I/O, instead of completing * the I/O just yet. */ if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { io->scsiio.io_cont(io); return; } ctl_datamove(io); } /* * SCSI release command. */ int ctl_scsi_release(struct ctl_scsiio *ctsio) { int length, longid, thirdparty_id, resv_id; struct ctl_lun *lun; uint32_t residx; length = 0; resv_id = 0; CTL_DEBUG_PRINT(("ctl_scsi_release\n")); residx = ctl_get_resindex(&ctsio->io_hdr.nexus); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; switch (ctsio->cdb[0]) { case RELEASE_10: { struct scsi_release_10 *cdb; cdb = (struct scsi_release_10 *)ctsio->cdb; if (cdb->byte2 & SR10_LONGID) longid = 1; else thirdparty_id = cdb->thirdparty_id; resv_id = cdb->resv_id; length = scsi_2btoul(cdb->length); break; } } /* * XXX KDM right now, we only support LUN reservation. We don't * support 3rd party reservations, or extent reservations, which * might actually need the parameter list. If we've gotten this * far, we've got a LUN reservation. Anything else got kicked out * above. So, according to SPC, ignore the length. 
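	 *
	 * For reference, the SPC release rule implemented below, pulled
	 * out as a hypothetical helper (the real code does this inline
	 * under lun_lock):
	 */
#if 0
static void
example_release(struct ctl_lun *lun, uint32_t residx)
{
	mtx_lock(&lun->lun_lock);
	/* Only the reservation holder's RELEASE clears the flag. */
	if ((lun->flags & CTL_LUN_RESERVED) && lun->res_idx == residx)
		lun->flags &= ~CTL_LUN_RESERVED;
	mtx_unlock(&lun->lun_lock);
	/* "Not reserved" and "reserved by another initiator" both
	 * complete with GOOD status per SPC. */
}
#endif
	/*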
	 */
	length = 0;

	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);

	/*
	 * According to SPC, it is not an error for an initiator to attempt
	 * to release a reservation on a LUN that isn't reserved, or that
	 * is reserved by another initiator.  The reservation can only be
	 * released, though, by the initiator who made it or by one of
	 * several reset type events.
	 */
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx))
		lun->flags &= ~CTL_LUN_RESERVED;

	mtx_unlock(&lun->lun_lock);

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_scsi_reserve(struct ctl_scsiio *ctsio)
{
	int extent, thirdparty, longid;
	int resv_id, length;
	uint64_t thirdparty_id;
	struct ctl_lun *lun;
	uint32_t residx;

	extent = 0;
	thirdparty = 0;
	longid = 0;
	resv_id = 0;
	length = 0;
	thirdparty_id = 0;

	CTL_DEBUG_PRINT(("ctl_reserve\n"));

	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	switch (ctsio->cdb[0]) {
	case RESERVE_10: {
		struct scsi_reserve_10 *cdb;

		cdb = (struct scsi_reserve_10 *)ctsio->cdb;

		if (cdb->byte2 & SR10_LONGID)
			longid = 1;
		else
			thirdparty_id = cdb->thirdparty_id;

		resv_id = cdb->resv_id;
		length = scsi_2btoul(cdb->length);
		break;
	}
	}

	/*
	 * XXX KDM right now, we only support LUN reservation.  We don't
	 * support 3rd party reservations, or extent reservations, which
	 * might actually need the parameter list.  If we've gotten this
	 * far, we've got a LUN reservation.  Anything else got kicked out
	 * above.  So, according to SPC, ignore the length.
	 */
	length = 0;

	if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0)
	 && (length > 0)) {
		ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK);
		ctsio->kern_data_len = length;
		ctsio->kern_total_len = length;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	if (length > 0)
		thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr);

	mtx_lock(&lun->lun_lock);
	if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) {
		ctl_set_reservation_conflict(ctsio);
		goto bailout;
	}

	lun->flags |= CTL_LUN_RESERVED;
	lun->res_idx = residx;

	ctl_set_success(ctsio);

bailout:
	mtx_unlock(&lun->lun_lock);

	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}

	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

int
ctl_start_stop(struct ctl_scsiio *ctsio)
{
	struct scsi_start_stop_unit *cdb;
	struct ctl_lun *lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_start_stop\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	retval = 0;

	cdb = (struct scsi_start_stop_unit *)ctsio->cdb;

	/*
	 * XXX KDM
	 * We don't support the immediate bit on a stop unit.
In order to * do that, we would need to code up a way to know that a stop is * pending, and hold off any new commands until it completes, one * way or another. Then we could accept or reject those commands * depending on its status. We would almost need to do the reverse * of what we do below for an immediate start -- return the copy of * the ctl_io to the FETD with status to send to the host (and to * free the copy!) and then free the original I/O once the stop * actually completes. That way, the OOA queue mechanism can work * to block commands that shouldn't proceed. Another alternative * would be to put the copy in the queue in place of the original, * and return the original back to the caller. That could be * slightly safer.. */ if ((cdb->byte2 & SSS_IMMED) && ((cdb->how & SSS_START) == 0)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((lun->flags & CTL_LUN_PR_RESERVED) && ((cdb->how & SSS_START)==0)) { uint32_t residx; residx = ctl_get_resindex(&ctsio->io_hdr.nexus); if (ctl_get_prkey(lun, residx) == 0 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } /* * If there is no backend on this device, we can't start or stop * it. In theory we shouldn't get any start/stop commands in the * first place at this level if the LUN doesn't have a backend. * That should get stopped by the command decode code. */ if (lun->backend == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * XXX KDM Copan-specific offline behavior. * Figure out a reasonable way to port this? */ #ifdef NEEDTOPORT mtx_lock(&lun->lun_lock); if (((cdb->byte2 & SSS_ONOFFLINE) == 0) && (lun->flags & CTL_LUN_OFFLINE)) { /* * If the LUN is offline, and the on/offline bit isn't set, * reject the start or stop. Otherwise, let it through. */ mtx_unlock(&lun->lun_lock); ctl_set_lun_not_ready(ctsio); ctl_done((union ctl_io *)ctsio); } else { mtx_unlock(&lun->lun_lock); #endif /* NEEDTOPORT */ /* * This could be a start or a stop when we're online, * or a stop/offline or start/online. A start or stop when * we're offline is covered in the case above. */ /* * In the non-immediate case, we send the request to * the backend and return status to the user when * it is done. * * In the immediate case, we allocate a new ctl_io * to hold a copy of the request, and send that to * the backend. We then set good status on the * user's request and return it immediately. */ if (cdb->byte2 & SSS_IMMED) { union ctl_io *new_io; new_io = ctl_alloc_io(ctsio->io_hdr.pool); ctl_copy_io((union ctl_io *)ctsio, new_io); retval = lun->backend->config_write(new_io); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); } else { retval = lun->backend->config_write( (union ctl_io *)ctsio); } #ifdef NEEDTOPORT } #endif return (retval); } /* * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but * we don't really do anything with the LBA and length fields if the user * passes them in. Instead we'll just flush out the cache for the entire * LUN. 
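 *
 * The bounds test used by this handler (and, with the wraparound clause,
 * by WRITE SAME and UNMAP below) as a stand-alone sketch; the helper
 * name is hypothetical:
 */
#if 0
static int
example_lba_range_ok(uint64_t lba, uint64_t count, uint64_t maxlba)
{
	/* lba + count < lba catches 64-bit wraparound */
	if (lba + count < lba)
		return (0);
	if (lba + count > maxlba + 1)
		return (0);
	return (1);
}
#endif
/*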
*/ int ctl_sync_cache(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_softc *softc; uint64_t starting_lba; uint32_t block_count; int retval; CTL_DEBUG_PRINT(("ctl_sync_cache\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; retval = 0; switch (ctsio->cdb[0]) { case SYNCHRONIZE_CACHE: { struct scsi_sync_cache *cdb; cdb = (struct scsi_sync_cache *)ctsio->cdb; starting_lba = scsi_4btoul(cdb->begin_lba); block_count = scsi_2btoul(cdb->lb_count); break; } case SYNCHRONIZE_CACHE_16: { struct scsi_sync_cache_16 *cdb; cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; starting_lba = scsi_8btou64(cdb->begin_lba); block_count = scsi_4btoul(cdb->lb_count); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; break; /* NOTREACHED */ } /* * We check the LBA and length, but don't do anything with them. * A SYNCHRONIZE CACHE will cause the entire cache for this lun to * get flushed. This check will just help satisfy anyone who wants * to see an error for an out of range LBA. */ if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; } /* * If this LUN has no backend, we can't flush the cache anyway. */ if (lun->backend == NULL) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); goto bailout; } /* * Check to see whether we're configured to send the SYNCHRONIZE * CACHE command directly to the back end. */ mtx_lock(&lun->lun_lock); if ((softc->flags & CTL_FLAG_REAL_SYNC) && (++(lun->sync_count) >= lun->sync_interval)) { lun->sync_count = 0; mtx_unlock(&lun->lun_lock); retval = lun->backend->config_write((union ctl_io *)ctsio); } else { mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); } bailout: return (retval); } int ctl_format(struct ctl_scsiio *ctsio) { struct scsi_format *cdb; struct ctl_lun *lun; int length, defect_list_len; CTL_DEBUG_PRINT(("ctl_format\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_format *)ctsio->cdb; length = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) length = sizeof(struct scsi_format_header_long); else length = sizeof(struct scsi_format_header_short); } if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) && (length > 0)) { ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); ctsio->kern_data_len = length; ctsio->kern_total_len = length; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } defect_list_len = 0; if (cdb->byte2 & SF_FMTDATA) { if (cdb->byte2 & SF_LONGLIST) { struct scsi_format_header_long *header; header = (struct scsi_format_header_long *) ctsio->kern_data_ptr; defect_list_len = scsi_4btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } else { struct scsi_format_header_short *header; header = (struct scsi_format_header_short *) ctsio->kern_data_ptr; defect_list_len = scsi_2btoul(header->defect_list_len); if (defect_list_len != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); goto bailout; } } } /* * The format command will clear out the "Medium format corrupted" * status if set by the 
configuration code. That status is really * just a way to notify the host that we have lost the media, and * get them to issue a command that will basically make them think * they're blowing away the media. */ mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_INOPERABLE; mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); bailout: if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { free(ctsio->kern_data_ptr, M_CTL); ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; } ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_buffer(struct ctl_scsiio *ctsio) { struct scsi_read_buffer *cdb; struct ctl_lun *lun; int buffer_offset, len; static uint8_t descr[4]; static uint8_t echo_descr[4] = { 0 }; CTL_DEBUG_PRINT(("ctl_read_buffer\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_read_buffer *)ctsio->cdb; if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { descr[0] = 0; scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); ctsio->kern_data_ptr = descr; len = min(len, sizeof(descr)); } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { ctsio->kern_data_ptr = echo_descr; len = min(len, sizeof(echo_descr)); } else { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; } ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctl_set_success(ctsio); ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_buffer(struct ctl_scsiio *ctsio) { struct scsi_write_buffer *cdb; struct ctl_lun *lun; int buffer_offset, len; CTL_DEBUG_PRINT(("ctl_write_buffer\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_write_buffer *)ctsio->cdb; if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } len = scsi_3btoul(cdb->length); buffer_offset = scsi_3btoul(cdb->offset); if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
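	 *
	 * This two-phase shape recurs throughout the config-command
	 * handlers; schematically (the field names are the real struct
	 * ctl_scsiio ones, the comments are the point of the sketch):
	 */
#if 0
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		/* Pass 1: allocate, point the FETD at the buffer and
		 * start the transfer; we are re-entered through
		 * be_move_done once the data has actually arrived. */
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = ctsio->kern_total_len = len;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}
	/* Pass 2: CTL_FLAG_ALLOCATED is set and the data is sitting in
	 * kern_data_ptr; act on it and complete the command. */
#endif
	/*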
*/ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { if (lun->write_buffer == NULL) { lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, M_CTL, M_WAITOK); } ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; ctsio->kern_data_len = len; ctsio->kern_total_len = len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_write_same(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int len, retval; uint8_t byte2; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_write_same\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; switch (ctsio->cdb[0]) { case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); byte2 = cdb->byte2; break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); byte2 = cdb->byte2; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* NDOB and ANCHOR flags can be used only together with UNMAP */ if ((byte2 & SWS_UNMAP) == 0 && (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Zero number of blocks means "to the last logical block" */ if (num_blocks == 0) { if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 0, /*command*/ 1, /*field*/ 0, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } num_blocks = (lun->be_lun->maxlba + 1) - lba; } len = lun->be_lun->blocksize; /* * If we've got a kernel request that hasn't been malloced yet, * malloc it and tell the caller the data buffer is here. 
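	 *
	 * For reference, the "zero means through the last LBA" rule above
	 * as a stand-alone sketch (example_ws_nblocks is a hypothetical
	 * helper, with 0 doubling as the "range will not fit" answer):
	 */
#if 0
static uint32_t
example_ws_nblocks(uint64_t lba, uint32_t num_blocks, uint64_t maxlba)
{
	if (num_blocks != 0)
		return (num_blocks);
	/* 0 requests everything through maxlba, which only works if
	 * the count fits in the 32 bits carried to the backend. */
	if (maxlba + 1 - lba > UINT32_MAX)
		return (0);	/* caller reports INVALID FIELD */
	return ((uint32_t)(maxlba + 1 - lba));
}
#endif
	/*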
	 */
	if ((byte2 & SWS_NDOB) == 0 &&
	    (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = byte2;
	retval = lun->backend->config_write((union ctl_io *)ctsio);

	return (retval);
}

int
ctl_unmap(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_unmap *cdb;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_header *hdr;
	struct scsi_unmap_desc *buf, *end, *endnz, *range;
	uint64_t lba;
	uint32_t num_blocks;
	int len, retval;
	uint8_t byte2;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_unmap\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_unmap *)ctsio->cdb;

	len = scsi_2btoul(cdb->length);
	byte2 = cdb->byte2;

	/*
	 * If we've got a kernel request that hasn't been malloced yet,
	 * malloc it and tell the caller the data buffer is here.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = len;
		ctsio->kern_total_len = len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	len = ctsio->kern_total_len - ctsio->kern_data_resid;
	hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr;
	if (len < sizeof (*hdr) ||
	    len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) ||
	    len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) ||
	    scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 0,
				      /*command*/ 0,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		goto done;
	}
	len = scsi_2btoul(hdr->desc_length);
	buf = (struct scsi_unmap_desc *)(hdr + 1);
	end = buf + len / sizeof(*buf);

	endnz = buf;
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		num_blocks = scsi_4btoul(range->length);
		if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
		 || ((lba + num_blocks) < lba)) {
			ctl_set_lba_out_of_range(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		if (num_blocks != 0)
			endnz = range + 1;
	}

	/*
	 * Block backend can not handle zero last range.
	 * Filter it out and return if there is nothing left.
	 */
	len = (uint8_t *)endnz - (uint8_t *)buf;
	if (len == 0) {
		ctl_set_success(ctsio);
		goto done;
	}

	mtx_lock(&lun->lun_lock);
	ptrlen = (struct ctl_ptr_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	ptrlen->ptr = (void *)buf;
	ptrlen->len = len;
	ptrlen->flags = byte2;
	ctl_check_blocked(lun);
	mtx_unlock(&lun->lun_lock);

	retval = lun->backend->config_write((union ctl_io *)ctsio);
	return (retval);

done:
	if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED;
	}
	ctl_done((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Note that this function currently doesn't actually do anything inside
 * CTL to enforce things if the DQue bit is turned on.
* * Also note that this function can't be used in the default case, because * the DQue bit isn't set in the changeable mask for the control mode page * anyway. This is just here as an example for how to implement a page * handler, and a placeholder in case we want to allow the user to turn * tagged queueing on and off. * * The D_SENSE bit handling is functional, however, and will turn * descriptor sense on and off for a given LUN. */ int ctl_control_page_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct scsi_control_page *current_cp, *saved_cp, *user_cp; struct ctl_lun *lun; int set_ua; uint32_t initidx; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; user_cp = (struct scsi_control_page *)page_ptr; current_cp = (struct scsi_control_page *) (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); saved_cp = (struct scsi_control_page *) (page_index->page_data + (page_index->page_len * CTL_PAGE_SAVED)); mtx_lock(&lun->lun_lock); if (((current_cp->rlec & SCP_DSENSE) == 0) && ((user_cp->rlec & SCP_DSENSE) != 0)) { /* * Descriptor sense is currently turned off and the user * wants to turn it on. */ current_cp->rlec |= SCP_DSENSE; saved_cp->rlec |= SCP_DSENSE; lun->flags |= CTL_LUN_SENSE_DESC; set_ua = 1; } else if (((current_cp->rlec & SCP_DSENSE) != 0) && ((user_cp->rlec & SCP_DSENSE) == 0)) { /* * Descriptor sense is currently turned on, and the user * wants to turn it off. */ current_cp->rlec &= ~SCP_DSENSE; saved_cp->rlec &= ~SCP_DSENSE; lun->flags &= ~CTL_LUN_SENSE_DESC; set_ua = 1; } if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; set_ua = 1; } if ((current_cp->eca_and_aen & SCP_SWP) != (user_cp->eca_and_aen & SCP_SWP)) { current_cp->eca_and_aen &= ~SCP_SWP; current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; saved_cp->eca_and_aen &= ~SCP_SWP; saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); return (0); } int ctl_caching_sp_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr) { struct scsi_caching_page *current_cp, *saved_cp, *user_cp; struct ctl_lun *lun; int set_ua; uint32_t initidx; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); set_ua = 0; user_cp = (struct scsi_caching_page *)page_ptr; current_cp = (struct scsi_caching_page *) (page_index->page_data + (page_index->page_len * CTL_PAGE_CURRENT)); saved_cp = (struct scsi_caching_page *) (page_index->page_data + (page_index->page_len * CTL_PAGE_SAVED)); mtx_lock(&lun->lun_lock); if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); saved_cp->flags1 &= ~(SCP_WCE | SCP_RCD); saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); set_ua = 1; } if (set_ua != 0) ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); mtx_unlock(&lun->lun_lock); return (0); } int ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, 
			       uint8_t *page_ptr)
{
	uint8_t *c;
	int i;

	c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs;
	ctl_time_io_secs =
		(c[0] << 8) |
		(c[1] << 0) |
		0;
	CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs));
	printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs);
	printf("page data:");
	for (i=0; i<8; i++)
		printf(" %.2x",page_ptr[i]);
	printf("\n");
	return (0);
}

int
ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio,
			       struct ctl_page_index *page_index,
			       int pc)
{
	struct copan_debugconf_subpage *page;

	page = (struct copan_debugconf_subpage *)page_index->page_data +
		(page_index->page_len * pc);

	switch (pc) {
	case SMS_PAGE_CTRL_CHANGEABLE >> 6:
	case SMS_PAGE_CTRL_DEFAULT >> 6:
	case SMS_PAGE_CTRL_SAVED >> 6:
		/*
		 * We don't update the changeable or default bits for this page.
		 */
		break;
	case SMS_PAGE_CTRL_CURRENT >> 6:
		page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8;
		page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0;
		break;
	default:
#ifdef NEEDTOPORT
		EPRINT(0, "Invalid PC %d!!", pc);
#endif /* NEEDTOPORT */
		break;
	}
	return (0);
}

static int
ctl_do_mode_select(union ctl_io *io)
{
	struct scsi_mode_page_header *page_header;
	struct ctl_page_index *page_index;
	struct ctl_scsiio *ctsio;
	int control_dev, page_len;
	int page_len_offset, page_len_size;
	union ctl_modepage_info *modepage_info;
	struct ctl_lun *lun;
	int *len_left, *len_used;
	int retval, i;

	ctsio = &io->scsiio;
	page_index = NULL;
	page_len = 0;
	retval = CTL_RETVAL_COMPLETE;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	if (lun->be_lun->lun_type != T_DIRECT)
		control_dev = 1;
	else
		control_dev = 0;

	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;
	len_left = &modepage_info->header.len_left;
	len_used = &modepage_info->header.len_used;

do_next_page:

	page_header = (struct scsi_mode_page_header *)
		(ctsio->kern_data_ptr + *len_used);

	if (*len_left == 0) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	} else if (*len_left < sizeof(struct scsi_mode_page_header)) {

		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);

	} else if ((page_header->page_code & SMPH_SPF)
		&& (*len_left < sizeof(struct scsi_mode_page_header_sp))) {

		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * XXX KDM should we do something with the block descriptor?
	 */
	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		if ((control_dev != 0)
		 && (lun->mode_pages.index[i].page_flags &
		     CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) !=
		    (page_header->page_code & SMPH_PC_MASK))
			continue;

		/*
		 * If neither page has a subpage code, then we've got a
		 * match.
		 */
		if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0)
		 && ((page_header->page_code & SMPH_SPF) == 0)) {
			page_index = &lun->mode_pages.index[i];
			page_len = page_header->page_length;
			break;
		}

		/*
		 * If both pages have subpages, then the subpage numbers
		 * have to match.
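		 *
		 * For reference, the two header layouts this matching
		 * distinguishes, as the code here uses them (see
		 * scsi_all.h for the authoritative definitions):
		 */
#if 0
/* SMPH_SPF clear: one-byte page length follows the page code. */
struct scsi_mode_page_header {
	uint8_t page_code;
	uint8_t page_length;
};
/* SMPH_SPF set: subpage number, then a two-byte big-endian length. */
struct scsi_mode_page_header_sp {
	uint8_t page_code;
	uint8_t subpage;
	uint8_t page_length[2];
};
#endif
		/*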
		 */
		if ((lun->mode_pages.index[i].page_code & SMPH_SPF)
		 && (page_header->page_code & SMPH_SPF)) {
			struct scsi_mode_page_header_sp *sph;

			sph = (struct scsi_mode_page_header_sp *)page_header;

			if (lun->mode_pages.index[i].subpage ==
			    sph->subpage) {
				page_index = &lun->mode_pages.index[i];
				page_len = scsi_2btoul(sph->page_length);
				break;
			}
		}
	}

	/*
	 * If we couldn't find the page, or if we don't have a mode select
	 * handler for it, send back an error to the user.
	 */
	if ((page_index == NULL)
	 || (page_index->select_handler == NULL)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	if (page_index->page_code & SMPH_SPF) {
		page_len_offset = 2;
		page_len_size = 2;
	} else {
		page_len_size = 1;
		page_len_offset = 1;
	}

	/*
	 * If the length the initiator gives us isn't the one we specify in
	 * the mode page header, or if they didn't specify enough data in
	 * the CDB to avoid truncating this page, kick out the request.
	 */
	if ((page_len != (page_index->page_len - page_len_offset -
			  page_len_size))
	 || (*len_left < page_index->page_len)) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + page_len_offset,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Run through the mode page, checking to make sure that the bits
	 * the user changed are actually legal for him to change.
	 */
	for (i = 0; i < page_index->page_len; i++) {
		uint8_t *user_byte, *change_mask, *current_byte;
		int bad_bit;
		int j;

		user_byte = (uint8_t *)page_header + i;
		change_mask = page_index->page_data +
			      (page_index->page_len * CTL_PAGE_CHANGEABLE) + i;
		current_byte = page_index->page_data +
			       (page_index->page_len * CTL_PAGE_CURRENT) + i;

		/*
		 * Check to see whether the user set any bits in this byte
		 * that he is not allowed to set.
		 */
		if ((*user_byte & ~(*change_mask)) ==
		    (*current_byte & ~(*change_mask)))
			continue;

		/*
		 * Go through bit by bit to determine which one is illegal.
		 */
		bad_bit = 0;
		for (j = 7; j >= 0; j--) {
			if ((((1 << j) & ~(*change_mask)) & *user_byte) !=
			    (((1 << j) & ~(*change_mask)) & *current_byte)) {
				bad_bit = j;
				break;
			}
		}
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 0,
				      /*field*/ *len_used + i,
				      /*bit_valid*/ 1,
				      /*bit*/ bad_bit);
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Decrement these before we call the page handler, since we may
	 * end up getting called back one way or another before the handler
	 * returns to this context.
	 */
	*len_left -= page_index->page_len;
	*len_used += page_index->page_len;

	retval = page_index->select_handler(ctsio, page_index,
					    (uint8_t *)page_header);

	/*
	 * If the page handler returns CTL_RETVAL_QUEUED, then we need to
	 * wait until this queued command completes to finish processing
	 * the mode page.  If it returns anything other than
	 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have
	 * already set the sense information, freed the data pointer, and
	 * completed the io for us.
	 */
	if (retval != CTL_RETVAL_COMPLETE)
		goto bailout_no_done;

	/*
	 * If the initiator sent us more than one page, parse the next one.
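	 *
	 * A compact model of the changeable-mask test above;
	 * example_illegal_bit is a hypothetical helper returning the
	 * offending bit number, or -1 if the byte is legal:
	 */
#if 0
static int
example_illegal_bit(uint8_t user, uint8_t cur, uint8_t chg_mask)
{
	uint8_t diff;
	int j;

	/* Bits outside chg_mask must match the current value. */
	diff = (user ^ cur) & ~chg_mask;
	if (diff == 0)
		return (-1);
	for (j = 7; j >= 0; j--)
		if (diff & (1 << j))
			return (j);
	return (-1);
}
#endif
	/*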
*/ if (*len_left > 0) goto do_next_page; ctl_set_success(ctsio); free(ctsio->kern_data_ptr, M_CTL); ctl_done((union ctl_io *)ctsio); bailout_no_done: return (CTL_RETVAL_COMPLETE); } int ctl_mode_select(struct ctl_scsiio *ctsio) { int param_len, pf, sp; int header_size, bd_len; int len_left, len_used; struct ctl_page_index *page_index; struct ctl_lun *lun; int control_dev, page_len; union ctl_modepage_info *modepage_info; int retval; pf = 0; sp = 0; page_len = 0; len_used = 0; len_left = 0; retval = 0; bd_len = 0; page_index = NULL; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if (lun->be_lun->lun_type != T_DIRECT) control_dev = 1; else control_dev = 0; switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_select_6 *cdb; cdb = (struct scsi_mode_select_6 *)ctsio->cdb; pf = (cdb->byte2 & SMS_PF) ? 1 : 0; sp = (cdb->byte2 & SMS_SP) ? 1 : 0; param_len = cdb->length; header_size = sizeof(struct scsi_mode_header_6); break; } case MODE_SELECT_10: { struct scsi_mode_select_10 *cdb; cdb = (struct scsi_mode_select_10 *)ctsio->cdb; pf = (cdb->byte2 & SMS_PF) ? 1 : 0; sp = (cdb->byte2 & SMS_SP) ? 1 : 0; param_len = scsi_2btoul(cdb->length); header_size = sizeof(struct scsi_mode_header_10); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * From SPC-3: * "A parameter list length of zero indicates that the Data-Out Buffer * shall be empty. This condition shall not be considered as an error." */ if (param_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Since we'll hit this the first time through, prior to * allocation, we don't need to free a data buffer here. */ if (param_len < header_size) { ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Allocate the data buffer and grab the user's data. In theory, * we shouldn't have to sanity check the parameter list length here * because the maximum size is 64K. We should be able to malloc * that much without too many problems. */ if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } switch (ctsio->cdb[0]) { case MODE_SELECT_6: { struct scsi_mode_header_6 *mh6; mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr; bd_len = mh6->blk_desc_len; break; } case MODE_SELECT_10: { struct scsi_mode_header_10 *mh10; mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr; bd_len = scsi_2btoul(mh10->blk_desc_len); break; } default: panic("Invalid CDB type %#x", ctsio->cdb[0]); break; } if (param_len < (header_size + bd_len)) { free(ctsio->kern_data_ptr, M_CTL); ctl_set_param_len_error(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_config_write_done(), it'll get passed back to * ctl_do_mode_select() for further processing, or completion if * we're all done. 
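	 *
	 * The bookkeeping handed off here is simple arithmetic; for
	 * example, a MODE SELECT(10) with param_len = 32 and an 8-byte
	 * block descriptor starts out as
	 *
	 *	len_used = header_size + bd_len = 8 + 8  = 16
	 *	len_left = param_len - len_used = 32 - 16 = 16
	 *
	 * (the 10-byte command's header is 8 bytes long), and
	 * ctl_do_mode_select() then moves page_len bytes from len_left
	 * to len_used as each page is parsed.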
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_do_mode_select; modepage_info = (union ctl_modepage_info *) ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; memset(modepage_info, 0, sizeof(*modepage_info)); len_left = param_len - header_size - bd_len; len_used = header_size + bd_len; modepage_info->header.len_left = len_left; modepage_info->header.len_used = len_used; return (ctl_do_mode_select((union ctl_io *)ctsio)); } int ctl_mode_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; int pc, page_code, dbd, llba, subpage; int alloc_len, page_len, header_len, total_len; struct scsi_mode_block_descr *block_desc; struct ctl_page_index *page_index; int control_dev; dbd = 0; llba = 0; block_desc = NULL; page_index = NULL; CTL_DEBUG_PRINT(("ctl_mode_sense\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if (lun->be_lun->lun_type != T_DIRECT) control_dev = 1; else control_dev = 0; switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_sense_6 *cdb; cdb = (struct scsi_mode_sense_6 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_6); if (cdb->byte2 & SMS_DBD) dbd = 1; else header_len += sizeof(struct scsi_mode_block_descr); pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = cdb->length; break; } case MODE_SENSE_10: { struct scsi_mode_sense_10 *cdb; cdb = (struct scsi_mode_sense_10 *)ctsio->cdb; header_len = sizeof(struct scsi_mode_hdr_10); if (cdb->byte2 & SMS_DBD) dbd = 1; else header_len += sizeof(struct scsi_mode_block_descr); if (cdb->byte2 & SMS10_LLBAA) llba = 1; pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SMS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); break; } default: ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * We have to make a first pass through to calculate the size of * the pages that match the user's query. Then we allocate enough * memory to hold it, and actually copy the data into the buffer. */ switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i; page_len = 0; /* * At the moment, values other than 0 and 0xff here are * reserved according to SPC-3. */ if ((subpage != SMS_SUBPAGE_PAGE_0) && (subpage != SMS_SUBPAGE_ALL)) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 3, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { if ((control_dev != 0) && (lun->mode_pages.index[i].page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; /* * We don't use this subpage if the user didn't * request all subpages. 
*/ if ((lun->mode_pages.index[i].subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; #if 0 printf("found page %#x len %d\n", lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].page_len); #endif page_len += lun->mode_pages.index[i].page_len; } break; } default: { int i; page_len = 0; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { /* Look for the right page code */ if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((lun->mode_pages.index[i].subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if ((control_dev != 0) && (lun->mode_pages.index[i].page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; #if 0 printf("found page %#x len %d\n", lun->mode_pages.index[i].page_code & SMPH_PC_MASK, lun->mode_pages.index[i].page_len); #endif page_len += lun->mode_pages.index[i].page_len; } if (page_len == 0) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } } total_len = header_len + page_len; #if 0 printf("header_len = %d, page_len = %d, total_len = %d\n", header_len, page_len, total_len); #endif ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } switch (ctsio->cdb[0]) { case MODE_SENSE_6: { struct scsi_mode_hdr_6 *header; header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; header->datalen = MIN(total_len - 1, 254); if (control_dev == 0) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->flags & CTL_LUN_READONLY) || (lun->mode_pages.control_page[CTL_PAGE_CURRENT] .eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) header->block_descr_len = 0; else header->block_descr_len = sizeof(struct scsi_mode_block_descr); block_desc = (struct scsi_mode_block_descr *)&header[1]; break; } case MODE_SENSE_10: { struct scsi_mode_hdr_10 *header; int datalen; header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; datalen = MIN(total_len - 2, 65533); scsi_ulto2b(datalen, header->datalen); if (control_dev == 0) { header->dev_specific = 0x10; /* DPOFUA */ if ((lun->flags & CTL_LUN_READONLY) || (lun->mode_pages.control_page[CTL_PAGE_CURRENT] .eca_and_aen & SCP_SWP) != 0) header->dev_specific |= 0x80; /* WP */ } if (dbd) scsi_ulto2b(0, header->block_descr_len); else scsi_ulto2b(sizeof(struct scsi_mode_block_descr), header->block_descr_len); block_desc = (struct scsi_mode_block_descr *)&header[1]; break; } default: panic("invalid CDB type %#x", ctsio->cdb[0]); break; /* NOTREACHED */ } /* * If we've got a disk, use its blocksize in the block * descriptor. Otherwise, just set it to 0. 
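	 *
	 * A note on the data length fields set above: MODE DATA LENGTH
	 * does not count itself, hence total_len - 1 for MODE SENSE(6)
	 * (capped at 254 so it fits in one byte) and total_len - 2 for
	 * MODE SENSE(10) (capped at 65533).  For instance, with
	 * total_len = 32:
	 *
	 *	MODE SENSE(6):  datalen = MIN(32 - 1, 254)   = 31
	 *	MODE SENSE(10): datalen = MIN(32 - 2, 65533) = 30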
*/ if (dbd == 0) { if (control_dev == 0) scsi_ulto3b(lun->be_lun->blocksize, block_desc->block_len); else scsi_ulto3b(0, block_desc->block_len); } switch (page_code) { case SMS_ALL_PAGES_PAGE: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; if ((control_dev != 0) && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; /* * We don't use this subpage if the user didn't * request all subpages. We already checked (above) * to make sure the user only specified a subpage * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. */ if ((page_index->subpage != 0) && (subpage == SMS_SUBPAGE_PAGE_0)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } default: { int i, data_used; data_used = header_len; for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { struct ctl_page_index *page_index; page_index = &lun->mode_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SMPH_PC_MASK) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if ((page_index->subpage != subpage) && (subpage != SMS_SUBPAGE_ALL)) continue; /* Make sure the page is supported for this dev type */ if ((control_dev != 0) && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY)) continue; /* * Call the handler, if it exists, to update the * page to the latest values. */ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index,pc); memcpy(ctsio->kern_data_ptr + data_used, page_index->page_data + (page_index->page_len * pc), page_index->page_len); data_used += page_index->page_len; } break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc) { struct ctl_lun *lun; struct scsi_log_param_header *phdr; uint8_t *data; uint64_t val; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data = page_index->page_data; if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0001, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x0002, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x01; /* per-LUN */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f1, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); 
data[4] = 0x02; /* per-pool */ data += phdr->param_len; } if (lun->backend->lun_attr != NULL && (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused")) != UINT64_MAX) { phdr = (struct scsi_log_param_header *)data; scsi_ulto2b(0x00f2, phdr->param_code); phdr->param_control = SLP_LBIN | SLP_LP; phdr->param_len = 8; data = (uint8_t *)(phdr + 1); scsi_ulto4b(val >> CTL_LBP_EXPONENT, data); data[4] = 0x02; /* per-pool */ data += phdr->param_len; } page_index->page_len = data - page_index->page_data; return (0); } int ctl_log_sense(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; int i, pc, page_code, subpage; int alloc_len, total_len; struct ctl_page_index *page_index; struct scsi_log_sense *cdb; struct scsi_log_header *header; CTL_DEBUG_PRINT(("ctl_log_sense\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_log_sense *)ctsio->cdb; pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6; page_code = cdb->page & SLS_PAGE_CODE; subpage = cdb->subpage; alloc_len = scsi_2btoul(cdb->length); page_index = NULL; for (i = 0; i < CTL_NUM_LOG_PAGES; i++) { page_index = &lun->log_pages.index[i]; /* Look for the right page code */ if ((page_index->page_code & SL_PAGE_CODE) != page_code) continue; /* Look for the right subpage or the subpage wildcard*/ if (page_index->subpage != subpage) continue; break; } if (i >= CTL_NUM_LOG_PAGES) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_log_header) + page_index->page_len; ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } header = (struct scsi_log_header *)ctsio->kern_data_ptr; header->page = page_index->page_code; if (page_index->subpage) { header->page |= SL_SPF; header->subpage = page_index->subpage; } scsi_ulto2b(page_index->page_len, header->datalen); /* * Call the handler, if it exists, to update the * page to the latest values. 
*/ if (page_index->sense_handler != NULL) page_index->sense_handler(ctsio, page_index, pc); memcpy(header + 1, page_index->page_data, page_index->page_len); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity(struct ctl_scsiio *ctsio) { struct scsi_read_capacity *cdb; struct scsi_read_capacity_data *data; struct ctl_lun *lun; uint32_t lba; CTL_DEBUG_PRINT(("ctl_read_capacity\n")); cdb = (struct scsi_read_capacity *)ctsio->cdb; lba = scsi_4btoul(cdb->addr); if (((cdb->pmi & SRC_PMI) == 0) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr; ctsio->residual = 0; ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If the maximum LBA is greater than 0xfffffffe, the user must * issue a SERVICE ACTION IN (16) command, with the read capacity * service action set. */ if (lun->be_lun->maxlba > 0xfffffffe) scsi_ulto4b(0xffffffff, data->addr); else scsi_ulto4b(lun->be_lun->maxlba, data->addr); /* * XXX KDM this may not be 512 bytes... */ scsi_ulto4b(lun->be_lun->blocksize, data->length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_capacity_16(struct ctl_scsiio *ctsio) { struct scsi_read_capacity_16 *cdb; struct scsi_read_capacity_data_long *data; struct ctl_lun *lun; uint64_t lba; uint32_t alloc_len; CTL_DEBUG_PRINT(("ctl_read_capacity_16\n")); cdb = (struct scsi_read_capacity_16 *)ctsio->cdb; alloc_len = scsi_4btoul(cdb->alloc_len); lba = scsi_8btou64(cdb->addr); if ((cdb->reladr & SRC16_PMI) && (lba != 0)) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr; if (sizeof(*data) < alloc_len) { ctsio->residual = alloc_len - sizeof(*data); ctsio->kern_data_len = sizeof(*data); ctsio->kern_total_len = sizeof(*data); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; scsi_u64to8b(lun->be_lun->maxlba, data->addr); /* XXX KDM this may not be 512 bytes...
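 *
 * (The block length reported here is whatever the backing store was
 * configured with; 512 is only the common case.)  A standalone sketch of
 * the start of the READ CAPACITY(16) payload built here, with LBPME and
 * LBPRZ signalling UNMAP support; not part of the build, and be32()/be64()
 * are local stand-ins for scsi_ulto4b()/scsi_u64to8b().
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void
be64(uint64_t v, uint8_t *p)
{
	be32(v >> 32, p);
	be32(v & 0xffffffff, p + 4);
}

static void
pack_read_capacity_16(uint64_t maxlba, uint32_t blocksize, int unmap,
    uint8_t buf[32])
{
	memset(buf, 0, 32);
	be64(maxlba, &buf[0]);		/* returned logical block address */
	be32(blocksize, &buf[8]);	/* logical block length in bytes */
	if (unmap)
		buf[14] |= 0x80 | 0x40;	/* LBPME | LBPRZ */
}
#endif
/*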
*/ scsi_ulto4b(lun->be_lun->blocksize, data->length); data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE; scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_get_lba_status(struct ctl_scsiio *ctsio) { struct scsi_get_lba_status *cdb; struct scsi_get_lba_status_data *data; struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t alloc_len, total_len; int retval; CTL_DEBUG_PRINT(("ctl_get_lba_status\n")); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_get_lba_status *)ctsio->cdb; lba = scsi_8btou64(cdb->addr); alloc_len = scsi_4btoul(cdb->alloc_len); if (lba > lun->be_lun->maxlba) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(*data) + sizeof(data->descr[0]); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* Fill dummy data in case backend can't tell anything. */ scsi_ulto4b(4 + sizeof(data->descr[0]), data->length); scsi_u64to8b(lba, data->descr[0].addr); scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba), data->descr[0].length); data->descr[0].status = 0; /* Mapped or unknown. 
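 *
 * A standalone sketch of the 16-byte LBA status descriptor being filled
 * here; not part of the build, be32()/be64() stand in for the
 * scsi_ulto4b()/scsi_u64to8b() helpers, and the status codes are from
 * SBC-3 (0 mapped or unknown, 1 deallocated, 2 anchored).
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void
be64(uint64_t v, uint8_t *p)
{
	be32(v >> 32, p);
	be32(v & 0xffffffff, p + 4);
}

static void
pack_lba_status_descr(uint64_t lba, uint32_t nblocks, uint8_t status,
    uint8_t d[16])
{
	memset(d, 0, 16);
	be64(lba, &d[0]);		/* first LBA the descriptor covers */
	be32(nblocks, &d[8]);		/* number of blocks it covers */
	d[12] = status & 0x0f;		/* provisioning status */
}
#endif
/*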
*/ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = total_len; lbalen->flags = 0; retval = lun->backend->config_read((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_read_defect(struct ctl_scsiio *ctsio) { struct scsi_read_defect_data_10 *ccb10; struct scsi_read_defect_data_12 *ccb12; struct scsi_read_defect_data_hdr_10 *data10; struct scsi_read_defect_data_hdr_12 *data12; uint32_t alloc_len, data_len; uint8_t format; CTL_DEBUG_PRINT(("ctl_read_defect\n")); if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb; format = ccb10->format; alloc_len = scsi_2btoul(ccb10->alloc_length); data_len = sizeof(*data10); } else { ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb; format = ccb12->format; alloc_len = scsi_4btoul(ccb12->alloc_length); data_len = sizeof(*data12); } if (alloc_len == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; if (ctsio->cdb[0] == READ_DEFECT_DATA_10) { data10 = (struct scsi_read_defect_data_hdr_10 *) ctsio->kern_data_ptr; data10->format = format; scsi_ulto2b(0, data10->length); } else { data12 = (struct scsi_read_defect_data_hdr_12 *) ctsio->kern_data_ptr; data12->format = format; scsi_ulto2b(0, data12->generation); scsi_ulto4b(0, data12->length); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio) { struct scsi_maintenance_in *cdb; int retval; int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os; int num_target_port_groups, num_target_ports; struct ctl_lun *lun; struct ctl_softc *softc; struct ctl_port *port; struct scsi_target_group_data *rtg_ptr; struct scsi_target_group_data_extended *rtg_ext_ptr; struct scsi_target_port_group_descriptor *tpg_desc; CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n")); cdb = (struct scsi_maintenance_in *)ctsio->cdb; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; retval = CTL_RETVAL_COMPLETE; switch (cdb->byte2 & STG_PDF_MASK) { case STG_PDF_LENGTH: ext = 0; break; case STG_PDF_EXTENDED: ext = 1; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 5); ctl_done((union ctl_io *)ctsio); return(retval); } if (softc->is_single) num_target_port_groups = 1; else num_target_port_groups = NUM_TARGET_PORT_GROUPS; num_target_ports = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; - if (ctl_map_lun_back(softc, port->targ_port, lun->lun) >= - CTL_MAX_LUNS) + if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; num_target_ports++; } mtx_unlock(&softc->ctl_lock); if (ext) total_len = sizeof(struct scsi_target_group_data_extended); else total_len = sizeof(struct 
scsi_target_group_data); total_len += sizeof(struct scsi_target_port_group_descriptor) * num_target_port_groups + sizeof(struct scsi_target_port_descriptor) * num_target_ports * num_target_port_groups; alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (ext) { rtg_ext_ptr = (struct scsi_target_group_data_extended *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ext_ptr->length); rtg_ext_ptr->format_type = 0x10; rtg_ext_ptr->implicit_transition_time = 0; tpg_desc = &rtg_ext_ptr->groups[0]; } else { rtg_ptr = (struct scsi_target_group_data *) ctsio->kern_data_ptr; scsi_ulto4b(total_len - 4, rtg_ptr->length); tpg_desc = &rtg_ptr->groups[0]; } mtx_lock(&softc->ctl_lock); pg = softc->port_offset / CTL_MAX_PORTS; if (softc->flags & CTL_FLAG_ACTIVE_SHELF) { if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) { gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; os = TPG_ASYMMETRIC_ACCESS_STANDBY; } else if (lun->flags & CTL_LUN_PRIMARY_SC) { gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; } else { gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } } else { gs = TPG_ASYMMETRIC_ACCESS_STANDBY; os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED; } for (g = 0; g < num_target_port_groups; g++) { tpg_desc->pref_state = (g == pg) ? gs : os; tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP; scsi_ulto2b(g + 1, tpg_desc->target_port_group); tpg_desc->status = TPG_IMPLICIT; pc = 0; STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; - if (ctl_map_lun_back(softc, port->targ_port, lun->lun) - >= CTL_MAX_LUNS) + if (ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; scsi_ulto2b(p, tpg_desc->descriptors[pc]. 
relative_target_port_identifier); pc++; } tpg_desc->target_port_count = pc; tpg_desc = (struct scsi_target_port_group_descriptor *) &tpg_desc->descriptors[pc]; } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct scsi_report_supported_opcodes *cdb; const struct ctl_cmd_entry *entry, *sentry; struct scsi_report_supported_opcodes_all *all; struct scsi_report_supported_opcodes_descr *descr; struct scsi_report_supported_opcodes_one *one; int retval; int alloc_len, total_len; int opcode, service_action, i, j, num; CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n")); cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; retval = CTL_RETVAL_COMPLETE; opcode = cdb->requested_opcode; service_action = scsi_2btoul(cdb->requested_service_action); switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) num++; } } else { if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) num++; } } total_len = sizeof(struct scsi_report_supported_opcodes_all) + num * sizeof(struct scsi_report_supported_opcodes_descr); break; case RSO_OPTIONS_OC: if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; case RSO_OPTIONS_OC_SA: if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 || service_action >= 32) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32; break; default: ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 2); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; switch (cdb->options & RSO_OPTIONS_MASK) { case RSO_OPTIONS_ALL: all = (struct scsi_report_supported_opcodes_all *) ctsio->kern_data_ptr; num = 0; for (i = 0; i < 256; i++) { entry = &ctl_cmd_table[i]; if (entry->flags & CTL_CMD_FLAG_SA5) { for (j = 0; j < 32; j++) { sentry = &((const struct ctl_cmd_entry *) entry->execute)[j]; if (!ctl_cmd_applicable( lun->be_lun->lun_type, sentry)) continue; descr = &all->descr[num++]; descr->opcode = i; scsi_ulto2b(j, descr->service_action); descr->flags = RSO_SERVACTV; scsi_ulto2b(sentry->length, descr->cdb_length); } } else { if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) continue; descr = &all->descr[num++]; 
descr->opcode = i; scsi_ulto2b(0, descr->service_action); descr->flags = 0; scsi_ulto2b(entry->length, descr->cdb_length); } } scsi_ulto4b( num * sizeof(struct scsi_report_supported_opcodes_descr), all->length); break; case RSO_OPTIONS_OC: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; goto fill_one; case RSO_OPTIONS_OC_SA: one = (struct scsi_report_supported_opcodes_one *) ctsio->kern_data_ptr; entry = &ctl_cmd_table[opcode]; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; fill_one: if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { one->support = 3; scsi_ulto2b(entry->length, one->cdb_length); one->cdb_usage[0] = opcode; memcpy(&one->cdb_usage[1], entry->usage, entry->length - 1); } else one->support = 1; break; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return(retval); } int ctl_report_supported_tmf(struct ctl_scsiio *ctsio) { struct scsi_report_supported_tmf *cdb; struct scsi_report_supported_tmf_data *data; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n")); cdb = (struct scsi_report_supported_tmf *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_supported_tmf_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr; data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS; data->byte2 |= RST_ITNRS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_report_timestamp(struct ctl_scsiio *ctsio) { struct scsi_report_timestamp *cdb; struct scsi_report_timestamp_data *data; struct timeval tv; int64_t timestamp; int retval; int alloc_len, total_len; CTL_DEBUG_PRINT(("ctl_report_timestamp\n")); cdb = (struct scsi_report_timestamp *)ctsio->cdb; retval = CTL_RETVAL_COMPLETE; total_len = sizeof(struct scsi_report_timestamp_data); alloc_len = scsi_4btoul(cdb->length); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); ctsio->kern_sg_entries = 0; if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*data) - 2, data->length); data->origin = RTS_ORIG_OUTSIDE; getmicrotime(&tv); timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; scsi_ulto4b(timestamp >> 16, data->timestamp); scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio) { struct scsi_per_res_in *cdb; int alloc_len, total_len = 0; /* struct 
scsi_per_res_in_rsrv in_data; */ struct ctl_lun *lun; struct ctl_softc *softc; uint64_t key; CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n")); cdb = (struct scsi_per_res_in *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; retry: mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: /* read keys */ total_len = sizeof(struct scsi_per_res_in_keys) + lun->pr_key_count * sizeof(struct scsi_per_res_key); break; case SPRI_RR: /* read reservation */ if (lun->flags & CTL_LUN_PR_RESERVED) total_len = sizeof(struct scsi_per_res_in_rsrv); else total_len = sizeof(struct scsi_per_res_in_header); break; case SPRI_RC: /* report capabilities */ total_len = sizeof(struct scsi_per_res_cap); break; case SPRI_RS: /* read full status */ total_len = sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count; break; default: panic("Invalid PR type %x", cdb->action); } mtx_unlock(&lun->lun_lock); ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); if (total_len < alloc_len) { ctsio->residual = alloc_len - total_len; ctsio->kern_data_len = total_len; ctsio->kern_total_len = total_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; mtx_lock(&lun->lun_lock); switch (cdb->action) { case SPRI_RK: { // read keys struct scsi_per_res_in_keys *res_keys; int i, key_count; res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (total_len != (sizeof(struct scsi_per_res_in_keys) + (lun->pr_key_count * sizeof(struct scsi_per_res_key)))){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); scsi_ulto4b(sizeof(struct scsi_per_res_key) * lun->pr_key_count, res_keys->header.length); for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; /* * We used lun->pr_key_count to calculate the * size to allocate. If it turns out the number of * initiators with the registered flag set is * larger than that (i.e. they haven't been kept in * sync), we've got a problem. 
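 *
 * The pattern used throughout this handler is: size the buffer under the
 * lock, drop the lock for the (possibly sleeping) allocation, then
 * re-check under the lock and start over if the size changed.  A
 * standalone userland sketch, with pthread locking and
 * count_items()/fill_items() as illustrative stand-ins for the per-LUN
 * state:
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
extern size_t count_items(void);		/* reads locked state */
extern void fill_items(void *buf, size_t len);	/* copies locked state */

static void *
alloc_snapshot(size_t item_size, size_t *lenp)
{
	size_t len, now;
	void *buf;

retry:
	pthread_mutex_lock(&state_lock);
	len = count_items() * item_size;
	pthread_mutex_unlock(&state_lock);

	if ((buf = malloc(len)) == NULL)	/* may sleep; state can change */
		return (NULL);

	pthread_mutex_lock(&state_lock);
	now = count_items() * item_size;
	if (now != len) {			/* lost the race; try again */
		pthread_mutex_unlock(&state_lock);
		free(buf);
		goto retry;
	}
	fill_items(buf, len);			/* copy while still locked */
	pthread_mutex_unlock(&state_lock);
	*lenp = len;
	return (buf);
}
#endif
/*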
*/ if (key_count >= lun->pr_key_count) { #ifdef NEEDTOPORT csevent_log(CSC_CTL | CSC_SHELF_SW | CTL_PR_ERROR, csevent_LogType_Fault, csevent_AlertLevel_Yellow, csevent_FRU_ShelfController, csevent_FRU_Firmware, csevent_FRU_Unknown, "registered keys %d >= key " "count %d", key_count, lun->pr_key_count); #endif key_count++; continue; } scsi_u64to8b(key, res_keys->keys[key_count].key); key_count++; } break; } case SPRI_RR: { // read reservation struct scsi_per_res_in_rsrv *res; int tmp_len, header_only; res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; scsi_ulto4b(lun->PRGeneration, res->header.generation); if (lun->flags & CTL_LUN_PR_RESERVED) { tmp_len = sizeof(struct scsi_per_res_in_rsrv); scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), res->header.length); header_only = 0; } else { tmp_len = sizeof(struct scsi_per_res_in_header); scsi_ulto4b(0, res->header.length); header_only = 1; } /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) */ if (tmp_len != total_len) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation status changed, retrying\n", __func__); goto retry; } /* * No reservation held, so we're done. */ if (header_only != 0) break; /* * If the registration is an All Registrants type, the key * is 0, since it doesn't really matter. */ if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), res->data.reservation); } res->data.scopetype = lun->res_type; break; } case SPRI_RC: //report capabilities { struct scsi_per_res_cap *res_cap; uint16_t type_mask; res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; scsi_ulto2b(sizeof(*res_cap), res_cap->length); res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; type_mask = SPRI_TM_WR_EX_AR | SPRI_TM_EX_AC_RO | SPRI_TM_WR_EX_RO | SPRI_TM_EX_AC | SPRI_TM_WR_EX | SPRI_TM_EX_AC_AR; scsi_ulto2b(type_mask, res_cap->type_mask); break; } case SPRI_RS: { // read full status struct scsi_per_res_in_full *res_status; struct scsi_per_res_in_full_desc *res_desc; struct ctl_port *port; int i, len; res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; /* * We had to drop the lock to allocate our buffer, which * leaves time for someone to come in with another * persistent reservation. (That is unlikely, though, * since this should be the only persistent reservation * command active right now.) 
*/ if (total_len < (sizeof(struct scsi_per_res_in_header) + (sizeof(struct scsi_per_res_in_full_desc) + 256) * lun->pr_key_count)){ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); printf("%s: reservation length changed, retrying\n", __func__); goto retry; } scsi_ulto4b(lun->PRGeneration, res_status->header.generation); res_desc = &res_status->desc[0]; for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { if ((key = ctl_get_prkey(lun, i)) == 0) continue; scsi_u64to8b(key, res_desc->res_key.key); if ((lun->flags & CTL_LUN_PR_RESERVED) && (lun->pr_res_idx == i || lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { res_desc->flags = SPRI_FULL_R_HOLDER; res_desc->scopetype = lun->res_type; } scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, res_desc->rel_trgt_port_id); len = 0; port = softc->ctl_ports[ ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; if (port != NULL) len = ctl_create_iid(port, i % CTL_MAX_INIT_PER_PORT, res_desc->transport_id); scsi_ulto4b(len, res_desc->additional_length); res_desc = (struct scsi_per_res_in_full_desc *) &res_desc->transport_id[len]; } scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], res_status->header.length); break; } default: /* * This is a bug, because we just checked for this above, * and should have returned an error. */ panic("Invalid PR type %x", cdb->action); break; /* NOTREACHED */ } mtx_unlock(&lun->lun_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static void ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) { int off = lun->ctl_softc->persis_offset; if (residx >= off && residx < off + CTL_MAX_INITIATORS) ctl_est_ua(lun, residx - off, ua); } /* * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if * it should return. 
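 *
 * The scope/type screens repeated below and in
 * ctl_persistent_reserve_out() reduce to one rule; in standalone sketch
 * form (not built, spr_type_valid() is an illustrative name): per SPC-3
 * the defined persistent reservation types are 1, 3, 5, 6, 7 and 8, so
 * 0, 2, 4 and anything above 8 are rejected.
 */
#if 0
#include <stdint.h>

static int
spr_type_valid(uint8_t type)
{
	switch (type) {
	case 1:		/* Write Exclusive */
	case 3:		/* Exclusive Access */
	case 5:		/* Write Exclusive - Registrants Only */
	case 6:		/* Exclusive Access - Registrants Only */
	case 7:		/* Write Exclusive - All Registrants */
	case 8:		/* Exclusive Access - All Registrants */
		return (1);
	default:	/* 0, 2, 4 and >8 are reserved or obsolete */
		return (0);
	}
}
#endif
/*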
*/ static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, uint64_t sa_res_key, uint8_t type, uint32_t residx, struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, struct scsi_per_res_out_parms* param) { union ctl_ha_msg persis_io; int retval, i; int isc_retval; retval = 0; mtx_lock(&lun->lun_lock); if (sa_res_key == 0) { if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Unregister everybody else and build UA for * them */ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->res_type = type; if (lun->res_type != SPR_TYPE_WR_EX_AR && lun->res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned " "from ctl_ha_msg_send %d\n", isc_retval); } } else { /* not all registrants */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || !(lun->flags & CTL_LUN_PR_RESERVED)) { int found = 0; if (res_key == sa_res_key) { /* special case */ /* * The spec implies this is not good but doesn't * say what to do. There are two choices either * generate a res conflict or check condition * with illegal field in parameter data. Since * that is what is done when the sa_res_key is * zero I'll take that approach since this has * to do with the sa_res_key. 
*/ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 8, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) != sa_res_key) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } } else { /* Reserved but not all registrants */ /* sa_res_key is res holder */ if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { /* validate scope and type */ if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (1); } if (type>8 || type==2 || type==4 || type==0) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (1); } /* * Do the following: * if sa_res_key != res_key remove all * registrants w/sa_res_key and generate UA * for these registrants(Registrations * Preempted) if it wasn't an exclusive * reservation generate UA(Reservations * Preempted) for all other registered nexuses * if the type has changed. Establish the new * reservation and holder. If res_key and * sa_res_key are the same do the above * except don't unregister the res holder. 
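 *
 * A standalone sketch of the unregister pass that follows; keys[],
 * reg_preempt_ua() and res_release_ua() are illustrative stand-ins for
 * the per-LUN key table and UA machinery, and the reservation-type
 * change handled below is folded into the same loop:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
preempt_pass(uint64_t *keys, size_t nkeys, size_t self, uint64_t sa_res_key,
    size_t *key_count, int type_changed_from_ro,
    void (*reg_preempt_ua)(size_t), void (*res_release_ua)(size_t))
{
	size_t i;

	for (i = 0; i < nkeys; i++) {
		if (i == self || keys[i] == 0)
			continue;
		if (keys[i] == sa_res_key) {
			keys[i] = 0;		/* registration preempted */
			(*key_count)--;
			reg_preempt_ua(i);
		} else if (type_changed_from_ro) {
			res_release_ua(i);	/* reservation released */
		}
	}
}
#endif
/*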
*/ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (type != lun->res_type && (lun->res_type == SPR_TYPE_WR_EX_RO || lun->res_type ==SPR_TYPE_EX_AC_RO)){ ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->res_type = type; if (lun->res_type != SPR_TYPE_WR_EX_AR && lun->res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned " "from ctl_ha_msg_send %d\n", isc_retval); } } else { /* * sa_res_key is not the res holder just * remove registrants */ int found=0; for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (sa_res_key != ctl_get_prkey(lun, i)) continue; found = 1; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } if (!found) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (1); } persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_PREEMPT; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned " "from ctl_ha_msg_send %d\n", isc_retval); } } } lun->PRGeneration++; mtx_unlock(&lun->lun_lock); return (retval); } static void ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) { uint64_t sa_res_key; int i; sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS || lun->pr_res_idx == CTL_PR_NO_RESERVATION || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { if (sa_res_key == 0) { /* * Unregister everybody else and build UA for * them */ for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->pr_key_count = 1; lun->res_type = msg->pr.pr_info.res_type; if (lun->res_type != SPR_TYPE_WR_EX_AR && lun->res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; } else { for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (sa_res_key == ctl_get_prkey(lun, i)) continue; ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } } } else { for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (i == msg->pr.pr_info.residx || ctl_get_prkey(lun, i) == 0) continue; if (sa_res_key == ctl_get_prkey(lun, i)) { ctl_clr_prkey(lun, i); lun->pr_key_count--; ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } else if (msg->pr.pr_info.res_type != lun->res_type && (lun->res_type == SPR_TYPE_WR_EX_RO || lun->res_type == SPR_TYPE_EX_AC_RO)) { ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->res_type = msg->pr.pr_info.res_type; if 
(lun->res_type != SPR_TYPE_WR_EX_AR && lun->res_type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = msg->pr.pr_info.residx; else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; } lun->PRGeneration++; } int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) { int retval; int isc_retval; u_int32_t param_len; struct scsi_per_res_out *cdb; struct ctl_lun *lun; struct scsi_per_res_out_parms* param; struct ctl_softc *softc; uint32_t residx; uint64_t res_key, sa_res_key, key; uint8_t type; union ctl_ha_msg persis_io; int i; CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); retval = CTL_RETVAL_COMPLETE; cdb = (struct scsi_per_res_out *)ctsio->cdb; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; softc = lun->ctl_softc; /* * We only support whole-LUN scope. The scope & type are ignored for * register, register and ignore existing key and clear. * We sometimes ignore scope and type on preempts too!! * Verify reservation type here as well. */ type = cdb->scope_type & SPR_TYPE_MASK; if ((cdb->action == SPRO_RESERVE) || (cdb->action == SPRO_RELEASE)) { if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 4); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } if (type>8 || type==2 || type==4 || type==0) { ctl_set_invalid_field(/*ctsio*/ ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 1, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } param_len = scsi_4btoul(cdb->length); if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK); ctsio->kern_data_len = param_len; ctsio->kern_total_len = param_len; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr; residx = ctl_get_resindex(&ctsio->io_hdr.nexus); res_key = scsi_8btou64(param->res_key.key); sa_res_key = scsi_8btou64(param->serv_act_res_key); /* * Validate the reservation key here except for SPRO_REG_IGNO * This must be done for all other service actions */ if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) { mtx_lock(&lun->lun_lock); if ((key = ctl_get_prkey(lun, residx)) != 0) { if (res_key != key) { /* * The current key passed in doesn't match * the one the initiator previously * registered. */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } } else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) { /* * We are not registered */ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } else if (res_key != 0) { /* * We are not registered and trying to register but * the register key isn't zero. 
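 *
 * The three checks above collapse to one screen; in standalone sketch
 * form (not built, prout_key_conflict() is an illustrative name): a
 * registered nexus must present its own key, an unregistered nexus may
 * only REGISTER, and then only with a zero reservation key.
 */
#if 0
#include <stdint.h>

static int	/* 1 = reservation conflict, 0 = proceed */
prout_key_conflict(uint64_t registered_key, int is_register, uint64_t res_key)
{
	if (registered_key != 0)	/* registered: key must match */
		return (res_key != registered_key);
	if (!is_register)		/* unregistered: REGISTER only */
		return (1);
	return (res_key != 0);		/* and res_key must be zero */
}
#endif
/*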
*/ mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } switch (cdb->action & SPRO_ACTION_MASK) { case SPRO_REGISTER: case SPRO_REG_IGNO: { #if 0 printf("Registration received\n"); #endif /* * We don't support any of these options, as we report in * the read capabilities request (see * ctl_persistent_reserve_in(), above). */ if ((param->flags & SPR_SPEC_I_PT) || (param->flags & SPR_ALL_TG_PT) || (param->flags & SPR_APTPL)) { int bit_ptr; if (param->flags & SPR_APTPL) bit_ptr = 0; else if (param->flags & SPR_ALL_TG_PT) bit_ptr = 2; else /* SPR_SPEC_I_PT */ bit_ptr = 3; free(ctsio->kern_data_ptr, M_CTL); ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 0, /*field*/ 20, /*bit_valid*/ 1, /*bit*/ bit_ptr); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_lock(&lun->lun_lock); /* * The initiator wants to clear the * key/unregister. */ if (sa_res_key == 0) { if ((res_key == 0 && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER) || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO && ctl_get_prkey(lun, residx) == 0)) { mtx_unlock(&lun->lun_lock); goto done; } ctl_clr_prkey(lun, residx); lun->pr_key_count--; if (residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->res_type == SPR_TYPE_WR_EX_RO || lun->res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits. The * sense code should be RESERVATIONS * RELEASED */ for (i = 0; i < CTL_MAX_INITIATORS;i++){ if (ctl_get_prkey(lun, i + softc->persis_offset) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY; persis_io.pr.pr_info.residx = residx; if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0 )) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } } else /* sa_res_key != 0 */ { /* * If we aren't registered currently then increment * the key count and set the registered flag. 
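 *
 * In sketch form (standalone, illustrative names): the registrant count
 * tracks occupied slots, so it is bumped only when a slot goes from
 * empty to registered; re-registration just replaces the key.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void
register_key(uint64_t *keys, size_t idx, uint64_t new_key, size_t *key_count)
{
	if (keys[idx] == 0)
		(*key_count)++;		/* newly registered nexus */
	keys[idx] = new_key;		/* existing registrant: replace key */
}
#endif
/*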
*/ ctl_alloc_prkey(lun, residx); if (ctl_get_prkey(lun, residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, residx, sa_res_key); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_REG_KEY; persis_io.pr.pr_info.residx = residx; memcpy(persis_io.pr.pr_info.sa_res_key, param->serv_act_res_key, sizeof(param->serv_act_res_key)); if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } } lun->PRGeneration++; mtx_unlock(&lun->lun_lock); break; } case SPRO_RESERVE: #if 0 printf("Reserve executed type %d\n", type); #endif mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_PR_RESERVED) { /* * if this isn't the reservation holder and it's * not a "all registrants" type or if the type is * different then we have a conflict */ if ((lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) || lun->res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_reservation_conflict(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } mtx_unlock(&lun->lun_lock); } else /* create a reservation */ { /* * If it's not an "all registrants" type record * reservation holder */ if (type != SPR_TYPE_WR_EX_AR && type != SPR_TYPE_EX_AC_AR) lun->pr_res_idx = residx; /* Res holder */ else lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; lun->flags |= CTL_LUN_PR_RESERVED; lun->res_type = type; mtx_unlock(&lun->lun_lock); /* send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RESERVE; persis_io.pr.pr_info.residx = lun->pr_res_idx; persis_io.pr.pr_info.res_type = type; if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } } break; case SPRO_RELEASE: mtx_lock(&lun->lun_lock); if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) { /* No reservation exists return good status */ mtx_unlock(&lun->lun_lock); goto done; } /* * Is this nexus a reservation holder? */ if (lun->pr_res_idx != residx && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { /* * not a res holder return good status but * do nothing */ mtx_unlock(&lun->lun_lock); goto done; } if (lun->res_type != type) { mtx_unlock(&lun->lun_lock); free(ctsio->kern_data_ptr, M_CTL); ctl_set_illegal_pr_release(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* okay to release */ lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->res_type = 0; /* * if this isn't an exclusive access * res generate UA for all other * registrants. 
*/ if (type != SPR_TYPE_EX_AC && type != SPR_TYPE_WR_EX) { for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (i == residx || ctl_get_prkey(lun, i + softc->persis_offset) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } mtx_unlock(&lun->lun_lock); /* Send msg to other side */ persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_RELEASE; if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } break; case SPRO_CLEAR: /* send msg to other side */ mtx_lock(&lun->lun_lock); lun->flags &= ~CTL_LUN_PR_RESERVED; lun->res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; ctl_clr_prkey(lun, residx); for (i=0; i < 2*CTL_MAX_INITIATORS; i++) if (ctl_get_prkey(lun, i) != 0) { ctl_clr_prkey(lun, i); ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->PRGeneration++; mtx_unlock(&lun->lun_lock); persis_io.hdr.nexus = ctsio->io_hdr.nexus; persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; persis_io.pr.pr_info.action = CTL_PR_CLEAR; if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io, sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Persis Out error returned from " "ctl_ha_msg_send %d\n", isc_retval); } break; case SPRO_PREEMPT: case SPRO_PRE_ABO: { int nretval; nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type, residx, ctsio, cdb, param); if (nretval != 0) return (CTL_RETVAL_COMPLETE); break; } default: panic("Invalid PR type %x", cdb->action); } done: free(ctsio->kern_data_ptr, M_CTL); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } /* * This routine is for handling a message from the other SC pertaining to * persistent reserve out. All the error checking will have been done * so only performing the action needs to be done here to keep the two * in sync. */ static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg) { struct ctl_lun *lun; struct ctl_softc *softc; int i; uint32_t targ_lun; softc = control_softc; targ_lun = msg->hdr.nexus.targ_mapped_lun; lun = softc->ctl_luns[targ_lun]; mtx_lock(&lun->lun_lock); switch(msg->pr.pr_info.action) { case CTL_PR_REG_KEY: ctl_alloc_prkey(lun, msg->pr.pr_info.residx); if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0) lun->pr_key_count++; ctl_set_prkey(lun, msg->pr.pr_info.residx, scsi_8btou64(msg->pr.pr_info.sa_res_key)); lun->PRGeneration++; break; case CTL_PR_UNREG_KEY: ctl_clr_prkey(lun, msg->pr.pr_info.residx); lun->pr_key_count--; /* XXX Need to see if the reservation has been released */ /* if so do we need to generate UA? */ if (msg->pr.pr_info.residx == lun->pr_res_idx) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; if ((lun->res_type == SPR_TYPE_WR_EX_RO || lun->res_type == SPR_TYPE_EX_AC_RO) && lun->pr_key_count) { /* * If the reservation is a registrants * only type we need to generate a UA * for other registered inits.
The * sense code should be RESERVATIONS * RELEASED */ for (i = 0; i < CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i + softc->persis_offset) == 0) continue; ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } } lun->res_type = 0; } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { if (lun->pr_key_count==0) { lun->flags &= ~CTL_LUN_PR_RESERVED; lun->res_type = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; } } lun->PRGeneration++; break; case CTL_PR_RESERVE: lun->flags |= CTL_LUN_PR_RESERVED; lun->res_type = msg->pr.pr_info.res_type; lun->pr_res_idx = msg->pr.pr_info.residx; break; case CTL_PR_RELEASE: /* * if this isn't an exclusive access res generate UA for all * other registrants. */ if (lun->res_type != SPR_TYPE_EX_AC && lun->res_type != SPR_TYPE_WR_EX) { for (i = 0; i < CTL_MAX_INITIATORS; i++) if (ctl_get_prkey(lun, i + softc->persis_offset) != 0) ctl_est_ua(lun, i, CTL_UA_RES_RELEASE); } lun->flags &= ~CTL_LUN_PR_RESERVED; lun->pr_res_idx = CTL_PR_NO_RESERVATION; lun->res_type = 0; break; case CTL_PR_PREEMPT: ctl_pro_preempt_other(lun, msg); break; case CTL_PR_CLEAR: lun->flags &= ~CTL_LUN_PR_RESERVED; lun->res_type = 0; lun->pr_key_count = 0; lun->pr_res_idx = CTL_PR_NO_RESERVATION; for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { if (ctl_get_prkey(lun, i) == 0) continue; ctl_clr_prkey(lun, i); ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); } lun->PRGeneration++; break; } mtx_unlock(&lun->lun_lock); } int ctl_read_write(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; int isread; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); flags = 0; retval = CTL_RETVAL_COMPLETE; isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; switch (ctsio->cdb[0]) { case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)ctsio->cdb; lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ lba &= 0x1fffff; num_blocks = cdb->length; /* * This is correct according to SBC-2. 
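 *
 * A standalone sketch of the 6-byte CDB decode (not built; decode_rw6()
 * is an illustrative name): bytes 1-3 carry a 21-bit LBA, of which only
 * the low 5 bits of byte 1 are valid, and byte 4 carries the transfer
 * length, with 0 meaning 256 blocks.
 */
#if 0
#include <stdint.h>

static void
decode_rw6(const uint8_t cdb[6], uint64_t *lba, uint32_t *num_blocks)
{
	*lba = ((uint32_t)(cdb[1] & 0x1f) << 16) |
	    ((uint32_t)cdb[2] << 8) | cdb[3];	/* same as the 0x1fffff mask */
	*num_blocks = (cdb[4] != 0) ? cdb[4] : 256;	/* SBC-2: 0 -> 256 */
}
#endif
/*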
*/ if (num_blocks == 0) num_blocks = 256; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case WRITE_ATOMIC_16: { struct scsi_rw_16 *cdb; if (lun->be_lun->atomicblock == 0) { ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SRW12_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW12_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); if (num_blocks > lun->be_lun->atomicblock) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)ctsio->cdb; flags |= CTL_LLF_FUA; if (cdb->byte2 & SWV_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. * Note that this cannot happen with WRITE(6) or READ(6), since 0 * translates to 256 blocks for those commands. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA and/or DPO if caches are disabled. 
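 *
 * In sketch form (standalone; LLF_FUA/LLF_DPO stand in for the CTL_LLF_*
 * flags and SCP_WCE/SCP_RCD carry their scsi_all.h values): reads bypass
 * a disabled read cache, writes are forced to media when the write cache
 * is off.
 */
#if 0
#include <stdint.h>

#define	SCP_WCE		0x04	/* write cache enable */
#define	SCP_RCD		0x01	/* read cache disable */
#define	LLF_FUA		0x01
#define	LLF_DPO		0x02

static int
cache_flags(uint8_t caching_flags1, int isread)
{
	int flags = 0;

	if (isread) {
		if (caching_flags1 & SCP_RCD)
			flags |= LLF_FUA | LLF_DPO;
	} else if ((caching_flags1 & SCP_WCE) == 0)
		flags |= LLF_FUA;
	return (flags);
}
#endif
/*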
*/ if (isread) { if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & SCP_RCD) != 0) flags |= CTL_LLF_FUA | CTL_LLF_DPO; } else { if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = (isread ? CTL_LLF_READ : CTL_LLF_WRITE) | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static int ctl_cnw_cont(union ctl_io *io) { struct ctl_scsiio *ctsio; struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; int retval; ctsio = &io->scsiio; ctsio->io_hdr.status = CTL_STATUS_NONE; ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->flags &= ~CTL_LLF_COMPARE; lbalen->flags |= CTL_LLF_WRITE; CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_cnw(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int flags, retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0])); flags = 0; retval = CTL_RETVAL_COMPLETE; switch (ctsio->cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)ctsio->cdb; if (cdb->byte2 & SRW10_FUA) flags |= CTL_LLF_FUA; if (cdb->byte2 & SRW10_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = cdb->length; break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); break; /* NOTREACHED */ } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. */ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* Set FUA if write cache is disabled. */ if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & SCP_WCE) == 0) flags |= CTL_LLF_FUA; ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize; ctsio->kern_rel_offset = 0; /* * Set the IO_CONT flag, so that if this I/O gets passed to * ctl_data_submit_done(), it'll get passed back to * ctl_cnw_cont() for further processing.
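 *
 * The shape of that two-phase continuation, as a standalone sketch
 * (struct req and all names here are illustrative): the compare is
 * submitted first; its completion callback flips the flags and resubmits
 * the same request as the write.
 */
#if 0
struct req {
	int	flags;
#define	F_IO_CONT	0x01		/* completion re-enters io_cont */
#define	F_COMPARE	0x02
#define	F_WRITE		0x04
	int	(*io_cont)(struct req *);
};

extern int data_submit(struct req *r);	/* backend entry, illustrative */

static int
cnw_cont(struct req *r)
{
	r->flags &= ~(F_IO_CONT | F_COMPARE);
	r->flags |= F_WRITE;		/* phase two: the write */
	return (data_submit(r));
}

static int
cnw_start(struct req *r)
{
	r->flags |= F_IO_CONT | F_COMPARE;	/* phase one: the compare */
	r->io_cont = cnw_cont;
	return (data_submit(r));
}
#endif
/*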
*/ ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT; ctsio->io_cont = ctl_cnw_cont; lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; lbalen->flags = CTL_LLF_COMPARE | flags; CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_verify(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct ctl_lba_len_flags *lbalen; uint64_t lba; uint32_t num_blocks; int bytchk, flags; int retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0])); bytchk = 0; flags = CTL_LLF_FUA; retval = CTL_RETVAL_COMPLETE; switch (ctsio->cdb[0]) { case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_4btoul(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)ctsio->cdb; if (cdb->byte2 & SVFY_BYTCHK) bytchk = 1; if (cdb->byte2 & SVFY_DPO) flags |= CTL_LLF_DPO; lba = scsi_8btou64(cdb->addr); num_blocks = scsi_4btoul(cdb->length); break; } default: /* * We got a command we don't support. This shouldn't * happen, commands should be filtered out above us. */ ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * The first check is to make sure we're in bounds, the second * check is to catch wrap-around problems. If the lba + num blocks * is less than the lba, then we've wrapped around and the block * range is invalid anyway. */ if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) || ((lba + num_blocks) < lba)) { ctl_set_lba_out_of_range(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * According to SBC-3, a transfer length of 0 is not an error. 
*/ if (num_blocks == 0) { ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } lbalen = (struct ctl_lba_len_flags *) &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; lbalen->lba = lba; lbalen->len = num_blocks; if (bytchk) { lbalen->flags = CTL_LLF_COMPARE | flags; ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize; } else { lbalen->flags = CTL_LLF_VERIFY | flags; ctsio->kern_total_len = 0; } ctsio->kern_rel_offset = 0; CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } int ctl_report_luns(struct ctl_scsiio *ctsio) { struct ctl_softc *softc = control_softc; struct scsi_report_luns *cdb; struct scsi_report_luns_data *lun_data; struct ctl_lun *lun, *request_lun; + struct ctl_port *port; int num_luns, retval; uint32_t alloc_len, lun_datalen; int num_filled, well_known; uint32_t initidx, targ_lun_id, lun_id; retval = CTL_RETVAL_COMPLETE; well_known = 0; cdb = (struct scsi_report_luns *)ctsio->cdb; CTL_DEBUG_PRINT(("ctl_report_luns\n")); mtx_lock(&softc->ctl_lock); num_luns = softc->num_luns; mtx_unlock(&softc->ctl_lock); switch (cdb->select_report) { case RPL_REPORT_DEFAULT: case RPL_REPORT_ALL: break; case RPL_REPORT_WELLKNOWN: well_known = 1; num_luns = 0; break; default: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); break; /* NOTREACHED */ } alloc_len = scsi_4btoul(cdb->length); /* * The initiator has to allocate at least 16 bytes for this request, * so he can at least get the header and the first LUN. Otherwise * we reject the request (per SPC-3 rev 14, section 6.21). */ if (alloc_len < (sizeof(struct scsi_report_luns_data) + sizeof(struct scsi_report_luns_lundata))) { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 6, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (retval); } request_lun = (struct ctl_lun *) ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; + port = ctl_io_port(&ctsio->io_hdr); lun_datalen = sizeof(*lun_data) + (num_luns * sizeof(struct scsi_report_luns_lundata)); ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); mtx_lock(&softc->ctl_lock); for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { - lun_id = ctl_map_lun(softc, ctsio->io_hdr.nexus.targ_port, - targ_lun_id); + lun_id = ctl_lun_map_from_port(port, targ_lun_id); if (lun_id >= CTL_MAX_LUNS) continue; lun = softc->ctl_luns[lun_id]; if (lun == NULL) continue; if (targ_lun_id <= 0xff) { /* * Peripheral addressing method, bus number 0. */ lun_data->luns[num_filled].lundata[0] = RPL_LUNDATA_ATYP_PERIPH; lun_data->luns[num_filled].lundata[1] = targ_lun_id; num_filled++; } else if (targ_lun_id <= 0x3fff) { /* * Flat addressing method. */ lun_data->luns[num_filled].lundata[0] = RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); lun_data->luns[num_filled].lundata[1] = (targ_lun_id & 0xff); num_filled++; } else if (targ_lun_id <= 0xffffff) { /* * Extended flat addressing method. 
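 *
 * All three formats in one standalone sketch (encode_lun() is an
 * illustrative name); it writes the leading bytes of a zeroed 8-byte
 * REPORT LUNS entry, matching the branches here:
 */
#if 0
#include <stdint.h>
#include <string.h>

static void
encode_lun(uint32_t lun, uint8_t entry[8])
{
	memset(entry, 0, 8);
	if (lun <= 0xff) {
		entry[0] = 0x00;		/* peripheral, bus 0 */
		entry[1] = lun;
	} else if (lun <= 0x3fff) {
		entry[0] = 0x40 | (lun >> 8);	/* flat */
		entry[1] = lun & 0xff;
	} else {				/* extended flat, 3-byte LUN */
		entry[0] = 0xc0 | 0x12;
		entry[1] = (lun >> 16) & 0xff;
		entry[2] = (lun >> 8) & 0xff;
		entry[3] = lun & 0xff;
	}
}
#endif
/*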
*/ lun_data->luns[num_filled].lundata[0] = RPL_LUNDATA_ATYP_EXTLUN | 0x12; scsi_ulto3b(targ_lun_id, &lun_data->luns[num_filled].lundata[1]); num_filled++; } else { printf("ctl_report_luns: bogus LUN number %jd, " "skipping\n", (intmax_t)targ_lun_id); } /* * According to SPC-3, rev 14 section 6.21: * * "The execution of a REPORT LUNS command to any valid and * installed logical unit shall clear the REPORTED LUNS DATA * HAS CHANGED unit attention condition for all logical * units of that target with respect to the requesting * initiator. A valid and installed logical unit is one * having a PERIPHERAL QUALIFIER of 000b in the standard * INQUIRY data (see 6.4.2)." * * If request_lun is NULL, the LUN this report luns command * was issued to is either disabled or doesn't exist. In that * case, we shouldn't clear any pending lun change unit * attention. */ if (request_lun != NULL) { mtx_lock(&lun->lun_lock); ctl_clr_ua(lun, initidx, CTL_UA_RES_RELEASE); mtx_unlock(&lun->lun_lock); } } mtx_unlock(&softc->ctl_lock); /* * It's quite possible that we've returned fewer LUNs than we allocated * space for. Trim it. */ lun_datalen = sizeof(*lun_data) + (num_filled * sizeof(struct scsi_report_luns_lundata)); if (lun_datalen < alloc_len) { ctsio->residual = alloc_len - lun_datalen; ctsio->kern_data_len = lun_datalen; ctsio->kern_total_len = lun_datalen; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * We set this to the actual data length, regardless of how much * space we actually have to return results. If the user looks at * this value, he'll know whether or not he allocated enough space * and reissue the command if necessary. We don't support well * known logical units, so if the user asks for that, return none. */ scsi_ulto4b(lun_datalen - 8, lun_data->length); /* * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy * this request. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (retval); } int ctl_request_sense(struct ctl_scsiio *ctsio) { struct scsi_request_sense *cdb; struct scsi_sense_data *sense_ptr; struct ctl_softc *ctl_softc; struct ctl_lun *lun; uint32_t initidx; int have_error; scsi_sense_data_type sense_format; ctl_ua_type ua_type; cdb = (struct scsi_request_sense *)ctsio->cdb; ctl_softc = control_softc; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_request_sense\n")); /* * Determine which sense format the user wants. */ if (cdb->byte2 & SRS_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; /* * struct scsi_sense_data, which is currently set to 256 bytes, is * larger than the largest allowed value for the length field in the * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. */ ctsio->residual = 0; ctsio->kern_data_len = cdb->length; ctsio->kern_total_len = cdb->length; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * If we don't have a LUN, we don't have any pending sense. */ if (lun == NULL) goto no_sense; have_error = 0; initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); /* * Check for pending sense, and then for pending unit attentions. 
* Pending sense gets returned first, then pending unit attentions. */ mtx_lock(&lun->lun_lock); #ifdef CTL_WITH_CA if (ctl_is_set(lun->have_ca, initidx)) { scsi_sense_data_type stored_format; /* * Check to see which sense format was used for the stored * sense data. */ stored_format = scsi_sense_type(&lun->pending_sense[initidx]); /* * If the user requested a different sense format than the * one we stored, then we need to convert it to the other * format. If we're going from descriptor to fixed format * sense data, we may lose things in translation, depending * on what options were used. * * If the stored format is SSD_TYPE_NONE (i.e. invalid), * for some reason we'll just copy it out as-is. */ if ((stored_format == SSD_TYPE_FIXED) && (sense_format == SSD_TYPE_DESC)) ctl_sense_to_desc((struct scsi_sense_data_fixed *) &lun->pending_sense[initidx], (struct scsi_sense_data_desc *)sense_ptr); else if ((stored_format == SSD_TYPE_DESC) && (sense_format == SSD_TYPE_FIXED)) ctl_sense_to_fixed((struct scsi_sense_data_desc *) &lun->pending_sense[initidx], (struct scsi_sense_data_fixed *)sense_ptr); else memcpy(sense_ptr, &lun->pending_sense[initidx], MIN(sizeof(*sense_ptr), sizeof(lun->pending_sense[initidx]))); ctl_clear_mask(lun->have_ca, initidx); have_error = 1; } else #endif { ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); if (ua_type != CTL_UA_NONE) have_error = 1; if (ua_type == CTL_UA_LUN_CHANGE) { mtx_unlock(&lun->lun_lock); mtx_lock(&ctl_softc->ctl_lock); ctl_clear_ua(ctl_softc, initidx, ua_type); mtx_unlock(&ctl_softc->ctl_lock); mtx_lock(&lun->lun_lock); } } mtx_unlock(&lun->lun_lock); /* * We already have a pending error, return it. */ if (have_error != 0) { /* * We report the SCSI status as OK, since the status of the * request sense command itself is OK. * We report 0 for the sense length, because we aren't doing * autosense in this case. We're reporting sense as * parameter data. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } no_sense: /* * No sense information to report, so we report that everything is * okay. */ ctl_set_sense_data(sense_ptr, lun, sense_format, /*current_error*/ 1, /*sense_key*/ SSD_KEY_NO_SENSE, /*asc*/ 0x00, /*ascq*/ 0x00, SSD_ELEM_NONE); /* * We report 0 for the sense length, because we aren't doing * autosense in this case. We're reporting sense as parameter data. */ ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_tur(struct ctl_scsiio *ctsio) { CTL_DEBUG_PRINT(("ctl_tur\n")); ctl_set_success(ctsio); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } #ifdef notyet static int ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) { } #endif /* * SCSI VPD page 0x00, the Supported VPD Pages page. 
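 * The payload is the 4-byte VPD header followed by one byte per
 * supported page code, listed in ascending order as SPC requires; the
 * header's length field is set to the number of page codes returned.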
*/ static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_supported_pages *pages; int sup_page_size; struct ctl_lun *lun; int p; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; sup_page_size = sizeof(struct scsi_vpd_supported_pages) * SCSI_EVPD_NUM_SUPPORTED_PAGES; ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sup_page_size < alloc_len) { ctsio->residual = alloc_len - sup_page_size; ctsio->kern_data_len = sup_page_size; ctsio->kern_total_len = sup_page_size; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) pages->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; p = 0; /* Supported VPD pages */ pages->page_list[p++] = SVPD_SUPPORTED_PAGES; /* Serial Number */ pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER; /* Device Identification */ pages->page_list[p++] = SVPD_DEVICE_ID; /* Extended INQUIRY Data */ pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA; /* Mode Page Policy */ pages->page_list[p++] = SVPD_MODE_PAGE_POLICY; /* SCSI Ports */ pages->page_list[p++] = SVPD_SCSI_PORTS; /* Third-party Copy */ pages->page_list[p++] = SVPD_SCSI_TPC; if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) { /* Block limits */ pages->page_list[p++] = SVPD_BLOCK_LIMITS; /* Block Device Characteristics */ pages->page_list[p++] = SVPD_BDC; /* Logical Block Provisioning */ pages->page_list[p++] = SVPD_LBP; } pages->length = p; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x80, the Unit Serial Number page. */ static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_unit_serial_number *sn_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = 4 + CTL_SN_LEN; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; sn_ptr->length = CTL_SN_LEN; /* * If we don't have a LUN, we just leave the serial number as * all spaces. 
*/ if (lun != NULL) { strncpy((char *)sn_ptr->serial_num, (char *)lun->be_lun->serial_num, CTL_SN_LEN); } else memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x86, the Extended INQUIRY Data page. */ static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_extended_inquiry_data *eid_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_extended_inquiry_data); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; scsi_ulto2b(data_len - 4, eid_ptr->page_length); /* * We support head of queue, ordered and simple tags. */ eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; /* * Volatile cache supported. */ eid_ptr->flags3 = SVPD_EID_V_SUP; /* * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit * attention for a particular IT nexus on all LUNs once we report * it to that nexus once. This bit is required as of SPC-4. */ eid_ptr->flags4 = SVPD_EID_LUICLT; /* * XXX KDM in order to correctly answer this, we would need * information from the SIM to determine how much sense data it * can send. So this would really be a path inquiry field, most * likely. This can be set to a maximum of 252 according to SPC-4, * but the hardware may or may not be able to support that much. * 0 just means that the maximum sense data length is not reported. */ eid_ptr->max_sense_length = 0; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_mode_page_policy *mpp_ptr; struct ctl_lun *lun; int data_len; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_mode_page_policy) + sizeof(struct scsi_vpd_mode_page_policy_descr); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. 
*/ if (lun != NULL) mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; scsi_ulto2b(data_len - 4, mpp_ptr->page_length); mpp_ptr->descr[0].page_code = 0x3f; mpp_ptr->descr[0].subpage_code = 0xff; mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * SCSI VPD page 0x83, the Device Identification page. */ static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_device_id *devid_ptr; struct scsi_vpd_id_descriptor *desc; struct ctl_softc *softc; struct ctl_lun *lun; struct ctl_port *port; int data_len; uint8_t proto; softc = control_softc; port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; data_len = sizeof(struct scsi_vpd_device_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_rel_trgt_port_id) + sizeof(struct scsi_vpd_id_descriptor) + sizeof(struct scsi_vpd_id_trgt_port_grp_id); if (lun && lun->lun_devid) data_len += lun->lun_devid->len; if (port->port_devid) data_len += port->port_devid->len; if (port->target_devid) data_len += port->target_devid->len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. */ if (lun != NULL) devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; devid_ptr->page_code = SVPD_DEVICE_ID; scsi_ulto2b(data_len - 4, devid_ptr->length); if (port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list; /* * We're using a LUN association here. i.e., this device ID is a * per-LUN identifier. */ if (lun && lun->lun_devid) { memcpy(desc, lun->lun_devid->data, lun->lun_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + lun->lun_devid->len); } /* * This is for the WWPN which is a port association. 
*/ if (port->port_devid) { memcpy(desc, port->port_devid->data, port->port_devid->len); desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + port->port_devid->len); } /* * This is for the Relative Target Port(type 4h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_RELTARG; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_rel_trgt_port_id)); /* * This is for the Target Port Group(type 5h) identifier */ desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_TPORTGRP; desc->length = 4; scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, &desc->identifier[2]); desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + sizeof(struct scsi_vpd_id_trgt_port_grp_id)); /* * This is for the Target identifier */ if (port->target_devid) { memcpy(desc, port->target_devid->data, port->target_devid->len); } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) { struct ctl_softc *softc = control_softc; struct scsi_vpd_scsi_ports *sp; struct scsi_vpd_port_designation *pd; struct scsi_vpd_port_designation_cont *pdc; struct ctl_lun *lun; struct ctl_port *port; int data_len, num_target_ports, iid_len, id_len, g, pg, p; int num_target_port_groups; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if (softc->is_single) num_target_port_groups = 1; else num_target_port_groups = NUM_TARGET_PORT_GROUPS; num_target_ports = 0; iid_len = 0; id_len = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && - ctl_map_lun_back(softc, port->targ_port, lun->lun) >= - CTL_MAX_LUNS) + ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; num_target_ports++; if (port->init_devid) iid_len += port->init_devid->len; if (port->port_devid) id_len += port->port_devid->len; } mtx_unlock(&softc->ctl_lock); data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups * num_target_ports * (sizeof(struct scsi_vpd_port_designation) + sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len; ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) sp->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; sp->page_code = SVPD_SCSI_PORTS; scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), sp->page_length); pd = &sp->design[0]; mtx_lock(&softc->ctl_lock); pg = softc->port_offset / CTL_MAX_PORTS; for (g = 0; g < num_target_port_groups; g++) { STAILQ_FOREACH(port, &softc->port_list, links) { if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) continue; if (lun != NULL && - ctl_map_lun_back(softc, port->targ_port, lun->lun) - >= CTL_MAX_LUNS) + ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) continue; p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; scsi_ulto2b(p, pd->relative_port_id); if (port->init_devid && g == pg) { iid_len = port->init_devid->len; memcpy(pd->initiator_transportid, port->init_devid->data, port->init_devid->len); } else iid_len = 0; scsi_ulto2b(iid_len, pd->initiator_transportid_length); pdc = (struct scsi_vpd_port_designation_cont *) (&pd->initiator_transportid[iid_len]); if (port->port_devid && g == pg) { id_len = port->port_devid->len; memcpy(pdc->target_port_descriptors, port->port_devid->data, port->port_devid->len); } else id_len = 0; scsi_ulto2b(id_len, pdc->target_port_descriptors_length); pd = (struct scsi_vpd_port_designation *) ((uint8_t *)pdc->target_port_descriptors + id_len); } } mtx_unlock(&softc->ctl_lock); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_block_limits *bl_ptr; struct ctl_lun *lun; int bs; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sizeof(*bl_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*bl_ptr); ctsio->kern_data_len = sizeof(*bl_ptr); ctsio->kern_total_len = sizeof(*bl_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bl_ptr->page_code = SVPD_BLOCK_LIMITS; scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); bl_ptr->max_cmp_write_len = 0xff; scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); if (lun != NULL) { bs = lun->be_lun->blocksize; scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); if (lun->be_lun->ublockexp != 0) { scsi_ulto4b((1 << lun->be_lun->ublockexp), bl_ptr->opt_unmap_grain); scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, bl_ptr->unmap_grain_align); } } scsi_ulto4b(lun->be_lun->atomicblock, bl_ptr->max_atomic_transfer_length); scsi_ulto4b(0, bl_ptr->atomic_alignment); scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity); } scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length); ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_block_device_characteristics *bdc_ptr; struct ctl_lun *lun; const char *value; u_int i; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO); bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sizeof(*bdc_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*bdc_ptr); ctsio->kern_data_len = sizeof(*bdc_ptr); ctsio->kern_total_len = sizeof(*bdc_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. 
*/ if (lun != NULL) bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; bdc_ptr->page_code = SVPD_BDC; scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL) i = strtol(value, NULL, 0); else i = CTL_DEFAULT_ROTATION_RATE; scsi_ulto2b(i, bdc_ptr->medium_rotation_rate); if (lun != NULL && (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL) i = strtol(value, NULL, 0); else i = 0; bdc_ptr->wab_wac_ff = (i & 0x0f); bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS; ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } static int ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len) { struct scsi_vpd_logical_block_prov *lbp_ptr; struct ctl_lun *lun; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO); lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; if (sizeof(*lbp_ptr) < alloc_len) { ctsio->residual = alloc_len - sizeof(*lbp_ptr); ctsio->kern_data_len = sizeof(*lbp_ptr); ctsio->kern_total_len = sizeof(*lbp_ptr); } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; ctsio->kern_sg_entries = 0; /* * The control device is always connected. The disk device, on the * other hand, may not be online all the time. Need to change this * to figure out whether the disk device is actually online or not. */ if (lun != NULL) lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; lbp_ptr->page_code = SVPD_LBP; scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length); lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT; if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 | SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP; lbp_ptr->prov_type = SVPD_LBP_THIN; } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } /* * INQUIRY with the EVPD bit set. 
*/ static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; struct scsi_inquiry *cdb; int alloc_len, retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); switch (cdb->page_code) { case SVPD_SUPPORTED_PAGES: retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); break; case SVPD_UNIT_SERIAL_NUMBER: retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); break; case SVPD_DEVICE_ID: retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); break; case SVPD_EXTENDED_INQUIRY_DATA: retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); break; case SVPD_MODE_PAGE_POLICY: retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); break; case SVPD_SCSI_PORTS: retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); break; case SVPD_SCSI_TPC: retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); break; case SVPD_BLOCK_LIMITS: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); break; case SVPD_BDC: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); break; case SVPD_LBP: if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) goto err; retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); break; default: err: ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); retval = CTL_RETVAL_COMPLETE; break; } return (retval); } /* * Standard INQUIRY data. */ static int ctl_inquiry_std(struct ctl_scsiio *ctsio) { struct scsi_inquiry_data *inq_ptr; struct scsi_inquiry *cdb; struct ctl_softc *softc; struct ctl_lun *lun; char *val; uint32_t alloc_len, data_len; ctl_port_type port_type; softc = control_softc; /* * Figure out whether we're talking to a Fibre Channel port or not. * We treat the ioctl front end, and any SCSI adapters, as packetized * SCSI front ends. */ port_type = softc->ctl_ports[ ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type; if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL) port_type = CTL_PORT_SCSI; lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; cdb = (struct scsi_inquiry *)ctsio->cdb; alloc_len = scsi_2btoul(cdb->length); /* * We malloc the full inquiry data size here and fill it * in. If the user only asks for less, we'll give him * that much. */ data_len = offsetof(struct scsi_inquiry_data, vendor_specific1); ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr; ctsio->kern_sg_entries = 0; ctsio->kern_data_resid = 0; ctsio->kern_rel_offset = 0; if (data_len < alloc_len) { ctsio->residual = alloc_len - data_len; ctsio->kern_data_len = data_len; ctsio->kern_total_len = data_len; } else { ctsio->residual = 0; ctsio->kern_data_len = alloc_len; ctsio->kern_total_len = alloc_len; } /* * If we have a LUN configured, report it as connected. Otherwise, * report that it is offline or no device is supported, depending * on the value of inquiry_pq_no_lun. * * According to the spec (SPC-4 r34), the peripheral qualifier * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario: * * "A peripheral device having the specified peripheral device type * is not connected to this logical unit. However, the device * server is capable of supporting the specified peripheral device * type on this logical unit." 
* * According to the same spec, the peripheral qualifier * SID_QUAL_BAD_LU (011b) is used in this scenario: * * "The device server is not capable of supporting a peripheral * device on this logical unit. For this peripheral qualifier the * peripheral device type shall be set to 1Fh. All other peripheral * device type values are reserved for this peripheral qualifier." * * Given the text, it would seem that we probably want to report that * the LUN is offline here. There is no LUN connected, but we can * support a LUN at the given LUN number. * * In the real world, though, it sounds like things are a little * different: * * - Linux, when presented with a LUN with the offline peripheral * qualifier, will create an sg driver instance for it. So when * you attach it to CTL, you wind up with a ton of sg driver * instances. (One for every LUN that Linux bothered to probe.) * Linux does this despite the fact that it issues a REPORT LUNs * to LUN 0 to get the inventory of supported LUNs. * * - There is other anecdotal evidence (from Emulex folks) about * arrays that use the offline peripheral qualifier for LUNs that * are on the "passive" path in an active/passive array. * * So the solution is to provide a hopefully reasonable default * (return bad/no LUN) and allow the user to change the behavior * with a tunable/sysctl variable. */ if (lun != NULL) inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | lun->be_lun->lun_type; else if (softc->inquiry_pq_no_lun == 0) inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; else inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE; /* RMB in byte 2 is 0 */ inq_ptr->version = SCSI_REV_SPC4; /* * According to SAM-3, even if a device only supports a single * level of LUN addressing, it should still set the HISUP bit: * * 4.9.1 Logical unit numbers overview * * All logical unit number formats described in this standard are * hierarchical in structure even when only a single level in that * hierarchy is used. The HISUP bit shall be set to one in the * standard INQUIRY data (see SPC-2) when any logical unit number * format described in this standard is used. Non-hierarchical * formats are outside the scope of this standard. * * Therefore we set the HiSup bit here. * * The response format is 2, per SPC-3. */ inq_ptr->response_format = SID_HiSup | 2; inq_ptr->additional_length = data_len - (offsetof(struct scsi_inquiry_data, additional_length) + 1); CTL_DEBUG_PRINT(("additional_length = %d\n", inq_ptr->additional_length)); inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT; /* 16 bit addressing */ if (port_type == CTL_PORT_SCSI) inq_ptr->spc2_flags = SPC2_SID_ADDR16; /* XXX set the SID_MultiP bit here if we're actually going to respond on multiple ports */ inq_ptr->spc2_flags |= SPC2_SID_MultiP; /* 16 bit data bus, synchronous transfers */ if (port_type == CTL_PORT_SCSI) inq_ptr->flags = SID_WBus16 | SID_Sync; /* * XXX KDM do we want to support tagged queueing on the control * device at all? */ if ((lun == NULL) || (lun->be_lun->lun_type != T_PROCESSOR)) inq_ptr->flags |= SID_CmdQue; /* * Per SPC-3, unused bytes in ASCII strings are filled with spaces. * We have 8 bytes for the vendor name, and 16 bytes for the device * name and 4 bytes for the revision.
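 * So a "vendor" option of "ACME" goes out on the wire as "ACME    "
 * (four trailing spaces), while values longer than the field are
 * silently truncated.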
*/ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, "vendor")) == NULL) { strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor)); } else { memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor)); strncpy(inq_ptr->vendor, val, min(sizeof(inq_ptr->vendor), strlen(val))); } if (lun == NULL) { strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); } else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) { switch (lun->be_lun->lun_type) { case T_DIRECT: strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT, sizeof(inq_ptr->product)); break; case T_PROCESSOR: strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT, sizeof(inq_ptr->product)); break; default: strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT, sizeof(inq_ptr->product)); break; } } else { memset(inq_ptr->product, ' ', sizeof(inq_ptr->product)); strncpy(inq_ptr->product, val, min(sizeof(inq_ptr->product), strlen(val))); } /* * XXX make this a macro somewhere so it automatically gets * incremented when we make changes. */ if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options, "revision")) == NULL) { strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision)); } else { memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision)); strncpy(inq_ptr->revision, val, min(sizeof(inq_ptr->revision), strlen(val))); } /* * For parallel SCSI, we support double transition and single * transition clocking. We also support QAS (Quick Arbitration * and Selection) and Information Unit transfers on both the * control and array devices. */ if (port_type == CTL_PORT_SCSI) inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS | SID_SPI_IUS; /* SAM-5 (no version claimed) */ scsi_ulto2b(0x00A0, inq_ptr->version1); /* SPC-4 (no version claimed) */ scsi_ulto2b(0x0460, inq_ptr->version2); if (port_type == CTL_PORT_FC) { /* FCP-2 ANSI INCITS.350:2003 */ scsi_ulto2b(0x0917, inq_ptr->version3); } else if (port_type == CTL_PORT_SCSI) { /* SPI-4 ANSI INCITS.362:200x */ scsi_ulto2b(0x0B56, inq_ptr->version3); } else if (port_type == CTL_PORT_ISCSI) { /* iSCSI (no version claimed) */ scsi_ulto2b(0x0960, inq_ptr->version3); } else if (port_type == CTL_PORT_SAS) { /* SAS (no version claimed) */ scsi_ulto2b(0x0BE0, inq_ptr->version3); } if (lun == NULL) { /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); } else { switch (lun->be_lun->lun_type) { case T_DIRECT: /* SBC-4 (no version claimed) */ scsi_ulto2b(0x0600, inq_ptr->version4); break; case T_PROCESSOR: default: break; } } ctl_set_success(ctsio); ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; ctsio->be_move_done = ctl_config_move_done; ctl_datamove((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } int ctl_inquiry(struct ctl_scsiio *ctsio) { struct scsi_inquiry *cdb; int retval; CTL_DEBUG_PRINT(("ctl_inquiry\n")); cdb = (struct scsi_inquiry *)ctsio->cdb; if (cdb->byte2 & SI_EVPD) retval = ctl_inquiry_evpd(ctsio); else if (cdb->page_code == 0) retval = ctl_inquiry_std(ctsio); else { ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 2, /*bit_valid*/ 0, /*bit*/ 0); ctl_done((union ctl_io *)ctsio); return (CTL_RETVAL_COMPLETE); } return (retval); } /* * For known CDB types, parse the LBA and length. 
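 * READ(6) and WRITE(6) need special care: the LBA is only 21 bits
 * wide, so an address of {0xe1, 0x23, 0x45} decodes to 0x012345 once
 * the three high bits of the first byte (the obsolete LUN field) are
 * masked off.  A caller that just wants the affected extent does:
 *
 *	uint64_t lba, len;
 *	if (ctl_get_lba_len(io, &lba, &len) == 0)
 *		... the command touches blocks [lba, lba + len) ...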
*/ static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len) { if (io->io_hdr.io_type != CTL_IO_SCSI) return (1); switch (io->scsiio.cdb[0]) { case COMPARE_AND_WRITE: { struct scsi_compare_and_write *cdb; cdb = (struct scsi_compare_and_write *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = cdb->length; break; } case READ_6: case WRITE_6: { struct scsi_rw_6 *cdb; cdb = (struct scsi_rw_6 *)io->scsiio.cdb; *lba = scsi_3btoul(cdb->addr); /* only 5 bits are valid in the most significant address byte */ *lba &= 0x1fffff; *len = cdb->length; break; } case READ_10: case WRITE_10: { struct scsi_rw_10 *cdb; cdb = (struct scsi_rw_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_VERIFY_10: { struct scsi_write_verify_10 *cdb; cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case READ_12: case WRITE_12: { struct scsi_rw_12 *cdb; cdb = (struct scsi_rw_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_12: { struct scsi_write_verify_12 *cdb; cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case READ_16: case WRITE_16: case WRITE_ATOMIC_16: { struct scsi_rw_16 *cdb; cdb = (struct scsi_rw_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_VERIFY_16: { struct scsi_write_verify_16 *cdb; cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case WRITE_SAME_10: { struct scsi_write_same_10 *cdb; cdb = (struct scsi_write_same_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case WRITE_SAME_16: { struct scsi_write_same_16 *cdb; cdb = (struct scsi_write_same_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_10: { struct scsi_verify_10 *cdb; cdb = (struct scsi_verify_10 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_2btoul(cdb->length); break; } case VERIFY_12: { struct scsi_verify_12 *cdb; cdb = (struct scsi_verify_12 *)io->scsiio.cdb; *lba = scsi_4btoul(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case VERIFY_16: { struct scsi_verify_16 *cdb; cdb = (struct scsi_verify_16 *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = scsi_4btoul(cdb->length); break; } case UNMAP: { *lba = 0; *len = UINT64_MAX; break; } case SERVICE_ACTION_IN: { /* GET LBA STATUS */ struct scsi_get_lba_status *cdb; cdb = (struct scsi_get_lba_status *)io->scsiio.cdb; *lba = scsi_8btou64(cdb->addr); *len = UINT32_MAX; break; } default: return (1); break; /* NOTREACHED */ } return (0); } static ctl_action ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2, bool seq) { uint64_t endlba1, endlba2; endlba1 = lba1 + len1 - (seq ? 0 : 1); endlba2 = lba2 + len2 - 1; if ((endlba1 < lba2) || (endlba2 < lba1)) return (CTL_ACTION_PASS); else return (CTL_ACTION_BLOCK); } static int ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2) { struct ctl_ptr_len_flags *ptrlen; struct scsi_unmap_desc *buf, *end, *range; uint64_t lba; uint32_t len; /* If not UNMAP -- go other way. */ if (io->io_hdr.io_type != CTL_IO_SCSI || io->scsiio.cdb[0] != UNMAP) return (CTL_ACTION_ERROR); /* If UNMAP without data -- block and wait for data. 
*/ ptrlen = (struct ctl_ptr_len_flags *) &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 || ptrlen->ptr == NULL) return (CTL_ACTION_BLOCK); /* UNMAP with data -- check for collision. */ buf = (struct scsi_unmap_desc *)ptrlen->ptr; end = buf + ptrlen->len / sizeof(*buf); for (range = buf; range < end; range++) { lba = scsi_8btou64(range->lba); len = scsi_4btoul(range->length); if ((lba < lba2 + len2) && (lba + len > lba2)) return (CTL_ACTION_BLOCK); } return (CTL_ACTION_PASS); } static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq) { uint64_t lba1, lba2; uint64_t len1, len2; int retval; if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); retval = ctl_extent_check_unmap(io1, lba2, len2); if (retval != CTL_ACTION_ERROR) return (retval); if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq)); } static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2) { uint64_t lba1, lba2; uint64_t len1, len2; if (ctl_get_lba_len(io1, &lba1, &len1) != 0) return (CTL_ACTION_ERROR); if (ctl_get_lba_len(io2, &lba2, &len2) != 0) return (CTL_ACTION_ERROR); if (lba1 + len1 == lba2) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); } static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *ooa_io) { const struct ctl_cmd_entry *pending_entry, *ooa_entry; ctl_serialize_action *serialize_row; /* * The initiator attempted multiple untagged commands at the same * time. Can't do that. */ if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid.id == ooa_io->io_hdr.nexus.initid.id)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP); /* * The initiator attempted to send multiple tagged commands with * the same ID. (It's fine if different initiators have the same * tag ID.) * * Even if all of those conditions are true, we don't kill the I/O * if the command ahead of us has been aborted. We won't end up * sending it to the FETD, and it's perfectly legal to resend a * command with the same tag number as long as the previous * instance of this tag number has been aborted somehow. */ if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED) && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num) && ((pending_io->io_hdr.nexus.targ_port == ooa_io->io_hdr.nexus.targ_port) && (pending_io->io_hdr.nexus.initid.id == ooa_io->io_hdr.nexus.initid.id)) && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT | CTL_FLAG_STATUS_SENT)) == 0)) return (CTL_ACTION_OVERLAP_TAG); /* * If we get a head of queue tag, SAM-3 says that we should * immediately execute it. * * What happens if this command would normally block for some other * reason? e.g. a request sense with a head of queue tag * immediately after a write. Normally that would block, but this * will result in its getting executed immediately... * * We currently return "pass" instead of "skip", so we'll end up * going through the rest of the queue to check for overlapped tags. * * XXX KDM check for other types of blockage first?? */ if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) return (CTL_ACTION_PASS); /* * Ordered tags have to block until all items ahead of them * have completed. 
If we get called with an ordered tag, we always * block, if something else is ahead of us in the queue. */ if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED) return (CTL_ACTION_BLOCK); /* * Simple tags get blocked until all head of queue and ordered tags * ahead of them have completed. I'm lumping untagged commands in * with simple tags here. XXX KDM is that the right thing to do? */ if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED) || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE)) && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE) || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED))) return (CTL_ACTION_BLOCK); pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL); ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL); serialize_row = ctl_serialize_table[ooa_entry->seridx]; switch (serialize_row[pending_entry->seridx]) { case CTL_SER_BLOCK: return (CTL_ACTION_BLOCK); case CTL_SER_EXTENT: return (ctl_extent_check(ooa_io, pending_io, (lun->serseq == CTL_LUN_SERSEQ_ON))); case CTL_SER_EXTENTOPT: if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (ctl_extent_check(ooa_io, pending_io, (lun->serseq == CTL_LUN_SERSEQ_ON))); return (CTL_ACTION_PASS); case CTL_SER_EXTENTSEQ: if (lun->serseq != CTL_LUN_SERSEQ_OFF) return (ctl_extent_check_seq(ooa_io, pending_io)); return (CTL_ACTION_PASS); case CTL_SER_PASS: return (CTL_ACTION_PASS); case CTL_SER_BLOCKOPT: if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED) return (CTL_ACTION_BLOCK); return (CTL_ACTION_PASS); case CTL_SER_SKIP: return (CTL_ACTION_SKIP); default: panic("invalid serialization value %d", serialize_row[pending_entry->seridx]); } return (CTL_ACTION_ERROR); } /* * Check for blockage or overlaps against the OOA (Order Of Arrival) queue. * Assumptions: * - pending_io is generally either incoming, or on the blocked queue * - starting I/O is the I/O we want to start the check with. */ static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, union ctl_io *starting_io) { union ctl_io *ooa_io; ctl_action action; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run back along the OOA queue, starting with the current * blocked I/O and going through every I/O before it on the * queue. If starting_io is NULL, we'll just end up returning * CTL_ACTION_PASS. */ for (ooa_io = starting_io; ooa_io != NULL; ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq, ooa_links)){ /* * This routine just checks to see whether * cur_blocked is blocked by ooa_io, which is ahead * of it in the queue. It doesn't queue/dequeue * cur_blocked. */ action = ctl_check_for_blockage(lun, pending_io, ooa_io); switch (action) { case CTL_ACTION_BLOCK: case CTL_ACTION_OVERLAP: case CTL_ACTION_OVERLAP_TAG: case CTL_ACTION_SKIP: case CTL_ACTION_ERROR: return (action); break; /* NOTREACHED */ case CTL_ACTION_PASS: break; default: panic("invalid action %d", action); break; /* NOTREACHED */ } } return (CTL_ACTION_PASS); } /* * Assumptions: * - An I/O has just completed, and has been removed from the per-LUN OOA * queue, so some items on the blocked queue may now be unblocked. */ static int ctl_check_blocked(struct ctl_lun *lun) { union ctl_io *cur_blocked, *next_blocked; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run forward from the head of the blocked queue, checking each * entry against the I/Os prior to it on the OOA queue to see if * there is still any blockage. 
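 * (ctl_check_ooa() walks everything ahead of the candidate on the OOA
 * queue, so a full unblocking pass is quadratic in queue depth in the
 * worst case.)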
* * We cannot use the TAILQ_FOREACH() macro, because it can't deal * with our removing a variable on it while it is traversing the * list. */ for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); cur_blocked != NULL; cur_blocked = next_blocked) { union ctl_io *prev_ooa; ctl_action action; next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, blocked_links); prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, ctl_ooaq, ooa_links); /* * If cur_blocked happens to be the first item in the OOA * queue now, prev_ooa will be NULL, and the action * returned will just be CTL_ACTION_PASS. */ action = ctl_check_ooa(lun, cur_blocked, prev_ooa); switch (action) { case CTL_ACTION_BLOCK: /* Nothing to do here, still blocked */ break; case CTL_ACTION_OVERLAP: case CTL_ACTION_OVERLAP_TAG: /* * This shouldn't happen! In theory we've already * checked this command for overlap... */ break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: { const struct ctl_cmd_entry *entry; int isc_retval; /* * The skip case shouldn't happen, this transaction * should have never made it onto the blocked queue. */ /* * This I/O is no longer blocked, we can remove it * from the blocked queue. Since this is a TAILQ * (doubly linked list), we can do O(1) removals * from any place on the list. */ TAILQ_REMOVE(&lun->blocked_queue, &cur_blocked->io_hdr, blocked_links); cur_blocked->io_hdr.flags &= ~CTL_FLAG_BLOCKED; if (cur_blocked->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC){ /* * Need to send IO back to original side to * run */ union ctl_ha_msg msg_info; msg_info.hdr.original_sc = cur_blocked->io_hdr.original_sc; msg_info.hdr.serializing_sc = cur_blocked; msg_info.hdr.msg_type = CTL_MSG_R2R; if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, sizeof(msg_info), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:Check Blocked error from " "ctl_ha_msg_send %d\n", isc_retval); } break; } entry = ctl_get_cmd_entry(&cur_blocked->scsiio, NULL); /* * Check this I/O for LUN state changes that may * have happened while this command was blocked. * The LUN state may have been changed by a command * ahead of us in the queue, so we need to re-check * for any states that can be caused by SCSI * commands. */ if (ctl_scsiio_lun_check(lun, entry, &cur_blocked->scsiio) == 0) { cur_blocked->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(cur_blocked); } else ctl_done(cur_blocked); break; } default: /* * This probably shouldn't happen -- we shouldn't * get CTL_ACTION_ERROR, or anything else. */ break; } } return (CTL_RETVAL_COMPLETE); } /* * This routine (with one exception) checks LUN flags that can be set by * commands ahead of us in the OOA queue. These flags have to be checked * when a command initially comes in, and when we pull a command off the * blocked queue and are preparing to execute it. The reason we have to * check these flags for commands on the blocked queue is that the LUN * state may have been changed by a command ahead of us while we're on the * blocked queue. * * Ordering is somewhat important with these checks, so please pay * careful attention to the placement of any new checks. */ static int ctl_scsiio_lun_check(struct ctl_lun *lun, const struct ctl_cmd_entry *entry, struct ctl_scsiio *ctsio) { struct ctl_softc *softc = lun->ctl_softc; int retval; uint32_t residx; retval = 0; mtx_assert(&lun->lun_lock, MA_OWNED); /* * If this shelf is a secondary shelf controller, we have to reject * any media access commands. 
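 * Commands whose entry carries CTL_CMD_FLAG_OK_ON_SECONDARY (the
 * discovery-style commands) are still serviced, so initiators can see
 * the passive path; everything else gets the standby sense set below.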
*/ if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 && (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) { ctl_set_lun_standby(ctsio); retval = 1; goto bailout; } if (entry->pattern & CTL_LUN_PAT_WRITE) { if (lun->flags & CTL_LUN_READONLY) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE); retval = 1; goto bailout; } if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT] .eca_and_aen & SCP_SWP) != 0) { ctl_set_sense(ctsio, /*current_error*/ 1, /*sense_key*/ SSD_KEY_DATA_PROTECT, /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE); retval = 1; goto bailout; } } /* * Check for a reservation conflict. If this command isn't allowed * even on reserved LUNs, and if this initiator isn't the one who * reserved us, reject the command with a reservation conflict. */ residx = ctl_get_resindex(&ctsio->io_hdr.nexus); if ((lun->flags & CTL_LUN_RESERVED) && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) { if (lun->res_idx != residx) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 || (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) { /* No reservation or command is allowed. */; } else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) && (lun->res_type == SPR_TYPE_WR_EX || lun->res_type == SPR_TYPE_WR_EX_RO || lun->res_type == SPR_TYPE_WR_EX_AR)) { /* The command is allowed for Write Exclusive resv. */; } else { /* * if we aren't registered or it's a res holder type * reservation and this isn't the res holder then set a * conflict. */ if (ctl_get_prkey(lun, residx) == 0 || (residx != lun->pr_res_idx && lun->res_type < 4)) { ctl_set_reservation_conflict(ctsio); retval = 1; goto bailout; } } if ((lun->flags & CTL_LUN_OFFLINE) && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) { ctl_set_lun_not_ready(ctsio); retval = 1; goto bailout; } /* * If the LUN is stopped, see if this particular command is allowed * for a stopped lun. Otherwise, reject it with 0x04,0x02. */ if ((lun->flags & CTL_LUN_STOPPED) && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) { /* "Logical unit not ready, initializing cmd. required" */ ctl_set_lun_stopped(ctsio); retval = 1; goto bailout; } if ((lun->flags & CTL_LUN_INOPERABLE) && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) { /* "Medium format corrupted" */ ctl_set_medium_format_corrupted(ctsio); retval = 1; goto bailout; } bailout: return (retval); } static void ctl_failover_io(union ctl_io *io, int have_lock) { ctl_set_busy(&io->scsiio); ctl_done(io); } static void ctl_failover(void) { struct ctl_lun *lun; struct ctl_softc *softc; union ctl_io *next_io, *pending_io; union ctl_io *io; int lun_idx; softc = control_softc; mtx_lock(&softc->ctl_lock); /* * Remove any cmds from the other SC from the rtr queue. These * will obviously only be for LUNs for which we're the primary. * We can't send status or get/send data for these commands. * Since they haven't been executed yet, we can just remove them. * We'll either abort them or delete them below, depending on * which HA mode we're in. 
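 * (In SER_ONLY mode the other side's commands were only mirrored here
 * for serialization, so they can simply be freed; in XFER mode the
 * peer may still own data buffers for them, so they are aborted and
 * left to drain instead.)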
*/ #ifdef notyet mtx_lock(&softc->queue_lock); for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr, ctl_io_hdr, links); } mtx_unlock(&softc->queue_lock); #endif for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) { lun = softc->ctl_luns[lun_idx]; if (lun==NULL) continue; /* * Processor LUNs are primary on both sides. * XXX will this always be true? */ if (lun->be_lun->lun_type == T_PROCESSOR) continue; if ((lun->flags & CTL_LUN_PRIMARY_SC) && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { printf("FAILOVER: primary lun %d\n", lun_idx); /* * Remove all commands from the other SC, first from the * blocked queue, then from the OOA queue. Once we have * removed them, call ctl_check_blocked() to see if there * is anything that can run. */ for (io = (union ctl_io *)TAILQ_FIRST( &lun->blocked_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT( &io->io_hdr, blocked_links); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { TAILQ_REMOVE(&lun->blocked_queue, &io->io_hdr,blocked_links); io->io_hdr.flags &= ~CTL_FLAG_BLOCKED; TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_free_io(io); } } for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT( &io->io_hdr, ooa_links); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) { TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_free_io(io); } } ctl_check_blocked(lun); } else if ((lun->flags & CTL_LUN_PRIMARY_SC) && (softc->ha_mode == CTL_HA_MODE_XFER)) { printf("FAILOVER: primary lun %d\n", lun_idx); /* * Abort all commands from the other SC. We can't * send status back for them now. These should get * cleaned up when they are completed or come out * for a datamove operation. */ for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); io != NULL; io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT( &io->io_hdr, ooa_links); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) io->io_hdr.flags |= CTL_FLAG_ABORT; } } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) && (softc->ha_mode == CTL_HA_MODE_XFER)) { printf("FAILOVER: secondary lun %d\n", lun_idx); lun->flags |= CTL_LUN_PRIMARY_SC; /* * We send all I/O that was sent to this controller * and redirected to the other side back with * busy status, and have the initiator retry it. * Figuring out how much data has been transferred, * etc. and picking up where we left off would be * very tricky. * * XXX KDM need to remove I/O from the blocked * queue as well! */ for (pending_io = (union ctl_io *)TAILQ_FIRST( &lun->ooa_queue); pending_io != NULL; pending_io = next_io) { next_io = (union ctl_io *)TAILQ_NEXT( &pending_io->io_hdr, ooa_links); pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; if (pending_io->io_hdr.flags & CTL_FLAG_IO_ACTIVE) { pending_io->io_hdr.flags |= CTL_FLAG_FAILOVER; } else { ctl_set_busy(&pending_io->scsiio); ctl_done(pending_io); } } ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0) && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) { printf("FAILOVER: secondary lun %d\n", lun_idx); /* * If the first I/O on the OOA queue is not on the RtR * queue, add it.
*/ lun->flags |= CTL_LUN_PRIMARY_SC; pending_io = (union ctl_io *)TAILQ_FIRST( &lun->ooa_queue); if (pending_io == NULL) { printf("Nothing on OOA queue\n"); continue; } pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; if ((pending_io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) == 0) { pending_io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(pending_io); } #if 0 else { printf("Tag 0x%04x is running\n", pending_io->scsiio.tag_num); } #endif next_io = (union ctl_io *)TAILQ_NEXT( &pending_io->io_hdr, ooa_links); for (pending_io = next_io; pending_io != NULL; pending_io = next_io) { pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; next_io = (union ctl_io *)TAILQ_NEXT( &pending_io->io_hdr, ooa_links); if (pending_io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR) { #if 0 printf("Tag 0x%04x is running\n", pending_io->scsiio.tag_num); #endif continue; } switch (ctl_check_ooa(lun, pending_io, (union ctl_io *)TAILQ_PREV( &pending_io->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: TAILQ_INSERT_TAIL(&lun->blocked_queue, &pending_io->io_hdr, blocked_links); pending_io->io_hdr.flags |= CTL_FLAG_BLOCKED; break; case CTL_ACTION_PASS: case CTL_ACTION_SKIP: pending_io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr(pending_io); break; case CTL_ACTION_OVERLAP: ctl_set_overlapped_cmd( (struct ctl_scsiio *)pending_io); ctl_done(pending_io); break; case CTL_ACTION_OVERLAP_TAG: ctl_set_overlapped_tag( (struct ctl_scsiio *)pending_io, pending_io->scsiio.tag_num & 0xff); ctl_done(pending_io); break; case CTL_ACTION_ERROR: default: ctl_set_internal_failure( (struct ctl_scsiio *)pending_io, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done(pending_io); break; } } ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); } else { panic("Unhandled HA mode failover, LUN flags = %#x, " "ha_mode = %#x", lun->flags, softc->ha_mode); } } ctl_pause_rtr = 0; mtx_unlock(&softc->ctl_lock); } static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, ctl_ua_type ua_type) { struct ctl_lun *lun; ctl_ua_type *pu; mtx_assert(&ctl_softc->ctl_lock, MA_OWNED); STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) { mtx_lock(&lun->lun_lock); pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; if (pu != NULL) pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua_type; mtx_unlock(&lun->lun_lock); } } static int ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio) { struct ctl_lun *lun; const struct ctl_cmd_entry *entry; uint32_t initidx, targ_lun; int retval; retval = 0; lun = NULL; targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; if ((targ_lun < CTL_MAX_LUNS) && ((lun = softc->ctl_luns[targ_lun]) != NULL)) { /* * If the LUN is invalid, pretend that it doesn't exist. * It will go away as soon as all pending I/O has been * completed. */ mtx_lock(&lun->lun_lock); if (lun->flags & CTL_LUN_DISABLED) { mtx_unlock(&lun->lun_lock); lun = NULL; ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; } else { ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun; ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = lun->be_lun; if (lun->be_lun->lun_type == T_PROCESSOR) { ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV; } /* * Every I/O goes into the OOA queue for a * particular LUN, and stays there until completion. */ TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); } } else { ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL; ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL; } /* Get command entry and return error if it is unsupported.
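* ctl_validate_command() also checks the CDB bytes against the usage mask * for the opcode and completes the I/O itself on failure, so a NULL return * means status has already been sent to the initiator.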
*/ entry = ctl_validate_command(ctsio); if (entry == NULL) { if (lun) mtx_unlock(&lun->lun_lock); return (retval); } ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK; /* * Check to see whether we can send this command to LUNs that don't * exist. This should pretty much only be the case for inquiry * and request sense. Further checks below really require having * a LUN, so we can't check the command any further without one. Just * put it on the rtr queue. */ if (lun == NULL) { if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) { ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; ctl_enqueue_rtr((union ctl_io *)ctsio); return (retval); } ctl_set_unsupported_lun(ctsio); ctl_done((union ctl_io *)ctsio); CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n")); return (retval); } else { /* * Make sure we support this particular command on this LUN. * E.g., we don't support writes to the control LUN. */ if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) { mtx_unlock(&lun->lun_lock); ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (retval); } } initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); #ifdef CTL_WITH_CA /* * If we've got a request sense, it'll clear the contingent * allegiance condition. Otherwise, if we have a CA condition for * this initiator, clear it, because it sent down a command other * than request sense. */ if ((ctsio->cdb[0] != REQUEST_SENSE) && (ctl_is_set(lun->have_ca, initidx))) ctl_clear_mask(lun->have_ca, initidx); #endif /* * If the command has this flag set, it handles its own unit * attention reporting, so we shouldn't do anything. Otherwise we * check for any pending unit attentions, and send them back to the * initiator. We only do this when a command initially comes in, * not when we pull it off the blocked queue. * * According to SAM-3, section 5.3.2, the order that things get * presented back to the host is basically unit attentions caused * by some sort of reset event, busy status, reservation conflicts * or task set full, and finally any other status. * * One issue here is that some of the unit attentions we report * don't fall into the "reset" category (e.g. "reported luns data * has changed"). So reporting it here, before the reservation * check, may be technically wrong. I guess the only thing to do * would be to check for and report the reset events here, and then * check for the other unit attention types after we check for a * reservation conflict. * * XXX KDM need to fix this */ if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) { ctl_ua_type ua_type; scsi_sense_data_type sense_format; if (lun->flags & CTL_LUN_SENSE_DESC) sense_format = SSD_TYPE_DESC; else sense_format = SSD_TYPE_FIXED; ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data, sense_format); if (ua_type != CTL_UA_NONE) { mtx_unlock(&lun->lun_lock); ctsio->scsi_status = SCSI_STATUS_CHECK_COND; ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; ctsio->sense_len = SSD_FULL_SIZE; ctl_done((union ctl_io *)ctsio); return (retval); } } if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) { mtx_unlock(&lun->lun_lock); ctl_done((union ctl_io *)ctsio); return (retval); } /* * XXX CHD this is where we want to send IO to other side if * this LUN is secondary on this SC. We will need to make a copy * of the IO and flag the IO on this side as SENT_2OTHER and flag * the copy we send as FROM_OTHER. * We also need to stuff the address of the original IO so we can * find it easily.
Something similar will need to be done on the other * side so when we are done we can find the copy. */ if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) { union ctl_ha_msg msg_info; int isc_retval; ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; msg_info.hdr.msg_type = CTL_MSG_SERIALIZE; msg_info.hdr.original_sc = (union ctl_io *)ctsio; #if 0 printf("1. ctsio %p\n", ctsio); #endif msg_info.hdr.serializing_sc = NULL; msg_info.hdr.nexus = ctsio->io_hdr.nexus; msg_info.scsi.tag_num = ctsio->tag_num; msg_info.scsi.tag_type = ctsio->tag_type; memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN); ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL, (void *)&msg_info, sizeof(msg_info), 0)) > CTL_HA_STATUS_SUCCESS) { printf("CTL:precheck, ctl_ha_msg_send returned %d\n", isc_retval); printf("CTL:opcode is %x\n", ctsio->cdb[0]); } else { #if 0 printf("CTL:Precheck sent msg, opcode is %x\n", opcode); #endif } /* * XXX KDM this I/O is off the incoming queue, but hasn't * been inserted on any other queue. We may need to come * up with a holding queue while we wait for serialization * so that we have an idea of what we're waiting for from * the other side. */ mtx_unlock(&lun->lun_lock); return (retval); } switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, ooa_links))) { case CTL_ACTION_BLOCK: ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, blocked_links); mtx_unlock(&lun->lun_lock); return (retval); case CTL_ACTION_PASS: case CTL_ACTION_SKIP: ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_cmd(ctsio); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_OVERLAP_TAG: mtx_unlock(&lun->lun_lock); ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff); ctl_done((union ctl_io *)ctsio); break; case CTL_ACTION_ERROR: default: mtx_unlock(&lun->lun_lock); ctl_set_internal_failure(ctsio, /*sks_valid*/ 0, /*retry_count*/ 0); ctl_done((union ctl_io *)ctsio); break; } return (retval); } const struct ctl_cmd_entry * ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa) { const struct ctl_cmd_entry *entry; int service_action; entry = &ctl_cmd_table[ctsio->cdb[0]]; if (sa) *sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0); if (entry->flags & CTL_CMD_FLAG_SA5) { service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK; entry = &((const struct ctl_cmd_entry *) entry->execute)[service_action]; } return (entry); } const struct ctl_cmd_entry * ctl_validate_command(struct ctl_scsiio *ctsio) { const struct ctl_cmd_entry *entry; int i, sa; uint8_t diff; entry = ctl_get_cmd_entry(ctsio, &sa); if (entry->execute == NULL) { if (sa) ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 4); else ctl_set_invalid_opcode(ctsio); ctl_done((union ctl_io *)ctsio); return (NULL); } KASSERT(entry->length > 0, ("No length defined for command 0x%02x/0x%02x", ctsio->cdb[0], ctsio->cdb[1])); for (i = 1; i < entry->length; i++) { diff = ctsio->cdb[i] & ~entry->usage[i - 1]; if (diff == 0) continue; ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, /*command*/ 1, /*field*/ i, /*bit_valid*/ 1, /*bit*/ fls(diff) - 1); ctl_done((union ctl_io *)ctsio); return (NULL); } return (entry); } static int ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry) { switch (lun_type) { case T_PROCESSOR: if (((entry->flags &
CTL_CMD_FLAG_OK_ON_PROC) == 0) && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) return (0); break; case T_DIRECT: if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) && ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0)) return (0); break; default: return (0); } return (1); } static int ctl_scsiio(struct ctl_scsiio *ctsio) { int retval; const struct ctl_cmd_entry *entry; retval = CTL_RETVAL_COMPLETE; CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0])); entry = ctl_get_cmd_entry(ctsio, NULL); /* * If this I/O has been aborted, just send it straight to * ctl_done() without executing it. */ if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) { ctl_done((union ctl_io *)ctsio); goto bailout; } /* * All the checks should have been handled by ctl_scsiio_precheck(). * We should be clear now to just execute the I/O. */ retval = entry->execute(ctsio); bailout: return (retval); } /* * Since we only implement one target right now, a bus reset simply resets * our single target. */ static int ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io) { return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET)); } static int ctl_target_reset(struct ctl_softc *softc, union ctl_io *io, ctl_ua_type ua_type) { struct ctl_lun *lun; int retval; if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; msg_info.hdr.nexus = io->io_hdr.nexus; if (ua_type==CTL_UA_TARG_RESET) msg_info.task.task_action = CTL_TASK_TARGET_RESET; else msg_info.task.task_action = CTL_TASK_BUS_RESET; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, (void *)&msg_info, sizeof(msg_info), 0)) { } } retval = 0; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) retval += ctl_lun_reset(lun, io, ua_type); mtx_unlock(&softc->ctl_lock); return (retval); } /* * The LUN should always be set. The I/O is optional, and is used to * distinguish between I/Os sent by this initiator, and by other * initiators. We set unit attention for initiators other than this one. * SAM-3 is vague on this point. It does say that a unit attention should * be established for other initiators when a LUN is reset (see section * 5.7.3), but it doesn't specifically say that the unit attention should * be established for this particular initiator when a LUN is reset. Here * is the relevant text, from SAM-3 rev 8: * * 5.7.2 When a SCSI initiator port aborts its own tasks * * When a SCSI initiator port causes its own task(s) to be aborted, no * notification that the task(s) have been aborted shall be returned to * the SCSI initiator port other than the completion response for the * command or task management function action that caused the task(s) to * be aborted and notification(s) associated with related effects of the * action (e.g., a reset unit attention condition). * * XXX KDM for now, we're setting unit attention for all initiators. */ static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type) { union ctl_io *xio; #if 0 uint32_t initidx; #endif #ifdef CTL_WITH_CA int i; #endif mtx_lock(&lun->lun_lock); /* * Run through the OOA queue and abort each I/O. 
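* Each I/O gets both CTL_FLAG_ABORT and CTL_FLAG_ABORT_STATUS, so the * abort is reflected in the completion status rather than the command * being silently dropped.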
*/ #if 0 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { #endif for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS; } /* * This version sets unit attention for every initiator; the #if 0 * variant below would skip the initiator that requested the reset. */ #if 0 initidx = ctl_get_initindex(&io->io_hdr.nexus); ctl_est_ua_all(lun, initidx, ua_type); #else ctl_est_ua_all(lun, -1, ua_type); #endif /* * A reset (any kind, really) clears reservations established with * RESERVE/RELEASE. It does not clear reservations established * with PERSISTENT RESERVE OUT, but we don't support that at the * moment anyway. See SPC-2, section 5.6. SPC-3 doesn't address * reservations made with the RESERVE/RELEASE commands, because * those commands are obsolete in SPC-3. */ lun->flags &= ~CTL_LUN_RESERVED; #ifdef CTL_WITH_CA for (i = 0; i < CTL_MAX_INITIATORS; i++) ctl_clear_mask(lun->have_ca, i); #endif mtx_unlock(&lun->lun_lock); return (0); } static void ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id, int other_sc) { union ctl_io *xio; mtx_assert(&lun->lun_lock, MA_OWNED); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time, of course. */ for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { if ((targ_port == UINT32_MAX || targ_port == xio->io_hdr.nexus.targ_port) && (init_id == UINT32_MAX || init_id == xio->io_hdr.nexus.initid.id)) { if (targ_port != xio->io_hdr.nexus.targ_port || init_id != xio->io_hdr.nexus.initid.id) xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; xio->io_hdr.flags |= CTL_FLAG_ABORT; if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; msg_info.hdr.nexus = xio->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = xio->scsiio.tag_num; msg_info.task.tag_type = xio->scsiio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; ctl_ha_msg_send(CTL_HA_CHAN_CTL, (void *)&msg_info, sizeof(msg_info), 0); } } } } static int ctl_abort_task_set(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun; uint32_t targ_lun; /* * Look up the LUN.
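* ctl_queue() stored the mapped LUN number in targ_mapped_lun; if it is * out of range or has no backing LUN, fail the task management function * by returning nonzero.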
*/ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) lun = softc->ctl_luns[targ_lun]; else { mtx_unlock(&softc->ctl_lock); return (1); } mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid.id, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } else { /* CTL_TASK_CLEAR_TASK_SET */ ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); } mtx_unlock(&lun->lun_lock); return (0); } static int ctl_i_t_nexus_reset(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_lun *lun; uint32_t initidx, residx; initidx = ctl_get_initindex(&io->io_hdr.nexus); residx = ctl_get_resindex(&io->io_hdr.nexus); mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { mtx_lock(&lun->lun_lock); ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.initid.id, (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); #ifdef CTL_WITH_CA ctl_clear_mask(lun->have_ca, initidx); #endif if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) lun->flags &= ~CTL_LUN_RESERVED; ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); return (0); } static int ctl_abort_task(union ctl_io *io) { union ctl_io *xio; struct ctl_lun *lun; struct ctl_softc *softc; #if 0 struct sbuf sb; char printbuf[128]; #endif int found; uint32_t targ_lun; softc = control_softc; found = 0; /* * Look up the LUN. */ targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) lun = softc->ctl_luns[targ_lun]; else { mtx_unlock(&softc->ctl_lock); return (1); } #if 0 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", lun->lun, io->taskio.tag_num, io->taskio.tag_type); #endif mtx_lock(&lun->lun_lock); mtx_unlock(&softc->ctl_lock); /* * Run through the OOA queue and attempt to find the given I/O. * The target port, initiator ID, tag type and tag number have to * match the values that we got from the initiator. If we have an * untagged command to abort, simply abort the first untagged command * we come to. We only allow one untagged command at a time of course. */ #if 0 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { #endif for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { #if 0 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", lun->lun, xio->scsiio.tag_num, xio->scsiio.tag_type, (xio->io_hdr.blocked_links.tqe_prev == NULL) ? "" : " BLOCKED", (xio->io_hdr.flags & CTL_FLAG_DMA_INPROG) ? " DMA" : "", (xio->io_hdr.flags & CTL_FLAG_ABORT) ? " ABORT" : "", (xio->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR ? " RTR" : "")); ctl_scsi_command_string(&xio->scsiio, NULL, &sb); sbuf_finish(&sb); printf("%s\n", sbuf_data(&sb)); #endif if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id) || (xio->io_hdr.flags & CTL_FLAG_ABORT)) continue; /* * If the abort says that the task is untagged, the * task in the queue must be untagged. Otherwise, * we just check to see whether the tag numbers * match. 
This is because the QLogic firmware * doesn't pass back the tag type in an abort * request. */ #if 0 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) || (xio->scsiio.tag_num == io->taskio.tag_num)) { #endif /* * XXX KDM we've got problems with FC, because it * doesn't send down a tag type with aborts. So we * can only really go by the tag number... * This may cause problems with parallel SCSI. * Need to figure that out!! */ if (xio->scsiio.tag_num == io->taskio.tag_num) { xio->io_hdr.flags |= CTL_FLAG_ABORT; found = 1; if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && !(lun->flags & CTL_LUN_PRIMARY_SC)) { union ctl_ha_msg msg_info; io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_ABORT_TASK; msg_info.task.tag_num = io->taskio.tag_num; msg_info.task.tag_type = io->taskio.tag_type; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc = NULL; #if 0 printf("Sent Abort to other side\n"); #endif if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, (void *)&msg_info, sizeof(msg_info), 0) != CTL_HA_STATUS_SUCCESS) { } } #if 0 printf("ctl_abort_task: found I/O to abort\n"); #endif } } mtx_unlock(&lun->lun_lock); if (found == 0) { /* * This isn't really an error. It's entirely possible for * the abort and command completion to cross on the wire. * It's more of an informative/diagnostic message. */ #if 0 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " "%d:%d:%d:%d tag %d type %d\n", io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_lun, io->taskio.tag_num, io->taskio.tag_type); #endif } return (0); } static void ctl_run_task(union ctl_io *io) { struct ctl_softc *softc = control_softc; int retval = 1; const char *task_desc; CTL_DEBUG_PRINT(("ctl_run_task\n")); KASSERT(io->io_hdr.io_type == CTL_IO_TASK, ("ctl_run_task: Unexpected io_type %d\n", io->io_hdr.io_type)); task_desc = ctl_scsi_task_string(&io->taskio); if (task_desc != NULL) { #ifdef NEEDTOPORT csevent_log(CSC_CTL | CSC_SHELF_SW | CTL_TASK_REPORT, csevent_LogType_Trace, csevent_Severity_Information, csevent_AlertLevel_Green, csevent_FRU_Firmware, csevent_FRU_Unknown, "CTL: received task: %s", task_desc); #endif } else { #ifdef NEEDTOPORT csevent_log(CSC_CTL | CSC_SHELF_SW | CTL_TASK_REPORT, csevent_LogType_Trace, csevent_Severity_Information, csevent_AlertLevel_Green, csevent_FRU_Firmware, csevent_FRU_Unknown, "CTL: received unknown task " "type: %d (%#x)", io->taskio.task_action, io->taskio.task_action); #endif } switch (io->taskio.task_action) { case CTL_TASK_ABORT_TASK: retval = ctl_abort_task(io); break; case CTL_TASK_ABORT_TASK_SET: case CTL_TASK_CLEAR_TASK_SET: retval = ctl_abort_task_set(io); break; case CTL_TASK_CLEAR_ACA: break; case CTL_TASK_I_T_NEXUS_RESET: retval = ctl_i_t_nexus_reset(io); break; case CTL_TASK_LUN_RESET: { struct ctl_lun *lun; uint32_t targ_lun; targ_lun = io->io_hdr.nexus.targ_mapped_lun; mtx_lock(&softc->ctl_lock); if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) lun = softc->ctl_luns[targ_lun]; else { mtx_unlock(&softc->ctl_lock); retval = 1; break; } if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg_info; io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; msg_info.hdr.nexus = io->io_hdr.nexus; msg_info.task.task_action = CTL_TASK_LUN_RESET; msg_info.hdr.original_sc = NULL; msg_info.hdr.serializing_sc
= NULL; if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL, (void *)&msg_info, sizeof(msg_info), 0)) { } } retval = ctl_lun_reset(lun, io, CTL_UA_LUN_RESET); mtx_unlock(&softc->ctl_lock); break; } case CTL_TASK_TARGET_RESET: retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); break; case CTL_TASK_BUS_RESET: retval = ctl_bus_reset(softc, io); break; case CTL_TASK_PORT_LOGIN: break; case CTL_TASK_PORT_LOGOUT: break; default: printf("ctl_run_task: got unknown task management event %d\n", io->taskio.task_action); break; } if (retval == 0) io->io_hdr.status = CTL_SUCCESS; else io->io_hdr.status = CTL_ERROR; ctl_done(io); } /* * For HA operation. Handle commands that come in from the other * controller. */ static void ctl_handle_isc(union ctl_io *io) { int free_io; struct ctl_lun *lun; struct ctl_softc *softc; uint32_t targ_lun; softc = control_softc; targ_lun = io->io_hdr.nexus.targ_mapped_lun; lun = softc->ctl_luns[targ_lun]; switch (io->io_hdr.msg_type) { case CTL_MSG_SERIALIZE: free_io = ctl_serialize_other_sc_cmd(&io->scsiio); break; case CTL_MSG_R2R: { const struct ctl_cmd_entry *entry; /* * This is only used in SER_ONLY mode. */ free_io = 0; entry = ctl_get_cmd_entry(&io->scsiio, NULL); mtx_lock(&lun->lun_lock); if (ctl_scsiio_lun_check(lun, entry, (struct ctl_scsiio *)io) != 0) { mtx_unlock(&lun->lun_lock); ctl_done(io); break; } io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; mtx_unlock(&lun->lun_lock); ctl_enqueue_rtr(io); break; } case CTL_MSG_FINISH_IO: if (softc->ha_mode == CTL_HA_MODE_XFER) { free_io = 0; ctl_done(io); } else { free_io = 1; mtx_lock(&lun->lun_lock); TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); ctl_check_blocked(lun); mtx_unlock(&lun->lun_lock); } break; case CTL_MSG_PERS_ACTION: ctl_hndl_per_res_out_on_other_sc( (union ctl_ha_msg *)&io->presio.pr_msg); free_io = 1; break; case CTL_MSG_BAD_JUJU: free_io = 0; ctl_done(io); break; case CTL_MSG_DATAMOVE: /* Only used in XFER mode */ free_io = 0; ctl_datamove_remote(io); break; case CTL_MSG_DATAMOVE_DONE: /* Only used in XFER mode */ free_io = 0; io->scsiio.be_move_done(io); break; default: free_io = 1; printf("%s: Invalid message type %d\n", __func__, io->io_hdr.msg_type); break; } if (free_io) ctl_free_io(io); } /* * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if * there is no match. */ static ctl_lun_error_pattern ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc) { const struct ctl_cmd_entry *entry; ctl_lun_error_pattern filtered_pattern, pattern; pattern = desc->error_pattern; /* * XXX KDM we need more data passed into this function to match a * custom pattern, and we actually need to implement custom pattern * matching. */ if (pattern & CTL_LUN_PAT_CMD) return (CTL_LUN_PAT_CMD); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY) return (CTL_LUN_PAT_ANY); entry = ctl_get_cmd_entry(ctsio, NULL); filtered_pattern = entry->pattern & pattern; /* * If the user requested specific flags in the pattern (e.g. * CTL_LUN_PAT_RANGE), make sure the command supports all of those * flags. * * If the user did not specify any flags, it doesn't matter whether * or not the command supports the flags. */ if ((filtered_pattern & ~CTL_LUN_PAT_MASK) != (pattern & ~CTL_LUN_PAT_MASK)) return (CTL_LUN_PAT_NONE); /* * If the user asked for a range check, see if the requested LBA * range overlaps with this command's LBA range. 
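* A command whose LBA and length cannot be extracted, or whose range * does not overlap the descriptor's range, does not match.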
*/ if (filtered_pattern & CTL_LUN_PAT_RANGE) { uint64_t lba1; uint64_t len1; ctl_action action; int retval; retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1); if (retval != 0) return (CTL_LUN_PAT_NONE); action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba, desc->lba_range.len, FALSE); /* * A "pass" means that the LBA ranges don't overlap, so * this doesn't match the user's range criteria. */ if (action == CTL_ACTION_PASS) return (CTL_LUN_PAT_NONE); } return (filtered_pattern); } static void ctl_inject_error(struct ctl_lun *lun, union ctl_io *io) { struct ctl_error_desc *desc, *desc2; mtx_assert(&lun->lun_lock, MA_OWNED); STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { ctl_lun_error_pattern pattern; /* * Check to see whether this particular command matches * the pattern in the descriptor. */ pattern = ctl_cmd_pattern_match(&io->scsiio, desc); if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE) continue; switch (desc->lun_error & CTL_LUN_INJ_TYPE) { case CTL_LUN_INJ_ABORTED: ctl_set_aborted(&io->scsiio); break; case CTL_LUN_INJ_MEDIUM_ERR: ctl_set_medium_error(&io->scsiio); break; case CTL_LUN_INJ_UA: /* 29h/00h POWER ON, RESET, OR BUS DEVICE RESET * OCCURRED */ ctl_set_ua(&io->scsiio, 0x29, 0x00); break; case CTL_LUN_INJ_CUSTOM: /* * We're assuming the user knows what he is doing. * Just copy the sense information without doing * checks. */ bcopy(&desc->custom_sense, &io->scsiio.sense_data, MIN(sizeof(desc->custom_sense), sizeof(io->scsiio.sense_data))); io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND; io->scsiio.sense_len = SSD_FULL_SIZE; io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; break; case CTL_LUN_INJ_NONE: default: /* * If this is an error injection type we don't know * about, clear the continuous flag (if it is set) * so it will get deleted below. 
*/ desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS; break; } /* * By default, each error injection action is a one-shot */ if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS) continue; STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links); free(desc, M_CTL); } } #ifdef CTL_IO_DELAY static void ctl_datamove_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_datamove(io); } #endif /* CTL_IO_DELAY */ void ctl_datamove(union ctl_io *io) { void (*fe_datamove)(union ctl_io *io); mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED); CTL_DEBUG_PRINT(("ctl_datamove\n")); #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); break; } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_datamove: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if ((lun != NULL) && (lun->delay_info.datamove_delay > 0)) { struct callout *callout; callout = (struct callout *)&io->io_hdr.timer_bytes; callout_init(callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(callout, lun->delay_info.datamove_delay * hz, ctl_datamove_timer_wakeup, io); if (lun->delay_info.datamove_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.datamove_delay = 0; return; } } #endif /* * This command has been aborted. Set the port status, so we fail * the data move. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n", io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port, (uintmax_t)io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31337; /* * Note that the backend, in this case, will get the * callback in its context. In other cases it may get * called in the frontend's interrupt thread context. */ io->scsiio.be_move_done(io); return; } /* Don't confuse frontend with zero length data move. */ if (io->scsiio.kern_data_len == 0) { io->scsiio.be_move_done(io); return; } /* * If we're in XFER mode and this I/O is from the other shelf * controller, we need to send the DMA to the other side to * actually transfer the data to/from the host. In serialize only * mode the transfer happens below CTL and ctl_datamove() is only * called on the machine that originally received the I/O. 
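* The S/G list is marshalled into one or more CTL_MSG_DATAMOVE messages * below; only as many entries as fit into a single message are sent per * trip, so large lists go out in several chunks.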
*/ if ((control_softc->ha_mode == CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg; uint32_t sg_entries_sent; int do_sg_copy; int i; memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.serializing_sc = io; msg.hdr.nexus = io->io_hdr.nexus; msg.dt.flags = io->io_hdr.flags; /* * We convert everything into a S/G list here. We can't * pass by reference, only by value between controllers. * So we can't pass a pointer to the S/G list, only as many * S/G entries as we can fit in here. If it's possible for * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries, * then we need to break this up into multiple transfers. */ if (io->scsiio.kern_sg_entries == 0) { msg.dt.kern_sg_entries = 1; /* * If this is in cached memory, flush the cache * before we send the DMA request to the other * controller. We want to do this in either the * read or the write case. The read case is * straightforward. In the write case, we want to * make sure nothing is in the local cache that * could overwrite the DMAed data. */ if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { /* * XXX KDM use bus_dmamap_sync() here. */ } /* * Convert to a physical address if this is a * virtual address. */ if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { msg.dt.sg_list[0].addr = io->scsiio.kern_data_ptr; } else { /* * XXX KDM use busdma here! */ #if 0 msg.dt.sg_list[0].addr = (void *) vtophys(io->scsiio.kern_data_ptr); #endif } msg.dt.sg_list[0].len = io->scsiio.kern_data_len; do_sg_copy = 0; } else { struct ctl_sg_entry *sgl; do_sg_copy = 1; msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries; sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { /* * XXX KDM use bus_dmamap_sync() here. */ } } msg.dt.kern_data_len = io->scsiio.kern_data_len; msg.dt.kern_total_len = io->scsiio.kern_total_len; msg.dt.kern_data_resid = io->scsiio.kern_data_resid; msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset; msg.dt.sg_sequence = 0; /* * Loop until we've sent all of the S/G entries. On the * other end, we'll recompose these S/G entries into one * contiguous list before passing it to the data movement * code there. */ for (sg_entries_sent = 0; sg_entries_sent < msg.dt.kern_sg_entries; msg.dt.sg_sequence++) { msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list) / sizeof(msg.dt.sg_list[0])), msg.dt.kern_sg_entries - sg_entries_sent); if (do_sg_copy != 0) { struct ctl_sg_entry *sgl; int j; sgl = (struct ctl_sg_entry *) io->scsiio.kern_data_ptr; /* * If this is in cached memory, flush the cache * before we send the DMA request to the other * controller. We want to do this in either the * read or the write case. The read case is * straightforward. In the write case, we want * to make sure nothing is in the local cache * that could overwrite the DMAed data. */ for (i = sg_entries_sent, j = 0; j < msg.dt.cur_sg_entries; i++, j++) { if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { /* * XXX KDM use bus_dmamap_sync() */ } if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* * XXX KDM use busdma. */ #if 0 msg.dt.sg_list[j].addr = (void *) vtophys(sgl[i].addr); #endif } else { msg.dt.sg_list[j].addr = sgl[i].addr; } msg.dt.sg_list[j].len = sgl[i].len; } } sg_entries_sent += msg.dt.cur_sg_entries; if (sg_entries_sent >= msg.dt.kern_sg_entries) msg.dt.sg_last = 1; else msg.dt.sg_last = 0; /* * XXX KDM drop and reacquire the lock here?
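* (ctl_datamove() asserts on entry that ctl_lock is not owned, so no * lock is actually held around this send.)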
*/ if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { /* * XXX do something here. */ } msg.dt.sent_sg_entries = sg_entries_sent; } io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) ctl_failover_io(io, /*have_lock*/ 0); } else { /* * Look up the fe_datamove() function for this particular * front end. */ fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; fe_datamove(io); } } static void ctl_send_datamove_done(union ctl_io *io, int have_lock) { union ctl_ha_msg msg; int isc_status; memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE; msg.hdr.original_sc = io; msg.hdr.serializing_sc = io->io_hdr.serializing_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.scsi_status = io->scsiio.scsi_status; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, sizeof(io->scsiio.sense_data)); msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.sense_residual = io->scsiio.sense_residual; msg.scsi.fetd_status = io->io_hdr.port_status; msg.scsi.residual = io->scsiio.residual; io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ have_lock); return; } isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0); if (isc_status > CTL_HA_STATUS_SUCCESS) { /* XXX do something if this fails */ } } /* * The DMA to the remote side is done; now we need to tell the other side * we're done so it can continue with its data movement. */ static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA write failed with error %d", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* * In this case, we had to malloc the memory locally. Free it. */ if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { int i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); } /* * The data is in local and remote memory, so now we need to send * status (good or bad) back to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); } /* * We've moved the data from the host/controller into local memory. Now we * need to push it over to the remote controller's memory. */ static int ctl_datamove_remote_dm_write_cb(union ctl_io *io) { int retval; retval = 0; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, ctl_datamove_remote_write_cb); return (retval); } static void ctl_datamove_remote_write(union ctl_io *io) { int retval; void (*fe_datamove)(union ctl_io *io); /* * - Get the data from the host/HBA into local memory. * - DMA memory from the local controller to the remote controller. * - Send status back to the remote controller. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side.
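* For a remote write the full chain is: fe_datamove() pulls the data * from the host into local memory, ctl_datamove_remote_dm_write_cb() * starts the DMA to the remote controller, and * ctl_datamove_remote_write_cb() sends the final datamove-done status * back.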
*/ io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; fe_datamove(io); return; } static int ctl_datamove_remote_dm_read_cb(union ctl_io *io) { #if 0 char str[256]; char path_str[64]; struct sbuf sb; #endif /* * In this case, we had to malloc the memory locally. Free it. */ if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { int i; for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); } #if 0 scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, io->io_hdr.flags, io->io_hdr.status); sbuf_finish(&sb); printk("%s", sbuf_data(&sb)); #endif /* * The read is done, now we need to send status (good or bad) back * to the other side. */ ctl_send_datamove_done(io, /*have_lock*/ 0); return (0); } static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) { union ctl_io *io; void (*fe_datamove)(union ctl_io *io); io = rq->context; if (rq->ret != CTL_HA_STATUS_SUCCESS) { printf("%s: ISC DMA read failed with error %d", __func__, rq->ret); ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ rq->ret); } ctl_dt_req_free(rq); /* Switch the pointer over so the FETD knows what to do */ io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; /* * Use a custom move done callback, since we need to send completion * back to the other controller, not to the backend on this side. */ io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; /* XXX KDM add checks like the ones in ctl_datamove? */ fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; fe_datamove(io); } static int ctl_datamove_remote_sgl_setup(union ctl_io *io) { struct ctl_sg_entry *local_sglist, *remote_sglist; struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; struct ctl_softc *softc; int retval; int i; retval = 0; softc = control_softc; local_sglist = io->io_hdr.local_sglist; local_dma_sglist = io->io_hdr.local_dma_sglist; remote_sglist = io->io_hdr.remote_sglist; remote_dma_sglist = io->io_hdr.remote_dma_sglist; if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { for (i = 0; i < io->scsiio.kern_sg_entries; i++) { local_sglist[i].len = remote_sglist[i].len; /* * XXX Detect the situation where the RS-level I/O * redirector on the other side has already read the * data off of the AOR RS on this side, and * transferred it to remote (mirror) memory on the * other side. Since we already have the data in * memory here, we just need to use it. * * XXX KDM this can probably be removed once we * get the cache device code in and take the * current AOR implementation out. 
*/ #ifdef NEEDTOPORT if ((remote_sglist[i].addr >= (void *)vtophys(softc->mirr->addr)) && (remote_sglist[i].addr < ((void *)vtophys(softc->mirr->addr) + CacheMirrorOffset))) { local_sglist[i].addr = remote_sglist[i].addr - CacheMirrorOffset; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) io->io_hdr.flags |= CTL_FLAG_REDIR_DONE; } else { local_sglist[i].addr = remote_sglist[i].addr + CacheMirrorOffset; } #endif #if 0 printf("%s: local %p, remote %p, len %d\n", __func__, local_sglist[i].addr, remote_sglist[i].addr, local_sglist[i].len); #endif } } else { uint32_t len_to_go; /* * In this case, we don't have automatically allocated * memory for this I/O on this controller. This typically * happens with internal CTL I/O -- e.g. inquiry, mode * sense, etc. Anything coming from RAIDCore will have * a mirror area available. */ len_to_go = io->scsiio.kern_data_len; /* * Clear the no datasync flag, we have to use malloced * buffers. */ io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC; /* * The difficult thing here is that the size of the various * S/G segments may be different than the size from the * remote controller. That'll make it harder when DMAing * the data back to the other side. */ for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) / sizeof(io->io_hdr.remote_sglist[0])) && (len_to_go > 0); i++) { local_sglist[i].len = MIN(len_to_go, 131072); CTL_SIZE_8B(local_dma_sglist[i].len, local_sglist[i].len); local_sglist[i].addr = malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK); local_dma_sglist[i].addr = local_sglist[i].addr; if (local_sglist[i].addr == NULL) { int j; printf("malloc failed for %zd bytes!", local_dma_sglist[i].len); for (j = 0; j < i; j++) { free(local_sglist[j].addr, M_CTL); } ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, /*retry_count*/ 4857); retval = 1; goto bailout_error; } /* XXX KDM do we need a sync here? */ len_to_go -= local_sglist[i].len; } /* * Reset the number of S/G entries accordingly. The * original number of S/G entries is available in * rem_sg_entries. */ io->scsiio.kern_sg_entries = i; #if 0 printf("%s: kern_sg_entries = %d\n", __func__, io->scsiio.kern_sg_entries); for (i = 0; i < io->scsiio.kern_sg_entries; i++) printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i, local_sglist[i].addr, local_sglist[i].len, local_dma_sglist[i].len); #endif } return (retval); bailout_error: ctl_send_datamove_done(io, /*have_lock*/ 0); return (retval); } static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, ctl_ha_dt_cb callback) { struct ctl_ha_dt_req *rq; struct ctl_sg_entry *remote_sglist, *local_sglist; struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist; uint32_t local_used, remote_used, total_used; int retval; int i, j; retval = 0; rq = ctl_dt_req_alloc(); /* * If we failed to allocate the request, and if the DMA didn't fail * anyway, set busy status. This is just a resource allocation * failure. */ if ((rq == NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) ctl_set_busy(&io->scsiio); if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) { if (rq != NULL) ctl_dt_req_free(rq); /* * The data move failed. We need to return status back * to the other controller. No point in trying to DMA * data to the remote controller. 
*/ ctl_send_datamove_done(io, /*have_lock*/ 0); retval = 1; goto bailout; } local_sglist = io->io_hdr.local_sglist; local_dma_sglist = io->io_hdr.local_dma_sglist; remote_sglist = io->io_hdr.remote_sglist; remote_dma_sglist = io->io_hdr.remote_dma_sglist; local_used = 0; remote_used = 0; total_used = 0; if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { rq->ret = CTL_HA_STATUS_SUCCESS; rq->context = io; callback(rq); goto bailout; } /* * Pull/push the data over the wire from/to the other controller. * This takes into account the possibility that the local and * remote sglists may not be identical in terms of the size of * the elements and the number of elements. * * One fundamental assumption here is that the length allocated for * both the local and remote sglists is identical. Otherwise, we've * essentially got a coding error of some sort. */ for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { int isc_ret; uint32_t cur_len, dma_length; uint8_t *tmp_ptr; rq->id = CTL_HA_DATA_CTL; rq->command = command; rq->context = io; /* * Both pointers should be aligned. But it is possible * that the allocation length is not. They should both * also have enough slack left over at the end, though, * to round up to the next 8 byte boundary. */ cur_len = MIN(local_sglist[i].len - local_used, remote_sglist[j].len - remote_used); /* * In this case, we have a size issue and need to decrease * the size, except in the case where we actually have less * than 8 bytes left. In that case, we need to increase * the DMA length to get the last bit. */ if ((cur_len & 0x7) != 0) { if (cur_len > 0x7) { cur_len = cur_len - (cur_len & 0x7); dma_length = cur_len; } else { CTL_SIZE_8B(dma_length, cur_len); } } else dma_length = cur_len; /* * If we had to allocate memory for this I/O, instead of using * the non-cached mirror memory, we'll need to flush the cache * before trying to DMA to the other controller. * * We could end up doing this multiple times for the same * segment if we have a larger local segment than remote * segment. That shouldn't be an issue. */ if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { /* * XXX KDM use bus_dmamap_sync() here. */ } rq->size = dma_length; tmp_ptr = (uint8_t *)local_sglist[i].addr; tmp_ptr += local_used; /* Use physical addresses when talking to ISC hardware */ if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { /* XXX KDM use busdma */ #if 0 rq->local = vtophys(tmp_ptr); #endif } else rq->local = tmp_ptr; tmp_ptr = (uint8_t *)remote_sglist[j].addr; tmp_ptr += remote_used; rq->remote = tmp_ptr; rq->callback = NULL; local_used += cur_len; if (local_used >= local_sglist[i].len) { i++; local_used = 0; } remote_used += cur_len; if (remote_used >= remote_sglist[j].len) { j++; remote_used = 0; } total_used += cur_len; if (total_used >= io->scsiio.kern_data_len) rq->callback = callback; if ((rq->size & 0x7) != 0) { printf("%s: warning: size %d is not on 8b boundary\n", __func__, rq->size); } if (((uintptr_t)rq->local & 0x7) != 0) { printf("%s: warning: local %p not on 8b boundary\n", __func__, rq->local); } if (((uintptr_t)rq->remote & 0x7) != 0) { printf("%s: warning: remote %p not on 8b boundary\n", __func__, rq->remote); } #if 0 printf("%s: %s: local %#x remote %#x size %d\n", __func__, (command == CTL_HA_DT_CMD_WRITE) ?
"WRITE" : "READ", rq->local, rq->remote, rq->size); #endif isc_ret = ctl_dt_single(rq); if (isc_ret == CTL_HA_STATUS_WAIT) continue; if (isc_ret == CTL_HA_STATUS_DISCONNECT) { rq->ret = CTL_HA_STATUS_SUCCESS; } else { rq->ret = isc_ret; } callback(rq); goto bailout; } bailout: return (retval); } static void ctl_datamove_remote_read(union ctl_io *io) { int retval; int i; /* * This will send an error to the other controller in the case of a * failure. */ retval = ctl_datamove_remote_sgl_setup(io); if (retval != 0) return; retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, ctl_datamove_remote_read_cb); if ((retval != 0) && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { /* * Make sure we free memory if there was an error.. The * ctl_datamove_remote_xfer() function will send the * datamove done message, or call the callback with an * error if there is a problem. */ for (i = 0; i < io->scsiio.kern_sg_entries; i++) free(io->io_hdr.local_sglist[i].addr, M_CTL); } return; } /* * Process a datamove request from the other controller. This is used for * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory * first. Once that is complete, the data gets DMAed into the remote * controller's memory. For reads, we DMA from the remote controller's * memory into our memory first, and then move it out to the FETD. */ static void ctl_datamove_remote(union ctl_io *io) { struct ctl_softc *softc; softc = control_softc; mtx_assert(&softc->ctl_lock, MA_NOTOWNED); /* * Note that we look for an aborted I/O here, but don't do some of * the other checks that ctl_datamove() normally does. * We don't need to run the datamove delay code, since that should * have been done if need be on the other controller. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) { printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__, io->scsiio.tag_num, io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_lun); io->io_hdr.port_status = 31338; ctl_send_datamove_done(io, /*have_lock*/ 0); return; } if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) { ctl_datamove_remote_write(io); } else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){ ctl_datamove_remote_read(io); } else { union ctl_ha_msg msg; struct scsi_sense_data *sense; uint8_t sks[3]; int retry_count; memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_BAD_JUJU; msg.hdr.status = CTL_SCSI_ERROR; msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND; retry_count = 4243; sense = &msg.scsi.sense_data; sks[0] = SSD_SCS_VALID; sks[1] = (retry_count >> 8) & 0xff; sks[2] = retry_count & 0xff; /* "Internal target failure" */ scsi_set_sense_data(sense, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_HARDWARE_ERROR, /*asc*/ 0x44, /*ascq*/ 0x00, /*type*/ SSD_ELEM_SKS, /*size*/ sizeof(sks), /*data*/ sks, SSD_ELEM_NONE); io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE; if (io->io_hdr.flags & CTL_FLAG_FAILOVER) { ctl_failover_io(io, /*have_lock*/ 1); return; } if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { /* XXX KDM what to do if this fails? 
*/ } return; } } static int ctl_process_done(union ctl_io *io) { struct ctl_lun *lun; struct ctl_softc *softc = control_softc; void (*fe_done)(union ctl_io *io); uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port); CTL_DEBUG_PRINT(("ctl_process_done\n")); fe_done = softc->ctl_ports[targ_port]->fe_done; #ifdef CTL_TIME_IO if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) { char str[256]; char path_str[64]; struct sbuf sb; ctl_scsi_path_string(io, path_str, sizeof(path_str)); sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); sbuf_cat(&sb, path_str); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: ctl_scsi_command_string(&io->scsiio, NULL, &sb); sbuf_printf(&sb, "\n"); sbuf_cat(&sb, path_str); sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", io->scsiio.tag_num, io->scsiio.tag_type); break; case CTL_IO_TASK: sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, " "Tag Type: %d\n", io->taskio.task_action, io->taskio.tag_num, io->taskio.tag_type); break; default: printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type); panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type); break; } sbuf_cat(&sb, path_str); sbuf_printf(&sb, "ctl_process_done: %jd seconds\n", (intmax_t)time_uptime - io->io_hdr.start_time); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #endif /* CTL_TIME_IO */ switch (io->io_hdr.io_type) { case CTL_IO_SCSI: break; case CTL_IO_TASK: if (bootverbose || (ctl_debug & CTL_DEBUG_INFO)) ctl_io_error_print(io, NULL); if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) ctl_free_io(io); else fe_done(io); return (CTL_RETVAL_COMPLETE); default: panic("ctl_process_done: invalid io type %d\n", io->io_hdr.io_type); break; /* NOTREACHED */ } lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if (lun == NULL) { CTL_DEBUG_PRINT(("NULL LUN for lun %d\n", io->io_hdr.nexus.targ_mapped_lun)); goto bailout; } mtx_lock(&lun->lun_lock); /* * Check to see if we have any errors to inject here. We only * inject errors for commands that don't already have errors set. */ if ((STAILQ_FIRST(&lun->error_list) != NULL) && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0)) ctl_inject_error(lun, io); /* * XXX KDM how do we treat commands that aren't completed * successfully? * * XXX KDM should we also track I/O latency? */ if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS && io->io_hdr.io_type == CTL_IO_SCSI) { #ifdef CTL_TIME_IO struct bintime cur_bt; #endif int type; if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) type = CTL_STATS_READ; else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) type = CTL_STATS_WRITE; else type = CTL_STATS_NO_IO; lun->stats.ports[targ_port].bytes[type] += io->scsiio.kern_total_len; lun->stats.ports[targ_port].operations[type]++; #ifdef CTL_TIME_IO bintime_add(&lun->stats.ports[targ_port].dma_time[type], &io->io_hdr.dma_bt); lun->stats.ports[targ_port].num_dmas[type] += io->io_hdr.num_dmas; getbintime(&cur_bt); bintime_sub(&cur_bt, &io->io_hdr.start_bt); bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt); #endif } /* * Remove this from the OOA queue. */ TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links); /* * Run through the blocked queue on this LUN and see if anything * has become unblocked, now that this transaction is done. */ ctl_check_blocked(lun); /* * If the LUN has been invalidated, free it if there is nothing * left on its OOA queue. 
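* ctl_free_lun() requires the softc-wide ctl_lock, so the LUN lock is * dropped and ctl_lock taken before freeing.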
*/ if ((lun->flags & CTL_LUN_INVALID) && TAILQ_EMPTY(&lun->ooa_queue)) { mtx_unlock(&lun->lun_lock); mtx_lock(&softc->ctl_lock); ctl_free_lun(lun); mtx_unlock(&softc->ctl_lock); } else mtx_unlock(&lun->lun_lock); bailout: /* * If this command has been aborted, make sure we set the status * properly. The FETD is responsible for freeing the I/O and doing * whatever it needs to do to clean up its state. */ if (io->io_hdr.flags & CTL_FLAG_ABORT) ctl_set_task_aborted(&io->scsiio); /* * If enabled, print command error status. * We don't print UAs unless debugging was enabled explicitly. */ do { if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) break; if (!bootverbose && (ctl_debug & CTL_DEBUG_INFO) == 0) break; if ((ctl_debug & CTL_DEBUG_INFO) == 0 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) && (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(&io->scsiio.sense_data, io->scsiio.sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 0); if (sense_key == SSD_KEY_UNIT_ATTENTION) break; } ctl_io_error_print(io, NULL); } while (0); /* * Tell the FETD or the other shelf controller we're done with this * command. Note that only SCSI commands get to this point. Task * management commands are completed above. * * We only send status to the other controller if we're in XFER * mode. In SER_ONLY mode, the I/O is done on the controller that * received the I/O (from CTL's perspective), and so the status is * generated there. * * XXX KDM if we hold the lock here, we could cause a deadlock * if the frontend comes back in in this context to queue * something. */ if ((softc->ha_mode == CTL_HA_MODE_XFER) && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) { union ctl_ha_msg msg; memset(&msg, 0, sizeof(msg)); msg.hdr.msg_type = CTL_MSG_FINISH_IO; msg.hdr.original_sc = io->io_hdr.original_sc; msg.hdr.nexus = io->io_hdr.nexus; msg.hdr.status = io->io_hdr.status; msg.scsi.scsi_status = io->scsiio.scsi_status; msg.scsi.tag_num = io->scsiio.tag_num; msg.scsi.tag_type = io->scsiio.tag_type; msg.scsi.sense_len = io->scsiio.sense_len; msg.scsi.sense_residual = io->scsiio.sense_residual; msg.scsi.residual = io->scsiio.residual; memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data, sizeof(io->scsiio.sense_data)); /* * We copy this whether or not this is an I/O-related * command. Otherwise, we'd have to go and check to see * whether it's a read/write command, and it really isn't * worth it. */ memcpy(&msg.scsi.lbalen, &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, sizeof(msg.scsi.lbalen)); if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) { /* XXX do something here */ } ctl_free_io(io); } else fe_done(io); return (CTL_RETVAL_COMPLETE); } #ifdef CTL_WITH_CA /* * Front end should call this if it doesn't do autosense. When the request * sense comes back in from the initiator, we'll dequeue this and send it. */ int ctl_queue_sense(union ctl_io *io) { struct ctl_lun *lun; + struct ctl_port *port; struct ctl_softc *softc; uint32_t initidx, targ_lun; softc = control_softc; CTL_DEBUG_PRINT(("ctl_queue_sense\n")); /* * LUN lookup will likely move to the ctl_work_thread() once we * have our new queueing infrastructure (that doesn't put things on * a per-LUN queue initially). That is so that we can handle * things like an INQUIRY to a LUN that we don't have enabled. We * can't deal with that right now. */ mtx_lock(&softc->ctl_lock); /* * If we don't have a LUN for this, just toss the sense * information. 
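* The sense data is only useful if it can be stored in the LUN's * pending_sense array, where a later REQUEST SENSE from this initiator * can pick it up.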
*/ - targ_lun = io->io_hdr.nexus.targ_lun; - targ_lun = ctl_map_lun(softc, io->io_hdr.nexus.targ_port, targ_lun); + port = ctl_io_port(&ctsio->io_hdr); + targ_lun = ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) lun = softc->ctl_luns[targ_lun]; else goto bailout; initidx = ctl_get_initindex(&io->io_hdr.nexus); mtx_lock(&lun->lun_lock); /* * Already have CA set for this LUN...toss the sense information. */ if (ctl_is_set(lun->have_ca, initidx)) { mtx_unlock(&lun->lun_lock); goto bailout; } memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data, MIN(sizeof(lun->pending_sense[initidx]), sizeof(io->scsiio.sense_data))); ctl_set_mask(lun->have_ca, initidx); mtx_unlock(&lun->lun_lock); bailout: mtx_unlock(&softc->ctl_lock); ctl_free_io(io); return (CTL_RETVAL_COMPLETE); } #endif /* * Primary command inlet from frontend ports. All SCSI and task I/O * requests must go through this function. */ int ctl_queue(union ctl_io *io) { + struct ctl_port *port; CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0])); #ifdef CTL_TIME_IO io->io_hdr.start_time = time_uptime; getbintime(&io->io_hdr.start_bt); #endif /* CTL_TIME_IO */ /* Map FE-specific LUN ID into global one. */ + port = ctl_io_port(&io->io_hdr); io->io_hdr.nexus.targ_mapped_lun = - ctl_map_lun(control_softc, io->io_hdr.nexus.targ_port, - io->io_hdr.nexus.targ_lun); + ctl_lun_map_from_port(port, io->io_hdr.nexus.targ_lun); switch (io->io_hdr.io_type) { case CTL_IO_SCSI: case CTL_IO_TASK: if (ctl_debug & CTL_DEBUG_CDB) ctl_io_print(io); ctl_enqueue_incoming(io); break; default: printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type); return (EINVAL); } return (CTL_RETVAL_COMPLETE); } #ifdef CTL_IO_DELAY static void ctl_done_timer_wakeup(void *arg) { union ctl_io *io; io = (union ctl_io *)arg; ctl_done(io); } #endif /* CTL_IO_DELAY */ void ctl_done(union ctl_io *io) { /* * Enable this to catch duplicate completion issues. */ #if 0 if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) { printf("%s: type %d msg %d cdb %x iptl: " "%d:%d:%d:%d tag 0x%04x " "flag %#x status %x\n", __func__, io->io_hdr.io_type, io->io_hdr.msg_type, io->scsiio.cdb[0], io->io_hdr.nexus.initid.id, io->io_hdr.nexus.targ_port, io->io_hdr.nexus.targ_target.id, io->io_hdr.nexus.targ_lun, (io->io_hdr.io_type == CTL_IO_TASK) ? io->taskio.tag_num : io->scsiio.tag_num, io->io_hdr.flags, io->io_hdr.status); } else io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE; #endif /* * This is an internal copy of an I/O, and should not go through * the normal done processing logic. */ if (io->io_hdr.flags & CTL_FLAG_INT_COPY) return; /* * We need to send a msg to the serializing shelf to finish the IO * as well. We don't send a finish message to the other shelf if * this is a task management command. Task management commands * aren't serialized in the OOA queue, but rather just executed on * both shelf controllers for commands that originated on that * controller. 
*/ if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC) && (io->io_hdr.io_type != CTL_IO_TASK)) { union ctl_ha_msg msg_io; msg_io.hdr.msg_type = CTL_MSG_FINISH_IO; msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc; if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io, sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) { } /* continue on to finish IO */ } #ifdef CTL_IO_DELAY if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE; } else { struct ctl_lun *lun; lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; if ((lun != NULL) && (lun->delay_info.done_delay > 0)) { struct callout *callout; callout = (struct callout *)&io->io_hdr.timer_bytes; callout_init(callout, /*mpsafe*/ 1); io->io_hdr.flags |= CTL_FLAG_DELAY_DONE; callout_reset(callout, lun->delay_info.done_delay * hz, ctl_done_timer_wakeup, io); if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT) lun->delay_info.done_delay = 0; return; } } #endif /* CTL_IO_DELAY */ ctl_enqueue_done(io); } int ctl_isc(struct ctl_scsiio *ctsio) { struct ctl_lun *lun; int retval; lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0])); CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n")); retval = lun->backend->data_submit((union ctl_io *)ctsio); return (retval); } static void ctl_work_thread(void *arg) { struct ctl_thread *thr = (struct ctl_thread *)arg; struct ctl_softc *softc = thr->ctl_softc; union ctl_io *io; int retval; CTL_DEBUG_PRINT(("ctl_work_thread starting\n")); for (;;) { retval = 0; /* * We handle the queues in this order: * - ISC * - done queue (to free up resources, unblock other commands) * - RtR queue * - incoming queue * * If those queues are empty, we break out of the loop and * go to sleep. */ mtx_lock(&thr->queue_lock); io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->isc_queue, links); mtx_unlock(&thr->queue_lock); ctl_handle_isc(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->done_queue, links); /* clear any blocked commands, call fe_done */ mtx_unlock(&thr->queue_lock); retval = ctl_process_done(io); continue; } io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->incoming_queue, links); mtx_unlock(&thr->queue_lock); if (io->io_hdr.io_type == CTL_IO_TASK) ctl_run_task(io); else ctl_scsiio_precheck(softc, &io->scsiio); continue; } if (!ctl_pause_rtr) { io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue); if (io != NULL) { STAILQ_REMOVE_HEAD(&thr->rtr_queue, links); mtx_unlock(&thr->queue_lock); retval = ctl_scsiio(&io->scsiio); if (retval != CTL_RETVAL_COMPLETE) CTL_DEBUG_PRINT(("ctl_scsiio failed\n")); continue; } } /* Sleep until we have something to do. */ mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0); } } static void ctl_lun_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_be_lun *be_lun; int retval; CTL_DEBUG_PRINT(("ctl_lun_thread starting\n")); for (;;) { retval = 0; mtx_lock(&softc->ctl_lock); be_lun = STAILQ_FIRST(&softc->pending_lun_queue); if (be_lun != NULL) { STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links); mtx_unlock(&softc->ctl_lock); ctl_create_lun(be_lun); continue; } /* Sleep until we have something to do. 
*/ mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock, PDROP | PRIBIO, "-", 0); } } static void ctl_thresh_thread(void *arg) { struct ctl_softc *softc = (struct ctl_softc *)arg; struct ctl_lun *lun; struct ctl_be_lun *be_lun; struct scsi_da_rw_recovery_page *rwpage; struct ctl_logical_block_provisioning_page *page; const char *attr; uint64_t thres, val; int i, e; CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n")); for (;;) { mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(lun, &softc->lun_list, links) { be_lun = lun->be_lun; if ((lun->flags & CTL_LUN_DISABLED) || (lun->flags & CTL_LUN_OFFLINE) || lun->backend->lun_attr == NULL) continue; rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT]; if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0) continue; e = 0; page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT]; for (i = 0; i < CTL_NUM_LBP_THRESH; i++) { if ((page->descr[i].flags & SLBPPD_ENABLED) == 0) continue; thres = scsi_4btoul(page->descr[i].count); thres <<= CTL_LBP_EXPONENT; switch (page->descr[i].resource) { case 0x01: attr = "blocksavail"; break; case 0x02: attr = "blocksused"; break; case 0xf1: attr = "poolblocksavail"; break; case 0xf2: attr = "poolblocksused"; break; default: continue; } mtx_unlock(&softc->ctl_lock); // XXX val = lun->backend->lun_attr( lun->be_lun->be_lun, attr); mtx_lock(&softc->ctl_lock); if (val == UINT64_MAX) continue; if ((page->descr[i].flags & SLBPPD_ARMING_MASK) == SLBPPD_ARMING_INC) e |= (val >= thres); else e |= (val <= thres); } mtx_lock(&lun->lun_lock); if (e) { if (lun->lasttpt == 0 || time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) { lun->lasttpt = time_uptime; ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); } } else { lun->lasttpt = 0; ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES); } mtx_unlock(&lun->lun_lock); } mtx_unlock(&softc->ctl_lock); pause("-", CTL_LBP_PERIOD * hz); } } static void ctl_enqueue_incoming(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; u_int idx; idx = (io->io_hdr.nexus.targ_port * 127 + io->io_hdr.nexus.initid.id) % worker_threads; thr = &softc->threads[idx]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_rtr(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_done(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } static void ctl_enqueue_isc(union ctl_io *io) { struct ctl_softc *softc = control_softc; struct ctl_thread *thr; thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads]; mtx_lock(&thr->queue_lock); STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links); mtx_unlock(&thr->queue_lock); wakeup(thr); } /* Initialization and failover */ void ctl_init_isc_msg(void) { printf("CTL: Still calling this thing\n"); } /* * Init component * Initializes component into configuration defined by bootMode * (see hasc-sv.c) * returns hasc_Status: * OK * ERROR - fatal error */ static ctl_ha_comp_status ctl_isc_init(struct ctl_ha_component *c) { ctl_ha_comp_status ret = 
CTL_HA_COMP_STATUS_OK; c->status = ret; return ret; } /* Start component * Starts component in state requested. If component starts successfully, * it must set its own state to the requestrd state * When requested state is HASC_STATE_HA, the component may refine it * by adding _SLAVE or _MASTER flags. * Currently allowed state transitions are: * UNKNOWN->HA - initial startup * UNKNOWN->SINGLE - initial startup when no parter detected * HA->SINGLE - failover * returns ctl_ha_comp_status: * OK - component successfully started in requested state * FAILED - could not start the requested state, failover may * be possible * ERROR - fatal error detected, no future startup possible */ static ctl_ha_comp_status ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) { ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; printf("%s: go\n", __func__); // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) if (c->state == CTL_HA_STATE_UNKNOWN ) { control_softc->is_single = 0; if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) != CTL_HA_STATUS_SUCCESS) { printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); ret = CTL_HA_COMP_STATUS_ERROR; } } else if (CTL_HA_STATE_IS_HA(c->state) && CTL_HA_STATE_IS_SINGLE(state)){ // HA->SINGLE transition ctl_failover(); control_softc->is_single = 1; } else { printf("ctl_isc_start:Invalid state transition %X->%X\n", c->state, state); ret = CTL_HA_COMP_STATUS_ERROR; } if (CTL_HA_STATE_IS_SINGLE(state)) control_softc->is_single = 1; c->state = state; c->status = ret; return ret; } /* * Quiesce component * The component must clear any error conditions (set status to OK) and * prepare itself to another Start call * returns ctl_ha_comp_status: * OK * ERROR */ static ctl_ha_comp_status ctl_isc_quiesce(struct ctl_ha_component *c) { int ret = CTL_HA_COMP_STATUS_OK; ctl_pause_rtr = 1; c->status = ret; return ret; } struct ctl_ha_component ctl_ha_component_ctlisc = { .name = "CTL ISC", .state = CTL_HA_STATE_UNKNOWN, .init = ctl_isc_init, .start = ctl_isc_start, .quiesce = ctl_isc_quiesce }; /* * vim: ts=8 */ Index: stable/10/sys/cam/ctl/ctl_frontend.c =================================================================== --- stable/10/sys/cam/ctl/ctl_frontend.c (revision 279001) +++ stable/10/sys/cam/ctl/ctl_frontend.c (revision 279002) @@ -1,318 +1,319 @@ /*- * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.c#4 $ */ /* * CAM Target Layer front end interface code * * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX KDM move defines from ctl_ioctl.h to somewhere else */ #include #include #include #include extern struct ctl_softc *control_softc; int ctl_frontend_register(struct ctl_frontend *fe) { struct ctl_softc *softc = control_softc; struct ctl_frontend *fe_tmp; KASSERT(softc != NULL, ("CTL is not initialized")); /* * Sanity check, make sure this isn't a duplicate registration. */ mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(fe_tmp, &softc->fe_list, links) { if (strcmp(fe_tmp->name, fe->name) == 0) { mtx_unlock(&softc->ctl_lock); return (-1); } } mtx_unlock(&softc->ctl_lock); STAILQ_INIT(&fe->port_list); /* * Call the frontend's initialization routine. */ if (fe->init != NULL) fe->init(); mtx_lock(&softc->ctl_lock); softc->num_frontends++; STAILQ_INSERT_TAIL(&softc->fe_list, fe, links); mtx_unlock(&softc->ctl_lock); return (0); } int ctl_frontend_deregister(struct ctl_frontend *fe) { struct ctl_softc *softc = control_softc; if (!STAILQ_EMPTY(&fe->port_list)) return (-1); mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->fe_list, fe, ctl_frontend, links); softc->num_frontends--; mtx_unlock(&softc->ctl_lock); /* * Call the frontend's shutdown routine. */ if (fe->shutdown != NULL) fe->shutdown(); return (0); } struct ctl_frontend * ctl_frontend_find(char *frontend_name) { struct ctl_softc *softc = control_softc; struct ctl_frontend *fe; mtx_lock(&softc->ctl_lock); STAILQ_FOREACH(fe, &softc->fe_list, links) { if (strcmp(fe->name, frontend_name) == 0) { mtx_unlock(&softc->ctl_lock); return (fe); } } mtx_unlock(&softc->ctl_lock); return (NULL); } int ctl_port_register(struct ctl_port *port) { struct ctl_softc *softc = control_softc; void *pool; int port_num; int retval; retval = 0; KASSERT(softc != NULL, ("CTL is not initialized")); mtx_lock(&softc->ctl_lock); port_num = ctl_ffz(softc->ctl_port_mask, CTL_MAX_PORTS); if ((port_num == -1) || (ctl_set_mask(softc->ctl_port_mask, port_num) == -1)) { port->targ_port = -1; mtx_unlock(&softc->ctl_lock); return (1); } softc->num_ports++; mtx_unlock(&softc->ctl_lock); /* * Initialize the initiator and portname mappings */ port->max_initiators = CTL_MAX_INIT_PER_PORT; port->wwpn_iid = malloc(sizeof(*port->wwpn_iid) * port->max_initiators, M_CTL, M_NOWAIT | M_ZERO); if (port->wwpn_iid == NULL) { retval = ENOMEM; goto error; } /* * We add 20 to whatever the caller requests, so he doesn't get * burned by queueing things back to the pending sense queue. In * theory, there should probably only be one outstanding item, at * most, on the pending sense queue for a LUN. We'll clear the * pending sense queue on the next command, whether or not it is * a REQUEST SENSE. 
*/ retval = ctl_pool_create(softc, port->port_name, port->num_requested_ctl_io + 20, &pool); if (retval != 0) { free(port->wwpn_iid, M_CTL); error: port->targ_port = -1; mtx_lock(&softc->ctl_lock); ctl_clear_mask(softc->ctl_port_mask, port_num); mtx_unlock(&softc->ctl_lock); return (retval); } port->ctl_pool_ref = pool; if (port->options.stqh_first == NULL) STAILQ_INIT(&port->options); mtx_lock(&softc->ctl_lock); port->targ_port = port_num + softc->port_offset; STAILQ_INSERT_TAIL(&port->frontend->port_list, port, fe_links); STAILQ_INSERT_TAIL(&softc->port_list, port, links); softc->ctl_ports[port_num] = port; mtx_unlock(&softc->ctl_lock); return (retval); } int ctl_port_deregister(struct ctl_port *port) { struct ctl_softc *softc = control_softc; struct ctl_io_pool *pool; int port_num, retval, i; retval = 0; pool = (struct ctl_io_pool *)port->ctl_pool_ref; if (port->targ_port == -1) { retval = 1; goto bailout; } mtx_lock(&softc->ctl_lock); STAILQ_REMOVE(&softc->port_list, port, ctl_port, links); STAILQ_REMOVE(&port->frontend->port_list, port, ctl_port, fe_links); softc->num_ports--; port_num = (port->targ_port < CTL_MAX_PORTS) ? port->targ_port : port->targ_port - CTL_MAX_PORTS; ctl_clear_mask(softc->ctl_port_mask, port_num); softc->ctl_ports[port_num] = NULL; mtx_unlock(&softc->ctl_lock); ctl_pool_free(pool); ctl_free_opts(&port->options); + ctl_lun_map_deinit(port); free(port->port_devid, M_CTL); port->port_devid = NULL; free(port->target_devid, M_CTL); port->target_devid = NULL; free(port->init_devid, M_CTL); port->init_devid = NULL; for (i = 0; i < port->max_initiators; i++) free(port->wwpn_iid[i].name, M_CTL); free(port->wwpn_iid, M_CTL); bailout: return (retval); } void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn, int wwpn_valid, uint64_t wwpn) { struct scsi_vpd_id_descriptor *desc; int len, proto; if (port->port_type == CTL_PORT_FC) proto = SCSI_PROTO_FC << 4; else if (port->port_type == CTL_PORT_ISCSI) proto = SCSI_PROTO_ISCSI << 4; else proto = SCSI_PROTO_SPI << 4; if (wwnn_valid) { port->wwnn = wwnn; free(port->target_devid, M_CTL); len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_NAA; desc->length = CTL_WWPN_LEN; scsi_u64to8b(port->wwnn, desc->identifier); } if (wwpn_valid) { port->wwpn = wwpn; free(port->port_devid, M_CTL); len = sizeof(struct scsi_vpd_device_id) + CTL_WWPN_LEN; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_NAA; desc->length = CTL_WWPN_LEN; scsi_u64to8b(port->wwpn, desc->identifier); } } void ctl_port_online(struct ctl_port *port) { port->port_online(port->onoff_arg); /* XXX KDM need a lock here? */ port->status |= CTL_PORT_STATUS_ONLINE; } void ctl_port_offline(struct ctl_port *port) { port->port_offline(port->onoff_arg); /* XXX KDM need a lock here? 
*/ port->status &= ~CTL_PORT_STATUS_ONLINE; } /* * vim: ts=8 */ Index: stable/10/sys/cam/ctl/ctl_frontend.h =================================================================== --- stable/10/sys/cam/ctl/ctl_frontend.h (revision 279001) +++ stable/10/sys/cam/ctl/ctl_frontend.h (revision 279002) @@ -1,338 +1,337 @@ /*- * Copyright (c) 2003 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_frontend.h#2 $ * $FreeBSD$ */ /* * CAM Target Layer front end registration hooks * * Author: Ken Merry */ #ifndef _CTL_FRONTEND_H_ #define _CTL_FRONTEND_H_ typedef enum { CTL_PORT_STATUS_NONE = 0x00, CTL_PORT_STATUS_ONLINE = 0x01, CTL_PORT_STATUS_TARG_ONLINE = 0x02, CTL_PORT_STATUS_LUN_ONLINE = 0x04 } ctl_port_status; typedef int (*fe_init_t)(void); typedef void (*fe_shutdown_t)(void); typedef void (*port_func_t)(void *onoff_arg); typedef int (*port_info_func_t)(void *onoff_arg, struct sbuf *sb); typedef int (*lun_func_t)(void *arg, struct ctl_id targ_id, int lun_id); -typedef uint32_t (*lun_map_func_t)(void *arg, uint32_t lun_id); typedef int (*fe_ioctl_t)(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); #define CTL_FRONTEND_DECLARE(name, driver) \ static int name ## _modevent(module_t mod, int type, void *data) \ { \ switch (type) { \ case MOD_LOAD: \ ctl_frontend_register( \ (struct ctl_frontend *)data); \ break; \ case MOD_UNLOAD: \ printf(#name " module unload - not possible for this module type\n"); \ return EINVAL; \ default: \ return EOPNOTSUPP; \ } \ return 0; \ } \ static moduledata_t name ## _mod = { \ #name, \ name ## _modevent, \ (void *)&driver \ }; \ DECLARE_MODULE(name, name ## _mod, SI_SUB_CONFIGURE, SI_ORDER_FOURTH); \ MODULE_DEPEND(name, ctl, 1, 1, 1); \ MODULE_DEPEND(name, cam, 1, 1, 1) struct ctl_wwpn_iid { int in_use; time_t last_use; uint64_t wwpn; char *name; }; /* * The ctl_frontend structure is the registration mechanism between a FETD * (Front End Target Driver) and the CTL layer. Here is a description of * the fields: * * port_type: This field tells CTL what kind of front end it is * dealing with. This field serves two purposes. 
* The first is to let CTL know whether the frontend * in question is inside the main CTL module (i.e. * the ioctl front end), and therefore its module * reference count shouldn't be incremented. The * CTL ioctl front end should continue to use the * CTL_PORT_IOCTL argument as long as it is part of * the main CTL module. The second is to let CTL * know what kind of front end it is dealing with, so * it can return the proper inquiry data for that * particular port. * * num_requested_ctl_io: This is the number of ctl_io structures that the * front end needs for its pool. This should * generally be the maximum number of outstanding * transactions that the FETD can handle. The CTL * layer will add a few to this to account for * ctl_io buffers queued for pending sense data. * (Pending sense only gets queued if the FETD * doesn't support autosense. e.g. non-packetized * parallel SCSI doesn't support autosense.) * * port_name: A string describing the FETD. e.g. "LSI 1030T U320" * or whatever you want to use to describe the driver. * * * physical_port: This is the physical port number of this * particular port within the driver/hardware. This * number is hardware/driver specific. * virtual_port: This is the virtual port number of this * particular port. This is for things like NP-IV. * * port_online(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to start responding to selections on the specified * target ID. (targ_target) * * port_offline(): This function is called, with onoff_arg as its * argument, by the CTL layer when it wants the FETD * to stop responding to selection on the specified * target ID. (targ_target) * * onoff_arg: This is supplied as an argument to port_online() * and port_offline(). This is specified by the * FETD. * * lun_enable(): This function is called, with targ_lun_arg, a target * ID and a LUN ID as its arguments, by CTL when it * wants the FETD to enable a particular LUN. If the * FETD doesn't really know about LUNs, it should * just ignore this call and return 0. If the FETD * cannot enable the requested LUN for some reason, the * FETD should return non-zero status. * * lun_disable(): This function is called, with targ_lun_arg, a target * ID and LUN ID as its arguments, by CTL when it * wants the FETD to disable a particular LUN. If the * FETD doesn't really know about LUNs, it should just * ignore this call and return 0. If the FETD cannot * disable the requested LUN for some reason, the * FETD should return non-zero status. * * targ_lun_arg: This is supplied as an argument to the targ/lun * enable/disable() functions. This is specified by * the FETD. * * fe_datamove(): This function is called one or more times per I/O * by the CTL layer to tell the FETD to initiate a * DMA to or from the data buffer(s) specified by * the passed-in ctl_io structure. * * fe_done(): This function is called by the CTL layer when a * particular SCSI I/O or task management command has * completed. For SCSI I/O requests (CTL_IO_SCSI), * sense data is always supplied if the status is * CTL_SCSI_ERROR and the SCSI status byte is * SCSI_STATUS_CHECK_COND. If the FETD doesn't * support autosense, the sense should be queued * back to the CTL layer via ctl_queue_sense(). * * fe_dump(): This function, if it exists, is called by CTL * to request a dump of any debugging information or * state to the console. * * max_targets: The maximum number of targets that we can create * per-port. * * max_target_id: The highest target ID that we can use. 
* * targ_port: The CTL layer assigns a "port number" to every * FETD. This port number should be passed back in * in the header of every ctl_io that is queued to * the CTL layer. This enables us to determine * which bus the command came in on. * * ctl_pool_ref: Memory pool reference used by the FETD in calls to * ctl_alloc_io(). * * max_initiators: Maximum number of initiators that the FETD is * allowed to have. Initiators should be numbered * from 0 to max_initiators - 1. This value will * typically be 16, and thus not a problem for * parallel SCSI. This may present issues for Fibre * Channel. * * wwnn World Wide Node Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * wwpn World Wide Port Name to be used by the FETD. * Note that this is set *after* registration. It * will be set prior to the online function getting * called. * * status: Used by CTL to keep track of per-FETD state. * * links: Linked list pointers, used by CTL. The FETD * shouldn't touch this field. */ struct ctl_port { struct ctl_frontend *frontend; ctl_port_type port_type; /* passed to CTL */ int num_requested_ctl_io; /* passed to CTL */ char *port_name; /* passed to CTL */ int physical_port; /* passed to CTL */ int virtual_port; /* passed to CTL */ port_func_t port_online; /* passed to CTL */ port_func_t port_offline; /* passed to CTL */ port_info_func_t port_info; /* passed to CTL */ void *onoff_arg; /* passed to CTL */ lun_func_t lun_enable; /* passed to CTL */ lun_func_t lun_disable; /* passed to CTL */ - lun_map_func_t lun_map; /* passed to CTL */ + uint32_t *lun_map; /* passed to CTL */ void *targ_lun_arg; /* passed to CTL */ void (*fe_datamove)(union ctl_io *io); /* passed to CTL */ void (*fe_done)(union ctl_io *io); /* passed to CTL */ int max_targets; /* passed to CTL */ int max_target_id; /* passed to CTL */ int32_t targ_port; /* passed back to FETD */ void *ctl_pool_ref; /* passed back to FETD */ uint32_t max_initiators; /* passed back to FETD */ struct ctl_wwpn_iid *wwpn_iid; /* used by CTL */ uint64_t wwnn; /* set by CTL before online */ uint64_t wwpn; /* set by CTL before online */ ctl_port_status status; /* used by CTL */ ctl_options_t options; /* passed to CTL */ struct ctl_devid *port_devid; /* passed to CTL */ struct ctl_devid *target_devid; /* passed to CTL */ struct ctl_devid *init_devid; /* passed to CTL */ STAILQ_ENTRY(ctl_port) fe_links; /* used by CTL */ STAILQ_ENTRY(ctl_port) links; /* used by CTL */ }; struct ctl_frontend { char name[CTL_DRIVER_NAME_LEN]; /* passed to CTL */ fe_init_t init; /* passed to CTL */ fe_ioctl_t ioctl; /* passed to CTL */ void (*fe_dump)(void); /* passed to CTL */ fe_shutdown_t shutdown; /* passed to CTL */ STAILQ_HEAD(, ctl_port) port_list; /* used by CTL */ STAILQ_ENTRY(ctl_frontend) links; /* used by CTL */ }; /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_frontend_register(struct ctl_frontend *fe); /* * Called at FETD module unload time. * Returns 0 for success, non-zero for failure. */ int ctl_frontend_deregister(struct ctl_frontend *fe); /* * Find the frontend by its name. Returns NULL if not found. */ struct ctl_frontend * ctl_frontend_find(char *frontend_name); /* * This may block until resources are allocated. Called at FETD module load * time. Returns 0 for success, non-zero for failure. */ int ctl_port_register(struct ctl_port *port); /* * Called at FETD module unload time. 
* Returns 0 for success, non-zero for failure. */ int ctl_port_deregister(struct ctl_port *port); /* * Called to set the WWNN and WWPN for a particular frontend. */ void ctl_port_set_wwns(struct ctl_port *port, int wwnn_valid, uint64_t wwnn, int wwpn_valid, uint64_t wwpn); /* * Called to bring a particular frontend online. */ void ctl_port_online(struct ctl_port *fe); /* * Called to take a particular frontend offline. */ void ctl_port_offline(struct ctl_port *fe); /* * This routine queues I/O and task management requests from the FETD to the * CTL layer. Returns immediately. Returns 0 for success, non-zero for * failure. */ int ctl_queue(union ctl_io *io); /* * This routine is used if the front end interface doesn't support * autosense (e.g. non-packetized parallel SCSI). This will queue the * scsiio structure back to a per-lun pending sense queue. This MUST be * called BEFORE any request sense can get queued to the CTL layer -- I * need it in the queue in order to service the request. The scsiio * structure passed in here will be freed by the CTL layer when sense is * retrieved by the initiator. Returns 0 for success, non-zero for failure. */ int ctl_queue_sense(union ctl_io *io); /* * This routine adds an initiator to CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. * The WWPN should be the FC WWPN, if available. */ int ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name); /* * This routine will remove an initiator from CTL's port database. * The iid field should be the same as the iid passed in the nexus of each * ctl_io from this initiator. */ int ctl_remove_initiator(struct ctl_port *port, int iid); #endif /* _CTL_FRONTEND_H_ */ Index: stable/10/sys/cam/ctl/ctl_frontend_iscsi.c =================================================================== --- stable/10/sys/cam/ctl/ctl_frontend_iscsi.c (revision 279001) +++ stable/10/sys/cam/ctl/ctl_frontend_iscsi.c (revision 279002) @@ -1,2983 +1,2886 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * CTL frontend for the iSCSI protocol. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ICL_KERNEL_PROXY #include #endif #ifdef ICL_KERNEL_PROXY FEATURE(cfiscsi_kernel_proxy, "iSCSI target built with ICL_KERNEL_PROXY"); #endif static MALLOC_DEFINE(M_CFISCSI, "cfiscsi", "Memory used for CTL iSCSI frontend"); static uma_zone_t cfiscsi_data_wait_zone; SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, iscsi, CTLFLAG_RD, 0, "CAM Target Layer iSCSI Frontend"); static int debug = 1; TUNABLE_INT("kern.cam.ctl.iscsi.debug", &debug); SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, debug, CTLFLAG_RWTUN, &debug, 1, "Enable debug messages"); static int ping_timeout = 5; TUNABLE_INT("kern.cam.ctl.iscsi.ping_timeout", &ping_timeout); SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, ping_timeout, CTLFLAG_RWTUN, &ping_timeout, 5, "Interval between ping (NOP-Out) requests, in seconds"); static int login_timeout = 60; TUNABLE_INT("kern.cam.ctl.iscsi.login_timeout", &login_timeout); SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, login_timeout, CTLFLAG_RWTUN, &login_timeout, 60, "Time to wait for ctld(8) to finish Login Phase, in seconds"); static int maxcmdsn_delta = 256; TUNABLE_INT("kern.cam.ctl.iscsi.maxcmdsn_delta", &maxcmdsn_delta); SYSCTL_INT(_kern_cam_ctl_iscsi, OID_AUTO, maxcmdsn_delta, CTLFLAG_RWTUN, &maxcmdsn_delta, 256, "Number of commands the initiator can send " "without confirmation"); #define CFISCSI_DEBUG(X, ...) \ do { \ if (debug > 1) { \ printf("%s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_WARN(X, ...) \ do { \ if (debug > 0) { \ printf("WARNING: %s: " X "\n", \ __func__, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_DEBUG(S, X, ...) \ do { \ if (debug > 1) { \ printf("%s: %s (%s): " X "\n", \ __func__, S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_WARN(S, X, ...) 
\ do { \ if (debug > 0) { \ printf("WARNING: %s (%s): " X "\n", \ S->cs_initiator_addr, \ S->cs_initiator_name, ## __VA_ARGS__); \ } \ } while (0) #define CFISCSI_SESSION_LOCK(X) mtx_lock(&X->cs_lock) #define CFISCSI_SESSION_UNLOCK(X) mtx_unlock(&X->cs_lock) #define CFISCSI_SESSION_LOCK_ASSERT(X) mtx_assert(&X->cs_lock, MA_OWNED) #define CONN_SESSION(X) ((struct cfiscsi_session *)(X)->ic_prv0) #define PDU_SESSION(X) CONN_SESSION((X)->ip_conn) #define PDU_EXPDATASN(X) (X)->ip_prv0 #define PDU_TOTAL_TRANSFER_LEN(X) (X)->ip_prv1 #define PDU_R2TSN(X) (X)->ip_prv2 int cfiscsi_init(void); static void cfiscsi_online(void *arg); static void cfiscsi_offline(void *arg); static int cfiscsi_info(void *arg, struct sbuf *sb); static int cfiscsi_lun_enable(void *arg, struct ctl_id target_id, int lun_id); static int cfiscsi_lun_disable(void *arg, struct ctl_id target_id, int lun_id); -static uint32_t cfiscsi_lun_map(void *arg, uint32_t lun); static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td); static void cfiscsi_datamove(union ctl_io *io); static void cfiscsi_datamove_in(union ctl_io *io); static void cfiscsi_datamove_out(union ctl_io *io); static void cfiscsi_done(union ctl_io *io); static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request); static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request); static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request); static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request); static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request); static void cfiscsi_session_terminate(struct cfiscsi_session *cs); static struct cfiscsi_target *cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name); static struct cfiscsi_target *cfiscsi_target_find_or_create( struct cfiscsi_softc *softc, const char *name, const char *alias); static void cfiscsi_target_release(struct cfiscsi_target *ct); static void cfiscsi_session_delete(struct cfiscsi_session *cs); static struct cfiscsi_softc cfiscsi_softc; extern struct ctl_softc *control_softc; static struct ctl_frontend cfiscsi_frontend = { .name = "iscsi", .init = cfiscsi_init, .ioctl = cfiscsi_ioctl, }; CTL_FRONTEND_DECLARE(ctlcfiscsi, cfiscsi_frontend); MODULE_DEPEND(ctlcfiscsi, icl, 1, 1, 1); static struct icl_pdu * cfiscsi_pdu_new_response(struct icl_pdu *request, int flags) { return (icl_pdu_new(request->ip_conn, flags)); } static bool cfiscsi_pdu_update_cmdsn(const struct icl_pdu *request) { const struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; uint32_t cmdsn, expstatsn; cs = PDU_SESSION(request); /* * Every incoming PDU - not just NOP-Out - resets the ping timer. * The purpose of the timeout is to reset the connection when it stalls; * we don't want this to happen when NOP-In or NOP-Out ends up delayed * in some queue. * * XXX: Locking? */ cs->cs_timeout = 0; /* * Data-Out PDUs don't contain CmdSN. */ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) return (false); /* * We're only using fields common for all the request * (initiator -> target) PDUs. 
*/ bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; cmdsn = ntohl(bhssc->bhssc_cmdsn); expstatsn = ntohl(bhssc->bhssc_expstatsn); CFISCSI_SESSION_LOCK(cs); #if 0 if (expstatsn != cs->cs_statsn) { CFISCSI_SESSION_DEBUG(cs, "received PDU with ExpStatSN %d, " "while current StatSN is %d", expstatsn, cs->cs_statsn); } #endif if ((request->ip_bhs->bhs_opcode & ISCSI_BHS_OPCODE_IMMEDIATE) == 0) { /* * The target MUST silently ignore any non-immediate command * outside of this range. */ if (ISCSI_SNLT(cmdsn, cs->cs_cmdsn) || ISCSI_SNGT(cmdsn, cs->cs_cmdsn + maxcmdsn_delta)) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u", cmdsn, cs->cs_cmdsn); return (true); } /* * We don't support multiple connections now, so any * discontinuity in CmdSN means lost PDUs. Since we don't * support PDU retransmission -- terminate the connection. */ if (cmdsn != cs->cs_cmdsn) { CFISCSI_SESSION_UNLOCK(cs); CFISCSI_SESSION_WARN(cs, "received PDU with CmdSN %u, " "while expected %u; dropping connection", cmdsn, cs->cs_cmdsn); cfiscsi_session_terminate(cs); return (true); } cs->cs_cmdsn++; } CFISCSI_SESSION_UNLOCK(cs); return (false); } static void cfiscsi_pdu_handle(struct icl_pdu *request) { struct cfiscsi_session *cs; bool ignore; cs = PDU_SESSION(request); ignore = cfiscsi_pdu_update_cmdsn(request); if (ignore) { icl_pdu_free(request); return; } /* * Handle the PDU; this includes e.g. receiving the remaining * part of PDU and submitting the SCSI command to CTL * or queueing a reply. The handling routine is responsible * for freeing the PDU when it's no longer needed. */ switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_NOP_OUT: cfiscsi_pdu_handle_nop_out(request); break; case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_pdu_handle_scsi_command(request); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_pdu_handle_task_request(request); break; case ISCSI_BHS_OPCODE_SCSI_DATA_OUT: cfiscsi_pdu_handle_data_out(request); break; case ISCSI_BHS_OPCODE_LOGOUT_REQUEST: cfiscsi_pdu_handle_logout_request(request); break; default: CFISCSI_SESSION_WARN(cs, "received PDU with unsupported " "opcode 0x%x; dropping connection", request->ip_bhs->bhs_opcode); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_receive_callback(struct icl_pdu *request) { struct cfiscsi_session *cs; cs = PDU_SESSION(request); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (cs->cs_login_pdu == NULL) cs->cs_login_pdu = request; else icl_pdu_free(request); cv_signal(&cs->cs_login_cv); return; } #endif cfiscsi_pdu_handle(request); } static void cfiscsi_error_callback(struct icl_conn *ic) { struct cfiscsi_session *cs; cs = CONN_SESSION(ic); CFISCSI_SESSION_WARN(cs, "connection error; dropping connection"); cfiscsi_session_terminate(cs); } static int cfiscsi_pdu_prepare(struct icl_pdu *response) { struct cfiscsi_session *cs; struct iscsi_bhs_scsi_response *bhssr; bool advance_statsn = true; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK_ASSERT(cs); /* * We're only using fields common for all the response * (target -> initiator) PDUs. */ bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; /* * 10.8.3: "The StatSN for this connection is not advanced * after this PDU is sent." 
*/ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_R2T) advance_statsn = false; /* * 10.19.2: "However, when the Initiator Task Tag is set to 0xffffffff, * StatSN for the connection is not advanced after this PDU is sent." */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_NOP_IN && bhssr->bhssr_initiator_task_tag == 0xffffffff) advance_statsn = false; /* * See the comment below - StatSN is not meaningful and must * not be advanced. */ if (bhssr->bhssr_opcode == ISCSI_BHS_OPCODE_SCSI_DATA_IN && (bhssr->bhssr_flags & BHSDI_FLAGS_S) == 0) advance_statsn = false; /* * 10.7.3: "The fields StatSN, Status, and Residual Count * only have meaningful content if the S bit is set to 1." */ if (bhssr->bhssr_opcode != ISCSI_BHS_OPCODE_SCSI_DATA_IN || (bhssr->bhssr_flags & BHSDI_FLAGS_S)) bhssr->bhssr_statsn = htonl(cs->cs_statsn); bhssr->bhssr_expcmdsn = htonl(cs->cs_cmdsn); bhssr->bhssr_maxcmdsn = htonl(cs->cs_cmdsn + maxcmdsn_delta); if (advance_statsn) cs->cs_statsn++; return (0); } static void cfiscsi_pdu_queue(struct icl_pdu *response) { struct cfiscsi_session *cs; cs = PDU_SESSION(response); CFISCSI_SESSION_LOCK(cs); cfiscsi_pdu_prepare(response); icl_pdu_queue(response); CFISCSI_SESSION_UNLOCK(cs); } static uint32_t cfiscsi_decode_lun(uint64_t encoded) { uint8_t lun[8]; uint32_t result; /* * The LUN field in iSCSI PDUs may look like an ordinary 64 bit number, * but is in fact an evil, multidimensional structure defined * in SCSI Architecture Model 5 (SAM-5), section 4.6. */ memcpy(lun, &encoded, sizeof(lun)); switch (lun[0] & 0xC0) { case 0x00: if ((lun[0] & 0x3f) != 0 || lun[2] != 0 || lun[3] != 0 || lun[4] != 0 || lun[5] != 0 || lun[6] != 0 || lun[7] != 0) { CFISCSI_WARN("malformed LUN " "(peripheral device addressing method): 0x%jx", (uintmax_t)encoded); result = 0xffffffff; break; } result = lun[1]; break; case 0x40: if (lun[2] != 0 || lun[3] != 0 || lun[4] != 0 || lun[5] != 0 || lun[6] != 0 || lun[7] != 0) { CFISCSI_WARN("malformed LUN " "(flat address space addressing method): 0x%jx", (uintmax_t)encoded); result = 0xffffffff; break; } result = ((lun[0] & 0x3f) << 8) + lun[1]; break; case 0xC0: if (lun[0] != 0xD2 || lun[4] != 0 || lun[5] != 0 || lun[6] != 0 || lun[7] != 0) { CFISCSI_WARN("malformed LUN (extended flat " "address space addressing method): 0x%jx", (uintmax_t)encoded); result = 0xffffffff; break; } result = (lun[1] << 16) + (lun[2] << 8) + lun[3]; default: CFISCSI_WARN("unsupported LUN format 0x%jx", (uintmax_t)encoded); result = 0xffffffff; break; } return (result); } static void cfiscsi_pdu_handle_nop_out(struct icl_pdu *request) { struct cfiscsi_session *cs; struct iscsi_bhs_nop_out *bhsno; struct iscsi_bhs_nop_in *bhsni; struct icl_pdu *response; void *data = NULL; size_t datasize; int error; cs = PDU_SESSION(request); bhsno = (struct iscsi_bhs_nop_out *)request->ip_bhs; if (bhsno->bhsno_initiator_task_tag == 0xffffffff) { /* * Nothing to do, iscsi_pdu_update_statsn() already * zeroed the timeout. 
*/ icl_pdu_free(request); return; } datasize = icl_pdu_data_segment_length(request); if (datasize > 0) { data = malloc(datasize, M_CFISCSI, M_NOWAIT | M_ZERO); if (data == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } icl_pdu_get_data(request, 0, data, datasize); } response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "droppping connection"); free(data, M_CFISCSI); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhsni = (struct iscsi_bhs_nop_in *)response->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = bhsno->bhsno_initiator_task_tag; bhsni->bhsni_target_transfer_tag = 0xffffffff; if (datasize > 0) { error = icl_pdu_append_data(response, data, datasize, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); free(data, M_CFISCSI); icl_pdu_free(request); icl_pdu_free(response); cfiscsi_session_terminate(cs); return; } free(data, M_CFISCSI); } icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_pdu_handle_scsi_command(struct icl_pdu *request) { struct iscsi_bhs_scsi_command *bhssc; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); if (request->ip_data_len > 0 && cs->cs_immediate_data == false) { CFISCSI_SESSION_WARN(cs, "unsolicited data with " "ImmediateData=No; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_SCSI; io->io_hdr.nexus.initid.id = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_target.id = 0; io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhssc->bhssc_lun); io->scsiio.tag_num = bhssc->bhssc_initiator_task_tag; switch ((bhssc->bhssc_flags & BHSSC_FLAGS_ATTR)) { case BHSSC_FLAGS_ATTR_UNTAGGED: io->scsiio.tag_type = CTL_TAG_UNTAGGED; break; case BHSSC_FLAGS_ATTR_SIMPLE: io->scsiio.tag_type = CTL_TAG_SIMPLE; break; case BHSSC_FLAGS_ATTR_ORDERED: io->scsiio.tag_type = CTL_TAG_ORDERED; break; case BHSSC_FLAGS_ATTR_HOQ: io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE; break; case BHSSC_FLAGS_ATTR_ACA: io->scsiio.tag_type = CTL_TAG_ACA; break; default: io->scsiio.tag_type = CTL_TAG_UNTAGGED; CFISCSI_SESSION_WARN(cs, "unhandled tag type %d", bhssc->bhssc_flags & BHSSC_FLAGS_ATTR); break; } io->scsiio.cdb_len = sizeof(bhssc->bhssc_cdb); /* Which is 16. 
*/ memcpy(io->scsiio.cdb, bhssc->bhssc_cdb, sizeof(bhssc->bhssc_cdb)); refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static void cfiscsi_pdu_handle_task_request(struct icl_pdu *request) { struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct icl_pdu *response; struct cfiscsi_session *cs; union ctl_io *io; int error; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = request; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid.id = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_target.id = 0; io->io_hdr.nexus.targ_lun = cfiscsi_decode_lun(bhstmr->bhstmr_lun); io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ switch (bhstmr->bhstmr_function & ~0x80) { case BHSTMR_FUNCTION_ABORT_TASK: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK; io->taskio.tag_num = bhstmr->bhstmr_referenced_task_tag; break; case BHSTMR_FUNCTION_ABORT_TASK_SET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_ABORT_TASK_SET"); #endif io->taskio.task_action = CTL_TASK_ABORT_TASK_SET; break; case BHSTMR_FUNCTION_LOGICAL_UNIT_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_LOGICAL_UNIT_RESET"); #endif io->taskio.task_action = CTL_TASK_LUN_RESET; break; case BHSTMR_FUNCTION_TARGET_WARM_RESET: #if 0 CFISCSI_SESSION_DEBUG(cs, "BHSTMR_FUNCTION_TARGET_WARM_RESET"); #endif io->taskio.task_action = CTL_TASK_TARGET_RESET; break; default: CFISCSI_SESSION_DEBUG(cs, "unsupported function 0x%x", bhstmr->bhstmr_function & ~0x80); ctl_free_io(io); response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; " "dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); return; } refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d; " "dropping connection", error); ctl_free_io(io); refcount_release(&cs->cs_outstanding_ctl_pdus); icl_pdu_free(request); cfiscsi_session_terminate(cs); } } static bool cfiscsi_handle_data_segment(struct icl_pdu *request, struct cfiscsi_data_wait *cdw) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t copy_len, len, off, buffer_offset; int ctl_sg_count; union ctl_io *io; cs = PDU_SESSION(request); KASSERT((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT || (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bad opcode 0x%x", request->ip_bhs->bhs_opcode)); /* * We're only using fields common 
for Data-Out and SCSI Command PDUs. */ bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); #if 0 CFISCSI_SESSION_DEBUG(cs, "received %zd bytes out of %d", request->ip_data_len, io->scsiio.kern_total_len); #endif if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) buffer_offset = ntohl(bhsdo->bhsdo_buffer_offset); else buffer_offset = 0; len = icl_pdu_data_segment_length(request); /* * Make sure the offset, as sent by the initiator, matches the offset * we're supposed to be at in the scatter-gather list. */ if (buffer_offset > io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled || buffer_offset + len <= io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled) { CFISCSI_SESSION_WARN(cs, "received bad buffer offset %zd, " "expected %zd; dropping connection", buffer_offset, (size_t)io->scsiio.kern_rel_offset + (size_t)io->scsiio.ext_data_filled); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } /* * This is the offset within the PDU data segment, as opposed * to buffer_offset, which is the offset within the task (SCSI * command). */ off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled - buffer_offset; /* * Iterate over the scatter/gather segments, filling them with data * from the PDU data segment. Note that this can get called multiple * times for one SCSI command; the cdw structure holds state for the * scatter/gather list. */ for (;;) { KASSERT(cdw->cdw_sg_index < ctl_sg_count, ("cdw->cdw_sg_index >= ctl_sg_count")); if (cdw->cdw_sg_len == 0) { cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; } KASSERT(off <= len, ("len > off")); copy_len = len - off; if (copy_len > cdw->cdw_sg_len) copy_len = cdw->cdw_sg_len; icl_pdu_get_data(request, off, cdw->cdw_sg_addr, copy_len); cdw->cdw_sg_addr += copy_len; cdw->cdw_sg_len -= copy_len; off += copy_len; io->scsiio.ext_data_filled += copy_len; if (cdw->cdw_sg_len == 0) { /* * End of current segment. */ if (cdw->cdw_sg_index == ctl_sg_count - 1) { /* * Last segment in scatter/gather list. */ break; } cdw->cdw_sg_index++; } if (off == len) { /* * End of PDU payload. */ break; } } if (len > off) { /* * In case of unsolicited data, it's possible that the buffer * provided by CTL is smaller than negotiated FirstBurstLength. * Just ignore the superfluous data; will ask for them with R2T * on next call to cfiscsi_datamove(). * * This obviously can only happen with SCSI Command PDU. 
*/ if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND) return (true); CFISCSI_SESSION_WARN(cs, "received too much data: got %zd bytes, " "expected %zd; dropping connection", icl_pdu_data_segment_length(request), off); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) == 0) { CFISCSI_SESSION_WARN(cs, "got the final packet without " "the F flag; flags = 0x%x; dropping connection", bhsdo->bhsdo_flags); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } if (io->scsiio.ext_data_filled != cdw->cdw_r2t_end && (bhsdo->bhsdo_flags & BHSDO_FLAGS_F) != 0) { if ((request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_DATA_OUT) { CFISCSI_SESSION_WARN(cs, "got the final packet, but the " "transmitted size was %zd bytes instead of %d; " "dropping connection", (size_t)io->scsiio.ext_data_filled, cdw->cdw_r2t_end); ctl_set_data_phase_error(&io->scsiio); cfiscsi_session_terminate(cs); return (true); } else { /* * For SCSI Command PDU, this just means we need to * solicit more data by sending R2T. */ return (false); } } if (io->scsiio.ext_data_filled == cdw->cdw_r2t_end) { #if 0 CFISCSI_SESSION_DEBUG(cs, "no longer expecting Data-Out with target " "transfer tag 0x%x", cdw->cdw_target_transfer_tag); #endif return (true); } return (false); } static void cfiscsi_pdu_handle_data_out(struct icl_pdu *request) { struct iscsi_bhs_data_out *bhsdo; struct cfiscsi_session *cs; struct cfiscsi_data_wait *cdw = NULL; union ctl_io *io; bool done; cs = PDU_SESSION(request); bhsdo = (struct iscsi_bhs_data_out *)request->ip_bhs; CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) { #if 0 CFISCSI_SESSION_DEBUG(cs, "have ttt 0x%x, itt 0x%x; looking for " "ttt 0x%x, itt 0x%x", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag, cdw->cdw_target_transfer_tag, cdw->cdw_initiator_task_tag)); #endif if (bhsdo->bhsdo_target_transfer_tag == cdw->cdw_target_transfer_tag) break; } CFISCSI_SESSION_UNLOCK(cs); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "data transfer tag 0x%x, initiator task tag " "0x%x, not found; dropping connection", bhsdo->bhsdo_target_transfer_tag, bhsdo->bhsdo_initiator_task_tag); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } if (cdw->cdw_datasn != ntohl(bhsdo->bhsdo_datasn)) { CFISCSI_SESSION_WARN(cs, "received Data-Out PDU with " "DataSN %u, while expected %u; dropping connection", ntohl(bhsdo->bhsdo_datasn), cdw->cdw_datasn); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } cdw->cdw_datasn++; io = cdw->cdw_ctl_io; KASSERT((io->io_hdr.flags & CTL_FLAG_DATA_MASK) != CTL_FLAG_DATA_IN, ("CTL_FLAG_DATA_IN")); done = cfiscsi_handle_data_segment(request, cdw); if (done) { CFISCSI_SESSION_LOCK(cs); TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); done = (io->scsiio.ext_data_filled != cdw->cdw_r2t_end || io->scsiio.ext_data_filled == io->scsiio.kern_data_len); uma_zfree(cfiscsi_data_wait_zone, cdw); if (done) io->scsiio.be_move_done(io); else cfiscsi_datamove_out(io); } icl_pdu_free(request); } static void cfiscsi_pdu_handle_logout_request(struct icl_pdu *request) { struct iscsi_bhs_logout_request *bhslr; struct iscsi_bhs_logout_response *bhslr2; struct icl_pdu *response; struct cfiscsi_session *cs; cs = PDU_SESSION(request); bhslr = (struct iscsi_bhs_logout_request 
*)request->ip_bhs; switch (bhslr->bhslr_reason & 0x7f) { case BHSLR_REASON_CLOSE_SESSION: case BHSLR_REASON_CLOSE_CONNECTION: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_DEBUG(cs, "failed to allocate memory"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_CLOSED_SUCCESSFULLY; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); cfiscsi_session_terminate(cs); break; case BHSLR_REASON_REMOVE_FOR_RECOVERY: response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory; dropping connection"); icl_pdu_free(request); cfiscsi_session_terminate(cs); return; } bhslr2 = (struct iscsi_bhs_logout_response *)response->ip_bhs; bhslr2->bhslr_opcode = ISCSI_BHS_OPCODE_LOGOUT_RESPONSE; bhslr2->bhslr_flags = 0x80; bhslr2->bhslr_response = BHSLR_RESPONSE_RECOVERY_NOT_SUPPORTED; bhslr2->bhslr_initiator_task_tag = bhslr->bhslr_initiator_task_tag; icl_pdu_free(request); cfiscsi_pdu_queue(response); break; default: CFISCSI_SESSION_WARN(cs, "invalid reason 0x%x; dropping connection", bhslr->bhslr_reason); icl_pdu_free(request); cfiscsi_session_terminate(cs); break; } } static void cfiscsi_callout(void *context) { struct icl_pdu *cp; struct iscsi_bhs_nop_in *bhsni; struct cfiscsi_session *cs; cs = context; if (cs->cs_terminating) return; callout_schedule(&cs->cs_callout, 1 * hz); atomic_add_int(&cs->cs_timeout, 1); #ifdef ICL_KERNEL_PROXY if (cs->cs_waiting_for_ctld || cs->cs_login_phase) { if (login_timeout > 0 && cs->cs_timeout > login_timeout) { CFISCSI_SESSION_WARN(cs, "login timed out after " "%d seconds; dropping connection", cs->cs_timeout); cfiscsi_session_terminate(cs); } return; } #endif if (ping_timeout <= 0) { /* * Pings are disabled. Don't send NOP-In in this case; * user might have disabled pings to work around problems * with certain initiators that can't properly handle * NOP-In, such as iPXE. Reset the timeout, to avoid * triggering reconnection, should the user decide to * reenable them. */ cs->cs_timeout = 0; return; } if (cs->cs_timeout >= ping_timeout) { CFISCSI_SESSION_WARN(cs, "no ping reply (NOP-Out) after %d seconds; " "dropping connection", ping_timeout); cfiscsi_session_terminate(cs); return; } /* * If the ping was reset less than one second ago - which means * that we've received some PDU during the last second - assume * the traffic flows correctly and don't bother sending a NOP-Out. * * (It's 2 - one for one second, and one for incrementing cs_timeout * earlier in this routine.) */ if (cs->cs_timeout < 2) return; cp = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (cp == NULL) { CFISCSI_SESSION_WARN(cs, "failed to allocate memory"); return; } bhsni = (struct iscsi_bhs_nop_in *)cp->ip_bhs; bhsni->bhsni_opcode = ISCSI_BHS_OPCODE_NOP_IN; bhsni->bhsni_flags = 0x80; bhsni->bhsni_initiator_task_tag = 0xffffffff; cfiscsi_pdu_queue(cp); } static void cfiscsi_session_terminate_tasks(struct cfiscsi_session *cs) { struct cfiscsi_data_wait *cdw; union ctl_io *io; int error, last, wait; if (cs->cs_target == NULL) return; /* No target yet, so nothing to do.
*/ io = ctl_alloc_io(cs->cs_target->ct_port.ctl_pool_ref); ctl_zero_io(io); io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = cs; io->io_hdr.io_type = CTL_IO_TASK; io->io_hdr.nexus.initid.id = cs->cs_ctl_initid; io->io_hdr.nexus.targ_port = cs->cs_target->ct_port.targ_port; io->io_hdr.nexus.targ_target.id = 0; io->io_hdr.nexus.targ_lun = 0; io->taskio.tag_type = CTL_TAG_SIMPLE; /* XXX */ io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET; wait = cs->cs_outstanding_ctl_pdus; refcount_acquire(&cs->cs_outstanding_ctl_pdus); error = ctl_queue(io); if (error != CTL_RETVAL_COMPLETE) { CFISCSI_SESSION_WARN(cs, "ctl_queue() failed; error %d", error); refcount_release(&cs->cs_outstanding_ctl_pdus); ctl_free_io(io); } CFISCSI_SESSION_LOCK(cs); while ((cdw = TAILQ_FIRST(&cs->cs_waiting_for_data_out)) != NULL) { TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * Set nonzero port status; this prevents backends from * assuming that the data transfer actually succeeded * and writing uninitialized data to disk. */ cdw->cdw_ctl_io->scsiio.io_hdr.port_status = 42; cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); uma_zfree(cfiscsi_data_wait_zone, cdw); CFISCSI_SESSION_LOCK(cs); } CFISCSI_SESSION_UNLOCK(cs); /* * Wait for CTL to terminate all the tasks. */ if (wait > 0) CFISCSI_SESSION_WARN(cs, "waiting for CTL to terminate %d tasks", wait); for (;;) { refcount_acquire(&cs->cs_outstanding_ctl_pdus); last = refcount_release(&cs->cs_outstanding_ctl_pdus); if (last != 0) break; tsleep(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus), 0, "cfiscsi_terminate", hz / 100); } if (wait > 0) CFISCSI_SESSION_WARN(cs, "tasks terminated"); } static void cfiscsi_maintenance_thread(void *arg) { struct cfiscsi_session *cs; cs = arg; for (;;) { CFISCSI_SESSION_LOCK(cs); if (cs->cs_terminating == false) cv_wait(&cs->cs_maintenance_cv, &cs->cs_lock); CFISCSI_SESSION_UNLOCK(cs); if (cs->cs_terminating) { /* * We used to wait up to 30 seconds to deliver queued * PDUs to the initiator. We also tried hard to deliver * SCSI Responses for the aborted PDUs. We don't do * that anymore. We might need to revisit that. */ callout_drain(&cs->cs_callout); icl_conn_close(cs->cs_conn); /* * At this point ICL receive thread is no longer * running; no new tasks can be queued. 
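 * The teardown below relies on that ordering: with the callout
 * drained and the connection closed, cfiscsi_session_terminate_tasks()
 * can abort whatever is still outstanding in CTL before the session
 * structure itself is freed.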
*/ cfiscsi_session_terminate_tasks(cs); cfiscsi_session_delete(cs); kthread_exit(); return; } CFISCSI_SESSION_DEBUG(cs, "nothing to do"); } } static void cfiscsi_session_terminate(struct cfiscsi_session *cs) { if (cs->cs_terminating) return; cs->cs_terminating = true; cv_signal(&cs->cs_maintenance_cv); #ifdef ICL_KERNEL_PROXY cv_signal(&cs->cs_login_cv); #endif } static int cfiscsi_session_register_initiator(struct cfiscsi_session *cs) { struct cfiscsi_target *ct; char *name; int i; KASSERT(cs->cs_ctl_initid == -1, ("already registered")); ct = cs->cs_target; name = strdup(cs->cs_initiator_id, M_CTL); i = ctl_add_initiator(&ct->ct_port, -1, 0, name); if (i < 0) { CFISCSI_SESSION_WARN(cs, "ctl_add_initiator failed with error %d", i); cs->cs_ctl_initid = -1; return (1); } cs->cs_ctl_initid = i; #if 0 CFISCSI_SESSION_DEBUG(cs, "added initiator id %d", i); #endif return (0); } static void cfiscsi_session_unregister_initiator(struct cfiscsi_session *cs) { int error; if (cs->cs_ctl_initid == -1) return; error = ctl_remove_initiator(&cs->cs_target->ct_port, cs->cs_ctl_initid); if (error != 0) { CFISCSI_SESSION_WARN(cs, "ctl_remove_initiator failed with error %d", error); } cs->cs_ctl_initid = -1; } static struct cfiscsi_session * cfiscsi_session_new(struct cfiscsi_softc *softc) { struct cfiscsi_session *cs; int error; cs = malloc(sizeof(*cs), M_CFISCSI, M_NOWAIT | M_ZERO); if (cs == NULL) { CFISCSI_WARN("malloc failed"); return (NULL); } cs->cs_ctl_initid = -1; refcount_init(&cs->cs_outstanding_ctl_pdus, 0); TAILQ_INIT(&cs->cs_waiting_for_data_out); mtx_init(&cs->cs_lock, "cfiscsi_lock", NULL, MTX_DEF); cv_init(&cs->cs_maintenance_cv, "cfiscsi_mt"); #ifdef ICL_KERNEL_PROXY cv_init(&cs->cs_login_cv, "cfiscsi_login"); #endif cs->cs_conn = icl_conn_new("cfiscsi", &cs->cs_lock); cs->cs_conn->ic_receive = cfiscsi_receive_callback; cs->cs_conn->ic_error = cfiscsi_error_callback; cs->cs_conn->ic_prv0 = cs; error = kthread_add(cfiscsi_maintenance_thread, cs, NULL, NULL, 0, 0, "cfiscsimt"); if (error != 0) { CFISCSI_SESSION_WARN(cs, "kthread_add(9) failed with error %d", error); free(cs, M_CFISCSI); return (NULL); } mtx_lock(&softc->lock); cs->cs_id = ++softc->last_session_id; TAILQ_INSERT_TAIL(&softc->sessions, cs, cs_next); mtx_unlock(&softc->lock); /* * Start pinging the initiator. 
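 * The cadence implemented by cfiscsi_callout() above works out to
 * roughly this (illustrative): the callout reschedules itself every
 * second and increments cs_timeout; receiving any PDU resets the
 * counter.  Once cs_timeout reaches 2 (no traffic for at least a full
 * second), a NOP-In with initiator task tag 0xffffffff is queued, and
 * if it ever reaches ping_timeout the session is dropped.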
*/ callout_init(&cs->cs_callout, 1); callout_reset(&cs->cs_callout, 1 * hz, cfiscsi_callout, cs); return (cs); } static void cfiscsi_session_delete(struct cfiscsi_session *cs) { struct cfiscsi_softc *softc; softc = &cfiscsi_softc; KASSERT(cs->cs_outstanding_ctl_pdus == 0, ("destroying session with outstanding CTL pdus")); KASSERT(TAILQ_EMPTY(&cs->cs_waiting_for_data_out), ("destroying session with non-empty queue")); cfiscsi_session_unregister_initiator(cs); if (cs->cs_target != NULL) cfiscsi_target_release(cs->cs_target); icl_conn_close(cs->cs_conn); icl_conn_free(cs->cs_conn); mtx_lock(&softc->lock); TAILQ_REMOVE(&softc->sessions, cs, cs_next); cv_signal(&softc->sessions_cv); mtx_unlock(&softc->lock); free(cs, M_CFISCSI); } int cfiscsi_init(void) { struct cfiscsi_softc *softc; int retval; softc = &cfiscsi_softc; retval = 0; bzero(softc, sizeof(*softc)); mtx_init(&softc->lock, "cfiscsi", NULL, MTX_DEF); cv_init(&softc->sessions_cv, "cfiscsi_sessions"); #ifdef ICL_KERNEL_PROXY cv_init(&softc->accept_cv, "cfiscsi_accept"); #endif TAILQ_INIT(&softc->sessions); TAILQ_INIT(&softc->targets); cfiscsi_data_wait_zone = uma_zcreate("cfiscsi_data_wait", sizeof(struct cfiscsi_data_wait), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); return (0); } #ifdef ICL_KERNEL_PROXY static void cfiscsi_accept(struct socket *so, struct sockaddr *sa, int portal_id) { struct cfiscsi_session *cs; cs = cfiscsi_session_new(&cfiscsi_softc); if (cs == NULL) { CFISCSI_WARN("failed to create session"); return; } icl_conn_handoff_sock(cs->cs_conn, so); cs->cs_initiator_sa = sa; cs->cs_portal_id = portal_id; cs->cs_waiting_for_ctld = true; cv_signal(&cfiscsi_softc.accept_cv); } #endif static void cfiscsi_online(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 1; online = softc->online++; mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY if (softc->listener != NULL) icl_listen_free(softc->listener); softc->listener = icl_listen_new(cfiscsi_accept); #endif } static void cfiscsi_offline(void *arg) { struct cfiscsi_softc *softc; struct cfiscsi_target *ct; struct cfiscsi_session *cs; int online; ct = (struct cfiscsi_target *)arg; softc = ct->ct_softc; mtx_lock(&softc->lock); if (!ct->ct_online) { mtx_unlock(&softc->lock); return; } ct->ct_online = 0; online = --softc->online; TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) cfiscsi_session_terminate(cs); } do { TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cs->cs_target == ct) break; } if (cs != NULL) cv_wait(&softc->sessions_cv, &softc->lock); } while (cs != NULL && ct->ct_online == 0); mtx_unlock(&softc->lock); if (online > 0) return; #ifdef ICL_KERNEL_PROXY icl_listen_free(softc->listener); softc->listener = NULL; #endif } static int cfiscsi_info(void *arg, struct sbuf *sb) { struct cfiscsi_target *ct = (struct cfiscsi_target *)arg; int retval; retval = sbuf_printf(sb, "\t<cfiscsi_state>%d</cfiscsi_state>\n", ct->ct_state); return (retval); } static void cfiscsi_ioctl_handoff(struct ctl_iscsi *ci) { struct cfiscsi_softc *softc; struct cfiscsi_session *cs, *cs2; struct cfiscsi_target *ct; struct ctl_iscsi_handoff_params *cihp; int error; cihp = (struct ctl_iscsi_handoff_params *)&(ci->data); softc = &cfiscsi_softc; CFISCSI_DEBUG("new connection from %s (%s) to %s", cihp->initiator_name, cihp->initiator_addr, cihp->target_name); ct = cfiscsi_target_find(softc,
cihp->target_name); if (ct == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: target not found", __func__); return; } #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0 && cihp->connection_id > 0) { snprintf(ci->error_str, sizeof(ci->error_str), "both socket and connection_id set"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } if (cihp->socket == 0) { mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cihp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; cfiscsi_target_release(ct); return; } mtx_unlock(&cfiscsi_softc.lock); } else { #endif cs = cfiscsi_session_new(softc); if (cs == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: cfiscsi_session_new failed", __func__); cfiscsi_target_release(ct); return; } #ifdef ICL_KERNEL_PROXY } #endif /* * First PDU of Full Feature phase has the same CmdSN as the last * PDU from the Login Phase received from the initiator. Thus, * the -1 below. */ cs->cs_portal_group_tag = cihp->portal_group_tag; cs->cs_cmdsn = cihp->cmdsn; cs->cs_statsn = cihp->statsn; cs->cs_max_data_segment_length = cihp->max_recv_data_segment_length; cs->cs_max_burst_length = cihp->max_burst_length; cs->cs_immediate_data = !!cihp->immediate_data; if (cihp->header_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_header_crc32c = true; if (cihp->data_digest == CTL_ISCSI_DIGEST_CRC32C) cs->cs_conn->ic_data_crc32c = true; strlcpy(cs->cs_initiator_name, cihp->initiator_name, sizeof(cs->cs_initiator_name)); strlcpy(cs->cs_initiator_addr, cihp->initiator_addr, sizeof(cs->cs_initiator_addr)); strlcpy(cs->cs_initiator_alias, cihp->initiator_alias, sizeof(cs->cs_initiator_alias)); memcpy(cs->cs_initiator_isid, cihp->initiator_isid, sizeof(cs->cs_initiator_isid)); snprintf(cs->cs_initiator_id, sizeof(cs->cs_initiator_id), "%s,i,0x%02x%02x%02x%02x%02x%02x", cs->cs_initiator_name, cihp->initiator_isid[0], cihp->initiator_isid[1], cihp->initiator_isid[2], cihp->initiator_isid[3], cihp->initiator_isid[4], cihp->initiator_isid[5]); mtx_lock(&softc->lock); if (ct->ct_online == 0) { mtx_unlock(&softc->lock); cfiscsi_session_terminate(cs); cfiscsi_target_release(ct); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: port offline", __func__); return; } cs->cs_target = ct; mtx_unlock(&softc->lock); refcount_acquire(&cs->cs_outstanding_ctl_pdus); restart: if (!cs->cs_terminating) { mtx_lock(&softc->lock); TAILQ_FOREACH(cs2, &softc->sessions, cs_next) { if (cs2 != cs && cs2->cs_tasks_aborted == false && cs->cs_target == cs2->cs_target && cs->cs_portal_group_tag == cs2->cs_portal_group_tag && strcmp(cs->cs_initiator_id, cs2->cs_initiator_id) == 0) { cfiscsi_session_terminate(cs2); mtx_unlock(&softc->lock); pause("cfiscsi_reinstate", 1); goto restart; } } mtx_unlock(&softc->lock); } /* * Register initiator with CTL. 
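 * The registration uses cs_initiator_id as built above; e.g. (made-up
 * values) an initiator named "iqn.1994-09.org.example:host0" with
 * ISID 80:23:45:67:89:ab registers as
 * "iqn.1994-09.org.example:host0,i,0x8023456789ab".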
*/ cfiscsi_session_register_initiator(cs); #ifdef ICL_KERNEL_PROXY if (cihp->socket > 0) { #endif error = icl_conn_handoff(cs->cs_conn, cihp->socket); if (error != 0) { cfiscsi_session_terminate(cs); refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: icl_conn_handoff failed with error %d", __func__, error); return; } #ifdef ICL_KERNEL_PROXY } #endif #ifdef ICL_KERNEL_PROXY cs->cs_login_phase = false; /* * First PDU of the Full Feature phase has likely already arrived. * We have to pick it up and execute properly. */ if (cs->cs_login_pdu != NULL) { CFISCSI_SESSION_DEBUG(cs, "picking up first PDU"); cfiscsi_pdu_handle(cs->cs_login_pdu); cs->cs_login_pdu = NULL; } #endif refcount_release(&cs->cs_outstanding_ctl_pdus); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_list(struct ctl_iscsi *ci) { struct ctl_iscsi_list_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; struct sbuf *sb; int error; cilp = (struct ctl_iscsi_list_params *)&(ci->data); softc = &cfiscsi_softc; sb = sbuf_new(NULL, NULL, cilp->alloc_len, SBUF_FIXEDLEN); if (sb == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate %d bytes for iSCSI session list", cilp->alloc_len); return; } sbuf_printf(sb, "<ctlislist>\n"); mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { #ifdef ICL_KERNEL_PROXY if (cs->cs_target == NULL) continue; #endif error = sbuf_printf(sb, "<connection id=\"%d\">" "<initiator>%s</initiator>" "<initiator_addr>%s</initiator_addr>" "<initiator_alias>%s</initiator_alias>" "<target>%s</target>" "<target_alias>%s</target_alias>" "<header_digest>%s</header_digest>" "<data_digest>%s</data_digest>" "<max_data_segment_length>%zd</max_data_segment_length>" "<immediate_data>%d</immediate_data>" "<iser>%d</iser>" "</connection>\n", cs->cs_id, cs->cs_initiator_name, cs->cs_initiator_addr, cs->cs_initiator_alias, cs->cs_target->ct_name, cs->cs_target->ct_alias, cs->cs_conn->ic_header_crc32c ? "CRC32C" : "None", cs->cs_conn->ic_data_crc32c ? "CRC32C" : "None", cs->cs_max_data_segment_length, cs->cs_immediate_data, cs->cs_conn->ic_iser); if (error != 0) break; } mtx_unlock(&softc->lock); error = sbuf_printf(sb, "</ctlislist>\n"); if (error != 0) { sbuf_delete(sb); ci->status = CTL_ISCSI_LIST_NEED_MORE_SPACE; snprintf(ci->error_str, sizeof(ci->error_str), "Out of space, %d bytes is too small", cilp->alloc_len); return; } sbuf_finish(sb); error = copyout(sbuf_data(sb), cilp->conn_xml, sbuf_len(sb) + 1); cilp->fill_len = sbuf_len(sb) + 1; ci->status = CTL_ISCSI_OK; sbuf_delete(sb); } static void cfiscsi_ioctl_terminate(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_terminate_params *citp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; citp = (struct ctl_iscsi_terminate_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (citp->all == 0 && cs->cs_id != citp->connection_id && strcmp(cs->cs_initiator_name, citp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, citp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { /* * Oh well. Just terminate the connection.
*/ } else { bhsam = (struct iscsi_bhs_asynchronous_message *) response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_0xffffffff = 0xffffffff; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_TERMINATES_SESSION; cfiscsi_pdu_queue(response); } cfiscsi_session_terminate(cs); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_logout(struct ctl_iscsi *ci) { struct icl_pdu *response; struct iscsi_bhs_asynchronous_message *bhsam; struct ctl_iscsi_logout_params *cilp; struct cfiscsi_session *cs; struct cfiscsi_softc *softc; int found = 0; cilp = (struct ctl_iscsi_logout_params *)&(ci->data); softc = &cfiscsi_softc; mtx_lock(&softc->lock); TAILQ_FOREACH(cs, &softc->sessions, cs_next) { if (cilp->all == 0 && cs->cs_id != cilp->connection_id && strcmp(cs->cs_initiator_name, cilp->initiator_name) != 0 && strcmp(cs->cs_initiator_addr, cilp->initiator_addr) != 0) continue; response = icl_pdu_new(cs->cs_conn, M_NOWAIT); if (response == NULL) { ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "Unable to allocate memory"); mtx_unlock(&softc->lock); return; } bhsam = (struct iscsi_bhs_asynchronous_message *)response->ip_bhs; bhsam->bhsam_opcode = ISCSI_BHS_OPCODE_ASYNC_MESSAGE; bhsam->bhsam_flags = 0x80; bhsam->bhsam_async_event = BHSAM_EVENT_TARGET_REQUESTS_LOGOUT; bhsam->bhsam_parameter3 = htons(10); cfiscsi_pdu_queue(response); found++; } mtx_unlock(&softc->lock); if (found == 0) { ci->status = CTL_ISCSI_SESSION_NOT_FOUND; snprintf(ci->error_str, sizeof(ci->error_str), "No matching connections found"); return; } ci->status = CTL_ISCSI_OK; } #ifdef ICL_KERNEL_PROXY static void cfiscsi_ioctl_listen(struct ctl_iscsi *ci) { struct ctl_iscsi_listen_params *cilp; struct sockaddr *sa; int error; cilp = (struct ctl_iscsi_listen_params *)&(ci->data); if (cfiscsi_softc.listener == NULL) { CFISCSI_DEBUG("no listener"); snprintf(ci->error_str, sizeof(ci->error_str), "no listener"); ci->status = CTL_ISCSI_ERROR; return; } error = getsockaddr(&sa, (void *)cilp->addr, cilp->addrlen); if (error != 0) { CFISCSI_DEBUG("getsockaddr, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "getsockaddr failed"); ci->status = CTL_ISCSI_ERROR; return; } error = icl_listen_add(cfiscsi_softc.listener, cilp->iser, cilp->domain, cilp->socktype, cilp->protocol, sa, cilp->portal_id); if (error != 0) { free(sa, M_SONAME); CFISCSI_DEBUG("icl_listen_add, error %d", error); snprintf(ci->error_str, sizeof(ci->error_str), "icl_listen_add failed, error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_accept(struct ctl_iscsi *ci) { struct ctl_iscsi_accept_params *ciap; struct cfiscsi_session *cs; int error; ciap = (struct ctl_iscsi_accept_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); for (;;) { TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_waiting_for_ctld) break; } if (cs != NULL) break; error = cv_wait_sig(&cfiscsi_softc.accept_cv, &cfiscsi_softc.lock); if (error != 0) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted"); ci->status = CTL_ISCSI_ERROR; return; } } mtx_unlock(&cfiscsi_softc.lock); cs->cs_waiting_for_ctld = false; cs->cs_login_phase = true; ciap->connection_id = cs->cs_id; ciap->portal_id = cs->cs_portal_id; 
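	/*
	 * Hand the initiator's socket address back to userland (via the
	 * addrlen/copyout pair below) so that ctld can see which peer it
	 * is logging in; the buffer at ciap->initiator_addr is assumed
	 * to be at least sa_len bytes long.
	 */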
ciap->initiator_addrlen = cs->cs_initiator_sa->sa_len; error = copyout(cs->cs_initiator_sa, ciap->initiator_addr, cs->cs_initiator_sa->sa_len); if (error != 0) { snprintf(ci->error_str, sizeof(ci->error_str), "copyout failed with error %d", error); ci->status = CTL_ISCSI_ERROR; return; } ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_send(struct ctl_iscsi *ci) { struct ctl_iscsi_send_params *cisp; struct cfiscsi_session *cs; struct icl_pdu *ip; size_t datalen; void *data; int error; cisp = (struct ctl_iscsi_send_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cisp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (cs->cs_login_phase == false) return (EBUSY); #endif if (cs->cs_terminating) { snprintf(ci->error_str, sizeof(ci->error_str), "connection is terminating"); ci->status = CTL_ISCSI_ERROR; return; } datalen = cisp->data_segment_len; /* * XXX */ //if (datalen > CFISCSI_MAX_DATA_SEGMENT_LENGTH) { if (datalen > 65535) { snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } if (datalen > 0) { data = malloc(datalen, M_CFISCSI, M_WAITOK); error = copyin(cisp->data_segment, data, datalen); if (error != 0) { free(data, M_CFISCSI); snprintf(ci->error_str, sizeof(ci->error_str), "copyin error %d", error); ci->status = CTL_ISCSI_ERROR; return; } } ip = icl_pdu_new(cs->cs_conn, M_WAITOK); memcpy(ip->ip_bhs, cisp->bhs, sizeof(*ip->ip_bhs)); if (datalen > 0) { icl_pdu_append_data(ip, data, datalen, M_WAITOK); free(data, M_CFISCSI); } CFISCSI_SESSION_LOCK(cs); icl_pdu_queue(ip); CFISCSI_SESSION_UNLOCK(cs); ci->status = CTL_ISCSI_OK; } static void cfiscsi_ioctl_receive(struct ctl_iscsi *ci) { struct ctl_iscsi_receive_params *cirp; struct cfiscsi_session *cs; struct icl_pdu *ip; void *data; int error; cirp = (struct ctl_iscsi_receive_params *)&(ci->data); mtx_lock(&cfiscsi_softc.lock); TAILQ_FOREACH(cs, &cfiscsi_softc.sessions, cs_next) { if (cs->cs_id == cirp->connection_id) break; } if (cs == NULL) { mtx_unlock(&cfiscsi_softc.lock); snprintf(ci->error_str, sizeof(ci->error_str), "connection not found"); ci->status = CTL_ISCSI_ERROR; return; } mtx_unlock(&cfiscsi_softc.lock); #if 0 if (is->is_login_phase == false) return (EBUSY); #endif CFISCSI_SESSION_LOCK(cs); while (cs->cs_login_pdu == NULL && cs->cs_terminating == false) { error = cv_wait_sig(&cs->cs_login_cv, &cs->cs_lock); if (error != 0) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "interrupted by signal"); ci->status = CTL_ISCSI_ERROR; return; } } if (cs->cs_terminating) { CFISCSI_SESSION_UNLOCK(cs); snprintf(ci->error_str, sizeof(ci->error_str), "connection terminating"); ci->status = CTL_ISCSI_ERROR; return; } ip = cs->cs_login_pdu; cs->cs_login_pdu = NULL; CFISCSI_SESSION_UNLOCK(cs); if (ip->ip_data_len > cirp->data_segment_len) { icl_pdu_free(ip); snprintf(ci->error_str, sizeof(ci->error_str), "data segment too big"); ci->status = CTL_ISCSI_ERROR; return; } copyout(ip->ip_bhs, cirp->bhs, sizeof(*ip->ip_bhs)); if (ip->ip_data_len > 0) { data = malloc(ip->ip_data_len, M_CFISCSI, M_WAITOK); icl_pdu_get_data(ip, 0, data, ip->ip_data_len); copyout(data, cirp->data_segment, ip->ip_data_len); free(data, M_CFISCSI); } icl_pdu_free(ip); ci->status = CTL_ISCSI_OK; } #endif /* !ICL_KERNEL_PROXY */ 
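/*
 * Hypothetical userland sketch (not part of this change): one way a
 * management tool could drive the CTL_ISCSI_LIST request serviced by
 * cfiscsi_ioctl_list() above.  Only names defined in this file and in
 * ctl_ioctl.h are used; print_iscsi_sessions() itself is made up,
 * error handling is abbreviated, and the 64 KB buffer is an arbitrary
 * choice.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <cam/ctl/ctl_ioctl.h>

static int
print_iscsi_sessions(void)
{
	struct ctl_iscsi ci;
	struct ctl_iscsi_list_params *cilp;
	int fd;

	fd = open(CTL_DEFAULT_DEV, O_RDWR);	/* "/dev/cam/ctl" */
	if (fd < 0)
		return (-1);

	memset(&ci, 0, sizeof(ci));
	ci.type = CTL_ISCSI_LIST;
	cilp = (struct ctl_iscsi_list_params *)&ci.data;
	cilp->alloc_len = 65536;
	cilp->conn_xml = malloc(cilp->alloc_len);

	/*
	 * The kernel fills conn_xml with the <ctlislist> document and
	 * sets fill_len; on overflow it would instead return status
	 * CTL_ISCSI_LIST_NEED_MORE_SPACE and we could retry bigger.
	 */
	if (ioctl(fd, CTL_ISCSI, &ci) != 0 || ci.status != CTL_ISCSI_OK)
		fprintf(stderr, "%s\n", ci.error_str);
	else
		printf("%s", cilp->conn_xml);

	free(cilp->conn_xml);
	close(fd);
	return (0);
}
#endif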
static void cfiscsi_ioctl_port_create(struct ctl_req *req) { struct cfiscsi_target *ct; struct ctl_port *port; const char *target, *alias, *tag; struct scsi_vpd_id_descriptor *desc; ctl_options_t opts; int retval, len, idlen; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); alias = ctl_get_opt(&opts, "cfiscsi_target_alias"); tag = ctl_get_opt(&opts, "cfiscsi_portal_group_tag"); if (target == NULL || tag == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); ctl_free_opts(&opts); return; } ct = cfiscsi_target_find_or_create(&cfiscsi_softc, target, alias); if (ct == NULL) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "failed to create target \"%s\"", target); ctl_free_opts(&opts); return; } if (ct->ct_state == CFISCSI_TARGET_STATE_ACTIVE) { req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" already exists", target); cfiscsi_target_release(ct); ctl_free_opts(&opts); return; } port = &ct->ct_port; if (ct->ct_state == CFISCSI_TARGET_STATE_DYING) goto done; port->frontend = &cfiscsi_frontend; port->port_type = CTL_PORT_ISCSI; /* XXX KDM what should the real number be here? */ port->num_requested_ctl_io = 4096; port->port_name = "iscsi"; port->physical_port = strtoul(tag, NULL, 0); port->virtual_port = ct->ct_target_id; port->port_online = cfiscsi_online; port->port_offline = cfiscsi_offline; port->port_info = cfiscsi_info; port->onoff_arg = ct; port->lun_enable = cfiscsi_lun_enable; port->lun_disable = cfiscsi_lun_disable; - port->lun_map = cfiscsi_lun_map; port->targ_lun_arg = ct; port->fe_datamove = cfiscsi_datamove; port->fe_done = cfiscsi_done; /* XXX KDM what should we report here? */ /* XXX These should probably be fetched from CTL. */ port->max_targets = 1; port->max_target_id = 15; port->options = opts; STAILQ_INIT(&opts); /* Generate Port ID. */ idlen = strlen(target) + strlen(",t,0x0001") + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->port_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->port_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->port_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; snprintf(desc->identifier, idlen, "%s,t,0x%4.4x", target, port->physical_port); /* Generate Target ID. 
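 * Unlike the port designator generated above (e.g., for a hypothetical
 * target "iqn.2012-06.com.example:target0" in portal group tag 1 it
 * would read "iqn.2012-06.com.example:target0,t,0x0001"), the target
 * designator is the bare target name, with idlen rounded up to a
 * multiple of 4 as required for SCSI name string designators.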
*/ idlen = strlen(target) + 1; idlen = roundup2(idlen, 4); len = sizeof(struct scsi_vpd_device_id) + idlen; port->target_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->target_devid->len = len; desc = (struct scsi_vpd_id_descriptor *)port->target_devid->data; desc->proto_codeset = (SCSI_PROTO_ISCSI << 4) | SVPD_ID_CODESET_UTF8; desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_TARGET | SVPD_ID_TYPE_SCSI_NAME; desc->length = idlen; strlcpy(desc->identifier, target, idlen); retval = ctl_port_register(port); if (retval != 0) { ctl_free_opts(&port->options); cfiscsi_target_release(ct); free(port->port_devid, M_CFISCSI); free(port->target_devid, M_CFISCSI); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), - "ctl_frontend_register() failed with error %d", retval); + "ctl_port_register() failed with error %d", retval); return; } done: ct->ct_state = CFISCSI_TARGET_STATE_ACTIVE; req->status = CTL_LUN_OK; memcpy(req->kern_args[0].kvalue, &port->targ_port, sizeof(port->targ_port)); //XXX } static void cfiscsi_ioctl_port_remove(struct ctl_req *req) { struct cfiscsi_target *ct; const char *target; ctl_options_t opts; ctl_init_opts(&opts, req->num_args, req->kern_args); target = ctl_get_opt(&opts, "cfiscsi_target"); if (target == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Missing required argument"); return; } ct = cfiscsi_target_find(&cfiscsi_softc, target); if (ct == NULL) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "can't find target \"%s\"", target); return; } if (ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) { ctl_free_opts(&opts); req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "target \"%s\" is already dying", target); return; } ctl_free_opts(&opts); ct->ct_state = CFISCSI_TARGET_STATE_DYING; ctl_port_offline(&ct->ct_port); cfiscsi_target_release(ct); cfiscsi_target_release(ct); } static int cfiscsi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { struct ctl_iscsi *ci; struct ctl_req *req; if (cmd == CTL_PORT_REQ) { req = (struct ctl_req *)addr; switch (req->reqtype) { case CTL_REQ_CREATE: cfiscsi_ioctl_port_create(req); break; case CTL_REQ_REMOVE: cfiscsi_ioctl_port_remove(req); break; default: req->status = CTL_LUN_ERROR; snprintf(req->error_str, sizeof(req->error_str), "Unsupported request type %d", req->reqtype); } return (0); } if (cmd != CTL_ISCSI) return (ENOTTY); ci = (struct ctl_iscsi *)addr; switch (ci->type) { case CTL_ISCSI_HANDOFF: cfiscsi_ioctl_handoff(ci); break; case CTL_ISCSI_LIST: cfiscsi_ioctl_list(ci); break; case CTL_ISCSI_TERMINATE: cfiscsi_ioctl_terminate(ci); break; case CTL_ISCSI_LOGOUT: cfiscsi_ioctl_logout(ci); break; #ifdef ICL_KERNEL_PROXY case CTL_ISCSI_LISTEN: cfiscsi_ioctl_listen(ci); break; case CTL_ISCSI_ACCEPT: cfiscsi_ioctl_accept(ci); break; case CTL_ISCSI_SEND: cfiscsi_ioctl_send(ci); break; case CTL_ISCSI_RECEIVE: cfiscsi_ioctl_receive(ci); break; #else case CTL_ISCSI_LISTEN: case CTL_ISCSI_ACCEPT: case CTL_ISCSI_SEND: case CTL_ISCSI_RECEIVE: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: CTL compiled without ICL_KERNEL_PROXY", __func__); break; #endif /* !ICL_KERNEL_PROXY */ default: ci->status = CTL_ISCSI_ERROR; snprintf(ci->error_str, sizeof(ci->error_str), "%s: invalid iSCSI request type %d", __func__, ci->type); break; } return (0); } static void cfiscsi_target_hold(struct 
cfiscsi_target *ct) { refcount_acquire(&ct->ct_refcount); } static void cfiscsi_target_release(struct cfiscsi_target *ct) { struct cfiscsi_softc *softc; softc = ct->ct_softc; mtx_lock(&softc->lock); if (refcount_release(&ct->ct_refcount)) { TAILQ_REMOVE(&softc->targets, ct, ct_next); mtx_unlock(&softc->lock); if (ct->ct_state != CFISCSI_TARGET_STATE_INVALID) { ct->ct_state = CFISCSI_TARGET_STATE_INVALID; if (ctl_port_deregister(&ct->ct_port) != 0) printf("%s: ctl_port_deregister() failed\n", __func__); } free(ct, M_CFISCSI); return; } mtx_unlock(&softc->lock); } static struct cfiscsi_target * cfiscsi_target_find(struct cfiscsi_softc *softc, const char *name) { struct cfiscsi_target *ct; mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (strcmp(name, ct->ct_name) != 0 || ct->ct_state != CFISCSI_TARGET_STATE_ACTIVE) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); return (ct); } mtx_unlock(&softc->lock); return (NULL); } static struct cfiscsi_target * cfiscsi_target_find_or_create(struct cfiscsi_softc *softc, const char *name, const char *alias) { struct cfiscsi_target *ct, *newct; - int i; if (name[0] == '\0' || strlen(name) >= CTL_ISCSI_NAME_LEN) return (NULL); newct = malloc(sizeof(*newct), M_CFISCSI, M_WAITOK | M_ZERO); mtx_lock(&softc->lock); TAILQ_FOREACH(ct, &softc->targets, ct_next) { if (strcmp(name, ct->ct_name) != 0 || ct->ct_state == CFISCSI_TARGET_STATE_INVALID) continue; cfiscsi_target_hold(ct); mtx_unlock(&softc->lock); free(newct, M_CFISCSI); return (ct); } - for (i = 0; i < CTL_MAX_LUNS; i++) - newct->ct_luns[i] = UINT32_MAX; - strlcpy(newct->ct_name, name, sizeof(newct->ct_name)); if (alias != NULL) strlcpy(newct->ct_alias, alias, sizeof(newct->ct_alias)); refcount_init(&newct->ct_refcount, 1); newct->ct_softc = softc; if (TAILQ_EMPTY(&softc->targets)) softc->last_target_id = 0; newct->ct_target_id = ++softc->last_target_id; TAILQ_INSERT_TAIL(&softc->targets, newct, ct_next); mtx_unlock(&softc->lock); return (newct); } -/* - * Takes LUN from the target space and returns LUN from the CTL space. - */ -static uint32_t -cfiscsi_lun_map(void *arg, uint32_t lun) -{ - struct cfiscsi_target *ct = arg; - - if (lun >= CTL_MAX_LUNS) { - CFISCSI_DEBUG("requested lun number %d is higher " - "than maximum %d", lun, CTL_MAX_LUNS - 1); - return (UINT32_MAX); - } - return (ct->ct_luns[lun]); -} - static int -cfiscsi_target_set_lun(struct cfiscsi_target *ct, - unsigned long lun_id, unsigned long ctl_lun_id) -{ - - if (lun_id >= CTL_MAX_LUNS) { - CFISCSI_WARN("requested lun number %ld is higher " - "than maximum %d", lun_id, CTL_MAX_LUNS - 1); - return (-1); - } - - if (ct->ct_luns[lun_id] < CTL_MAX_LUNS) { - /* - * CTL calls cfiscsi_lun_enable() twice for each LUN - once - * when the LUN is created, and a second time just before - * the port is brought online; don't emit warnings - * for that case. 
- */ - if (ct->ct_luns[lun_id] == ctl_lun_id) - return (0); - CFISCSI_WARN("lun %ld already allocated", lun_id); - return (-1); - } - -#if 0 - CFISCSI_DEBUG("adding mapping for lun %ld, target %s " - "to ctl lun %ld", lun_id, ct->ct_name, ctl_lun_id); -#endif - - ct->ct_luns[lun_id] = ctl_lun_id; - - return (0); -} - -static int cfiscsi_lun_enable(void *arg, struct ctl_id target_id, int lun_id) { - struct cfiscsi_softc *softc; - struct cfiscsi_target *ct; - const char *target = NULL; - const char *lun = NULL; - unsigned long tmp; - ct = (struct cfiscsi_target *)arg; - softc = ct->ct_softc; - - target = ctl_get_opt(&control_softc->ctl_luns[lun_id]->be_lun->options, - "cfiscsi_target"); - lun = ctl_get_opt(&control_softc->ctl_luns[lun_id]->be_lun->options, - "cfiscsi_lun"); - - if (target == NULL && lun == NULL) - return (0); - - if (target == NULL || lun == NULL) { - CFISCSI_WARN("lun added with cfiscsi_target, but without " - "cfiscsi_lun, or the other way around; ignoring"); - return (0); - } - - if (strcmp(target, ct->ct_name) != 0) - return (0); - - tmp = strtoul(lun, NULL, 10); - cfiscsi_target_set_lun(ct, tmp, lun_id); return (0); } static int cfiscsi_lun_disable(void *arg, struct ctl_id target_id, int lun_id) { - struct cfiscsi_softc *softc; - struct cfiscsi_target *ct; - int i; - ct = (struct cfiscsi_target *)arg; - softc = ct->ct_softc; - - mtx_lock(&softc->lock); - for (i = 0; i < CTL_MAX_LUNS; i++) { - if (ct->ct_luns[i] != lun_id) - continue; - ct->ct_luns[i] = UINT32_MAX; - break; - } - mtx_unlock(&softc->lock); return (0); } static void cfiscsi_datamove_in(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_data_in *bhsdi; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; size_t len, expected_len, sg_len, buffer_offset; const char *sg_addr; int ctl_sg_count, error, i; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; ctl_sg_count = io->scsiio.kern_sg_entries; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; ctl_sg_count = 1; } /* * This is the total amount of data to be transferred within the current * SCSI command. We need to record it so that we can properly report * underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * This is the offset within the current SCSI command; for the first * call to cfiscsi_datamove() it will be 0, and for subsequent ones * it will be the sum of lengths of previous ones. */ buffer_offset = io->scsiio.kern_rel_offset; /* * This is the transfer length expected by the initiator. In theory, * it could be different from the correct amount of data from the SCSI * point of view, even if that doesn't make any sense.
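 * For example (hypothetical sizes): if the initiator set the expected
 * data transfer length to 512 bytes but the LUN returns 4096, the
 * loop below stops queueing Data-In after 512 bytes, and the residual
 * overflow of 3584 bytes is reported when status is sent.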
*/ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); #if 0 if (expected_len != io->scsiio.kern_total_len) { CFISCSI_SESSION_DEBUG(cs, "expected transfer length %zd, " "actual length %zd", expected_len, (size_t)io->scsiio.kern_total_len); } #endif if (buffer_offset >= expected_len) { #if 0 CFISCSI_SESSION_DEBUG(cs, "buffer_offset = %zd, " "already sent the expected len", buffer_offset); #endif io->scsiio.be_move_done(io); return; } i = 0; sg_addr = NULL; sg_len = 0; response = NULL; bhsdi = NULL; for (;;) { if (response == NULL) { response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } bhsdi = (struct iscsi_bhs_data_in *)response->ip_bhs; bhsdi->bhsdi_opcode = ISCSI_BHS_OPCODE_SCSI_DATA_IN; bhsdi->bhsdi_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsdi->bhsdi_datasn = htonl(PDU_EXPDATASN(request)); PDU_EXPDATASN(request)++; bhsdi->bhsdi_buffer_offset = htonl(buffer_offset); } KASSERT(i < ctl_sg_count, ("i >= ctl_sg_count")); if (sg_len == 0) { sg_addr = ctl_sglist[i].addr; sg_len = ctl_sglist[i].len; KASSERT(sg_len > 0, ("sg_len <= 0")); } len = sg_len; /* * Truncate to maximum data segment length. */ KASSERT(response->ip_data_len < cs->cs_max_data_segment_length, ("ip_data_len %zd >= max_data_segment_length %zd", response->ip_data_len, cs->cs_max_data_segment_length)); if (response->ip_data_len + len > cs->cs_max_data_segment_length) { len = cs->cs_max_data_segment_length - response->ip_data_len; KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } /* * Truncate to expected data transfer length. */ KASSERT(buffer_offset + response->ip_data_len < expected_len, ("buffer_offset %zd + ip_data_len %zd >= expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len + len > expected_len) { CFISCSI_SESSION_DEBUG(cs, "truncating from %zd " "to expected data transfer length %zd", buffer_offset + response->ip_data_len + len, expected_len); len = expected_len - (buffer_offset + response->ip_data_len); KASSERT(len <= sg_len, ("len %zd > sg_len %zd", len, sg_len)); } error = icl_pdu_append_data(response, sg_addr, len, M_NOWAIT); if (error != 0) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); icl_pdu_free(response); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } sg_addr += len; sg_len -= len; KASSERT(buffer_offset + response->ip_data_len <= expected_len, ("buffer_offset %zd + ip_data_len %zd > expected_len %zd", buffer_offset, response->ip_data_len, expected_len)); if (buffer_offset + response->ip_data_len == expected_len) { /* * Already have the amount of data the initiator wanted. */ break; } if (sg_len == 0) { /* * End of scatter-gather segment; * proceed to the next one... */ if (i == ctl_sg_count - 1) { /* * ... unless this was the last one. */ break; } i++; } if (response->ip_data_len == cs->cs_max_data_segment_length) { /* * Can't stuff more data into the current PDU; * queue it. Note that's not enough to check * for kern_data_resid == 0 instead; there * may be several Data-In PDUs for the final * call to cfiscsi_datamove(), and we want * to set the F flag only on the last of them. 
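 * Concretely (illustrative numbers): a 131072-byte read with
 * MaxRecvDataSegmentLength 8192 goes out as 16 Data-In PDUs with
 * DataSN 0 through 15; only the last one carries the F flag, plus
 * the S flag with piggybacked status if the command has already
 * completed successfully.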
*/ buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { buffer_offset -= response->ip_data_len; break; } cfiscsi_pdu_queue(response); response = NULL; bhsdi = NULL; } } if (response != NULL) { buffer_offset += response->ip_data_len; if (buffer_offset == io->scsiio.kern_total_len || buffer_offset == expected_len) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_F; if (io->io_hdr.status == CTL_SUCCESS) { bhsdi->bhsdi_flags |= BHSDI_FLAGS_S; if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhsdi->bhsdi_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhsdi->bhsdi_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhsdi->bhsdi_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); } bhsdi->bhsdi_status = io->scsiio.scsi_status; io->io_hdr.flags |= CTL_FLAG_STATUS_SENT; } } KASSERT(response->ip_data_len > 0, ("sending empty Data-In")); cfiscsi_pdu_queue(response); } io->scsiio.be_move_done(io); } static void cfiscsi_datamove_out(union ctl_io *io) { struct cfiscsi_session *cs; struct icl_pdu *request, *response; const struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_r2t *bhsr2t; struct cfiscsi_data_wait *cdw; struct ctl_sg_entry ctl_sg_entry, *ctl_sglist; uint32_t expected_len, r2t_off, r2t_len; uint32_t target_transfer_tag; bool done; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (const struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("bhssc->bhssc_opcode != ISCSI_BHS_OPCODE_SCSI_COMMAND")); /* * We need to record it so that we can properly report * underflow/overflow. */ PDU_TOTAL_TRANSFER_LEN(request) = io->scsiio.kern_total_len; /* * Report write underflow as error since CTL and backends don't * really support it, and SCSI does not tell how to do it right. */ expected_len = ntohl(bhssc->bhssc_expected_data_transfer_length); if (io->scsiio.kern_rel_offset + io->scsiio.kern_data_len > expected_len) { io->scsiio.io_hdr.port_status = 43; io->scsiio.be_move_done(io); return; } target_transfer_tag = atomic_fetchadd_32(&cs->cs_target_transfer_tag, 1); #if 0 CFISCSI_SESSION_DEBUG(cs, "expecting Data-Out with initiator " "task tag 0x%x, target transfer tag 0x%x", bhssc->bhssc_initiator_task_tag, target_transfer_tag); #endif cdw = uma_zalloc(cfiscsi_data_wait_zone, M_NOWAIT | M_ZERO); if (cdw == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } cdw->cdw_ctl_io = io; cdw->cdw_target_transfer_tag = target_transfer_tag; cdw->cdw_initiator_task_tag = bhssc->bhssc_initiator_task_tag; cdw->cdw_r2t_end = io->scsiio.kern_data_len; cdw->cdw_datasn = 0; /* Set initial data pointer for the CDW respecting ext_data_filled.
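 * E.g. (illustrative): with three 8192-byte S/G segments and
 * ext_data_filled == 12288, the loop below skips the entire first
 * segment plus 4096 bytes of the second, so the first solicited
 * Data-Out byte lands right where the immediate data ended.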
*/ if (io->scsiio.kern_sg_entries > 0) { ctl_sglist = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; } else { ctl_sglist = &ctl_sg_entry; ctl_sglist->addr = io->scsiio.kern_data_ptr; ctl_sglist->len = io->scsiio.kern_data_len; } cdw->cdw_sg_index = 0; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; r2t_off = io->scsiio.ext_data_filled; while (r2t_off > 0) { if (r2t_off >= cdw->cdw_sg_len) { r2t_off -= cdw->cdw_sg_len; cdw->cdw_sg_index++; cdw->cdw_sg_addr = ctl_sglist[cdw->cdw_sg_index].addr; cdw->cdw_sg_len = ctl_sglist[cdw->cdw_sg_index].len; continue; } cdw->cdw_sg_addr += r2t_off; cdw->cdw_sg_len -= r2t_off; r2t_off = 0; } if (cs->cs_immediate_data && io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled < icl_pdu_data_segment_length(request)) { done = cfiscsi_handle_data_segment(request, cdw); if (done) { uma_zfree(cfiscsi_data_wait_zone, cdw); io->scsiio.be_move_done(io); return; } } r2t_off = io->scsiio.kern_rel_offset + io->scsiio.ext_data_filled; r2t_len = MIN(io->scsiio.kern_data_len - io->scsiio.ext_data_filled, cs->cs_max_burst_length); cdw->cdw_r2t_end = io->scsiio.ext_data_filled + r2t_len; CFISCSI_SESSION_LOCK(cs); TAILQ_INSERT_TAIL(&cs->cs_waiting_for_data_out, cdw, cdw_next); CFISCSI_SESSION_UNLOCK(cs); /* * XXX: We should limit the number of outstanding R2T PDUs * per task to MaxOutstandingR2T. */ response = cfiscsi_pdu_new_response(request, M_NOWAIT); if (response == NULL) { CFISCSI_SESSION_WARN(cs, "failed to " "allocate memory; dropping connection"); ctl_set_busy(&io->scsiio); io->scsiio.be_move_done(io); cfiscsi_session_terminate(cs); return; } bhsr2t = (struct iscsi_bhs_r2t *)response->ip_bhs; bhsr2t->bhsr2t_opcode = ISCSI_BHS_OPCODE_R2T; bhsr2t->bhsr2t_flags = 0x80; bhsr2t->bhsr2t_lun = bhssc->bhssc_lun; bhsr2t->bhsr2t_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhsr2t->bhsr2t_target_transfer_tag = target_transfer_tag; /* * XXX: Here we assume that cfiscsi_datamove() won't ever * be running concurrently on several CPUs for a given * command. */ bhsr2t->bhsr2t_r2tsn = htonl(PDU_R2TSN(request)); PDU_R2TSN(request)++; /* * This is the offset within the current SCSI command; * i.e. for the first call of datamove(), it will be 0, * and for subsequent ones it will be the sum of lengths * of previous ones. * * The ext_data_filled is to account for unsolicited * (immediate) data that might have already arrived. */ bhsr2t->bhsr2t_buffer_offset = htonl(r2t_off); /* * This is the total length (sum of S/G lengths) this call * to cfiscsi_datamove() is supposed to handle, limited by * MaxBurstLength. */ bhsr2t->bhsr2t_desired_data_transfer_length = htonl(r2t_len); cfiscsi_pdu_queue(response); } static void cfiscsi_datamove(union ctl_io *io) { if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) cfiscsi_datamove_in(io); else { /* We hadn't received anything during this datamove yet. 
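 * (ext_data_filled counts the bytes received within the current
 * datamove only, so every new write datamove starts it from zero.)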
*/ io->scsiio.ext_data_filled = 0; cfiscsi_datamove_out(io); } } static void cfiscsi_scsi_command_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_scsi_command *bhssc; struct iscsi_bhs_scsi_response *bhssr; #ifdef DIAGNOSTIC struct cfiscsi_data_wait *cdw; #endif struct cfiscsi_session *cs; uint16_t sense_length; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhssc = (struct iscsi_bhs_scsi_command *)request->ip_bhs; KASSERT((bhssc->bhssc_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_SCSI_COMMAND, ("replying to wrong opcode 0x%x", bhssc->bhssc_opcode)); //CFISCSI_SESSION_DEBUG(cs, "initiator task tag 0x%x", // bhssc->bhssc_initiator_task_tag); #ifdef DIAGNOSTIC CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH(cdw, &cs->cs_waiting_for_data_out, cdw_next) KASSERT(bhssc->bhssc_initiator_task_tag != cdw->cdw_initiator_task_tag, ("dangling cdw")); CFISCSI_SESSION_UNLOCK(cs); #endif /* * Do not return status for aborted commands. * There are exceptions, but none supported by CTL yet. */ if (((io->io_hdr.flags & CTL_FLAG_ABORT) && (io->io_hdr.flags & CTL_FLAG_ABORT_STATUS) == 0) || (io->io_hdr.flags & CTL_FLAG_STATUS_SENT)) { ctl_free_io(io); icl_pdu_free(request); return; } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhssr = (struct iscsi_bhs_scsi_response *)response->ip_bhs; bhssr->bhssr_opcode = ISCSI_BHS_OPCODE_SCSI_RESPONSE; bhssr->bhssr_flags = 0x80; /* * XXX: We don't deal with bidirectional under/overflows; * does anything actually support those? */ if (PDU_TOTAL_TRANSFER_LEN(request) < ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_UNDERFLOW; bhssr->bhssr_residual_count = htonl(ntohl(bhssc->bhssc_expected_data_transfer_length) - PDU_TOTAL_TRANSFER_LEN(request)); //CFISCSI_SESSION_DEBUG(cs, "underflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } else if (PDU_TOTAL_TRANSFER_LEN(request) > ntohl(bhssc->bhssc_expected_data_transfer_length)) { bhssr->bhssr_flags |= BHSSR_FLAGS_RESIDUAL_OVERFLOW; bhssr->bhssr_residual_count = htonl(PDU_TOTAL_TRANSFER_LEN(request) - ntohl(bhssc->bhssc_expected_data_transfer_length)); //CFISCSI_SESSION_DEBUG(cs, "overflow; residual count %d", // ntohl(bhssr->bhssr_residual_count)); } bhssr->bhssr_response = BHSSR_RESPONSE_COMMAND_COMPLETED; bhssr->bhssr_status = io->scsiio.scsi_status; bhssr->bhssr_initiator_task_tag = bhssc->bhssc_initiator_task_tag; bhssr->bhssr_expdatasn = htonl(PDU_EXPDATASN(request)); if (io->scsiio.sense_len > 0) { #if 0 CFISCSI_SESSION_DEBUG(cs, "returning %d bytes of sense data", io->scsiio.sense_len); #endif sense_length = htons(io->scsiio.sense_len); icl_pdu_append_data(response, &sense_length, sizeof(sense_length), M_WAITOK); icl_pdu_append_data(response, &io->scsiio.sense_data, io->scsiio.sense_len, M_WAITOK); } ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_task_management_done(union ctl_io *io) { struct icl_pdu *request, *response; struct iscsi_bhs_task_management_request *bhstmr; struct iscsi_bhs_task_management_response *bhstmr2; struct cfiscsi_data_wait *cdw, *tmpcdw; struct cfiscsi_session *cs; request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); bhstmr = (struct iscsi_bhs_task_management_request *)request->ip_bhs; KASSERT((bhstmr->bhstmr_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) == ISCSI_BHS_OPCODE_TASK_REQUEST, ("replying to wrong opcode 0x%x", bhstmr->bhstmr_opcode)); #if 0 CFISCSI_SESSION_DEBUG(cs, "initiator 
task tag 0x%x; referenced task tag 0x%x", bhstmr->bhstmr_initiator_task_tag, bhstmr->bhstmr_referenced_task_tag); #endif if ((bhstmr->bhstmr_function & ~0x80) == BHSTMR_FUNCTION_ABORT_TASK) { /* * Make sure we no longer wait for Data-Out for this command. */ CFISCSI_SESSION_LOCK(cs); TAILQ_FOREACH_SAFE(cdw, &cs->cs_waiting_for_data_out, cdw_next, tmpcdw) { if (bhstmr->bhstmr_referenced_task_tag != cdw->cdw_initiator_task_tag) continue; #if 0 CFISCSI_SESSION_DEBUG(cs, "removing csw for initiator task " "tag 0x%x", bhstmr->bhstmr_initiator_task_tag); #endif TAILQ_REMOVE(&cs->cs_waiting_for_data_out, cdw, cdw_next); cdw->cdw_ctl_io->scsiio.be_move_done(cdw->cdw_ctl_io); uma_zfree(cfiscsi_data_wait_zone, cdw); } CFISCSI_SESSION_UNLOCK(cs); } response = cfiscsi_pdu_new_response(request, M_WAITOK); bhstmr2 = (struct iscsi_bhs_task_management_response *) response->ip_bhs; bhstmr2->bhstmr_opcode = ISCSI_BHS_OPCODE_TASK_RESPONSE; bhstmr2->bhstmr_flags = 0x80; if (io->io_hdr.status == CTL_SUCCESS) { bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_COMPLETE; } else { /* * XXX: How to figure out what exactly went wrong? iSCSI spec * expects us to provide detailed error, e.g. "Task does * not exist" or "LUN does not exist". */ CFISCSI_SESSION_DEBUG(cs, "BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED"); bhstmr2->bhstmr_response = BHSTMR_RESPONSE_FUNCTION_NOT_SUPPORTED; } bhstmr2->bhstmr_initiator_task_tag = bhstmr->bhstmr_initiator_task_tag; ctl_free_io(io); icl_pdu_free(request); cfiscsi_pdu_queue(response); } static void cfiscsi_done(union ctl_io *io) { struct icl_pdu *request; struct cfiscsi_session *cs; KASSERT(((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE), ("invalid CTL status %#x", io->io_hdr.status)); if (io->io_hdr.io_type == CTL_IO_TASK && io->taskio.task_action == CTL_TASK_I_T_NEXUS_RESET) { /* * Implicit task termination has just completed; nothing to do. */ cs = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs->cs_tasks_aborted = true; refcount_release(&cs->cs_outstanding_ctl_pdus); wakeup(__DEVOLATILE(void *, &cs->cs_outstanding_ctl_pdus)); ctl_free_io(io); return; } request = io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; cs = PDU_SESSION(request); refcount_release(&cs->cs_outstanding_ctl_pdus); switch (request->ip_bhs->bhs_opcode & ~ISCSI_BHS_OPCODE_IMMEDIATE) { case ISCSI_BHS_OPCODE_SCSI_COMMAND: cfiscsi_scsi_command_done(io); break; case ISCSI_BHS_OPCODE_TASK_REQUEST: cfiscsi_task_management_done(io); break; default: panic("cfiscsi_done called with wrong opcode 0x%x", request->ip_bhs->bhs_opcode); } } Index: stable/10/sys/cam/ctl/ctl_frontend_iscsi.h =================================================================== --- stable/10/sys/cam/ctl/ctl_frontend_iscsi.h (revision 279001) +++ stable/10/sys/cam/ctl/ctl_frontend_iscsi.h (revision 279002) @@ -1,126 +1,125 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef CTL_FRONTEND_ISCSI_H #define CTL_FRONTEND_ISCSI_H #define CFISCSI_TARGET_STATE_INVALID 0 #define CFISCSI_TARGET_STATE_ACTIVE 1 #define CFISCSI_TARGET_STATE_DYING 2 struct cfiscsi_target { TAILQ_ENTRY(cfiscsi_target) ct_next; - uint32_t ct_luns[CTL_MAX_LUNS]; struct cfiscsi_softc *ct_softc; volatile u_int ct_refcount; char ct_name[CTL_ISCSI_NAME_LEN]; char ct_alias[CTL_ISCSI_ALIAS_LEN]; int ct_state; int ct_online; int ct_target_id; struct ctl_port ct_port; }; struct cfiscsi_data_wait { TAILQ_ENTRY(cfiscsi_data_wait) cdw_next; union ctl_io *cdw_ctl_io; uint32_t cdw_target_transfer_tag; uint32_t cdw_initiator_task_tag; int cdw_sg_index; char *cdw_sg_addr; size_t cdw_sg_len; uint32_t cdw_r2t_end; uint32_t cdw_datasn; }; #define CFISCSI_SESSION_STATE_INVALID 0 #define CFISCSI_SESSION_STATE_BHS 1 #define CFISCSI_SESSION_STATE_AHS 2 #define CFISCSI_SESSION_STATE_HEADER_DIGEST 3 #define CFISCSI_SESSION_STATE_DATA 4 #define CFISCSI_SESSION_STATE_DATA_DIGEST 5 struct cfiscsi_session { TAILQ_ENTRY(cfiscsi_session) cs_next; struct mtx cs_lock; struct icl_conn *cs_conn; uint32_t cs_cmdsn; uint32_t cs_statsn; uint32_t cs_target_transfer_tag; volatile u_int cs_outstanding_ctl_pdus; TAILQ_HEAD(, cfiscsi_data_wait) cs_waiting_for_data_out; struct cfiscsi_target *cs_target; struct callout cs_callout; int cs_timeout; int cs_portal_group_tag; struct cv cs_maintenance_cv; bool cs_terminating; bool cs_tasks_aborted; size_t cs_max_data_segment_length; size_t cs_max_burst_length; bool cs_immediate_data; char cs_initiator_name[CTL_ISCSI_NAME_LEN]; char cs_initiator_addr[CTL_ISCSI_ADDR_LEN]; char cs_initiator_alias[CTL_ISCSI_ALIAS_LEN]; char cs_initiator_isid[6]; char cs_initiator_id[CTL_ISCSI_NAME_LEN + 5 + 6 + 1]; unsigned int cs_id; int cs_ctl_initid; #ifdef ICL_KERNEL_PROXY struct sockaddr *cs_initiator_sa; int cs_portal_id; bool cs_login_phase; bool cs_waiting_for_ctld; struct cv cs_login_cv; struct icl_pdu *cs_login_pdu; #endif }; #ifdef ICL_KERNEL_PROXY struct icl_listen; #endif struct cfiscsi_softc { struct mtx lock; char port_name[32]; int online; int last_target_id; unsigned int last_session_id; TAILQ_HEAD(, cfiscsi_target) targets; TAILQ_HEAD(, cfiscsi_session) sessions; struct cv sessions_cv; #ifdef ICL_KERNEL_PROXY struct icl_listen *listener; struct cv accept_cv; #endif }; #endif /* !CTL_FRONTEND_ISCSI_H */ Index: stable/10/sys/cam/ctl/ctl_ioctl.h =================================================================== --- stable/10/sys/cam/ctl/ctl_ioctl.h (revision 279001) +++ stable/10/sys/cam/ctl/ctl_ioctl.h (revision 279002) @@ -1,840 +1,847 @@ /*- * Copyright (c) 2003 Silicon Graphics International Corp. * Copyright (c) 2011 Spectra Logic Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_ioctl.h#4 $ * $FreeBSD$ */ /* * CAM Target Layer ioctl interface. * * Author: Ken Merry */ #ifndef _CTL_IOCTL_H_ #define _CTL_IOCTL_H_ #ifdef ICL_KERNEL_PROXY #include #endif #include #define CTL_DEFAULT_DEV "/dev/cam/ctl" /* * Maximum number of targets we support. */ #define CTL_MAX_TARGETS 1 /* * Maximum target ID we support. */ #define CTL_MAX_TARGID 15 /* * Maximum number of LUNs we support at the moment. MUST be a power of 2. */ #define CTL_MAX_LUNS 1024 /* * Maximum number of initiators per port. */ #define CTL_MAX_INIT_PER_PORT 2048 /* * Maximum number of ports registered at one time. */ #define CTL_MAX_PORTS 256 /* * Maximum number of initiators we support. 
*/ #define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * CTL_MAX_PORTS) /* Hopefully this won't conflict with new misc devices that pop up */ #define CTL_MINOR 225 typedef enum { CTL_OOA_INVALID_LUN, CTL_OOA_SUCCESS } ctl_ooa_status; struct ctl_ooa_info { uint32_t target_id; /* Passed in to CTL */ uint32_t lun_id; /* Passed in to CTL */ uint32_t num_entries; /* Returned from CTL */ ctl_ooa_status status; /* Returned from CTL */ }; struct ctl_hard_startstop_info { cfi_mt_status status; int total_luns; int luns_complete; int luns_failed; }; struct ctl_bbrread_info { int lun_num; /* Passed in to CTL */ uint64_t lba; /* Passed in to CTL */ int len; /* Passed in to CTL */ cfi_mt_status status; /* Returned from CTL */ cfi_bbrread_status bbr_status; /* Returned from CTL */ uint8_t scsi_status; /* Returned from CTL */ struct scsi_sense_data sense_data; /* Returned from CTL */ }; typedef enum { CTL_DELAY_TYPE_NONE, CTL_DELAY_TYPE_CONT, CTL_DELAY_TYPE_ONESHOT } ctl_delay_type; typedef enum { CTL_DELAY_LOC_NONE, CTL_DELAY_LOC_DATAMOVE, CTL_DELAY_LOC_DONE, } ctl_delay_location; typedef enum { CTL_DELAY_STATUS_NONE, CTL_DELAY_STATUS_OK, CTL_DELAY_STATUS_INVALID_LUN, CTL_DELAY_STATUS_INVALID_TYPE, CTL_DELAY_STATUS_INVALID_LOC, CTL_DELAY_STATUS_NOT_IMPLEMENTED } ctl_delay_status; struct ctl_io_delay_info { uint32_t target_id; uint32_t lun_id; ctl_delay_type delay_type; ctl_delay_location delay_loc; uint32_t delay_secs; ctl_delay_status status; }; typedef enum { CTL_GS_SYNC_NONE, CTL_GS_SYNC_OK, CTL_GS_SYNC_NO_LUN } ctl_gs_sync_status; /* * The target and LUN id specify which device to modify. The sync interval * means that we will let through every N SYNCHRONIZE CACHE commands. */ struct ctl_sync_info { uint32_t target_id; /* passed to kernel */ uint32_t lun_id; /* passed to kernel */ int sync_interval; /* depends on whether get/set */ ctl_gs_sync_status status; /* passed from kernel */ }; typedef enum { CTL_STATS_NO_IO, CTL_STATS_READ, CTL_STATS_WRITE } ctl_stat_types; #define CTL_STATS_NUM_TYPES 3 typedef enum { CTL_LUN_STATS_NO_BLOCKSIZE = 0x01 } ctl_lun_stats_flags; struct ctl_lun_io_port_stats { uint32_t targ_port; uint64_t bytes[CTL_STATS_NUM_TYPES]; uint64_t operations[CTL_STATS_NUM_TYPES]; struct bintime time[CTL_STATS_NUM_TYPES]; uint64_t num_dmas[CTL_STATS_NUM_TYPES]; struct bintime dma_time[CTL_STATS_NUM_TYPES]; }; struct ctl_lun_io_stats { uint8_t device_type; uint64_t lun_number; uint32_t blocksize; ctl_lun_stats_flags flags; struct ctl_lun_io_port_stats ports[CTL_MAX_PORTS]; }; typedef enum { CTL_SS_OK, CTL_SS_NEED_MORE_SPACE, CTL_SS_ERROR } ctl_stats_status; typedef enum { CTL_STATS_FLAG_NONE = 0x00, CTL_STATS_FLAG_TIME_VALID = 0x01 } ctl_stats_flags; struct ctl_stats { int alloc_len; /* passed to kernel */ struct ctl_lun_io_stats *lun_stats; /* passed to/from kernel */ int fill_len; /* passed to userland */ int num_luns; /* passed to userland */ ctl_stats_status status; /* passed to userland */ ctl_stats_flags flags; /* passed to userland */ struct timespec timestamp; /* passed to userland */ }; /* * The types of errors that can be injected: * * NONE: No error specified. * ABORTED: SSD_KEY_ABORTED_COMMAND, 0x45, 0x00 * MEDIUM_ERR: Medium error, different asc/ascq depending on read/write. * UA: Unit attention. * CUSTOM: User specifies the sense data. * TYPE: Mask to use with error types. * * Flags that affect injection behavior: * CONTINUOUS: This error will stay around until explicitly cleared. * DESCRIPTOR: Use descriptor sense instead of fixed sense. 
*/ typedef enum { CTL_LUN_INJ_NONE = 0x000, CTL_LUN_INJ_ABORTED = 0x001, CTL_LUN_INJ_MEDIUM_ERR = 0x002, CTL_LUN_INJ_UA = 0x003, CTL_LUN_INJ_CUSTOM = 0x004, CTL_LUN_INJ_TYPE = 0x0ff, CTL_LUN_INJ_CONTINUOUS = 0x100, CTL_LUN_INJ_DESCRIPTOR = 0x200 } ctl_lun_error; /* * Flags to specify what type of command the given error pattern will * execute on. The first group of types can be ORed together. * * READ: Any read command. * WRITE: Any write command. * READWRITE: Any read or write command. * READCAP: Any read capacity command. * TUR: Test Unit Ready. * ANY: Any command. * MASK: Mask for basic command patterns. * * Special types: * * CMD: The CDB to act on is specified in struct ctl_error_desc_cmd. * RANGE: For read/write commands, act when the LBA is in the * specified range. */ typedef enum { CTL_LUN_PAT_NONE = 0x000, CTL_LUN_PAT_READ = 0x001, CTL_LUN_PAT_WRITE = 0x002, CTL_LUN_PAT_READWRITE = CTL_LUN_PAT_READ | CTL_LUN_PAT_WRITE, CTL_LUN_PAT_READCAP = 0x004, CTL_LUN_PAT_TUR = 0x008, CTL_LUN_PAT_ANY = 0x0ff, CTL_LUN_PAT_MASK = 0x0ff, CTL_LUN_PAT_CMD = 0x100, CTL_LUN_PAT_RANGE = 0x200 } ctl_lun_error_pattern; /* * This structure allows the user to specify a particular CDB pattern to * look for. * * cdb_pattern: Fill in the relevant bytes to look for in the CDB. * cdb_valid_bytes: Bitmask specifying valid bytes in the cdb_pattern. * flags: Specify any command flags (see ctl_io_flags) that * should be set. */ struct ctl_error_desc_cmd { uint8_t cdb_pattern[CTL_MAX_CDBLEN]; uint32_t cdb_valid_bytes; uint32_t flags; }; /* * Error injection descriptor. * * target_id: Target ID to act on. * lun_id LUN to act on. * lun_error: The type of error to inject. See above for descriptions. * error_pattern: What kind of command to act on. See above. * cmd_desc: For CTL_LUN_PAT_CMD only. * lba_range: For CTL_LUN_PAT_RANGE only. * custom_sense: Specify sense. For CTL_LUN_INJ_CUSTOM only. * serial: Serial number returned by the kernel. Use for deletion. * links: Kernel use only. 
*/ struct ctl_error_desc { uint32_t target_id; /* To kernel */ uint32_t lun_id; /* To kernel */ ctl_lun_error lun_error; /* To kernel */ ctl_lun_error_pattern error_pattern; /* To kernel */ struct ctl_error_desc_cmd cmd_desc; /* To kernel */ struct ctl_lba_len lba_range; /* To kernel */ struct scsi_sense_data custom_sense; /* To kernel */ uint64_t serial; /* From kernel */ STAILQ_ENTRY(ctl_error_desc) links; /* Kernel use only */ }; typedef enum { CTL_OOA_FLAG_NONE = 0x00, CTL_OOA_FLAG_ALL_LUNS = 0x01 } ctl_ooa_flags; typedef enum { CTL_OOA_OK, CTL_OOA_NEED_MORE_SPACE, CTL_OOA_ERROR } ctl_get_ooa_status; typedef enum { CTL_OOACMD_FLAG_NONE = 0x00, CTL_OOACMD_FLAG_DMA = 0x01, CTL_OOACMD_FLAG_BLOCKED = 0x02, CTL_OOACMD_FLAG_ABORT = 0x04, CTL_OOACMD_FLAG_RTR = 0x08, CTL_OOACMD_FLAG_DMA_QUEUED = 0x10 } ctl_ooa_cmd_flags; struct ctl_ooa_entry { ctl_ooa_cmd_flags cmd_flags; uint8_t cdb[CTL_MAX_CDBLEN]; uint8_t cdb_len; uint32_t tag_num; uint32_t lun_num; struct bintime start_bt; }; struct ctl_ooa { ctl_ooa_flags flags; /* passed to kernel */ uint64_t lun_num; /* passed to kernel */ uint32_t alloc_len; /* passed to kernel */ uint32_t alloc_num; /* passed to kernel */ struct ctl_ooa_entry *entries; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ uint32_t fill_num; /* passed to userland */ uint32_t dropped_num; /* passed to userland */ struct bintime cur_bt; /* passed to userland */ ctl_get_ooa_status status; /* passed to userland */ }; typedef enum { CTL_PORT_LIST_NONE, CTL_PORT_LIST_OK, CTL_PORT_LIST_NEED_MORE_SPACE, CTL_PORT_LIST_ERROR } ctl_port_list_status; struct ctl_port_list { uint32_t alloc_len; /* passed to kernel */ uint32_t alloc_num; /* passed to kernel */ struct ctl_port_entry *entries; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ uint32_t fill_num; /* passed to userland */ uint32_t dropped_num; /* passed to userland */ ctl_port_list_status status; /* passed to userland */ }; typedef enum { CTL_LUN_NOSTATUS, CTL_LUN_OK, CTL_LUN_ERROR, CTL_LUN_WARNING } ctl_lun_status; #define CTL_ERROR_STR_LEN 160 #define CTL_BEARG_RD 0x01 #define CTL_BEARG_WR 0x02 #define CTL_BEARG_RW (CTL_BEARG_RD|CTL_BEARG_WR) #define CTL_BEARG_ASCII 0x04 /* * Backend Argument: * * namelen: Length of the name field, including the terminating NUL. * * name: Name of the paramter. This must be NUL-terminated. * * flags: Flags for the parameter, see above for values. * * vallen: Length of the value in bytes. * * value: Value to be set/fetched. * * kname: For kernel use only. * * kvalue: For kernel use only. */ struct ctl_be_arg { int namelen; char *name; int flags; int vallen; void *value; char *kname; void *kvalue; }; typedef enum { CTL_LUNREQ_CREATE, CTL_LUNREQ_RM, CTL_LUNREQ_MODIFY, } ctl_lunreq_type; /* * LUN creation parameters: * * flags: Various LUN flags, see ctl_backend.h for a * description of the flag values and meanings. * * device_type: The SCSI device type. e.g. 0 for Direct Access, * 3 for Processor, etc. Only certain backends may * support setting this field. The CTL_LUN_FLAG_DEV_TYPE * flag should be set in the flags field if the device * type is set. * * lun_size_bytes: The size of the LUN in bytes. For some backends * this is relevant (e.g. ramdisk), for others, it may * be ignored in favor of using the properties of the * backing store. If specified, this should be a * multiple of the blocksize. * * The actual size of the LUN is returned in this * field. * * blocksize_bytes: The LUN blocksize in bytes. 
For some backends this * is relevant, for others it may be ignored in * favor of using the properties of the backing store. * * The actual blocksize of the LUN is returned in this * field. * * req_lun_id: The requested LUN ID. The CTL_LUN_FLAG_ID_REQ flag * should be set if this is set. The request will be * granted if the LUN number is available, otherwise * the LUN addition request will fail. * * The allocated LUN number is returned in this field. * * serial_num: This is the value returned in SCSI INQUIRY VPD page * 0x80. If it is specified, the CTL_LUN_FLAG_SERIAL_NUM * flag should be set. * * The serial number value used is returned in this * field. * * device_id: This is the value returned in the T10 vendor ID * based DESIGNATOR field in the SCSI INQUIRY VPD page * 0x83 data. If it is specified, the CTL_LUN_FLAG_DEVID * flag should be set. * * The device id value used is returned in this field. */ struct ctl_lun_create_params { ctl_backend_lun_flags flags; uint8_t device_type; uint64_t lun_size_bytes; uint32_t blocksize_bytes; uint32_t req_lun_id; uint8_t serial_num[CTL_SN_LEN]; uint8_t device_id[CTL_DEVID_LEN]; }; /* * LUN removal parameters: * * lun_id: The number of the LUN to delete. This must be set. * The LUN must be backed by the given backend. */ struct ctl_lun_rm_params { uint32_t lun_id; }; /* * LUN modification parameters: * * lun_id: The number of the LUN to modify. This must be set. * The LUN must be backed by the given backend. * * lun_size_bytes: The size of the LUN in bytes. If zero, update * the size using the backing file size, if possible. */ struct ctl_lun_modify_params { uint32_t lun_id; uint64_t lun_size_bytes; }; /* * Union of request type data. Fill in the appropriate union member for * the request type. */ union ctl_lunreq_data { struct ctl_lun_create_params create; struct ctl_lun_rm_params rm; struct ctl_lun_modify_params modify; }; /* * LUN request interface: * * backend: This is required, and is NUL-terminated a string * that is the name of the backend, like "ramdisk" or * "block". * * reqtype: The type of request, CTL_LUNREQ_CREATE to create a * LUN, CTL_LUNREQ_RM to delete a LUN. * * reqdata: Request type-specific information. See the * description of individual the union members above * for more information. * * num_be_args: This is the number of backend-specific arguments * in the be_args array. * * be_args: This is an array of backend-specific arguments. * See above for a description of the fields in this * structure. * * status: Status of the LUN request. * * error_str: If the status is CTL_LUN_ERROR, this will * contain a string describing the error. * * kern_be_args: For kernel use only. */ struct ctl_lun_req { char backend[CTL_BE_NAME_LEN]; ctl_lunreq_type reqtype; union ctl_lunreq_data reqdata; int num_be_args; struct ctl_be_arg *be_args; ctl_lun_status status; char error_str[CTL_ERROR_STR_LEN]; struct ctl_be_arg *kern_be_args; }; /* * LUN list status: * * NONE: No status. * * OK: Request completed successfully. * * NEED_MORE_SPACE: The allocated length of the entries field is too * small for the available data. * * ERROR: An error occured, look at the error string for a * description of the error. */ typedef enum { CTL_LUN_LIST_NONE, CTL_LUN_LIST_OK, CTL_LUN_LIST_NEED_MORE_SPACE, CTL_LUN_LIST_ERROR } ctl_lun_list_status; /* * LUN list interface * * backend_name: This is a NUL-terminated string. If the string * length is 0, then all LUNs on all backends will * be enumerated. 
Otherwise this is the name of the * backend to be enumerated, like "ramdisk" or "block". * * alloc_len: The length of the data buffer allocated for entries. * In order to properly size the buffer, make one call * with alloc_len set to 0, and then use the returned * dropped_len as the buffer length to allocate and * pass in on a subsequent call. * * lun_xml: XML-formatted information on the requested LUNs. * * fill_len: The amount of data filled in the storage for entries. * * status: The status of the request. See above for the * description of the values of this field. * * error_str: If the status indicates an error, this string will * be filled in to describe the error. */ struct ctl_lun_list { char backend[CTL_BE_NAME_LEN]; /* passed to kernel*/ uint32_t alloc_len; /* passed to kernel */ char *lun_xml; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ ctl_lun_list_status status; /* passed to userland */ char error_str[CTL_ERROR_STR_LEN]; /* passed to userland */ }; /* * Port request interface: * * driver: This is required, and is NUL-terminated a string * that is the name of the frontend, like "iscsi" . * * reqtype: The type of request, CTL_REQ_CREATE to create a * port, CTL_REQ_REMOVE to delete a port. * * num_be_args: This is the number of frontend-specific arguments * in the be_args array. * * be_args: This is an array of frontend-specific arguments. * See above for a description of the fields in this * structure. * * status: Status of the request. * * error_str: If the status is CTL_LUN_ERROR, this will * contain a string describing the error. * * kern_be_args: For kernel use only. */ typedef enum { CTL_REQ_CREATE, CTL_REQ_REMOVE, CTL_REQ_MODIFY, } ctl_req_type; struct ctl_req { char driver[CTL_DRIVER_NAME_LEN]; ctl_req_type reqtype; int num_args; struct ctl_be_arg *args; ctl_lun_status status; char error_str[CTL_ERROR_STR_LEN]; struct ctl_be_arg *kern_args; }; /* * iSCSI status * * OK: Request completed successfully. * * ERROR: An error occured, look at the error string for a * description of the error. * * CTL_ISCSI_LIST_NEED_MORE_SPACE: * User has to pass larger buffer for CTL_ISCSI_LIST ioctl. */ typedef enum { CTL_ISCSI_OK, CTL_ISCSI_ERROR, CTL_ISCSI_LIST_NEED_MORE_SPACE, CTL_ISCSI_SESSION_NOT_FOUND } ctl_iscsi_status; typedef enum { CTL_ISCSI_HANDOFF, CTL_ISCSI_LIST, CTL_ISCSI_LOGOUT, CTL_ISCSI_TERMINATE, #if defined(ICL_KERNEL_PROXY) || 1 /* * We actually need those in all cases, but leave the ICL_KERNEL_PROXY, * to remember to remove them along with rest of proxy code, eventually. */ CTL_ISCSI_LISTEN, CTL_ISCSI_ACCEPT, CTL_ISCSI_SEND, CTL_ISCSI_RECEIVE, #endif } ctl_iscsi_type; typedef enum { CTL_ISCSI_DIGEST_NONE, CTL_ISCSI_DIGEST_CRC32C } ctl_iscsi_digest; #define CTL_ISCSI_NAME_LEN 224 /* 223 bytes, by RFC 3720, + '\0' */ #define CTL_ISCSI_ADDR_LEN 47 /* INET6_ADDRSTRLEN + '\0' */ #define CTL_ISCSI_ALIAS_LEN 128 /* Arbitrary. */ struct ctl_iscsi_handoff_params { char initiator_name[CTL_ISCSI_NAME_LEN]; char initiator_addr[CTL_ISCSI_ADDR_LEN]; char initiator_alias[CTL_ISCSI_ALIAS_LEN]; uint8_t initiator_isid[6]; char target_name[CTL_ISCSI_NAME_LEN]; int socket; int portal_group_tag; /* * Connection parameters negotiated by ctld(8). 
*/ ctl_iscsi_digest header_digest; ctl_iscsi_digest data_digest; uint32_t cmdsn; uint32_t statsn; uint32_t max_recv_data_segment_length; uint32_t max_burst_length; uint32_t first_burst_length; uint32_t immediate_data; #ifdef ICL_KERNEL_PROXY int connection_id; int spare[3]; #else int spare[4]; #endif }; struct ctl_iscsi_list_params { uint32_t alloc_len; /* passed to kernel */ char *conn_xml; /* filled in kernel */ uint32_t fill_len; /* passed to userland */ int spare[4]; }; struct ctl_iscsi_logout_params { int connection_id; /* passed to kernel */ char initiator_name[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ char initiator_addr[CTL_ISCSI_ADDR_LEN]; /* passed to kernel */ int all; /* passed to kernel */ int spare[4]; }; struct ctl_iscsi_terminate_params { int connection_id; /* passed to kernel */ char initiator_name[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ char initiator_addr[CTL_ISCSI_NAME_LEN]; /* passed to kernel */ int all; /* passed to kernel */ int spare[4]; }; #ifdef ICL_KERNEL_PROXY struct ctl_iscsi_listen_params { int iser; int domain; int socktype; int protocol; struct sockaddr *addr; socklen_t addrlen; int portal_id; int spare[4]; }; struct ctl_iscsi_accept_params { int connection_id; int portal_id; struct sockaddr *initiator_addr; socklen_t initiator_addrlen; int spare[4]; }; struct ctl_iscsi_send_params { int connection_id; void *bhs; size_t spare; void *spare2; size_t data_segment_len; void *data_segment; int spare3[4]; }; struct ctl_iscsi_receive_params { int connection_id; void *bhs; size_t spare; void *spare2; size_t data_segment_len; void *data_segment; int spare3[4]; }; #endif /* ICL_KERNEL_PROXY */ union ctl_iscsi_data { struct ctl_iscsi_handoff_params handoff; struct ctl_iscsi_list_params list; struct ctl_iscsi_logout_params logout; struct ctl_iscsi_terminate_params terminate; #ifdef ICL_KERNEL_PROXY struct ctl_iscsi_listen_params listen; struct ctl_iscsi_accept_params accept; struct ctl_iscsi_send_params send; struct ctl_iscsi_receive_params receive; #endif }; /* * iSCSI interface * * status: The status of the request. See above for the * description of the values of this field. * * error_str: If the status indicates an error, this string will * be filled in to describe the error. 
*/ struct ctl_iscsi { ctl_iscsi_type type; /* passed to kernel */ union ctl_iscsi_data data; /* passed to kernel */ ctl_iscsi_status status; /* passed to userland */ char error_str[CTL_ERROR_STR_LEN]; /* passed to userland */ }; +struct ctl_lun_map { + uint32_t port; + uint32_t plun; + uint32_t lun; +}; + #define CTL_IO _IOWR(CTL_MINOR, 0x00, union ctl_io) #define CTL_ENABLE_PORT _IOW(CTL_MINOR, 0x04, struct ctl_port_entry) #define CTL_DISABLE_PORT _IOW(CTL_MINOR, 0x05, struct ctl_port_entry) #define CTL_DUMP_OOA _IO(CTL_MINOR, 0x06) #define CTL_CHECK_OOA _IOWR(CTL_MINOR, 0x07, struct ctl_ooa_info) #define CTL_HARD_STOP _IOR(CTL_MINOR, 0x08, \ struct ctl_hard_startstop_info) #define CTL_HARD_START _IOR(CTL_MINOR, 0x09, \ struct ctl_hard_startstop_info) #define CTL_DELAY_IO _IOWR(CTL_MINOR, 0x10, struct ctl_io_delay_info) #define CTL_REALSYNC_GET _IOR(CTL_MINOR, 0x11, int) #define CTL_REALSYNC_SET _IOW(CTL_MINOR, 0x12, int) #define CTL_SETSYNC _IOWR(CTL_MINOR, 0x13, struct ctl_sync_info) #define CTL_GETSYNC _IOWR(CTL_MINOR, 0x14, struct ctl_sync_info) #define CTL_GETSTATS _IOWR(CTL_MINOR, 0x15, struct ctl_stats) #define CTL_ERROR_INJECT _IOWR(CTL_MINOR, 0x16, struct ctl_error_desc) #define CTL_BBRREAD _IOWR(CTL_MINOR, 0x17, struct ctl_bbrread_info) #define CTL_GET_OOA _IOWR(CTL_MINOR, 0x18, struct ctl_ooa) #define CTL_DUMP_STRUCTS _IO(CTL_MINOR, 0x19) #define CTL_GET_PORT_LIST _IOWR(CTL_MINOR, 0x20, struct ctl_port_list) #define CTL_LUN_REQ _IOWR(CTL_MINOR, 0x21, struct ctl_lun_req) #define CTL_LUN_LIST _IOWR(CTL_MINOR, 0x22, struct ctl_lun_list) #define CTL_ERROR_INJECT_DELETE _IOW(CTL_MINOR, 0x23, struct ctl_error_desc) #define CTL_SET_PORT_WWNS _IOW(CTL_MINOR, 0x24, struct ctl_port_entry) #define CTL_ISCSI _IOWR(CTL_MINOR, 0x25, struct ctl_iscsi) #define CTL_PORT_REQ _IOWR(CTL_MINOR, 0x26, struct ctl_req) #define CTL_PORT_LIST _IOWR(CTL_MINOR, 0x27, struct ctl_lun_list) +#define CTL_LUN_MAP _IOW(CTL_MINOR, 0x28, struct ctl_lun_map) #endif /* _CTL_IOCTL_H_ */ /* * vim: ts=8 */ Index: stable/10/sys/cam/ctl/ctl_private.h =================================================================== --- stable/10/sys/cam/ctl/ctl_private.h (revision 279001) +++ stable/10/sys/cam/ctl/ctl_private.h (revision 279002) @@ -1,552 +1,559 @@ /*- * Copyright (c) 2003, 2004, 2005, 2008 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_private.h#7 $ * $FreeBSD$ */ /* * CAM Target Layer driver private data structures/definitions. * * Author: Ken Merry */ #ifndef _CTL_PRIVATE_H_ #define _CTL_PRIVATE_H_ /* * SCSI vendor and product names. */ #define CTL_VENDOR "FREEBSD " #define CTL_DIRECT_PRODUCT "CTLDISK " #define CTL_PROCESSOR_PRODUCT "CTLPROCESSOR " #define CTL_UNKNOWN_PRODUCT "CTLDEVICE " struct ctl_fe_ioctl_startstop_info { struct cv sem; struct ctl_hard_startstop_info hs_info; }; struct ctl_fe_ioctl_bbrread_info { struct cv sem; struct ctl_bbrread_info *bbr_info; int wakeup_done; struct mtx *lock; }; typedef enum { CTL_IOCTL_INPROG, CTL_IOCTL_DATAMOVE, CTL_IOCTL_DONE } ctl_fe_ioctl_state; struct ctl_fe_ioctl_params { struct cv sem; struct mtx ioctl_mtx; ctl_fe_ioctl_state state; }; #define CTL_POOL_ENTRIES_OTHER_SC 200 struct ctl_io_pool { char name[64]; uint32_t id; struct ctl_softc *ctl_softc; struct uma_zone *zone; }; typedef enum { CTL_IOCTL_FLAG_NONE = 0x00, CTL_IOCTL_FLAG_ENABLED = 0x01 } ctl_ioctl_flags; struct ctl_ioctl_info { ctl_ioctl_flags flags; uint32_t cur_tag_num; struct ctl_port port; char port_name[24]; }; typedef enum { CTL_SER_BLOCK, CTL_SER_BLOCKOPT, CTL_SER_EXTENT, CTL_SER_EXTENTOPT, CTL_SER_EXTENTSEQ, CTL_SER_PASS, CTL_SER_SKIP } ctl_serialize_action; typedef enum { CTL_ACTION_BLOCK, CTL_ACTION_OVERLAP, CTL_ACTION_OVERLAP_TAG, CTL_ACTION_PASS, CTL_ACTION_SKIP, CTL_ACTION_ERROR } ctl_action; /* * WARNING: Keep the bottom nibble here free, we OR in the data direction * flags for each command. * * Note: "OK_ON_ALL_LUNS" == we don't have to have a lun configured * "OK_ON_BOTH" == we have to have a lun configured * "SA5" == command has 5-bit service action at byte 1 */ typedef enum { CTL_CMD_FLAG_NONE = 0x0000, CTL_CMD_FLAG_NO_SENSE = 0x0010, CTL_CMD_FLAG_OK_ON_ALL_LUNS = 0x0020, CTL_CMD_FLAG_ALLOW_ON_RESV = 0x0040, CTL_CMD_FLAG_ALLOW_ON_PR_WRESV = 0x0080, CTL_CMD_FLAG_OK_ON_PROC = 0x0100, CTL_CMD_FLAG_OK_ON_SLUN = 0x0200, CTL_CMD_FLAG_OK_ON_BOTH = 0x0300, CTL_CMD_FLAG_OK_ON_STOPPED = 0x0400, CTL_CMD_FLAG_OK_ON_INOPERABLE = 0x0800, CTL_CMD_FLAG_OK_ON_OFFLINE = 0x1000, CTL_CMD_FLAG_OK_ON_SECONDARY = 0x2000, CTL_CMD_FLAG_ALLOW_ON_PR_RESV = 0x4000, CTL_CMD_FLAG_SA5 = 0x8000 } ctl_cmd_flags; typedef enum { CTL_SERIDX_TUR = 0, CTL_SERIDX_READ, CTL_SERIDX_WRITE, CTL_SERIDX_UNMAP, CTL_SERIDX_MD_SNS, CTL_SERIDX_MD_SEL, CTL_SERIDX_RQ_SNS, CTL_SERIDX_INQ, CTL_SERIDX_RD_CAP, CTL_SERIDX_RES, CTL_SERIDX_LOG_SNS, CTL_SERIDX_FORMAT, CTL_SERIDX_START, /* TBD: others to be filled in as needed */ CTL_SERIDX_COUNT, /* LAST, not a normal code, provides # codes */ CTL_SERIDX_INVLD = CTL_SERIDX_COUNT } ctl_seridx; typedef int ctl_opfunc(struct ctl_scsiio *ctsio); struct ctl_cmd_entry { ctl_opfunc *execute; ctl_seridx seridx; ctl_cmd_flags flags; ctl_lun_error_pattern pattern; uint8_t length; /* CDB length */ uint8_t usage[15]; /* Mask of allowed CDB bits * after the opcode byte. 
*/ }; typedef enum { CTL_LUN_NONE = 0x000, CTL_LUN_CONTROL = 0x001, CTL_LUN_RESERVED = 0x002, CTL_LUN_INVALID = 0x004, CTL_LUN_DISABLED = 0x008, CTL_LUN_MALLOCED = 0x010, CTL_LUN_STOPPED = 0x020, CTL_LUN_INOPERABLE = 0x040, CTL_LUN_OFFLINE = 0x080, CTL_LUN_PR_RESERVED = 0x100, CTL_LUN_PRIMARY_SC = 0x200, CTL_LUN_SENSE_DESC = 0x400, CTL_LUN_READONLY = 0x800 } ctl_lun_flags; typedef enum { CTL_LUN_SERSEQ_OFF, CTL_LUN_SERSEQ_READ, CTL_LUN_SERSEQ_ON } ctl_lun_serseq; typedef enum { CTLBLOCK_FLAG_NONE = 0x00, CTLBLOCK_FLAG_INVALID = 0x01 } ctlblock_flags; union ctl_softcs { struct ctl_softc *ctl_softc; struct ctlblock_softc *ctlblock_softc; }; /* * Mode page defaults. */ #if 0 /* * These values make Solaris trim off some of the capacity. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 63 #define CTL_DEFAULT_HEADS 255 /* * These values seem to work okay. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 63 #define CTL_DEFAULT_HEADS 16 /* * These values work reasonably well. */ #define CTL_DEFAULT_SECTORS_PER_TRACK 512 #define CTL_DEFAULT_HEADS 64 #endif /* * Solaris is somewhat picky about how many heads and sectors per track you * have defined in mode pages 3 and 4. These values seem to cause Solaris * to get the capacity more or less right when you run the format tool. * They still have problems when dealing with devices larger than 1TB, * but there isn't anything we can do about that. * * For smaller LUN sizes, this ends up causing the number of cylinders to * work out to 0. Solaris actually recognizes that and comes up with its * own bogus geometry to fit the actual capacity of the drive. They really * should just give up on geometry and stick to the read capacity * information alone for modern disk drives. * * One thing worth mentioning about Solaris' mkfs command is that it * doesn't like sectors per track values larger than 256. 512 seems to * work okay for format, but causes problems when you try to make a * filesystem. * * Another caveat about these values: the product of these two values * really should be a power of 2. This is because of the simplistic * shift-based calculation that we have to use on the i386 platform to * calculate the number of cylinders here. (If you use a divide, you end * up calling __udivdi3(), which is a hardware FP call on the PC. On the * XScale, it is done in software, so you can do that from inside the * kernel.) * * So for the current values (256 S/T, 128 H), we get 32768, which works * very nicely for calculating cylinders. * * If you want to change these values so that their product is no longer a * power of 2, re-visit the calculation in ctl_init_page_index(). You may * need to make it a bit more complicated to get the number of cylinders * right. 
*/ #define CTL_DEFAULT_SECTORS_PER_TRACK 256 #define CTL_DEFAULT_HEADS 128 #define CTL_DEFAULT_ROTATION_RATE SVPD_NON_ROTATING struct ctl_page_index; typedef int ctl_modesen_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, int pc); typedef int ctl_modesel_handler(struct ctl_scsiio *ctsio, struct ctl_page_index *page_index, uint8_t *page_ptr); typedef enum { CTL_PAGE_FLAG_NONE = 0x00, CTL_PAGE_FLAG_DISK_ONLY = 0x01 } ctl_page_flags; struct ctl_page_index { uint8_t page_code; uint8_t subpage; uint16_t page_len; uint8_t *page_data; ctl_page_flags page_flags; ctl_modesen_handler *sense_handler; ctl_modesel_handler *select_handler; }; #define CTL_PAGE_CURRENT 0x00 #define CTL_PAGE_CHANGEABLE 0x01 #define CTL_PAGE_DEFAULT 0x02 #define CTL_PAGE_SAVED 0x03 #define CTL_NUM_LBP_PARAMS 4 #define CTL_NUM_LBP_THRESH 4 #define CTL_LBP_EXPONENT 11 /* 2048 sectors */ #define CTL_LBP_PERIOD 10 /* 10 seconds */ #define CTL_LBP_UA_PERIOD 300 /* 5 minutes */ struct ctl_logical_block_provisioning_page { struct scsi_logical_block_provisioning_page main; struct scsi_logical_block_provisioning_page_descr descr[CTL_NUM_LBP_THRESH]; }; static const struct ctl_page_index page_index_template[] = { {SMS_RW_ERROR_RECOVERY_PAGE, 0, sizeof(struct scsi_da_rw_recovery_page), NULL, CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL}, {SMS_FORMAT_DEVICE_PAGE, 0, sizeof(struct scsi_format_page), NULL, CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL}, {SMS_RIGID_DISK_PAGE, 0, sizeof(struct scsi_rigid_disk_page), NULL, CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL}, {SMS_CACHING_PAGE, 0, sizeof(struct scsi_caching_page), NULL, CTL_PAGE_FLAG_DISK_ONLY, NULL, ctl_caching_sp_handler}, {SMS_CONTROL_MODE_PAGE, 0, sizeof(struct scsi_control_page), NULL, CTL_PAGE_FLAG_NONE, NULL, ctl_control_page_handler}, {SMS_INFO_EXCEPTIONS_PAGE, 0, sizeof(struct scsi_info_exceptions_page), NULL, CTL_PAGE_FLAG_NONE, NULL, NULL}, {SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 0x02, sizeof(struct ctl_logical_block_provisioning_page), NULL, CTL_PAGE_FLAG_DISK_ONLY, NULL, NULL}, {SMS_VENDOR_SPECIFIC_PAGE | SMPH_SPF, DBGCNF_SUBPAGE_CODE, sizeof(struct copan_debugconf_subpage), NULL, CTL_PAGE_FLAG_NONE, ctl_debugconf_sp_sense_handler, ctl_debugconf_sp_select_handler}, }; #define CTL_NUM_MODE_PAGES sizeof(page_index_template)/ \ sizeof(page_index_template[0]) struct ctl_mode_pages { struct scsi_da_rw_recovery_page rw_er_page[4]; struct scsi_format_page format_page[4]; struct scsi_rigid_disk_page rigid_disk_page[4]; struct scsi_caching_page caching_page[4]; struct scsi_control_page control_page[4]; struct scsi_info_exceptions_page ie_page[4]; struct ctl_logical_block_provisioning_page lbp_page[4]; struct copan_debugconf_subpage debugconf_subpage[4]; struct ctl_page_index index[CTL_NUM_MODE_PAGES]; }; static const struct ctl_page_index log_page_index_template[] = { {SLS_SUPPORTED_PAGES_PAGE, 0, 0, NULL, CTL_PAGE_FLAG_NONE, NULL, NULL}, {SLS_SUPPORTED_PAGES_PAGE, SLS_SUPPORTED_SUBPAGES_SUBPAGE, 0, NULL, CTL_PAGE_FLAG_NONE, NULL, NULL}, {SLS_LOGICAL_BLOCK_PROVISIONING, 0, 0, NULL, CTL_PAGE_FLAG_NONE, ctl_lbp_log_sense_handler, NULL}, }; #define CTL_NUM_LOG_PAGES sizeof(log_page_index_template)/ \ sizeof(log_page_index_template[0]) struct ctl_log_pages { uint8_t pages_page[CTL_NUM_LOG_PAGES]; uint8_t subpages_page[CTL_NUM_LOG_PAGES * 2]; uint8_t lbp_page[12*CTL_NUM_LBP_PARAMS]; struct ctl_page_index index[CTL_NUM_LOG_PAGES]; }; struct ctl_lun_delay_info { ctl_delay_type datamove_type; uint32_t datamove_delay; ctl_delay_type done_type; uint32_t done_delay; }; typedef enum { 
CTL_ERR_INJ_NONE = 0x00, CTL_ERR_INJ_ABORTED = 0x01 } ctl_err_inject_flags; typedef enum { CTL_PR_FLAG_NONE = 0x00, CTL_PR_FLAG_REGISTERED = 0x01, CTL_PR_FLAG_ACTIVE_RES = 0x02 } ctl_per_res_flags; #define CTL_PR_ALL_REGISTRANTS 0xFFFFFFFF #define CTL_PR_NO_RESERVATION 0xFFFFFFF0 struct ctl_devid { int len; uint8_t data[]; }; /* * For report target port groups. */ #define NUM_TARGET_PORT_GROUPS 2 #define CTL_WRITE_BUFFER_SIZE 262144 struct tpc_list; struct ctl_lun { struct mtx lun_lock; struct ctl_id target; uint64_t lun; ctl_lun_flags flags; ctl_lun_serseq serseq; STAILQ_HEAD(,ctl_error_desc) error_list; uint64_t error_serial; struct ctl_softc *ctl_softc; struct ctl_be_lun *be_lun; struct ctl_backend_driver *backend; int io_count; struct ctl_lun_delay_info delay_info; int sync_interval; int sync_count; TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue; TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue; STAILQ_ENTRY(ctl_lun) links; STAILQ_ENTRY(ctl_lun) run_links; #ifdef CTL_WITH_CA uint32_t have_ca[CTL_MAX_INITIATORS >> 5]; struct scsi_sense_data pending_sense[CTL_MAX_INITIATORS]; #endif ctl_ua_type *pending_ua[CTL_MAX_PORTS]; time_t lasttpt; struct ctl_mode_pages mode_pages; struct ctl_log_pages log_pages; struct ctl_lun_io_stats stats; uint32_t res_idx; unsigned int PRGeneration; uint64_t *pr_keys[2 * CTL_MAX_PORTS]; int pr_key_count; uint32_t pr_res_idx; uint8_t res_type; uint8_t *write_buffer; struct ctl_devid *lun_devid; TAILQ_HEAD(tpc_lists, tpc_list) tpc_lists; }; typedef enum { CTL_FLAG_REAL_SYNC = 0x02, CTL_FLAG_ACTIVE_SHELF = 0x04 } ctl_gen_flags; #define CTL_MAX_THREADS 16 struct ctl_thread { struct mtx_padalign queue_lock; struct ctl_softc *ctl_softc; struct thread *thread; STAILQ_HEAD(, ctl_io_hdr) incoming_queue; STAILQ_HEAD(, ctl_io_hdr) rtr_queue; STAILQ_HEAD(, ctl_io_hdr) done_queue; STAILQ_HEAD(, ctl_io_hdr) isc_queue; }; struct tpc_token; struct ctl_softc { struct mtx ctl_lock; struct cdev *dev; int open_count; struct ctl_id target; int num_disks; int num_luns; ctl_gen_flags flags; ctl_ha_mode ha_mode; int ha_id; int ha_state; int is_single; int port_offset; int persis_offset; int inquiry_pq_no_lun; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct ctl_ioctl_info ioctl_info; void *othersc_pool; struct proc *ctl_proc; int targ_online; uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32]; struct ctl_lun *ctl_luns[CTL_MAX_LUNS]; uint32_t ctl_port_mask[(CTL_MAX_PORTS + 31) / 32]; STAILQ_HEAD(, ctl_lun) lun_list; STAILQ_HEAD(, ctl_be_lun) pending_lun_queue; uint32_t num_frontends; STAILQ_HEAD(, ctl_frontend) fe_list; uint32_t num_ports; STAILQ_HEAD(, ctl_port) port_list; struct ctl_port *ctl_ports[CTL_MAX_PORTS]; uint32_t num_backends; STAILQ_HEAD(, ctl_backend_driver) be_list; struct uma_zone *io_zone; uint32_t cur_pool_id; struct ctl_thread threads[CTL_MAX_THREADS]; TAILQ_HEAD(tpc_tokens, tpc_token) tpc_tokens; struct callout tpc_timeout; }; #ifdef _KERNEL extern const struct ctl_cmd_entry ctl_cmd_table[256]; uint32_t ctl_get_initindex(struct ctl_nexus *nexus); uint32_t ctl_get_resindex(struct ctl_nexus *nexus); uint32_t ctl_port_idx(int port_num); +int ctl_lun_map_init(struct ctl_port *port); +int ctl_lun_map_deinit(struct ctl_port *port); +int ctl_lun_map_set(struct ctl_port *port, uint32_t plun, uint32_t glun); +int ctl_lun_map_unset(struct ctl_port *port, uint32_t plun); +int ctl_lun_map_unsetg(struct ctl_port *port, uint32_t glun); +uint32_t ctl_lun_map_from_port(struct ctl_port *port, uint32_t plun); +uint32_t ctl_lun_map_to_port(struct ctl_port *port, 
uint32_t glun); int ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, uint32_t total_ctl_io, void **npool); void ctl_pool_free(struct ctl_io_pool *pool); int ctl_scsi_release(struct ctl_scsiio *ctsio); int ctl_scsi_reserve(struct ctl_scsiio *ctsio); int ctl_start_stop(struct ctl_scsiio *ctsio); int ctl_sync_cache(struct ctl_scsiio *ctsio); int ctl_format(struct ctl_scsiio *ctsio); int ctl_read_buffer(struct ctl_scsiio *ctsio); int ctl_write_buffer(struct ctl_scsiio *ctsio); int ctl_write_same(struct ctl_scsiio *ctsio); int ctl_unmap(struct ctl_scsiio *ctsio); int ctl_mode_select(struct ctl_scsiio *ctsio); int ctl_mode_sense(struct ctl_scsiio *ctsio); int ctl_log_sense(struct ctl_scsiio *ctsio); int ctl_read_capacity(struct ctl_scsiio *ctsio); int ctl_read_capacity_16(struct ctl_scsiio *ctsio); int ctl_read_defect(struct ctl_scsiio *ctsio); int ctl_read_write(struct ctl_scsiio *ctsio); int ctl_cnw(struct ctl_scsiio *ctsio); int ctl_report_luns(struct ctl_scsiio *ctsio); int ctl_request_sense(struct ctl_scsiio *ctsio); int ctl_tur(struct ctl_scsiio *ctsio); int ctl_verify(struct ctl_scsiio *ctsio); int ctl_inquiry(struct ctl_scsiio *ctsio); int ctl_persistent_reserve_in(struct ctl_scsiio *ctsio); int ctl_persistent_reserve_out(struct ctl_scsiio *ctsio); int ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio); int ctl_report_supported_opcodes(struct ctl_scsiio *ctsio); int ctl_report_supported_tmf(struct ctl_scsiio *ctsio); int ctl_report_timestamp(struct ctl_scsiio *ctsio); int ctl_isc(struct ctl_scsiio *ctsio); int ctl_get_lba_status(struct ctl_scsiio *ctsio); void ctl_tpc_init(struct ctl_softc *softc); void ctl_tpc_shutdown(struct ctl_softc *softc); void ctl_tpc_lun_init(struct ctl_lun *lun); void ctl_tpc_lun_shutdown(struct ctl_lun *lun); int ctl_inquiry_evpd_tpc(struct ctl_scsiio *ctsio, int alloc_len); int ctl_receive_copy_status_lid1(struct ctl_scsiio *ctsio); int ctl_receive_copy_failure_details(struct ctl_scsiio *ctsio); int ctl_receive_copy_status_lid4(struct ctl_scsiio *ctsio); int ctl_receive_copy_operating_parameters(struct ctl_scsiio *ctsio); int ctl_extended_copy_lid1(struct ctl_scsiio *ctsio); int ctl_extended_copy_lid4(struct ctl_scsiio *ctsio); int ctl_copy_operation_abort(struct ctl_scsiio *ctsio); int ctl_populate_token(struct ctl_scsiio *ctsio); int ctl_write_using_token(struct ctl_scsiio *ctsio); int ctl_receive_rod_token_information(struct ctl_scsiio *ctsio); int ctl_report_all_rod_tokens(struct ctl_scsiio *ctsio); #endif /* _KERNEL */ #endif /* _CTL_PRIVATE_H_ */ /* * vim: ts=8 */ Index: stable/10/sys/cam/ctl/ctl_tpc_local.c =================================================================== --- stable/10/sys/cam/ctl/ctl_tpc_local.c (revision 279001) +++ stable/10/sys/cam/ctl/ctl_tpc_local.c (revision 279002) @@ -1,385 +1,367 @@ /*- * Copyright (c) 2014 Alexander Motin * Copyright (c) 2004, 2005 Silicon Graphics International Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct tpcl_softc { struct ctl_port port; int cur_tag_num; }; static struct tpcl_softc tpcl_softc; static int tpcl_init(void); static void tpcl_shutdown(void); static void tpcl_online(void *arg); static void tpcl_offline(void *arg); static int tpcl_lun_enable(void *arg, struct ctl_id target_id, int lun_id); static int tpcl_lun_disable(void *arg, struct ctl_id target_id, int lun_id); static void tpcl_datamove(union ctl_io *io); static void tpcl_done(union ctl_io *io); static struct ctl_frontend tpcl_frontend = { .name = "tpc", .init = tpcl_init, .shutdown = tpcl_shutdown, }; CTL_FRONTEND_DECLARE(ctltpc, tpcl_frontend); static int tpcl_init(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port; struct scsi_transportid_spi *tid; int len; memset(tsoftc, 0, sizeof(*tsoftc)); port = &tsoftc->port; port->frontend = &tpcl_frontend; port->port_type = CTL_PORT_INTERNAL; port->num_requested_ctl_io = 100; port->port_name = "tpc"; port->port_online = tpcl_online; port->port_offline = tpcl_offline; port->onoff_arg = tsoftc; port->lun_enable = tpcl_lun_enable; port->lun_disable = tpcl_lun_disable; port->targ_lun_arg = tsoftc; port->fe_datamove = tpcl_datamove; port->fe_done = tpcl_done; port->max_targets = 1; port->max_target_id = 0; port->max_initiators = 1; if (ctl_port_register(port) != 0) { printf("%s: tpc frontend registration failed\n", __func__); return (0); } len = sizeof(struct scsi_transportid_spi); port->init_devid = malloc(sizeof(struct ctl_devid) + len, M_CTL, M_WAITOK | M_ZERO); port->init_devid->len = len; tid = (struct scsi_transportid_spi *)port->init_devid->data; tid->format_protocol = SCSI_TRN_SPI_FORMAT_DEFAULT | SCSI_PROTO_SPI; scsi_ulto2b(0, tid->scsi_addr); scsi_ulto2b(port->targ_port, tid->rel_trgt_port_id); ctl_port_online(port); return (0); } void tpcl_shutdown(void) { struct tpcl_softc *tsoftc = &tpcl_softc; struct ctl_port *port; port = &tsoftc->port; ctl_port_offline(port); if (ctl_port_deregister(&tsoftc->port) != 0) printf("%s: ctl_frontend_deregister() failed\n", __func__); } static void tpcl_online(void *arg) { } static void tpcl_offline(void *arg) { } static int tpcl_lun_enable(void *arg, struct ctl_id target_id, int lun_id) { return (0); } static int tpcl_lun_disable(void *arg, struct ctl_id target_id, int lun_id) { return (0); } static void tpcl_datamove(union ctl_io *io) { struct ctl_sg_entry *ext_sglist, *kern_sglist; struct ctl_sg_entry ext_entry, kern_entry; int ext_sg_entries, kern_sg_entries; int ext_sg_start, ext_offset; int len_to_copy, len_copied; 
int kern_watermark, ext_watermark; struct ctl_scsiio *ctsio; int i, j; ext_sg_start = 0; ext_offset = 0; ext_sglist = NULL; CTL_DEBUG_PRINT(("%s\n", __func__)); ctsio = &io->scsiio; /* * If this is the case, we're probably doing a BBR read and don't * actually need to transfer the data. This will effectively * bit-bucket the data. */ if (ctsio->ext_data_ptr == NULL) goto bailout; /* * To simplify things here, if we have a single buffer, stick it in * a S/G entry and just make it a single entry S/G list. */ if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) { int len_seen; ext_sglist = (struct ctl_sg_entry *)ctsio->ext_data_ptr; ext_sg_entries = ctsio->ext_sg_entries; ext_sg_start = 0; ext_offset = 0; len_seen = 0; for (i = 0; i < ext_sg_entries; i++) { if ((len_seen + ext_sglist[i].len) >= ctsio->ext_data_filled) { ext_sg_start = i; ext_offset = ctsio->ext_data_filled - len_seen; break; } len_seen += ext_sglist[i].len; } } else { ext_sglist = &ext_entry; ext_sglist->addr = ctsio->ext_data_ptr; ext_sglist->len = ctsio->ext_data_len; ext_sg_entries = 1; ext_sg_start = 0; ext_offset = ctsio->ext_data_filled; } if (ctsio->kern_sg_entries > 0) { kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; kern_sg_entries = ctsio->kern_sg_entries; } else { kern_sglist = &kern_entry; kern_sglist->addr = ctsio->kern_data_ptr; kern_sglist->len = ctsio->kern_data_len; kern_sg_entries = 1; } kern_watermark = 0; ext_watermark = ext_offset; len_copied = 0; for (i = ext_sg_start, j = 0; i < ext_sg_entries && j < kern_sg_entries;) { uint8_t *ext_ptr, *kern_ptr; len_to_copy = min(ext_sglist[i].len - ext_watermark, kern_sglist[j].len - kern_watermark); ext_ptr = (uint8_t *)ext_sglist[i].addr; ext_ptr = ext_ptr + ext_watermark; if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) { /* * XXX KDM fix this! */ panic("need to implement bus address support"); #if 0 kern_ptr = bus_to_virt(kern_sglist[j].addr); #endif } else kern_ptr = (uint8_t *)kern_sglist[j].addr; kern_ptr = kern_ptr + kern_watermark; kern_watermark += len_to_copy; ext_watermark += len_to_copy; if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { CTL_DEBUG_PRINT(("%s: copying %d bytes to user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, kern_ptr, ext_ptr)); memcpy(ext_ptr, kern_ptr, len_to_copy); } else { CTL_DEBUG_PRINT(("%s: copying %d bytes from user\n", __func__, len_to_copy)); CTL_DEBUG_PRINT(("%s: from %p to %p\n", __func__, ext_ptr, kern_ptr)); memcpy(kern_ptr, ext_ptr, len_to_copy); } len_copied += len_to_copy; if (ext_sglist[i].len == ext_watermark) { i++; ext_watermark = 0; } if (kern_sglist[j].len == kern_watermark) { j++; kern_watermark = 0; } } ctsio->ext_data_filled += len_copied; CTL_DEBUG_PRINT(("%s: ext_sg_entries: %d, kern_sg_entries: %d\n", __func__, ext_sg_entries, kern_sg_entries)); CTL_DEBUG_PRINT(("%s: ext_data_len = %d, kern_data_len = %d\n", __func__, ctsio->ext_data_len, ctsio->kern_data_len)); /* XXX KDM set residual?? 
*/ bailout: io->scsiio.be_move_done(io); } static void tpcl_done(union ctl_io *io) { tpc_done(io); } uint64_t tpcl_resolve(struct ctl_softc *softc, int init_port, struct scsi_ec_cscd *cscd, uint32_t *ss) { struct scsi_ec_cscd_id *cscdid; struct ctl_port *port; struct ctl_lun *lun; - uint64_t lunid = UINT64_MAX, l; - int i; + uint64_t lunid = UINT64_MAX; if (cscd->type_code != EC_CSCD_ID) return (lunid); cscdid = (struct scsi_ec_cscd_id *)cscd; mtx_lock(&softc->ctl_lock); - if (init_port >= 0) { + if (init_port >= 0) port = softc->ctl_ports[ctl_port_idx(init_port)]; - if (port == NULL || port->lun_map == NULL) - init_port = -1; - } - if (init_port < 0) { - STAILQ_FOREACH(lun, &softc->lun_list, links) { - if (lun->lun_devid == NULL) - continue; - if (scsi_devid_match(lun->lun_devid->data, - lun->lun_devid->len, &cscdid->codeset, - cscdid->length + 4) == 0) { - lunid = lun->lun; - if (ss && lun->be_lun) - *ss = lun->be_lun->blocksize; - break; - } - } - } else { - for (i = 0; i < CTL_MAX_LUNS; i++) { - l = port->lun_map(port->targ_lun_arg, i); - if (l >= CTL_MAX_LUNS) - continue; - lun = softc->ctl_luns[l]; - if (lun == NULL || lun->lun_devid == NULL) - continue; - if (scsi_devid_match(lun->lun_devid->data, - lun->lun_devid->len, &cscdid->codeset, - cscdid->length + 4) == 0) { - lunid = lun->lun; - if (ss && lun->be_lun) - *ss = lun->be_lun->blocksize; - break; - } + else + port = NULL; + STAILQ_FOREACH(lun, &softc->lun_list, links) { + if (port != NULL && + ctl_lun_map_to_port(port, lun->lun) >= CTL_MAX_LUNS) + continue; + if (lun->lun_devid == NULL) + continue; + if (scsi_devid_match(lun->lun_devid->data, + lun->lun_devid->len, &cscdid->codeset, + cscdid->length + 4) == 0) { + lunid = lun->lun; + if (ss && lun->be_lun) + *ss = lun->be_lun->blocksize; + break; } } mtx_unlock(&softc->ctl_lock); return (lunid); }; union ctl_io * tpcl_alloc_io(void) { struct tpcl_softc *tsoftc = &tpcl_softc; return (ctl_alloc_io(tsoftc->port.ctl_pool_ref)); }; int tpcl_queue(union ctl_io *io, uint64_t lun) { struct tpcl_softc *tsoftc = &tpcl_softc; io->io_hdr.nexus.initid.id = 0; io->io_hdr.nexus.targ_port = tsoftc->port.targ_port; io->io_hdr.nexus.targ_target.id = 0; io->io_hdr.nexus.targ_lun = lun; io->scsiio.tag_num = atomic_fetchadd_int(&tsoftc->cur_tag_num, 1); io->scsiio.ext_data_filled = 0; return (ctl_queue(io)); } Index: stable/10/usr.sbin/ctladm/ctladm.8 =================================================================== --- stable/10/usr.sbin/ctladm/ctladm.8 (revision 279001) +++ stable/10/usr.sbin/ctladm/ctladm.8 (revision 279002) @@ -1,1137 +1,1170 @@ .\" .\" Copyright (c) 2003 Silicon Graphics International Corp. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions, and the following disclaimer, .\" without modification. .\" 2. Redistributions in binary form must reproduce at minimum a disclaimer .\" substantially similar to the "NO WARRANTY" disclaimer below .\" ("Disclaimer") and any redistribution must be conditioned upon .\" including a substantially similar Disclaimer requirement for further .\" binary redistribution. 
.\" .\" NO WARRANTY .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS .\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT .\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR .\" A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT .\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, .\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING .\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE .\" POSSIBILITY OF SUCH DAMAGES. .\" .\" ctladm utility man page. .\" .\" Author: Ken Merry .\" .\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $ .\" $FreeBSD$ .\" -.Dd December 17, 2014 +.Dd February 1, 2015 .Dt CTLADM 8 .Os .Sh NAME .Nm ctladm .Nd CAM Target Layer control utility .Sh SYNOPSIS .Nm .Aq Ar command .Op target:lun .Op generic args .Op command args .Nm .Ic tur .Aq target:lun .Op general options .Nm .Ic inquiry .Aq target:lun .Op general options .Nm .Ic reqsense .Aq target:lun .Op general options .Nm .Ic reportluns .Aq target:lun .Op general options .Nm .Ic read .Aq target:lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic write .Aq target:lun .Op general options .Aq Fl l Ar lba .Aq Fl d Ar datalen .Aq Fl f Ar file|- .Aq Fl b Ar blocksize_bytes .Op Fl c Ar cdbsize .Op Fl N .Nm .Ic bbrread .Aq target:lun .Op general options .Aq Fl -l Ar lba .Aq Fl -d Ar datalen .Nm .Ic readcap .Aq target:lun .Op general options .Op Fl c Ar cdbsize .Nm .Ic modesense .Aq target:lun .Aq Fl m Ar page | Fl l .Op Fl P Ar pc .Op Fl d .Op Fl S Ar subpage .Op Fl c Ar size .Nm .Ic start .Aq target:lun .Op general options .Op Fl i .Op Fl o .Nm .Ic stop .Aq target:lun .Op general options .Op Fl i .Op Fl o .Nm .Ic synccache .Aq target:lun .Op general options .Op Fl l Ar lba .Op Fl b Ar blockcount .Op Fl r .Op Fl i .Op Fl c Ar cdbsize .Nm .Ic shutdown .Op general options .Nm .Ic startup .Op general options .Nm .Ic hardstop .Nm .Ic hardstart .Nm .Ic lunlist .Nm .Ic delay .Aq target:lun .Aq Fl l Ar datamove|done .Aq Fl t Ar secs .Op Fl T Ar oneshot|cont .Nm .Ic realsync Aq on|off|query .Nm .Ic setsync interval .Aq target:lun .Aq Fl i Ar interval .Nm .Ic getsync .Aq target:lun .Nm .Ic inject .Aq Fl i Ar action .Aq Fl p Ar pattern .Op Fl r Ar lba,len .Op Fl s Ar len fmt Op Ar args .Op Fl c .Op Fl d Ar delete_id .Nm .Ic create .Aq Fl b Ar backend .Op Fl B Ar blocksize .Op Fl d Ar device_id .Op Fl l Ar lun_id .Op Fl o Ar name=value .Op Fl s Ar size_bytes .Op Fl S Ar serial_num .Op Fl t Ar device_type .Nm .Ic remove .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Op Fl o Ar name=value .Nm .Ic modify .Aq Fl b Ar backend .Aq Fl l Ar lun_id .Aq Fl s Ar size_bytes .Nm .Ic devlist .Op Fl b Ar backend .Op Fl v .Op Fl x .Nm .Ic port .Op Fl l .Op Fl o Ar on|off .Op Fl w Ar wwpn .Op Fl W Ar wwnn .Op Fl p Ar targ_port .Op Fl t Ar fe_type .Op Fl q .Op Fl x .Nm .Ic portlist .Op Fl f Ar frontend .Op Fl i +.Op Fl l .Op Fl p Ar targ_port .Op Fl q .Op Fl v .Op Fl x .Nm +.Ic lunmap +.Aq Fl p Ar targ_port +.Op Fl l Ar pLUN +.Op Fl L Ar cLUN +.Nm .Ic dumpooa .Nm .Ic dumpstructs .Nm .Ic islist .Op Fl v .Op Fl x .Nm .Ic islogout .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal 
.Nm .Ic isterminate .Aq Fl a | Fl c Ar connection-id | Fl i Ar name | Fl p Ar portal .Nm .Ic help .Sh DESCRIPTION The .Nm utility is designed to provide a way to access and control the CAM Target Layer (CTL). It provides a way to send .Tn SCSI commands to the CTL layer, and also provides some meta-commands that utilize .Tn SCSI commands. (For instance, the .Ic lunlist command is implemented using the .Tn SCSI REPORT LUNS and INQUIRY commands.) .Pp The .Nm utility has a number of primary functions, many of which require a device identifier. The device identifier takes the following form: .Bl -tag -width 14n .It target:lun Specify the target (almost always 0) and LUN number to operate on. .El Many of the primary functions of the .Nm utility take the following optional arguments: .Bl -tag -width 10n .It Fl C Ar retries Specify the number of times to retry a command in the event of failure. .It Fl D Ar device Specify the device to open. This allows opening a device other than the default device, .Pa /dev/cam/ctl , to be opened for sending commands. .It Fl I Ar id Specify the initiator number to use. By default, .Nm will use 7 as the initiator number. .El .Pp Primary commands: .Bl -tag -width 11n .It Ic tur Send the .Tn SCSI TEST UNIT READY command to the device and report whether or not it is ready. .It Ic inquiry Send the .Tn SCSI INQUIRY command to the device and display some of the returned inquiry data. .It Ic reqsense Send the .Tn SCSI REQUEST SENSE command to the device and display the returned sense information. .It Ic reportluns Send the .Tn SCSI REPORT LUNS command to the device and display supported LUNs. .It Ic read Send a .Tn SCSI READ command to the device, and write the requested data to a file or stdout. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the READ. This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar datalen Specify the length, in 512 byte blocks, of the READ request. .It Fl f Ar file Specify the destination for the data read by the READ command. Either a filename or .Sq - for stdout may be specified. .It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Data Block) size to be used for the READ request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, READ(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data to .Nm from the kernel when doing a read, just execute the command without copying data. This is to be used for performance testing. .El .It Ic write Read data from a file or stdin, and write the data to the device using the .Tn SCSI WRITE command. .Bl -tag -width 12n .It Fl l Ar lba Specify the starting Logical Block Address for the WRITE. This can be specified in decimal, octal (starting with 0), hexadecimal (starting with 0x) or any other base supported by .Xr strtoull 3 . .It Fl d Ar atalen Specify the length, in 512 byte blocks, of the WRITE request. .It Fl f Ar file Specify the source for the data to be written by the WRITE command. Either a filename or .Sq - for stdin may be specified. 
.It Fl c Ar cdbsize Specify the minimum .Tn SCSI CDB (Command Descriptor Block) size to be used for the WRITE request. Allowable values are 6, 10, 12 and 16. Depending upon the LBA and amount of data requested, a larger CDB size may be used to satisfy the request. (e.g., for LBAs above 0xffffffff, WRITE(16) must be used to satisfy the request.) .It Fl b Ar blocksize Specify the blocksize of the underlying .Tn SCSI device, so the transfer length can be calculated accurately. The blocksize can be obtained via the .Tn SCSI READ CAPACITY command. .It Fl N Do not copy data from .Nm to the kernel when doing a write, just execute the command without copying data. This is to be used for performance testing. .El .It Ic bbrread Issue a SCSI READ command to the logical device to potentially force a bad block on a disk in the RAID set to be reconstructed from the other disks in the array. This command should only be used on an array that is in the normal state. If used on a critical array, it could cause the array to go offline if the bad block to be remapped is on one of the disks that is still active in the array. .Pp The data for this particular command will be discarded, and not returned to the user. .Pp In order to determine which LUN to read from, the user should first determine which LUN the disk with a bad block belongs to. The bad disk block should then be mapped back to the logical block address for the array in order to determine which LBA to pass in to the .Ic bbrread command. .Pp This command is primarily intended for testing. In practice, bad block remapping will generally be triggered by the in-kernel Disk Aerobics and Disk Scrubbing code. .Bl -tag -width 10n .It Fl l Ar lba Specify the starting Logical Block Address. .It Fl d Ar datalen Specify the amount of data in bytes to read from the LUN. This must be a multiple of the LUN blocksize. .El .It Ic readcap Send the .Tn SCSI READ CAPACITY command to the device and display the device size and device block size. By default, READ CAPACITY(10) is used. If the device returns a maximum LBA of 0xffffffff, however, .Nm will automatically issue a READ CAPACITY(16), which is implemented as a service action of the SERVICE ACTION IN(16) opcode. The user can specify the minimum CDB size with the .Fl c argument. Valid values for the .Fl c option are 10 and 16. If a 10 byte CDB is specified, the request will be automatically reissued with a 16 byte CDB if the maximum LBA returned is 0xffffffff. .It Ic modesense Send a .Tn SCSI MODE SENSE command to the device, and display the requested mode page(s) or page list. .Bl -tag -width 10n .It Fl m Ar page Specify the mode page to display. This option and the .Fl l option are mutually exclusive. One of the two must be specified, though. Mode page numbers may be specified in decimal or hexadecimal. .It Fl l Request that the list of mode pages supported by the device be returned. This option and the .Fl m option are mutually exclusive. One of the two must be specified, though. .It Fl P Ar pc Specify the mode page control value. Possible values are: .Bl -tag -width 2n -compact .It 0 Current values. .It 1 Changeable value bitmask. .It 2 Default values. .It 3 Saved values. .El .It Fl d Disable block descriptors when sending the mode sense request. .It Fl S Ar subpage Specify the subpage used with the mode sense request. .It Fl c Ar cdbsize Specify the CDB size used for the mode sense request. Supported values are 6 and 10. .El .It Ic start Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit set.
.Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .It Fl o Set the Copan proprietary on/offline bit in the CDB. When this flag is used, the LUN will be marked online again (see the description of the .Ic shutdown and .Ic startup commands). When this flag is used with a start command, the LUN will NOT be spun up. You need to use a start command without the .Fl o flag to spin up the disks in the LUN. .El .It Ic stop Send the .Tn SCSI START STOP UNIT command to the specified LUN with the start bit cleared. We use an ordered tag to stop the LUN, so we can guarantee that all pending I/O executes before it is stopped. (CTL guarantees this anyway, but .Nm sends an ordered tag for completeness.) .Bl -tag -width 4n .It Fl i Set the immediate bit in the CDB. Note that CTL does not support the immediate bit, so this is primarily useful for making sure that CTL returns the proper error. .It Fl o Set the Copan proprietary on/offline bit in the CDB. When this flag is used, the LUN will be spun down and taken offline ("Logical unit not ready, manual intervention required"). See the description of the .Ic shutdown and .Ic startup options. .El .It Ic synccache Send the .Tn SCSI SYNCHRONIZE CACHE command to the device. By default, SYNCHRONIZE CACHE(10) is used. If the specified starting LBA is greater than 0xffffffff or the length is greater than 0xffff, though, SYNCHRONIZE CACHE(16) will be used. The 16 byte command will also be used if the user specifies a 16 byte CDB with the .Fl c argument. .Bl -tag -width 14n .It Fl l Ar lba Specify the starting LBA of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl b Ar blockcount Specify the length of the cache region to synchronize. This option is a no-op for CTL. If you send a SYNCHRONIZE CACHE command, it will sync the cache for the entire LUN. .It Fl r Specify relative addressing for the starting LBA. CTL does not support relative addressing, since it only works for linked commands, and CTL does not support linked commands. .It Fl i Tell the target to return status immediately after issuing the SYNCHRONIZE CACHE command rather than waiting for the cache to finish syncing. CTL does not support this bit. .It Fl c Ar cdbsize Specify the minimum CDB size. Valid values are 10 and 16 bytes. .El .It Ic shutdown Issue a .Tn SCSI START STOP UNIT command with the start bit cleared and the on/offline bit set to all direct access LUNs. This will spin down all direct access LUNs, and mark them offline ("Logical unit not ready, manual intervention required"). Once marked offline, the state can only be cleared by sending a START STOP UNIT command with the start bit set and the on/offline bit set. The .Nm commands .Ic startup and .Ic start will accomplish this. Note that the on/offline bit is a non-standard Copan extension to the .Tn SCSI START STOP UNIT command, so merely sending a normal start command from an initiator will not clear the condition. (This is by design.) .It Ic startup Issue a .Tn SCSI START STOP UNIT command with the start bit set and the on/offline bit set to all direct access LUNs. This will mark all direct access LUNs "online" again. It will not cause any LUNs to start up. A separate start command without the on/offline bit set is necessary for that. 
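.Pp For example, a sketch of bringing direct access LUNs back into service after a .Ic shutdown (the target:lun pair in the second command is illustrative): .Dl ctladm startup .Dl ctladm start 0:0 .Pp The first command marks the LUNs online again; the second spins up LUN 0.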
.It Ic hardstop Use the kernel facility for stopping all direct access LUNs and setting the offline bit. Unlike the .Ic shutdown command above, this command allows shutting down LUNs with I/O active. It will also issue a LUN reset to any reserved LUNs to break the reservation so that the LUN can be stopped. In normal operation it is better to use the .Ic shutdown command instead. .It Ic hardstart This command is functionally identical to the .Ic startup command described above. The primary difference is that the LUNs are enumerated and commands sent by the in-kernel Front End Target Driver instead of by .Nm . .It Ic lunlist List all LUNs registered with CTL. Because this command uses the ioctl port, it will only work when the FETDs (Front End Target Drivers) are enabled. This command is the equivalent of doing a REPORT LUNS on one LUN and then an INQUIRY on each LUN in the system. .It Ic delay Delay commands at the given location. There are two places where commands may be delayed currently: before data is transferred .Pq Dq datamove and just prior to sending status to the host .Pq Dq done . One of the two must be supplied as an argument to the .Fl l option. The .Fl t option must also be specified. .Bl -tag -width 12n .It Fl l Ar delayloc Delay command(s) at the specified location. This can either be at the data movement stage (datamove) or prior to command completion (done). .It Fl t Ar delaytime Delay command(s) for the specified number of seconds. This must be specified. If set to 0, it will clear out any previously set delay for this particular location (datamove or done). .It Fl T Ar delaytype Specify the delay type. By default, the .Ic delay option will delay the next command sent to the given LUN. With the .Fl T Ar cont option, every command will be delayed by the specified period of time. With the .Fl T Ar oneshot option, the next command sent to the given LUN will be delayed and all subsequent commands will be completed normally. This is the default. .El .It Ic realsync Query and control CTL's SYNCHRONIZE CACHE behavior. The .Sq query argument will show whether SYNCHRONIZE CACHE commands are being sent to the backend or not. The default is to send SYNCHRONIZE CACHE commands to the backend. The .Sq on argument will cause all SYNCHRONIZE CACHE commands sent to all LUNs to be sent to the backend. The .Sq off argument will cause all SYNCHRONIZE CACHE commands sent to all LUNs to be immediately returned to the initiator with successful status. .It Ic setsync For a given LUN, only actually service every Nth SYNCHRONIZE CACHE command that is sent. This can be used to determine the optimal interval for sending SYNCHRONIZE CACHE commands. An interval of 0 means that the cache will be flushed for this LUN every time a SYNCHRONIZE CACHE command is received. .Pp You must specify the target and LUN you want to modify. .It Ic getsync Get the interval at which we actually service the SYNCHRONIZE CACHE command, as set by the .Ic setsync command above. The reported number means that we will actually flush the cache on every Nth SYNCHRONIZE CACHE command. A value of 0 means that we will flush the cache every time. .Pp You must specify the target and LUN you want to query. .It Ic inject Inject the specified type of error for the LUN specified, when a command that matches the given pattern is seen. The sense data returned is in either fixed or descriptor format, depending upon the status of the D_SENSE bit in the control mode page (page 0xa) for the LUN. .Pp Errors are only injected for commands that have not already failed for other reasons.
By default, only the first command matching the pattern specified is returned with the supplied error. .Pp If the .Fl c flag is specified, all commands matching the pattern will be returned with the specified error until the error injection command is deleted with the .Fl d flag. .Bl -tag -width 17n .It Fl i Ar action Specify the error to return: .Bl -tag -width 10n .It aborted Return the next matching command on the specified LUN with the sense key ABORTED COMMAND (0x0b), and the ASC/ASCQ 0x45,0x00 ("Select or reselect failure"). .It mediumerr Return the next matching command on the specified LUN with the sense key MEDIUM ERROR (0x03) and the ASC/ASCQ 0x11,0x00 ("Unrecovered read error") for reads, or ASC/ASCQ 0x0c,0x02 ("Write error - auto reallocation failed") for write errors. .It ua Return the next matching command on the specified LUN with the sense key UNIT ATTENTION (0x06) and the ASC/ASCQ 0x29,0x00 ("POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"). .It custom Return the next matching command on the specified LUN with the supplied sense data. The .Fl s argument must be specified. .El .It Fl p Ar pattern Specify which commands should be returned with the given error. .Bl -tag -width 10n .It read The error should apply to READ(6), READ(10), READ(12), READ(16), etc. .It write The error should apply to WRITE(6), WRITE(10), WRITE(12), WRITE(16), WRITE AND VERIFY(10), etc. .It rw The error should apply to both read and write type commands. .It readcap The error should apply to READ CAPACITY(10) and READ CAPACITY(16) commands. .It tur The error should apply to TEST UNIT READY commands. .It any The error should apply to any command. .El .It Fl r Ar lba,len Specify the starting LBA and length of the range of LBAs which should trigger an error. This option only applies when read and/or write patterns are specified. If used with other command types, the error will never be triggered. .It Fl s Ar len fmt Op Ar args Specify the sense data that is to be returned for custom actions. If the format is .Sq - , len bytes of sense data will be read from standard input and written to the sense buffer. If len is longer than 252 bytes (the maximum allowable .Tn SCSI sense data length), it will be truncated to that length. The sense data format is described in .Xr cam_cdbparse 3 . .It Fl c The error injection should be persistent, instead of happening once. Persistent errors must be deleted with the .Fl d argument. .It Fl d Ar delete_id Delete the specified error injection serial number. The serial number is returned when the error is injected. .El .It Ic port Perform one of several CTL frontend port operations. Either get a list of frontend ports .Pq Fl l , turn one or more frontends on or off .Pq Fl o Ar on|off , or set the World Wide Node Name .Pq Fl w Ar wwnn or World Wide Port Name .Pq Fl W Ar wwpn for a given port. One of .Fl l , .Fl o , or .Fl w or .Fl W must be specified. The WWNN and WWPN may both be specified at the same time, but cannot be combined with enabling/disabling or listing ports. .Bl -tag -width 12n .It Fl l List all CTL frontend ports or a specific port type or number. .It Fl o Ar on|off Turn the specified CTL frontend ports off or on. If no port number or port type is specified, all ports are turned on or off. .It Fl p Ar targ_port Specify the frontend port number. The port numbers can be found in the frontend port list. .It Fl q Omit the header in the port list output. .It Fl t Ar fe_type Specify the frontend type.
Currently defined port types are .Dq fc (Fibre Channel), .Dq scsi (Parallel SCSI), .Dq ioctl (CTL ioctl interface), and .Dq internal (CTL CAM SIM). .It Fl w Ar wwnn Set the World Wide Node Name for the given port. The .Fl p argument must be specified, since the name can only be set on a single port at a time. As a general rule, the WWNN should be the same across all ports on the system. .It Fl W Ar wwpn Set the World Wide Port Name for the given port. The .Fl p argument must be specified, since the name can only be set on a single port at a time. As a general rule, the WWPN must be different for every port in the system. .It Fl x Output the port list in XML format. .El .It Ic portlist List CTL frontend ports. .Bl -tag -width 12n .It Fl f Ar frontend Specify the frontend type. .It Fl i -Report target and connected initiators addresses +Report target and connected initiator addresses. +.It Fl l +Report LUN mapping. .It Fl p Ar targ_port Specify the frontend port number. .It Fl q Omit the header in the port list output. .It Fl v Enable verbose output (report all port options). .It Fl x Output the port list in XML format. +.El +.It Ic lunmap +Change the LUN mapping for the specified port. +If both +.Ar pLUN +and +.Ar cLUN +are specified, the LUN will be mapped. +If +.Ar pLUN +is specified, but +.Ar cLUN +is not, the LUN will be unmapped. +If neither +.Ar pLUN +nor +.Ar cLUN +is specified, LUN mapping will be disabled, exposing all CTL LUNs. +.Bl -tag -width 12n +.It Fl p Ar targ_port +Specify the frontend port number. +.It Fl l Ar pLUN +The LUN number visible on the specified port. +.It Fl L Ar cLUN +The CTL LUN number. .El .It Ic dumpooa Dump the OOA (Order Of Arrival) queue for each LUN registered with CTL. .It Ic dumpstructs Dump the CTL structures to the console. .It Ic create Create a new LUN. The backend must be specified, and depending upon the backend requested, some of the other options may be required. If the LUN is created successfully, the LUN configuration will be displayed. If LUN creation fails, a message will be displayed describing the failure. .Bl -tag -width 14n .It Fl b Ar backend The .Fl b flag is required. This specifies the name of the backend to use when creating the LUN. Examples are .Dq ramdisk and .Dq block . .It Fl B Ar blocksize Specify the blocksize of the backend in bytes. .It Fl d Ar device_id Specify the LUN-associated string to use in the .Tn SCSI INQUIRY VPD page 0x83 data. .It Fl l Ar lun_id Request that a particular LUN number be assigned. If the requested LUN number is not available, the request will fail. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. Some backends may allow setting the size (e.g. the ramdisk backend) and for others the size may be implicit (e.g. the block backend). .It Fl S Ar serial_num Specify the serial number to be used in the .Tn SCSI INQUIRY VPD page 0x80 data. .It Fl t Ar device_type Specify the numeric SCSI device type to use when creating the LUN. For example, the Direct Access type is 0. If this flag is not used, the type of LUN created is backend-specific. Not all LUN types are supported. Currently CTL only supports Direct Access (type 0) and Processor (type 3) LUNs. The backend requested may or may not support all of the LUN types that CTL supports. .El .It Ic remove Remove a LUN. The backend must be specified, and the LUN number must also be specified.
Backend-specific options may also be specified with the .Fl o flag. .Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be removed. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to remove. .It Fl o Ar name=value Specify a backend-specific name/value pair. Multiple .Fl o arguments may be specified. Refer to the backend documentation for arguments that may be used. .El .It Ic modify Modify the size of a LUN. The backend, the LUN number, and the size must be specified. .Bl -tag -width 14n .It Fl b Ar backend Specify the backend that owns the LUN to be modified. Examples are .Dq ramdisk and .Dq block . .It Fl l Ar lun_id Specify the LUN number to modify. .It Fl s Ar size_bytes Specify the size of the LUN in bytes. For the .Dq block backend, an .Dq auto keyword may be passed instead; this will make CTL use the size of the backing file or device. .El .It Ic devlist Get a list of all configured LUNs. This also includes the LUN size and blocksize, serial number and device ID. .Bl -tag -width 11n .It Fl b Ar backend Specify the backend. This restricts the LUN list to the named backend. Examples are .Dq ramdisk and .Dq block . .It Fl v Be verbose. This will also display any backend-specific LUN attributes in addition to the standard per-LUN information. .It Fl x Dump the raw XML. The LUN list information from the kernel comes in XML format, and this option allows the display of the raw XML data. This option and the .Fl v and .Fl b options are mutually exclusive. If you specify .Fl x , the entire LUN database is displayed in XML format. .El .It Ic islist Get a list of currently running iSCSI connections. This includes initiator and target names and the unique connection IDs. .Bl -tag -width 11n .It Fl v Verbose mode. .It Fl x Dump the raw XML. The connections list information from the kernel comes in XML format, and this option allows the display of the raw XML data. .El .It Ic islogout Ask the initiator to log out iSCSI connections matching the criteria. .Bl -tag -width 11n .It Fl a Log out all connections. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic isterminate Forcibly terminate iSCSI connections matching the criteria. .Bl -tag -width 11n .It Fl a Terminate all connections. .It Fl c Specify connection ID. .It Fl i Specify initiator name. .It Fl p Specify initiator portal (hostname or IP address). .El .It Ic help Display .Nm usage information. .El .Sh OPTIONS A number of additional configuration options may be specified for LUNs. Some options are global, others are backend-specific. .Pp Global options: .Bl -tag -width 12n .It Va vendor Specifies the LUN vendor string, up to 8 characters. .It Va product Specifies the LUN product string, up to 16 characters. .It Va revision Specifies the LUN revision string, up to 4 characters. .It Va scsiname Specifies the LUN SCSI name string. .It Va eui Specifies the LUN EUI-64 identifier. .It Va naa Specifies the LUN NAA identifier. Either the EUI or the NAA identifier should be set to a unique value to allow the EXTENDED COPY command to access the LUN. Non-unique LUN identifiers may lead to data corruption. .It Va insecure_tpc Setting this to "on" allows an EXTENDED COPY command sent to this LUN to access other LUNs on this host that are not otherwise accessible. This makes it possible to offload copying between different iSCSI targets residing on the same host in trusted environments. .It Va readcache When set to "off", disables read caching for the LUN, if supported by the backend.
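.Pp These options are supplied at LUN creation time through the .Ic create command's .Fl o Ar name=value flag. For example, a sketch that creates a block-backed LUN with read caching disabled (the backing file path is illustrative): .Dl ctladm create -b block -o file=/tmp/lun0.img -o readcache=off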
.It Va readonly When set to "on", blocks all media write operations to the LUN, reporting it as write protected. .It Va reordering When set to "unrestricted", allows the target to process commands with the SIMPLE task attribute in arbitrary order. Any data integrity exposures related to command sequence order shall be explicitly handled by the application client through the selection of appropriate commands and task attributes. The default value is "restricted"; it improves data integrity, but may introduce some additional delays. .It Va serseq Set to "on" to serialize consecutive reads/writes. Set to "read" to serialize consecutive reads. Set to "off" to allow them to be issued in parallel. Parallel issue of consecutive operations may confuse the logic of the backing file system, hurting performance; but it may improve performance of backing stores without prefetch/write-back. .It Va pblocksize .It Va pblockoffset Specify the physical block size and offset of the device. .It Va ublocksize .It Va ublockoffset Specify the UNMAP block size and offset of the device. .It Va rpm Specifies the medium rotation rate of the device: 0 -- not reported, 1 -- non-rotating (SSD), >1024 -- value in revolutions per minute. .It Va formfactor Specifies the nominal form factor of the device: 0 -- not reported, 1 -- 5.25", 2 -- 3.5", 3 -- 2.5", 4 -- 1.8", 5 -- less than 1.8". .It Va unmap When set to "on", enables UNMAP support for the LUN, if supported by the backend. .It Va avail-threshold .It Va used-threshold .It Va pool-avail-threshold .It Va pool-used-threshold Set per-LUN/per-pool thin provisioning soft thresholds. The LUN will establish a UNIT ATTENTION condition if its own or its pool's available space falls below the configured avail values, or if its own or its pool's used space rises above the configured used values. Pool thresholds only work for ZVOL-backed LUNs. .It Va writecache When set to "off", disables write caching for the LUN, if supported by the backend. .El .Pp Options specific to the block backend: .Bl -tag -width 12n .It Va file Specifies the file or device name to use for the backing store. .It Va num_threads Specifies the number of backend threads to use for this LUN. .El .Sh EXAMPLES .Dl ctladm tur 0:1 .Pp Send a .Tn SCSI TEST UNIT READY command to LUN 1. .Pp .Dl ctladm modesense 0:1 -l .Pp Display the list of mode pages supported by LUN 1. .Pp .Dl ctladm modesense 0:0 -m 10 -P 3 -d -c 10 .Pp Display the saved version of the Control mode page (page 10) on LUN 0. Disable fetching block descriptors, and use a 10 byte MODE SENSE command instead of the default 6 byte command. .Bd -literal ctladm read 0:2 -l 0 -d 1 -b 512 -f - > foo .Ed .Pp Read the first 512 byte block from LUN 2 and dump it to the file .Pa foo . .Bd -literal ctladm write 0:3 -l 0xff432140 -d 20 -b 512 -f /tmp/bar .Ed .Pp Read 10240 bytes from the file .Pa /tmp/bar and write it to target 0, LUN 3, starting at LBA 0xff432140. .Pp .Dl ctladm create -b ramdisk -s 10485760000000000 .Pp Create a LUN with the .Dq fake ramdisk as a backing store. The LUN will claim to have a size of approximately 10 terabytes. .Pp .Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8 .Pp Create a LUN using the block backend, and specify the file .Pa src/usr.sbin/ctladm/ctladm.8 as the backing store. The size of the LUN will be derived from the size of the file.
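.Pp The following examples exercise LUN mapping; the port and LUN numbers are illustrative. .Pp .Dl ctladm lunmap -p 1 -l 0 -L 12 .Pp Expose CTL LUN 12 as LUN 0 on frontend port 1. .Pp .Dl ctladm lunmap -p 1 .Pp Disable LUN mapping on frontend port 1, exposing all CTL LUNs. .Pp .Dl ctladm portlist -l .Pp List the frontend ports along with their LUN mapping.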
.Pp .Dl ctladm create -b block -o file=src/usr.sbin/ctladm/ctladm.8 -S MYSERIAL321 -d MYDEVID123 .Pp Create a LUN using the block backend, specify the file .Pa src/usr.sbin/ctladm/ctladm.8 as the backing store, and specify the .Tn SCSI VPD page 0x80 and 0x83 serial number .Fl ( S ) and device ID .Fl ( d ) . .Pp .Dl ctladm remove -b block -l 12 .Pp Remove LUN 12, which is handled by the block backend, from the system. .Pp .Dl ctladm devlist .Pp List configured LUNs in the system, along with their backend and serial number. This works whether the Front End Target Drivers are enabled or disabled. .Pp .Dl ctladm lunlist .Pp List all LUNs in the system, along with their inquiry data and device type. This only works when the FETDs are enabled, since the commands go through the ioctl port. .Pp .Dl ctladm inject 0:6 -i mediumerr -p read -r 0,512 -c .Pp Inject a medium error on LUN 6 for every read that covers the first 512 blocks of the LUN. .Bd -literal -offset indent ctladm inject 0:6 -i custom -p tur -s 18 "f0 0 02 s12 04 02" .Ed .Pp Inject a custom error on LUN 6 for the next TEST UNIT READY command only. This will result in a sense key of NOT READY (0x02), and an ASC/ASCQ of 0x04,0x02 ("Logical unit not ready, initializing command required"). .Sh SEE ALSO .Xr cam 3 , .Xr cam_cdbparse 3 , .Xr cam 4 , .Xr ctl 4 , .Xr xpt 4 , .Xr camcontrol 8 , .Xr ctld 8 .Sh HISTORY The .Nm utility was originally written during the Winter/Spring of 2003 as an interface to CTL. .Sh AUTHORS .An Ken Merry Aq ken@FreeBSD.org Index: stable/10/usr.sbin/ctladm/ctladm.c =================================================================== --- stable/10/usr.sbin/ctladm/ctladm.c (revision 279001) +++ stable/10/usr.sbin/ctladm/ctladm.c (revision 279002) @@ -1,4861 +1,4929 @@ /*- * Copyright (c) 2003, 2004 Silicon Graphics International Corp. * Copyright (c) 1997-2007 Kenneth D. Merry * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.c#4 $ */ /* * CAM Target Layer exercise program.
* * Author: Ken Merry */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ctladm.h" #ifdef min #undef min #endif #define min(x,y) (x < y) ? x : y typedef enum { CTLADM_CMD_TUR, CTLADM_CMD_INQUIRY, CTLADM_CMD_REQ_SENSE, CTLADM_CMD_ARRAYLIST, CTLADM_CMD_REPORT_LUNS, CTLADM_CMD_HELP, CTLADM_CMD_DEVLIST, CTLADM_CMD_ADDDEV, CTLADM_CMD_RM, CTLADM_CMD_CREATE, CTLADM_CMD_READ, CTLADM_CMD_WRITE, CTLADM_CMD_PORT, CTLADM_CMD_PORTLIST, CTLADM_CMD_READCAPACITY, CTLADM_CMD_MODESENSE, CTLADM_CMD_DUMPOOA, CTLADM_CMD_DUMPSTRUCTS, CTLADM_CMD_START, CTLADM_CMD_STOP, CTLADM_CMD_SYNC_CACHE, CTLADM_CMD_SHUTDOWN, CTLADM_CMD_STARTUP, CTLADM_CMD_LUNLIST, CTLADM_CMD_HARDSTOP, CTLADM_CMD_HARDSTART, CTLADM_CMD_DELAY, CTLADM_CMD_REALSYNC, CTLADM_CMD_SETSYNC, CTLADM_CMD_GETSYNC, CTLADM_CMD_ERR_INJECT, CTLADM_CMD_BBRREAD, CTLADM_CMD_PRES_IN, CTLADM_CMD_PRES_OUT, CTLADM_CMD_INQ_VPD_DEVID, CTLADM_CMD_RTPG, CTLADM_CMD_MODIFY, CTLADM_CMD_ISLIST, CTLADM_CMD_ISLOGOUT, - CTLADM_CMD_ISTERMINATE + CTLADM_CMD_ISTERMINATE, + CTLADM_CMD_LUNMAP } ctladm_cmdfunction; typedef enum { CTLADM_ARG_NONE = 0x0000000, CTLADM_ARG_AUTOSENSE = 0x0000001, CTLADM_ARG_DEVICE = 0x0000002, CTLADM_ARG_ARRAYSIZE = 0x0000004, CTLADM_ARG_BACKEND = 0x0000008, CTLADM_ARG_CDBSIZE = 0x0000010, CTLADM_ARG_DATALEN = 0x0000020, CTLADM_ARG_FILENAME = 0x0000040, CTLADM_ARG_LBA = 0x0000080, CTLADM_ARG_PC = 0x0000100, CTLADM_ARG_PAGE_CODE = 0x0000200, CTLADM_ARG_PAGE_LIST = 0x0000400, CTLADM_ARG_SUBPAGE = 0x0000800, CTLADM_ARG_PAGELIST = 0x0001000, CTLADM_ARG_DBD = 0x0002000, CTLADM_ARG_TARG_LUN = 0x0004000, CTLADM_ARG_BLOCKSIZE = 0x0008000, CTLADM_ARG_IMMED = 0x0010000, CTLADM_ARG_RELADR = 0x0020000, CTLADM_ARG_RETRIES = 0x0040000, CTLADM_ARG_ONOFFLINE = 0x0080000, CTLADM_ARG_ONESHOT = 0x0100000, CTLADM_ARG_TIMEOUT = 0x0200000, CTLADM_ARG_INITIATOR = 0x0400000, CTLADM_ARG_NOCOPY = 0x0800000, CTLADM_ARG_NEED_TL = 0x1000000 } ctladm_cmdargs; struct ctladm_opts { const char *optname; uint32_t cmdnum; ctladm_cmdargs argnum; const char *subopt; }; typedef enum { CC_OR_NOT_FOUND, CC_OR_AMBIGUOUS, CC_OR_FOUND } ctladm_optret; static const char rw_opts[] = "Nb:c:d:f:l:"; static const char startstop_opts[] = "io"; static struct ctladm_opts option_table[] = { {"adddev", CTLADM_CMD_ADDDEV, CTLADM_ARG_NONE, NULL}, {"bbrread", CTLADM_CMD_BBRREAD, CTLADM_ARG_NEED_TL, "d:l:"}, {"create", CTLADM_CMD_CREATE, CTLADM_ARG_NONE, "b:B:d:l:o:s:S:t:"}, {"delay", CTLADM_CMD_DELAY, CTLADM_ARG_NEED_TL, "T:l:t:"}, {"devid", CTLADM_CMD_INQ_VPD_DEVID, CTLADM_ARG_NEED_TL, NULL}, {"devlist", CTLADM_CMD_DEVLIST, CTLADM_ARG_NONE, "b:vx"}, {"dumpooa", CTLADM_CMD_DUMPOOA, CTLADM_ARG_NONE, NULL}, {"dumpstructs", CTLADM_CMD_DUMPSTRUCTS, CTLADM_ARG_NONE, NULL}, {"getsync", CTLADM_CMD_GETSYNC, CTLADM_ARG_NEED_TL, NULL}, {"hardstart", CTLADM_CMD_HARDSTART, CTLADM_ARG_NONE, NULL}, {"hardstop", CTLADM_CMD_HARDSTOP, CTLADM_ARG_NONE, NULL}, {"help", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {"inject", CTLADM_CMD_ERR_INJECT, CTLADM_ARG_NEED_TL, "cd:i:p:r:s:"}, {"inquiry", CTLADM_CMD_INQUIRY, CTLADM_ARG_NEED_TL, NULL}, {"islist", CTLADM_CMD_ISLIST, CTLADM_ARG_NONE, "vx"}, {"islogout", CTLADM_CMD_ISLOGOUT, CTLADM_ARG_NONE, "ac:i:p:"}, {"isterminate", CTLADM_CMD_ISTERMINATE, CTLADM_ARG_NONE, "ac:i:p:"}, {"lunlist", CTLADM_CMD_LUNLIST, 
CTLADM_ARG_NONE, NULL}, + {"lunmap", CTLADM_CMD_LUNMAP, CTLADM_ARG_NONE, "p:l:L:"}, {"modesense", CTLADM_CMD_MODESENSE, CTLADM_ARG_NEED_TL, "P:S:dlm:c:"}, {"modify", CTLADM_CMD_MODIFY, CTLADM_ARG_NONE, "b:l:s:"}, {"port", CTLADM_CMD_PORT, CTLADM_ARG_NONE, "lo:p:qt:w:W:x"}, - {"portlist", CTLADM_CMD_PORTLIST, CTLADM_ARG_NONE, "f:ip:qvx"}, + {"portlist", CTLADM_CMD_PORTLIST, CTLADM_ARG_NONE, "f:ilp:qvx"}, {"prin", CTLADM_CMD_PRES_IN, CTLADM_ARG_NEED_TL, "a:"}, {"prout", CTLADM_CMD_PRES_OUT, CTLADM_ARG_NEED_TL, "a:k:r:s:"}, {"read", CTLADM_CMD_READ, CTLADM_ARG_NEED_TL, rw_opts}, {"readcapacity", CTLADM_CMD_READCAPACITY, CTLADM_ARG_NEED_TL, "c:"}, {"realsync", CTLADM_CMD_REALSYNC, CTLADM_ARG_NONE, NULL}, {"remove", CTLADM_CMD_RM, CTLADM_ARG_NONE, "b:l:o:"}, {"reportluns", CTLADM_CMD_REPORT_LUNS, CTLADM_ARG_NEED_TL, NULL}, {"reqsense", CTLADM_CMD_REQ_SENSE, CTLADM_ARG_NEED_TL, NULL}, {"rtpg", CTLADM_CMD_RTPG, CTLADM_ARG_NEED_TL, NULL}, {"setsync", CTLADM_CMD_SETSYNC, CTLADM_ARG_NEED_TL, "i:"}, {"shutdown", CTLADM_CMD_SHUTDOWN, CTLADM_ARG_NONE, NULL}, {"start", CTLADM_CMD_START, CTLADM_ARG_NEED_TL, startstop_opts}, {"startup", CTLADM_CMD_STARTUP, CTLADM_ARG_NONE, NULL}, {"stop", CTLADM_CMD_STOP, CTLADM_ARG_NEED_TL, startstop_opts}, {"synccache", CTLADM_CMD_SYNC_CACHE, CTLADM_ARG_NEED_TL, "b:c:il:r"}, {"tur", CTLADM_CMD_TUR, CTLADM_ARG_NEED_TL, NULL}, {"write", CTLADM_CMD_WRITE, CTLADM_ARG_NEED_TL, rw_opts}, {"-?", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {"-h", CTLADM_CMD_HELP, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; ctladm_optret getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum, ctladm_cmdargs *argnum, const char **subopt); static int cctl_parse_tl(char *str, int *target, int *lun); static int cctl_dump_ooa(int fd, int argc, char **argv); static int cctl_port_dump(int fd, int quiet, int xml, int32_t fe_num, ctl_port_type port_type); static int cctl_port(int fd, int argc, char **argv, char *combinedopt); static int cctl_do_io(int fd, int retries, union ctl_io *io, const char *func); static int cctl_delay(int fd, int target, int lun, int argc, char **argv, char *combinedopt); static int cctl_lunlist(int fd); static void cctl_cfi_mt_statusstr(cfi_mt_status status, char *str, int str_len); static void cctl_cfi_bbr_statusstr(cfi_bbrread_status, char *str, int str_len); static int cctl_hardstopstart(int fd, ctladm_cmdfunction command); static int cctl_bbrread(int fd, int target, int lun, int iid, int argc, char **argv, char *combinedopt); static int cctl_startup_shutdown(int fd, int target, int lun, int iid, ctladm_cmdfunction command); static int cctl_sync_cache(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_start_stop(int fd, int target, int lun, int iid, int retries, int start, int argc, char **argv, char *combinedopt); static int cctl_mode_sense(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_read_capacity(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt); static int cctl_read_write(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt, ctladm_cmdfunction command); static int cctl_get_luns(int fd, int target, int lun, int iid, int retries, struct scsi_report_luns_data **lun_data, uint32_t *num_luns); static int cctl_report_luns(int fd, int target, int lun, int iid, int retries); static int cctl_tur(int fd, int target, int lun, int iid, int retries); static int 
cctl_get_inquiry(int fd, int target, int lun, int iid, int retries, char *path_str, int path_len, struct scsi_inquiry_data *inq_data); static int cctl_inquiry(int fd, int target, int lun, int iid, int retries); static int cctl_req_sense(int fd, int target, int lun, int iid, int retries); static int cctl_persistent_reserve_in(int fd, int target, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count); static int cctl_persistent_reserve_out(int fd, int target, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count); static int cctl_create_lun(int fd, int argc, char **argv, char *combinedopt); static int cctl_inquiry_vpd_devid(int fd, int target, int lun, int initiator); static int cctl_report_target_port_group(int fd, int target, int lun, int initiator); static int cctl_modify_lun(int fd, int argc, char **argv, char *combinedopt); ctladm_optret getoption(struct ctladm_opts *table, char *arg, uint32_t *cmdnum, ctladm_cmdargs *argnum, const char **subopt) { struct ctladm_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (strcmp(opts->optname, arg) == 0) return (CC_OR_FOUND); if (++num_matches > 1) return(CC_OR_AMBIGUOUS); } } if (num_matches > 0) return(CC_OR_FOUND); else return(CC_OR_NOT_FOUND); } static int cctl_parse_tl(char *str, int *target, int *lun) { char *tmpstr; int retval; retval = 0; while (isspace(*str) && (*str != '\0')) str++; tmpstr = (char *)strtok(str, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); } else retval = -1; } else retval = -1; return (retval); } static int cctl_dump_ooa(int fd, int argc, char **argv) { struct ctl_ooa ooa; long double cmd_latency; int num_entries, len; int target = -1, lun = -1; int retval; unsigned int i; num_entries = 104; if ((argc > 2) && (isdigit(argv[2][0]))) { retval = cctl_parse_tl(argv[2], &target, &lun); if (retval != 0) warnx("invalid target:lun argument %s", argv[2]); } retry: len = num_entries * sizeof(struct ctl_ooa_entry); bzero(&ooa, sizeof(ooa)); ooa.entries = malloc(len); if (ooa.entries == NULL) { warn("%s: error mallocing %d bytes", __func__, len); return (1); } if (argc > 2) { ooa.lun_num = lun; } else ooa.flags |= CTL_OOA_FLAG_ALL_LUNS; ooa.alloc_len = len; ooa.alloc_num = num_entries; if (ioctl(fd, CTL_GET_OOA, &ooa) == -1) { warn("%s: CTL_GET_OOA ioctl failed", __func__); retval = 1; goto bailout; } if (ooa.status == CTL_OOA_NEED_MORE_SPACE) { num_entries = num_entries * 2; free(ooa.entries); ooa.entries = NULL; goto retry; } if (ooa.status != CTL_OOA_OK) { warnx("%s: CTL_GET_OOA ioctl returned error %d", __func__, ooa.status); retval = 1; goto bailout; } fprintf(stdout, "Dumping OOA queues\n"); for (i = 0; i < ooa.fill_num; i++) { struct ctl_ooa_entry *entry; char cdb_str[(SCSI_MAX_CDBLEN * 3) +1]; struct bintime delta_bt; struct timespec ts; entry = &ooa.entries[i]; delta_bt = ooa.cur_bt; bintime_sub(&delta_bt, &entry->start_bt); bintime2timespec(&delta_bt, &ts); cmd_latency = ts.tv_sec * 1000; if (ts.tv_nsec > 0) cmd_latency += ts.tv_nsec / 1000000; fprintf(stdout, "LUN %jd tag 0x%04x%s%s%s%s%s: %s. CDB: %s " "(%0.0Lf ms)\n", (intmax_t)entry->lun_num, entry->tag_num, (entry->cmd_flags & CTL_OOACMD_FLAG_BLOCKED) ? 
" BLOCKED" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_DMA) ? " DMA" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_DMA_QUEUED) ? " DMAQUEUED" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_ABORT) ? " ABORT" : "", (entry->cmd_flags & CTL_OOACMD_FLAG_RTR) ? " RTR" :"", scsi_op_desc(entry->cdb[0], NULL), scsi_cdb_string(entry->cdb, cdb_str, sizeof(cdb_str)), cmd_latency); } fprintf(stdout, "OOA queues dump done\n"); #if 0 if (ioctl(fd, CTL_DUMP_OOA) == -1) { warn("%s: CTL_DUMP_OOA ioctl failed", __func__); return (1); } #endif bailout: free(ooa.entries); return (0); } static int cctl_dump_structs(int fd, ctladm_cmdargs cmdargs __unused) { if (ioctl(fd, CTL_DUMP_STRUCTS) == -1) { warn(__func__); return (1); } return (0); } static int cctl_port_dump(int fd, int quiet, int xml, int32_t targ_port, ctl_port_type port_type) { struct ctl_port_list port_list; struct ctl_port_entry *entries; struct sbuf *sb = NULL; int num_entries; int did_print = 0; unsigned int i; num_entries = 16; retry: entries = malloc(sizeof(*entries) * num_entries); bzero(&port_list, sizeof(port_list)); port_list.entries = entries; port_list.alloc_num = num_entries; port_list.alloc_len = num_entries * sizeof(*entries); if (ioctl(fd, CTL_GET_PORT_LIST, &port_list) != 0) { warn("%s: CTL_GET_PORT_LIST ioctl failed", __func__); return (1); } if (port_list.status == CTL_PORT_LIST_NEED_MORE_SPACE) { printf("%s: allocated %d, need %d, retrying\n", __func__, num_entries, port_list.fill_num + port_list.dropped_num); free(entries); num_entries = port_list.fill_num + port_list.dropped_num; goto retry; } if ((quiet == 0) && (xml == 0)) printf("Port Online Type Name pp vp %-18s %-18s\n", "WWNN", "WWPN"); if (xml != 0) { sb = sbuf_new_auto(); sbuf_printf(sb, "\n"); } for (i = 0; i < port_list.fill_num; i++) { struct ctl_port_entry *entry; const char *type; entry = &entries[i]; switch (entry->port_type) { case CTL_PORT_FC: type = "FC"; break; case CTL_PORT_SCSI: type = "SCSI"; break; case CTL_PORT_IOCTL: type = "IOCTL"; break; case CTL_PORT_INTERNAL: type = "INTERNAL"; break; case CTL_PORT_ISC: type = "ISC"; break; case CTL_PORT_ISCSI: type = "ISCSI"; break; case CTL_PORT_SAS: type = "SAS"; break; default: type = "UNKNOWN"; break; } /* * If the user specified a frontend number or a particular * frontend type, only print out that particular frontend * or frontend type. */ if ((targ_port != -1) && (targ_port != entry->targ_port)) continue; else if ((port_type != CTL_PORT_NONE) && ((port_type & entry->port_type) == 0)) continue; did_print = 1; #if 0 printf("Num: %ju Type: %s (%#x) Name: %s Physical Port: %d " "Virtual Port: %d\n", (uintmax_t)entry->fe_num, type, entry->port_type, entry->fe_name, entry->physical_port, entry->virtual_port); printf("WWNN %#jx WWPN %#jx Online: %s\n", (uintmax_t)entry->wwnn, (uintmax_t)entry->wwpn, (entry->online) ? "YES" : "NO" ); #endif if (xml == 0) { printf("%-4d %-6s %-8s %-12s %-2d %-2d %#-18jx " "%#-18jx\n", entry->targ_port, (entry->online) ? "YES" : "NO", type, entry->port_name, entry->physical_port, entry->virtual_port, (uintmax_t)entry->wwnn, (uintmax_t)entry->wwpn); } else { sbuf_printf(sb, "\n", entry->targ_port); sbuf_printf(sb, "%s\n", (entry->online) ? 
"YES" : "NO"); sbuf_printf(sb, "%s\n", type); sbuf_printf(sb, "%s\n", entry->port_name); sbuf_printf(sb, "%d\n", entry->physical_port); sbuf_printf(sb, "%d\n", entry->virtual_port); sbuf_printf(sb, "%#jx\n", (uintmax_t)entry->wwnn); sbuf_printf(sb, "%#jx\n", (uintmax_t)entry->wwpn); sbuf_printf(sb, "\n"); } } if (xml != 0) { sbuf_printf(sb, "\n"); if (sbuf_finish(sb) != 0) err(1, "%s: sbuf_finish", __func__); printf("%s", sbuf_data(sb)); sbuf_delete(sb); } /* * Give some indication that we didn't find the frontend or * frontend type requested by the user. We could print something * out, but it would probably be better to hide that behind a * verbose flag. */ if ((did_print == 0) && ((targ_port != -1) || (port_type != CTL_PORT_NONE))) return (1); else return (0); } typedef enum { CCTL_PORT_MODE_NONE, CCTL_PORT_MODE_LIST, CCTL_PORT_MODE_SET, CCTL_PORT_MODE_ON, CCTL_PORT_MODE_OFF } cctl_port_mode; static struct ctladm_opts cctl_fe_table[] = { {"fc", CTL_PORT_FC, CTLADM_ARG_NONE, NULL}, {"scsi", CTL_PORT_SCSI, CTLADM_ARG_NONE, NULL}, {"internal", CTL_PORT_INTERNAL, CTLADM_ARG_NONE, NULL}, {"iscsi", CTL_PORT_ISCSI, CTLADM_ARG_NONE, NULL}, {"sas", CTL_PORT_SAS, CTLADM_ARG_NONE, NULL}, {"all", CTL_PORT_ALL, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int cctl_port(int fd, int argc, char **argv, char *combinedopt) { int c; int32_t targ_port = -1; int retval = 0; int wwnn_set = 0, wwpn_set = 0; uint64_t wwnn = 0, wwpn = 0; cctl_port_mode port_mode = CCTL_PORT_MODE_NONE; struct ctl_port_entry entry; ctl_port_type port_type = CTL_PORT_NONE; int quiet = 0, xml = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': if (port_mode != CCTL_PORT_MODE_NONE) goto bailout_badarg; port_mode = CCTL_PORT_MODE_LIST; break; case 'o': if (port_mode != CCTL_PORT_MODE_NONE) goto bailout_badarg; if (strcasecmp(optarg, "on") == 0) port_mode = CCTL_PORT_MODE_ON; else if (strcasecmp(optarg, "off") == 0) port_mode = CCTL_PORT_MODE_OFF; else { warnx("Invalid -o argument %s, \"on\" or " "\"off\" are the only valid args", optarg); retval = 1; goto bailout; } break; case 'p': targ_port = strtol(optarg, NULL, 0); break; case 'q': quiet = 1; break; case 't': { ctladm_optret optret; ctladm_cmdargs argnum; const char *subopt; ctl_port_type tmp_port_type; optret = getoption(cctl_fe_table, optarg, &tmp_port_type, &argnum, &subopt); if (optret == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous frontend type %s", __func__, optarg); retval = 1; goto bailout; } else if (optret == CC_OR_NOT_FOUND) { warnx("%s: invalid frontend type %s", __func__, optarg); retval = 1; goto bailout; } port_type |= tmp_port_type; break; } case 'w': if ((port_mode != CCTL_PORT_MODE_NONE) && (port_mode != CCTL_PORT_MODE_SET)) goto bailout_badarg; port_mode = CCTL_PORT_MODE_SET; wwnn = strtoull(optarg, NULL, 0); wwnn_set = 1; break; case 'W': if ((port_mode != CCTL_PORT_MODE_NONE) && (port_mode != CCTL_PORT_MODE_SET)) goto bailout_badarg; port_mode = CCTL_PORT_MODE_SET; wwpn = strtoull(optarg, NULL, 0); wwpn_set = 1; break; case 'x': xml = 1; break; } } /* * The user can specify either one or more frontend types (-t), or * a specific frontend, but not both. * * If the user didn't specify a frontend type or number, set it to * all. This is primarily needed for the enable/disable ioctls. * This will be a no-op for the listing code. For the set ioctl, * we'll throw an error, since that only works on one port at a time. 
*/ if ((port_type != CTL_PORT_NONE) && (targ_port != -1)) { warnx("%s: can only specify one of -t or -n", __func__); retval = 1; goto bailout; } else if ((targ_port == -1) && (port_type == CTL_PORT_NONE)) port_type = CTL_PORT_ALL; bzero(&entry, sizeof(entry)); /* * These are needed for all but list/dump mode. */ entry.port_type = port_type; entry.targ_port = targ_port; switch (port_mode) { case CCTL_PORT_MODE_LIST: cctl_port_dump(fd, quiet, xml, targ_port, port_type); break; case CCTL_PORT_MODE_SET: if (targ_port == -1) { warnx("%s: -w and -W require -n", __func__); retval = 1; goto bailout; } if (wwnn_set) { entry.flags |= CTL_PORT_WWNN_VALID; entry.wwnn = wwnn; } if (wwpn_set) { entry.flags |= CTL_PORT_WWPN_VALID; entry.wwpn = wwpn; } if (ioctl(fd, CTL_SET_PORT_WWNS, &entry) == -1) { warn("%s: CTL_SET_PORT_WWNS ioctl failed", __func__); retval = 1; goto bailout; } break; case CCTL_PORT_MODE_ON: if (ioctl(fd, CTL_ENABLE_PORT, &entry) == -1) { warn("%s: CTL_ENABLE_PORT ioctl failed", __func__); retval = 1; goto bailout; } fprintf(stdout, "Front End Ports enabled\n"); break; case CCTL_PORT_MODE_OFF: if (ioctl(fd, CTL_DISABLE_PORT, &entry) == -1) { warn("%s: CTL_DISABLE_PORT ioctl failed", __func__); retval = 1; goto bailout; } fprintf(stdout, "Front End Ports disabled\n"); break; default: warnx("%s: one of -l, -o or -w/-W must be specified", __func__); retval = 1; goto bailout; break; } bailout: return (retval); bailout_badarg: warnx("%s: only one of -l, -o or -w/-W may be specified", __func__); return (1); } static int cctl_do_io(int fd, int retries, union ctl_io *io, const char *func) { do { if (ioctl(fd, CTL_IO, io) == -1) { warn("%s: error sending CTL_IO ioctl", func); return (-1); } } while (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) && (retries-- > 0)); return (0); } static int cctl_delay(int fd, int target, int lun, int argc, char **argv, char *combinedopt) { struct ctl_io_delay_info delay_info; char *delayloc = NULL; char *delaytype = NULL; int delaytime = -1; int retval; int c; retval = 0; memset(&delay_info, 0, sizeof(delay_info)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'T': delaytype = strdup(optarg); break; case 'l': delayloc = strdup(optarg); break; case 't': delaytime = strtoul(optarg, NULL, 0); break; } } if (delaytime == -1) { warnx("%s: you must specify the delaytime with -t", __func__); retval = 1; goto bailout; } if (strcasecmp(delayloc, "datamove") == 0) delay_info.delay_loc = CTL_DELAY_LOC_DATAMOVE; else if (strcasecmp(delayloc, "done") == 0) delay_info.delay_loc = CTL_DELAY_LOC_DONE; else { warnx("%s: invalid delay location %s", __func__, delayloc); retval = 1; goto bailout; } if ((delaytype == NULL) || (strcmp(delaytype, "oneshot") == 0)) delay_info.delay_type = CTL_DELAY_TYPE_ONESHOT; else if (strcmp(delaytype, "cont") == 0) delay_info.delay_type = CTL_DELAY_TYPE_CONT; else { warnx("%s: invalid delay type %s", __func__, delaytype); retval = 1; goto bailout; } delay_info.target_id = target; delay_info.lun_id = lun; delay_info.delay_secs = delaytime; if (ioctl(fd, CTL_DELAY_IO, &delay_info) == -1) { warn("%s: CTL_DELAY_IO ioctl failed", __func__); retval = 1; goto bailout; } switch (delay_info.status) { case CTL_DELAY_STATUS_NONE: warnx("%s: no delay status??", __func__); retval = 1; break; case CTL_DELAY_STATUS_OK: break; case CTL_DELAY_STATUS_INVALID_LUN: warnx("%s: invalid lun %d", __func__, lun); retval = 1; break; case CTL_DELAY_STATUS_INVALID_TYPE: warnx("%s: invalid delay type %d", __func__, delay_info.delay_type); 
retval = 1; break; case CTL_DELAY_STATUS_INVALID_LOC: warnx("%s: delay location %s not implemented?", __func__, delayloc); retval = 1; break; case CTL_DELAY_STATUS_NOT_IMPLEMENTED: warnx("%s: delay not implemented in the kernel", __func__); warnx("%s: recompile with the CTL_IO_DELAY flag set", __func__); retval = 1; break; default: warnx("%s: unknown delay return status %d", __func__, delay_info.status); retval = 1; break; } bailout: /* delayloc should never be NULL, but just in case...*/ if (delayloc != NULL) free(delayloc); return (retval); } static int cctl_realsync(int fd, int argc, char **argv) { int syncstate; int retval; char *syncarg; retval = 0; if (argc != 3) { warnx("%s %s takes exactly one argument", argv[0], argv[1]); retval = 1; goto bailout; } syncarg = argv[2]; if (strncasecmp(syncarg, "query", min(strlen(syncarg), strlen("query"))) == 0) { if (ioctl(fd, CTL_REALSYNC_GET, &syncstate) == -1) { warn("%s: CTL_REALSYNC_GET ioctl failed", __func__); retval = 1; goto bailout; } fprintf(stdout, "SYNCHRONIZE CACHE support is: "); switch (syncstate) { case 0: fprintf(stdout, "OFF\n"); break; case 1: fprintf(stdout, "ON\n"); break; default: fprintf(stdout, "unknown (%d)\n", syncstate); break; } goto bailout; } else if (strcasecmp(syncarg, "on") == 0) { syncstate = 1; } else if (strcasecmp(syncarg, "off") == 0) { syncstate = 0; } else { warnx("%s: invalid realsync argument %s", __func__, syncarg); retval = 1; goto bailout; } if (ioctl(fd, CTL_REALSYNC_SET, &syncstate) == -1) { warn("%s: CTL_REALSYNC_SET ioctl failed", __func__); retval = 1; goto bailout; } bailout: return (retval); } static int cctl_getsetsync(int fd, int target, int lun, ctladm_cmdfunction command, int argc, char **argv, char *combinedopt) { struct ctl_sync_info sync_info; uint32_t ioctl_cmd; int sync_interval = -1; int retval; int c; retval = 0; memset(&sync_info, 0, sizeof(sync_info)); sync_info.target_id = target; sync_info.lun_id = lun; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'i': sync_interval = strtoul(optarg, NULL, 0); break; default: break; } } if (command == CTLADM_CMD_SETSYNC) { if (sync_interval == -1) { warnx("%s: you must specify the sync interval with -i", __func__); retval = 1; goto bailout; } sync_info.sync_interval = sync_interval; ioctl_cmd = CTL_SETSYNC; } else { ioctl_cmd = CTL_GETSYNC; } if (ioctl(fd, ioctl_cmd, &sync_info) == -1) { warn("%s: CTL_%sSYNC ioctl failed", __func__, (command == CTLADM_CMD_SETSYNC) ? "SET" : "GET"); retval = 1; goto bailout; } switch (sync_info.status) { case CTL_GS_SYNC_OK: if (command == CTLADM_CMD_GETSYNC) { fprintf(stdout, "%d:%d: sync interval: %d\n", target, lun, sync_info.sync_interval); } break; case CTL_GS_SYNC_NO_LUN: warnx("%s: unknown target:LUN %d:%d", __func__, target, lun); retval = 1; break; case CTL_GS_SYNC_NONE: default: warnx("%s: unknown CTL_%sSYNC status %d", __func__, (command == CTLADM_CMD_SETSYNC) ? 
"SET" : "GET", sync_info.status); retval = 1; break; } bailout: return (retval); } static struct ctladm_opts cctl_err_types[] = { {"aborted", CTL_LUN_INJ_ABORTED, CTLADM_ARG_NONE, NULL}, {"mediumerr", CTL_LUN_INJ_MEDIUM_ERR, CTLADM_ARG_NONE, NULL}, {"ua", CTL_LUN_INJ_UA, CTLADM_ARG_NONE, NULL}, {"custom", CTL_LUN_INJ_CUSTOM, CTLADM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static struct ctladm_opts cctl_err_patterns[] = { {"read", CTL_LUN_PAT_READ, CTLADM_ARG_NONE, NULL}, {"write", CTL_LUN_PAT_WRITE, CTLADM_ARG_NONE, NULL}, {"rw", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL}, {"readwrite", CTL_LUN_PAT_READWRITE, CTLADM_ARG_NONE, NULL}, {"readcap", CTL_LUN_PAT_READCAP, CTLADM_ARG_NONE, NULL}, {"tur", CTL_LUN_PAT_TUR, CTLADM_ARG_NONE, NULL}, {"any", CTL_LUN_PAT_ANY, CTLADM_ARG_NONE, NULL}, #if 0 {"cmd", CTL_LUN_PAT_CMD, CTLADM_ARG_NONE, NULL}, #endif {NULL, 0, 0, NULL} }; static int cctl_error_inject(int fd, uint32_t target, uint32_t lun, int argc, char **argv, char *combinedopt) { int retval = 0; struct ctl_error_desc err_desc; uint64_t lba = 0; uint32_t len = 0; uint64_t delete_id = 0; int delete_id_set = 0; int continuous = 0; int sense_len = 0; int fd_sense = 0; int c; bzero(&err_desc, sizeof(err_desc)); err_desc.target_id = target; err_desc.lun_id = lun; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': continuous = 1; break; case 'd': delete_id = strtoull(optarg, NULL, 0); delete_id_set = 1; break; case 'i': case 'p': { ctladm_optret optret; ctladm_cmdargs argnum; const char *subopt; if (c == 'i') { ctl_lun_error err_type; if (err_desc.lun_error != CTL_LUN_INJ_NONE) { warnx("%s: can't specify multiple -i " "arguments", __func__); retval = 1; goto bailout; } optret = getoption(cctl_err_types, optarg, &err_type, &argnum, &subopt); err_desc.lun_error = err_type; } else { ctl_lun_error_pattern pattern; optret = getoption(cctl_err_patterns, optarg, &pattern, &argnum, &subopt); err_desc.error_pattern |= pattern; } if (optret == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous argument %s", __func__, optarg); retval = 1; goto bailout; } else if (optret == CC_OR_NOT_FOUND) { warnx("%s: argument %s not found", __func__, optarg); retval = 1; goto bailout; } break; } case 'r': { char *tmpstr, *tmpstr2; tmpstr = strdup(optarg); if (tmpstr == NULL) { warn("%s: error duplicating string %s", __func__, optarg); retval = 1; goto bailout; } tmpstr2 = strsep(&tmpstr, ","); if (tmpstr2 == NULL) { warnx("%s: invalid -r argument %s", __func__, optarg); retval = 1; free(tmpstr); goto bailout; } lba = strtoull(tmpstr2, NULL, 0); tmpstr2 = strsep(&tmpstr, ","); if (tmpstr2 == NULL) { warnx("%s: no len argument for -r lba,len, got" " %s", __func__, optarg); retval = 1; free(tmpstr); goto bailout; } len = strtoul(tmpstr2, NULL, 0); free(tmpstr); break; } case 's': { struct get_hook hook; char *sensestr; sense_len = strtol(optarg, NULL, 0); if (sense_len <= 0) { warnx("invalid number of sense bytes %d", sense_len); retval = 1; goto bailout; } sense_len = MIN(sense_len, SSD_FULL_SIZE); hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; sensestr = cget(&hook, NULL); if ((sensestr != NULL) && (sensestr[0] == '-')) { fd_sense = 1; } else { buff_encode_visit( (uint8_t *)&err_desc.custom_sense, sense_len, sensestr, iget, &hook); } optind += hook.got; break; } default: break; } } if (delete_id_set != 0) { err_desc.serial = delete_id; if (ioctl(fd, CTL_ERROR_INJECT_DELETE, &err_desc) == -1) { warn("%s: error issuing CTL_ERROR_INJECT_DELETE ioctl", __func__); retval = 1; } goto bailout; } 
if (err_desc.lun_error == CTL_LUN_INJ_NONE) { warnx("%s: error injection command (-i) needed", __func__); retval = 1; goto bailout; } else if ((err_desc.lun_error == CTL_LUN_INJ_CUSTOM) && (sense_len == 0)) { warnx("%s: custom error requires -s", __func__); retval = 1; goto bailout; } if (continuous != 0) err_desc.lun_error |= CTL_LUN_INJ_CONTINUOUS; /* * If fd_sense is set, we need to read the sense data the user * wants returned from stdin. */ if (fd_sense == 1) { ssize_t amt_read; int amt_to_read = sense_len; u_int8_t *buf_ptr = (uint8_t *)&err_desc.custom_sense; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading sense data from stdin"); retval = 1; goto bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (err_desc.error_pattern == CTL_LUN_PAT_NONE) { warnx("%s: command pattern (-p) needed", __func__); retval = 1; goto bailout; } if (len != 0) { err_desc.error_pattern |= CTL_LUN_PAT_RANGE; /* * We could check here to see whether it's a read/write * command, but that will be pointless once we allow * custom patterns. At that point, the user could specify * a READ(6) CDB type, and we wouldn't have an easy way here * to verify whether range checking is possible there. The * user will just figure it out when his error never gets * executed. */ #if 0 if ((err_desc.pattern & CTL_LUN_PAT_READWRITE) == 0) { warnx("%s: need read and/or write pattern if range " "is specified", __func__); retval = 1; goto bailout; } #endif err_desc.lba_range.lba = lba; err_desc.lba_range.len = len; } if (ioctl(fd, CTL_ERROR_INJECT, &err_desc) == -1) { warn("%s: error issuing CTL_ERROR_INJECT ioctl", __func__); retval = 1; } else { printf("Error injection succeeded, serial number is %ju\n", (uintmax_t)err_desc.serial); } bailout: return (retval); } static int cctl_lunlist(int fd) { struct scsi_report_luns_data *lun_data; struct scsi_inquiry_data *inq_data; uint32_t num_luns; int target; int initid; unsigned int i; int retval; retval = 0; inq_data = NULL; target = 6; initid = 7; /* * XXX KDM assuming LUN 0 is fine, but we may need to change this * if we ever acquire the ability to have multiple targets. 
*/ if ((retval = cctl_get_luns(fd, target, /*lun*/ 0, initid, /*retries*/ 2, &lun_data, &num_luns)) != 0) goto bailout; inq_data = malloc(sizeof(*inq_data)); if (inq_data == NULL) { warn("%s: couldn't allocate memory for inquiry data\n", __func__); retval = 1; goto bailout; } for (i = 0; i < num_luns; i++) { char scsi_path[40]; int lun_val; switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: lun_val = lun_data->luns[i].lundata[1]; break; case RPL_LUNDATA_ATYP_FLAT: lun_val = (lun_data->luns[i].lundata[0] & RPL_LUNDATA_FLAT_LUN_MASK) | (lun_data->luns[i].lundata[1] << RPL_LUNDATA_FLAT_LUN_BITS); break; case RPL_LUNDATA_ATYP_LUN: case RPL_LUNDATA_ATYP_EXTLUN: default: fprintf(stdout, "Unsupported LUN format %d\n", lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); lun_val = -1; break; } if (lun_val == -1) continue; if ((retval = cctl_get_inquiry(fd, target, lun_val, initid, /*retries*/ 2, scsi_path, sizeof(scsi_path), inq_data)) != 0) { goto bailout; } printf("%s", scsi_path); scsi_print_inquiry(inq_data); } bailout: if (lun_data != NULL) free(lun_data); if (inq_data != NULL) free(inq_data); return (retval); } static void cctl_cfi_mt_statusstr(cfi_mt_status status, char *str, int str_len) { switch (status) { case CFI_MT_PORT_OFFLINE: snprintf(str, str_len, "Port Offline"); break; case CFI_MT_ERROR: snprintf(str, str_len, "Error"); break; case CFI_MT_SUCCESS: snprintf(str, str_len, "Success"); break; case CFI_MT_NONE: snprintf(str, str_len, "None??"); break; default: snprintf(str, str_len, "Unknown status: %d", status); break; } } static void cctl_cfi_bbr_statusstr(cfi_bbrread_status status, char *str, int str_len) { switch (status) { case CFI_BBR_SUCCESS: snprintf(str, str_len, "Success"); break; case CFI_BBR_LUN_UNCONFIG: snprintf(str, str_len, "LUN not configured"); break; case CFI_BBR_NO_LUN: snprintf(str, str_len, "LUN does not exist"); break; case CFI_BBR_NO_MEM: snprintf(str, str_len, "Memory allocation error"); break; case CFI_BBR_BAD_LEN: snprintf(str, str_len, "Length is not a multiple of blocksize"); break; case CFI_BBR_RESERV_CONFLICT: snprintf(str, str_len, "Reservation conflict"); break; case CFI_BBR_LUN_STOPPED: snprintf(str, str_len, "LUN is powered off"); break; case CFI_BBR_LUN_OFFLINE_CTL: snprintf(str, str_len, "LUN is offline"); break; case CFI_BBR_LUN_OFFLINE_RC: snprintf(str, str_len, "RAIDCore array is offline (double " "failure?)"); break; case CFI_BBR_SCSI_ERROR: snprintf(str, str_len, "SCSI Error"); break; case CFI_BBR_ERROR: snprintf(str, str_len, "Error"); break; default: snprintf(str, str_len, "Unknown status: %d", status); break; } } static int cctl_hardstopstart(int fd, ctladm_cmdfunction command) { struct ctl_hard_startstop_info hs_info; char error_str[256]; int do_start; int retval; retval = 0; if (command == CTLADM_CMD_HARDSTART) do_start = 1; else do_start = 0; if (ioctl(fd, (do_start == 1) ? CTL_HARD_START : CTL_HARD_STOP, &hs_info) == -1) { warn("%s: CTL_HARD_%s ioctl failed", __func__, (do_start == 1) ? "START" : "STOP"); retval = 1; goto bailout; } fprintf(stdout, "Hard %s Status: ", (command == CTLADM_CMD_HARDSTOP) ? 
"Stop" : "Start"); cctl_cfi_mt_statusstr(hs_info.status, error_str, sizeof(error_str)); fprintf(stdout, "%s\n", error_str); fprintf(stdout, "Total LUNs: %d\n", hs_info.total_luns); fprintf(stdout, "LUNs complete: %d\n", hs_info.luns_complete); fprintf(stdout, "LUNs failed: %d\n", hs_info.luns_failed); bailout: return (retval); } static int cctl_bbrread(int fd, int target __unused, int lun, int iid __unused, int argc, char **argv, char *combinedopt) { struct ctl_bbrread_info bbr_info; char error_str[256]; int datalen = -1; uint64_t lba = 0; int lba_set = 0; int retval; int c; retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'd': datalen = strtoul(optarg, NULL, 0); break; case 'l': lba = strtoull(optarg, NULL, 0); lba_set = 1; break; default: break; } } if (lba_set == 0) { warnx("%s: you must specify an LBA with -l", __func__); retval = 1; goto bailout; } if (datalen == -1) { warnx("%s: you must specify a length with -d", __func__); retval = 1; goto bailout; } bbr_info.lun_num = lun; bbr_info.lba = lba; /* * XXX KDM get the blocksize first?? */ if ((datalen % 512) != 0) { warnx("%s: data length %d is not a multiple of 512 bytes", __func__, datalen); retval = 1; goto bailout; } bbr_info.len = datalen; if (ioctl(fd, CTL_BBRREAD, &bbr_info) == -1) { warn("%s: CTL_BBRREAD ioctl failed", __func__); retval = 1; goto bailout; } cctl_cfi_mt_statusstr(bbr_info.status, error_str, sizeof(error_str)); fprintf(stdout, "BBR Read Overall Status: %s\n", error_str); cctl_cfi_bbr_statusstr(bbr_info.bbr_status, error_str, sizeof(error_str)); fprintf(stdout, "BBR Read Status: %s\n", error_str); /* * XXX KDM should we bother printing out SCSI status if we get * CFI_BBR_SCSI_ERROR back? * * Return non-zero if this fails? */ bailout: return (retval); } static int cctl_startup_shutdown(int fd, int target, int lun, int iid, ctladm_cmdfunction command) { union ctl_io *io; struct ctl_id id; struct scsi_report_luns_data *lun_data; struct scsi_inquiry_data *inq_data; uint32_t num_luns; unsigned int i; int retval; retval = 0; inq_data = NULL; /* * - report luns * - step through each lun, do an inquiry * - check OOA queue on direct access luns * - send stop with offline bit to each direct access device with a * clear OOA queue * - if we get a reservation conflict, reset the LUN to clear it * and reissue the stop with the offline bit set */ id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } if ((retval = cctl_get_luns(fd, target, lun, iid, /*retries*/ 2, &lun_data, &num_luns)) != 0) goto bailout; inq_data = malloc(sizeof(*inq_data)); if (inq_data == NULL) { warn("%s: couldn't allocate memory for inquiry data\n", __func__); retval = 1; goto bailout; } for (i = 0; i < num_luns; i++) { char scsi_path[40]; int lun_val; /* * XXX KDM figure out a way to share this code with * cctl_lunlist()? 
*/ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: lun_val = lun_data->luns[i].lundata[1]; break; case RPL_LUNDATA_ATYP_FLAT: lun_val = (lun_data->luns[i].lundata[0] & RPL_LUNDATA_FLAT_LUN_MASK) | (lun_data->luns[i].lundata[1] << RPL_LUNDATA_FLAT_LUN_BITS); break; case RPL_LUNDATA_ATYP_LUN: case RPL_LUNDATA_ATYP_EXTLUN: default: fprintf(stdout, "Unsupported LUN format %d\n", lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); lun_val = -1; break; } if (lun_val == -1) continue; if ((retval = cctl_get_inquiry(fd, target, lun_val, iid, /*retries*/ 2, scsi_path, sizeof(scsi_path), inq_data)) != 0) { goto bailout; } printf("%s", scsi_path); scsi_print_inquiry(inq_data); /* * We only want to shutdown direct access devices. */ if (SID_TYPE(inq_data) != T_DIRECT) { printf("%s LUN is not direct access, skipped\n", scsi_path); continue; } if (command == CTLADM_CMD_SHUTDOWN) { struct ctl_ooa_info ooa_info; ooa_info.target_id = target; ooa_info.lun_id = lun_val; if (ioctl(fd, CTL_CHECK_OOA, &ooa_info) == -1) { printf("%s CTL_CHECK_OOA ioctl failed\n", scsi_path); continue; } if (ooa_info.status != CTL_OOA_SUCCESS) { printf("%s CTL_CHECK_OOA returned status %d\n", scsi_path, ooa_info.status); continue; } if (ooa_info.num_entries != 0) { printf("%s %d entr%s in the OOA queue, " "skipping shutdown\n", scsi_path, ooa_info.num_entries, (ooa_info.num_entries > 1)?"ies" : "y" ); continue; } } ctl_scsi_start_stop(/*io*/ io, /*start*/(command == CTLADM_CMD_STARTUP) ? 1 : 0, /*load_eject*/ 0, /*immediate*/ 0, /*power_conditions*/ SSS_PC_START_VALID, /*onoffline*/ 1, /*ctl_tag_type*/ (command == CTLADM_CMD_STARTUP) ? CTL_TAG_SIMPLE :CTL_TAG_ORDERED, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun_val; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, /*retries*/ 3, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ctl_io_error_print(io, inq_data, stderr); else { printf("%s LUN is now %s\n", scsi_path, (command == CTLADM_CMD_STARTUP) ? 
"online" : "offline"); } } bailout: if (lun_data != NULL) free(lun_data); if (inq_data != NULL) free(inq_data); if (io != NULL) ctl_scsi_free_io(io); return (retval); } static int cctl_sync_cache(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; struct ctl_id id; int cdb_size = -1; int retval; uint64_t our_lba = 0; uint32_t our_block_count = 0; int reladr = 0, immed = 0; int c; id.id = iid; retval = 0; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': our_block_count = strtoul(optarg, NULL, 0); break; case 'c': cdb_size = strtol(optarg, NULL, 0); break; case 'i': immed = 1; break; case 'l': our_lba = strtoull(optarg, NULL, 0); break; case 'r': reladr = 1; break; default: break; } } if (cdb_size != -1) { switch (cdb_size) { case 10: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 10 " "and 16", __func__, cdb_size); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdb_size = 10; ctl_scsi_sync_cache(/*io*/ io, /*immed*/ immed, /*reladr*/ reladr, /*minimum_cdb_size*/ cdb_size, /*starting_lba*/ our_lba, /*block_count*/ our_block_count, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { fprintf(stdout, "Cache synchronized successfully\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_start_stop(int fd, int target, int lun, int iid, int retries, int start, int argc, char **argv, char *combinedopt) { union ctl_io *io; struct ctl_id id; char scsi_path[40]; int immed = 0, onoffline = 0; int retval, c; id.id = iid; retval = 0; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'i': immed = 1; break; case 'o': onoffline = 1; break; default: break; } } /* * Use an ordered tag for the stop command, to guarantee that any * pending I/O will finish before the stop command executes. This * would normally be the case anyway, since CTL will basically * treat the start/stop command as an ordered command with respect * to any other command except an INQUIRY. (See ctl_ser_table.c.) */ ctl_scsi_start_stop(/*io*/ io, /*start*/ start, /*load_eject*/ 0, /*immediate*/ immed, /*power_conditions*/ SSS_PC_START_VALID, /*onoffline*/ onoffline, /*ctl_tag_type*/ start ? CTL_TAG_SIMPLE : CTL_TAG_ORDERED, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path)); if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { fprintf(stdout, "%s LUN %s successfully\n", scsi_path, (start) ? 
"started" : "stopped"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_mode_sense(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; struct ctl_id id; uint32_t datalen; uint8_t *dataptr; int pc = -1, cdbsize, retval, dbd = 0, subpage = -1; int list = 0; int page_code = -1; int c; id.id = iid; cdbsize = 0; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'P': pc = strtoul(optarg, NULL, 0); break; case 'S': subpage = strtoul(optarg, NULL, 0); break; case 'd': dbd = 1; break; case 'l': list = 1; break; case 'm': page_code = strtoul(optarg, NULL, 0); break; case 'c': cdbsize = strtol(optarg, NULL, 0); break; default: break; } } if (((list == 0) && (page_code == -1)) || ((list != 0) && (page_code != -1))) { warnx("%s: you must specify either a page code (-m) or -l", __func__); retval = 1; goto bailout; } if ((page_code != -1) && ((page_code > SMS_ALL_PAGES_PAGE) || (page_code < 0))) { warnx("%s: page code %d is out of range", __func__, page_code); retval = 1; goto bailout; } if (list == 1) { page_code = SMS_ALL_PAGES_PAGE; if (pc != -1) { warnx("%s: arg -P makes no sense with -l", __func__); retval = 1; goto bailout; } if (subpage != -1) { warnx("%s: arg -S makes no sense with -l", __func__); retval = 1; goto bailout; } } if (pc == -1) pc = SMS_PAGE_CTRL_CURRENT; else { if ((pc > 3) || (pc < 0)) { warnx("%s: page control value %d is out of range: 0-3", __func__, pc); retval = 1; goto bailout; } } if ((subpage != -1) && ((subpage > 255) || (subpage < 0))) { warnx("%s: subpage code %d is out of range: 0-255", __func__, subpage); retval = 1; goto bailout; } if (cdbsize != 0) { switch (cdbsize) { case 6: case 10: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 6 " "and 10", __func__, cdbsize); retval = 1; goto bailout; break; } } else cdbsize = 6; if (subpage == -1) subpage = 0; if (cdbsize == 6) datalen = 255; else datalen = 65535; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_mode_sense(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*dbd*/ dbd, /*llbaa*/ 0, /*page_code*/ page_code, /*pc*/ pc << 6, /*subpage*/ subpage, /*minimum_cdb_size*/ cdbsize, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int pages_len, used_len; uint32_t returned_len; uint8_t *ndataptr; if (io->scsiio.cdb[0] == MODE_SENSE_6) { struct scsi_mode_hdr_6 *hdr6; int bdlen; hdr6 = (struct scsi_mode_hdr_6 *)dataptr; returned_len = hdr6->datalen + 1; bdlen = hdr6->block_descr_len; ndataptr = (uint8_t *)((uint8_t *)&hdr6[1] + bdlen); } else { struct scsi_mode_hdr_10 *hdr10; int bdlen; hdr10 = (struct scsi_mode_hdr_10 *)dataptr; returned_len = scsi_2btoul(hdr10->datalen) + 2; bdlen = scsi_2btoul(hdr10->block_descr_len); ndataptr = (uint8_t *)((uint8_t *)&hdr10[1] + bdlen); } /* just in case they can give us more than we allocated for */ returned_len = min(returned_len, datalen); pages_len = returned_len - (ndataptr - dataptr); #if 0 fprintf(stdout, "returned_len = %d, 
pages_len = %d\n", returned_len, pages_len); #endif if (list == 1) { fprintf(stdout, "Supported mode pages:\n"); for (used_len = 0; used_len < pages_len;) { struct scsi_mode_page_header *header; header = (struct scsi_mode_page_header *) &ndataptr[used_len]; fprintf(stdout, "%d\n", header->page_code); used_len += header->page_length + 2; } } else { for (used_len = 0; used_len < pages_len; used_len++) { fprintf(stdout, "0x%x ", ndataptr[used_len]); if (((used_len+1) % 16) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_read_capacity(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt) { union ctl_io *io; struct ctl_id id; struct scsi_read_capacity_data *data; struct scsi_read_capacity_data_long *longdata; int cdbsize = -1, retval; uint8_t *dataptr; int c; cdbsize = 10; dataptr = NULL; retval = 0; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory\n", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': cdbsize = strtol(optarg, NULL, 0); break; default: break; } } if (cdbsize != -1) { switch (cdbsize) { case 10: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 10 " "and 16", __func__, cdbsize); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdbsize = 10; dataptr = (uint8_t *)malloc(sizeof(*longdata)); if (dataptr == NULL) { warn("%s: can't allocate %zd bytes\n", __func__, sizeof(*longdata)); retval = 1; goto bailout; } memset(dataptr, 0, sizeof(*longdata)); retry: switch (cdbsize) { case 10: ctl_scsi_read_capacity(io, /*data_ptr*/ dataptr, /*data_len*/ sizeof(*longdata), /*addr*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); break; case 16: ctl_scsi_read_capacity_16(io, /*data_ptr*/ dataptr, /*data_len*/ sizeof(*longdata), /*addr*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); break; } io->io_hdr.nexus.initid = id; io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { uint64_t maxlba; uint32_t blocksize; if (cdbsize == 10) { data = (struct scsi_read_capacity_data *)dataptr; maxlba = scsi_4btoul(data->addr); blocksize = scsi_4btoul(data->length); if (maxlba == 0xffffffff) { cdbsize = 16; goto retry; } } else { longdata=(struct scsi_read_capacity_data_long *)dataptr; maxlba = scsi_8btou64(longdata->addr); blocksize = scsi_4btoul(longdata->length); } fprintf(stdout, "Disk Capacity: %ju, Blocksize: %d\n", (uintmax_t)maxlba, blocksize); } else { ctl_io_error_print(io, NULL, stderr); } bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_read_write(int fd, int target, int lun, int iid, int retries, int argc, char **argv, char *combinedopt, ctladm_cmdfunction command) { union ctl_io *io; struct ctl_id id; int file_fd, do_stdio; int cdbsize = -1, databytes; uint8_t *dataptr; char *filename = NULL; int datalen = -1, blocksize = -1; uint64_t lba = 0; int lba_set = 0; int retval; int c; retval = 0; do_stdio = 0; dataptr = NULL; file_fd = -1; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory\n", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 
'N': io->io_hdr.flags |= CTL_FLAG_NO_DATAMOVE; break; case 'b': blocksize = strtoul(optarg, NULL, 0); break; case 'c': cdbsize = strtoul(optarg, NULL, 0); break; case 'd': datalen = strtoul(optarg, NULL, 0); break; case 'f': filename = strdup(optarg); break; case 'l': lba = strtoull(optarg, NULL, 0); lba_set = 1; break; default: break; } } if (filename == NULL) { warnx("%s: you must supply a filename using -f", __func__); retval = 1; goto bailout; } if (datalen == -1) { warnx("%s: you must specify the data length with -d", __func__); retval = 1; goto bailout; } if (lba_set == 0) { warnx("%s: you must specify the LBA with -l", __func__); retval = 1; goto bailout; } if (blocksize == -1) { warnx("%s: you must specify the blocksize with -b", __func__); retval = 1; goto bailout; } if (cdbsize != -1) { switch (cdbsize) { case 6: case 10: case 12: case 16: break; default: warnx("%s: invalid cdbsize %d, valid sizes are 6, " "10, 12 or 16", __func__, cdbsize); retval = 1; goto bailout; break; /* NOTREACHED */ } } else cdbsize = 6; databytes = datalen * blocksize; dataptr = (uint8_t *)malloc(databytes); if (dataptr == NULL) { warn("%s: can't allocate %d bytes\n", __func__, databytes); retval = 1; goto bailout; } if (strcmp(filename, "-") == 0) { if (command == CTLADM_CMD_READ) file_fd = STDOUT_FILENO; else file_fd = STDIN_FILENO; do_stdio = 1; } else { file_fd = open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); if (file_fd == -1) { warn("%s: can't open file %s", __func__, filename); retval = 1; goto bailout; } } memset(dataptr, 0, databytes); if (command == CTLADM_CMD_WRITE) { int bytes_read; bytes_read = read(file_fd, dataptr, databytes); if (bytes_read == -1) { warn("%s: error reading file %s", __func__, filename); retval = 1; goto bailout; } if (bytes_read != databytes) { warnx("%s: only read %d bytes from file %s", __func__, bytes_read, filename); retval = 1; goto bailout; } } ctl_scsi_read_write(io, /*data_ptr*/ dataptr, /*data_len*/ databytes, /*read_op*/ (command == CTLADM_CMD_READ) ? 1 : 0, /*byte2*/ 0, /*minimum_cdb_size*/ cdbsize, /*lba*/ lba, /*num_blocks*/ datalen, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if (((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) && (command == CTLADM_CMD_READ)) { int bytes_written; bytes_written = write(file_fd, dataptr, databytes); if (bytes_written == -1) { warn("%s: can't write to %s", __func__, filename); goto bailout; } } else if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); if ((do_stdio == 0) && (file_fd != -1)) close(file_fd); return (retval); } static int cctl_get_luns(int fd, int target, int lun, int iid, int retries, struct scsi_report_luns_data **lun_data, uint32_t *num_luns) { union ctl_io *io; struct ctl_id id; uint32_t nluns; int lun_datalen; int retval; retval = 0; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("%s: can't allocate memory", __func__); return (1); } /* * lun_data includes space for 1 lun, allocate space for 4 initially. * If that isn't enough, we'll allocate more. 
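* Each entry in the returned LUN list is 8 bytes and the length field * counts bytes of LUN data, so a returned length of 64, for example, * describes 8 LUNs; when that count exceeds what was allocated, the * buffer is grown and the command reissued via the retry label below.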
*/ nluns = 4; retry: lun_datalen = sizeof(*lun_data) + (nluns * sizeof(struct scsi_report_luns_lundata)); *lun_data = malloc(lun_datalen); if (*lun_data == NULL) { warnx("%s: can't allocate memory", __func__); ctl_scsi_free_io(io); return (1); } ctl_scsi_report_luns(io, /*data_ptr*/ (uint8_t *)*lun_data, /*data_len*/ lun_datalen, /*select_report*/ RPL_REPORT_ALL, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.initid = id; io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { uint32_t returned_len, returned_luns; returned_len = scsi_4btoul((*lun_data)->length); returned_luns = returned_len / 8; if (returned_luns > nluns) { nluns = returned_luns; free(*lun_data); goto retry; } /* These should be the same */ *num_luns = MIN(returned_luns, nluns); } else { ctl_io_error_print(io, NULL, stderr); retval = 1; } bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_report_luns(int fd, int target, int lun, int iid, int retries) { struct scsi_report_luns_data *lun_data; uint32_t num_luns, i; int retval; lun_data = NULL; if ((retval = cctl_get_luns(fd, target, lun, iid, retries, &lun_data, &num_luns)) != 0) goto bailout; fprintf(stdout, "%u LUNs returned\n", num_luns); for (i = 0; i < num_luns; i++) { int lun_val; /* * XXX KDM figure out a way to share this code with * cctl_lunlist()? */ switch (lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: lun_val = lun_data->luns[i].lundata[1]; break; case RPL_LUNDATA_ATYP_FLAT: lun_val = (lun_data->luns[i].lundata[0] & RPL_LUNDATA_FLAT_LUN_MASK) | (lun_data->luns[i].lundata[1] << RPL_LUNDATA_FLAT_LUN_BITS); break; case RPL_LUNDATA_ATYP_LUN: case RPL_LUNDATA_ATYP_EXTLUN: default: fprintf(stdout, "Unsupported LUN format %d\n", lun_data->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); lun_val = -1; break; } if (lun_val == -1) continue; fprintf(stdout, "%d\n", lun_val); } bailout: if (lun_data != NULL) free(lun_data); return (retval); } static int cctl_tur(int fd, int target, int lun, int iid, int retries) { union ctl_io *io; struct ctl_id id; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { fprintf(stderr, "can't allocate memory\n"); return (1); } ctl_scsi_tur(io, /* tag_type */ CTL_TAG_SIMPLE, /* control */ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { ctl_scsi_free_io(io); return (1); } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) fprintf(stdout, "Unit is ready\n"); else ctl_io_error_print(io, NULL, stderr); return (0); } static int cctl_get_inquiry(int fd, int target, int lun, int iid, int retries, char *path_str, int path_len, struct scsi_inquiry_data *inq_data) { union ctl_io *io; struct ctl_id id; int retval; retval = 0; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("cctl_inquiry: can't allocate memory\n"); return (1); } ctl_scsi_inquiry(/*io*/ io, /*data_ptr*/ (uint8_t *)inq_data, /*data_len*/ sizeof(*inq_data), /*byte2*/ 0, /*page_code*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) { retval = 1; ctl_io_error_print(io, NULL, stderr); } else if 
(path_str != NULL) ctl_scsi_path_string(io, path_str, path_len); bailout: ctl_scsi_free_io(io); return (retval); } static int cctl_inquiry(int fd, int target, int lun, int iid, int retries) { struct scsi_inquiry_data *inq_data; char scsi_path[40]; int retval; retval = 0; inq_data = malloc(sizeof(*inq_data)); if (inq_data == NULL) { warnx("%s: can't allocate inquiry data", __func__); retval = 1; goto bailout; } if ((retval = cctl_get_inquiry(fd, target, lun, iid, retries, scsi_path, sizeof(scsi_path), inq_data)) != 0) goto bailout; printf("%s", scsi_path); scsi_print_inquiry(inq_data); bailout: if (inq_data != NULL) free(inq_data); return (retval); } static int cctl_req_sense(int fd, int target, int lun, int iid, int retries) { union ctl_io *io; struct scsi_sense_data *sense_data; struct ctl_id id; int retval; retval = 0; id.id = iid; io = ctl_scsi_alloc_io(id); if (io == NULL) { warnx("cctl_req_sense: can't allocate memory\n"); return (1); } sense_data = malloc(sizeof(*sense_data)); memset(sense_data, 0, sizeof(*sense_data)); ctl_scsi_request_sense(/*io*/ io, /*data_ptr*/ (uint8_t *)sense_data, /*data_len*/ sizeof(*sense_data), /*byte2*/ 0, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retries, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { bcopy(sense_data, &io->scsiio.sense_data, sizeof(*sense_data)); io->scsiio.sense_len = sizeof(*sense_data); ctl_scsi_sense_print(&io->scsiio, NULL, stdout); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); free(sense_data); return (retval); } static int cctl_report_target_port_group(int fd, int target, int lun, int initiator) { union ctl_io *io; struct ctl_id id; uint32_t datalen; uint8_t *dataptr; int retval; id.id = initiator; dataptr = NULL; retval = 0; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } datalen = 64; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_maintenance_in(/*io*/ io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ SA_RPRT_TRGT_GRP, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, 0, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; returned_len = scsi_4btoul(&dataptr[0]) + 4; for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_inquiry_vpd_devid(int fd, int target, int lun, int initiator) { union ctl_io *io; struct ctl_id id; uint32_t datalen; uint8_t *dataptr; int retval; id.id = initiator; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } datalen = 256; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_inquiry(/*io*/ io, /*data_ptr*/ 
dataptr, /*data_len*/ datalen, /*byte2*/ SI_EVPD, /*page_code*/ SVPD_DEVICE_ID, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, 0, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; returned_len = scsi_2btoul(&dataptr[2]) + 4; for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_persistent_reserve_in(int fd, int target, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count) { union ctl_io *io; struct ctl_id id; uint32_t datalen; uint8_t *dataptr; int action = -1; int retval; int c; id.id = initiator; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': action = strtol(optarg, NULL, 0); break; default: break; } } if (action < 0 || action > 2) { warn("action must be specified and in the range: 0-2"); retval = 1; goto bailout; } datalen = 256; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_persistent_res_in(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ action, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retry_count, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { int returned_len, used_len; returned_len = 0; switch (action) { case 0: returned_len = scsi_4btoul(&dataptr[4]) + 8; returned_len = min(returned_len, 256); break; case 1: returned_len = scsi_4btoul(&dataptr[4]) + 8; break; case 2: returned_len = 8; break; default: warnx("%s: invalid action %d", __func__, action); goto bailout; break; /* NOTREACHED */ } for (used_len = 0; used_len < returned_len; used_len++) { fprintf(stdout, "0x%02x ", dataptr[used_len]); if (((used_len+1) % 8) == 0) fprintf(stdout, "\n"); } fprintf(stdout, "\n"); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } static int cctl_persistent_reserve_out(int fd, int target, int lun, int initiator, int argc, char **argv, char *combinedopt, int retry_count) { union ctl_io *io; struct ctl_id id; uint32_t datalen; uint64_t key = 0, sa_key = 0; int action = -1, restype = -1; uint8_t *dataptr; int retval; int c; id.id = initiator; retval = 0; dataptr = NULL; io = ctl_scsi_alloc_io(id); if (io == NULL) { warn("%s: can't allocate memory", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': action = strtol(optarg, NULL, 0); break; case 'k': key = strtoull(optarg, NULL, 0); break; case 'r': restype = strtol(optarg, NULL, 0); break; case 's': sa_key = strtoull(optarg, NULL, 0); break; default: break; } } if (action < 0 || action > 5) { warn("action must be specified and in the range: 0-5"); retval = 1; goto bailout; } if (restype < 0 || restype > 5) { if (action != 0 
&& action != 5 && action != 3) { warn("'restype' must be specified and in the range: 0-5"); retval = 1; goto bailout; } } datalen = 24; dataptr = (uint8_t *)malloc(datalen); if (dataptr == NULL) { warn("%s: can't allocate %d bytes", __func__, datalen); retval = 1; goto bailout; } memset(dataptr, 0, datalen); ctl_scsi_persistent_res_out(io, /*data_ptr*/ dataptr, /*data_len*/ datalen, /*action*/ action, /*type*/ restype, /*key*/ key, /*sa key*/ sa_key, /*tag_type*/ CTL_TAG_SIMPLE, /*control*/ 0); io->io_hdr.nexus.targ_target.id = target; io->io_hdr.nexus.targ_lun = lun; io->io_hdr.nexus.initid = id; if (cctl_do_io(fd, retry_count, io, __func__) != 0) { retval = 1; goto bailout; } if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) { char scsi_path[40]; ctl_scsi_path_string(io, scsi_path, sizeof(scsi_path)); fprintf( stdout, "%sPERSISTENT RESERVE OUT executed " "successfully\n", scsi_path); } else ctl_io_error_print(io, NULL, stderr); bailout: ctl_scsi_free_io(io); if (dataptr != NULL) free(dataptr); return (retval); } struct cctl_req_option { char *name; int namelen; char *value; int vallen; STAILQ_ENTRY(cctl_req_option) links; }; static int cctl_create_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; int device_type = -1; uint64_t lun_size = 0; uint32_t blocksize = 0, req_lun_id = 0; char *serial_num = NULL; char *device_id = NULL; int lun_size_set = 0, blocksize_set = 0, lun_id_set = 0; char *backend_name = NULL; STAILQ_HEAD(, cctl_req_option) option_list; int num_options = 0; int retval = 0, c; STAILQ_INIT(&option_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'B': blocksize = strtoul(optarg, NULL, 0); blocksize_set = 1; break; case 'd': device_id = strdup(optarg); break; case 'l': req_lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 'o': { struct cctl_req_option *option; char *tmpstr; char *name, *value; tmpstr = strdup(optarg); name = strsep(&tmpstr, "="); if (name == NULL) { warnx("%s: option -o takes \"name=value\" " "argument", __func__); retval = 1; goto bailout; } value = strsep(&tmpstr, "="); if (value == NULL) { warnx("%s: option -o takes \"name=value\" " "argument", __func__); free(name); retval = 1; goto bailout; } option = malloc(sizeof(*option)); if (option == NULL) { warn("%s: error allocating %zd bytes", __func__, sizeof(*option)); free(name); retval = 1; goto bailout; } option->name = strdup(name); option->namelen = strlen(name) + 1; option->value = strdup(value); option->vallen = strlen(value) + 1; free(name); /* 'name' still points to the start of the strdup()ed buffer */ STAILQ_INSERT_TAIL(&option_list, option, links); num_options++; break; } case 's': if (strcasecmp(optarg, "auto") != 0) { retval = expand_number(optarg, &lun_size); if (retval != 0) { warn("%s: invalid -s argument", __func__); retval = 1; goto bailout; } } lun_size_set = 1; break; case 'S': serial_num = strdup(optarg); break; case 't': device_type = strtoul(optarg, NULL, 0); break; default: break; } } if (backend_name == NULL) { warnx("%s: backend name (-b) must be specified", __func__); retval = 1; goto bailout; } bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_CREATE; if (blocksize_set != 0) req.reqdata.create.blocksize_bytes = blocksize; if (lun_size_set != 0) req.reqdata.create.lun_size_bytes = lun_size; if (lun_id_set != 0) { req.reqdata.create.flags |= CTL_LUN_FLAG_ID_REQ; req.reqdata.create.req_lun_id = req_lun_id; } req.reqdata.create.flags |= CTL_LUN_FLAG_DEV_TYPE; if (device_type != -1)
req.reqdata.create.device_type = device_type; else req.reqdata.create.device_type = T_DIRECT; if (serial_num != NULL) { strlcpy(req.reqdata.create.serial_num, serial_num, sizeof(req.reqdata.create.serial_num)); req.reqdata.create.flags |= CTL_LUN_FLAG_SERIAL_NUM; } if (device_id != NULL) { strlcpy(req.reqdata.create.device_id, device_id, sizeof(req.reqdata.create.device_id)); req.reqdata.create.flags |= CTL_LUN_FLAG_DEVID; } req.num_be_args = num_options; if (num_options > 0) { struct cctl_req_option *option, *next_option; int i; req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { warn("%s: error allocating %zd bytes", __func__, num_options * sizeof(*req.be_args)); retval = 1; goto bailout; } for (i = 0, option = STAILQ_FIRST(&option_list); i < num_options; i++, option = next_option) { next_option = STAILQ_NEXT(option, links); req.be_args[i].namelen = option->namelen; req.be_args[i].name = strdup(option->name); req.be_args[i].vallen = option->vallen; req.be_args[i].value = strdup(option->value); /* * XXX KDM do we want a way to specify a writeable * flag of some sort? Do we want a way to specify * binary data? */ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD; STAILQ_REMOVE(&option_list, option, cctl_req_option, links); free(option->name); free(option->value); free(option); } } if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN creation error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN creation warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN creation status: %d", req.status); retval = 1; goto bailout; } fprintf(stdout, "LUN created successfully\n"); fprintf(stdout, "backend: %s\n", req.backend); fprintf(stdout, "device type: %d\n", req.reqdata.create.device_type); fprintf(stdout, "LUN size: %ju bytes\n", (uintmax_t)req.reqdata.create.lun_size_bytes); fprintf(stdout, "blocksize: %u bytes\n", req.reqdata.create.blocksize_bytes); fprintf(stdout, "LUN ID: %d\n", req.reqdata.create.req_lun_id); fprintf(stdout, "Serial Number: %s\n", req.reqdata.create.serial_num); fprintf(stdout, "Device ID: %s\n", req.reqdata.create.device_id); bailout: return (retval); } static int cctl_rm_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; uint32_t lun_id = 0; int lun_id_set = 0; char *backend_name = NULL; STAILQ_HEAD(, cctl_req_option) option_list; int num_options = 0; int retval = 0, c; STAILQ_INIT(&option_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'l': lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 'o': { struct cctl_req_option *option; char *tmpstr; char *name, *value; tmpstr = strdup(optarg); name = strsep(&tmpstr, "="); if (name == NULL) { warnx("%s: option -o takes \"name=value\" " "argument", __func__); retval = 1; goto bailout; } value = strsep(&tmpstr, "="); if (value == NULL) { warnx("%s: option -o takes \"name=value\" " "argument", __func__); free(name); retval = 1; goto bailout; } option = malloc(sizeof(*option)); if (option == NULL) { warn("%s: error allocating %zd bytes", __func__, sizeof(*option)); free(name); retval = 1; goto bailout; } option->name = strdup(name); option->namelen = strlen(name) + 1; option->value = strdup(value); option->vallen = strlen(value) + 1; free(name); /* 'name' still points to the start of the strdup()ed buffer */ STAILQ_INSERT_TAIL(&option_list, option, links); num_options++; break; }
default: break; } } if (backend_name == NULL) errx(1, "%s: backend name (-b) must be specified", __func__); if (lun_id_set == 0) errx(1, "%s: LUN id (-l) must be specified", __func__); bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_RM; req.reqdata.rm.lun_id = lun_id; req.num_be_args = num_options; if (num_options > 0) { struct cctl_req_option *option, *next_option; int i; req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { warn("%s: error allocating %zd bytes", __func__, num_options * sizeof(*req.be_args)); retval = 1; goto bailout; } for (i = 0, option = STAILQ_FIRST(&option_list); i < num_options; i++, option = next_option) { next_option = STAILQ_NEXT(option, links); req.be_args[i].namelen = option->namelen; req.be_args[i].name = strdup(option->name); req.be_args[i].vallen = option->vallen; req.be_args[i].value = strdup(option->value); /* * XXX KDM do we want a way to specify a writeable * flag of some sort? Do we want a way to specify * binary data? */ req.be_args[i].flags = CTL_BEARG_ASCII | CTL_BEARG_RD; STAILQ_REMOVE(&option_list, option, cctl_req_option, links); free(option->name); free(option->value); free(option); } } if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN removal error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN removal warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN removal status: %d", req.status); retval = 1; goto bailout; } printf("LUN %d removed successfully\n", lun_id); bailout: return (retval); } static int cctl_modify_lun(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_req req; uint64_t lun_size = 0; uint32_t lun_id = 0; int lun_id_set = 0, lun_size_set = 0; char *backend_name = NULL; int retval = 0, c; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend_name = strdup(optarg); break; case 'l': lun_id = strtoul(optarg, NULL, 0); lun_id_set = 1; break; case 's': if (strcasecmp(optarg, "auto") != 0) { retval = expand_number(optarg, &lun_size); if (retval != 0) { warn("%s: invalid -s argument", __func__); retval = 1; goto bailout; } } lun_size_set = 1; break; default: break; } } if (backend_name == NULL) errx(1, "%s: backend name (-b) must be specified", __func__); if (lun_id_set == 0) errx(1, "%s: LUN id (-l) must be specified", __func__); if (lun_size_set == 0) errx(1, "%s: size (-s) must be specified", __func__); bzero(&req, sizeof(req)); strlcpy(req.backend, backend_name, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_MODIFY; req.reqdata.modify.lun_id = lun_id; req.reqdata.modify.lun_size_bytes = lun_size; if (ioctl(fd, CTL_LUN_REQ, &req) == -1) { warn("%s: error issuing CTL_LUN_REQ ioctl", __func__); retval = 1; goto bailout; } switch (req.status) { case CTL_LUN_ERROR: warnx("LUN modification error: %s", req.error_str); retval = 1; goto bailout; case CTL_LUN_WARNING: warnx("LUN modification warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: warnx("unknown LUN modification status: %d", req.status); retval = 1; goto bailout; } printf("LUN %d modified successfully\n", lun_id); bailout: return (retval); } struct cctl_islist_conn { int connection_id; char *initiator; char *initiator_addr; char *initiator_alias; char *target; char *target_alias; char *header_digest; char *data_digest; char 
*max_data_segment_length; int immediate_data; int iser; STAILQ_ENTRY(cctl_islist_conn) links; }; struct cctl_islist_data { int num_conns; STAILQ_HEAD(,cctl_islist_conn) conn_list; struct cctl_islist_conn *cur_conn; int level; struct sbuf *cur_sb[32]; }; static void cctl_islist_start_element(void *user_data, const char *name, const char **attr) { int i; struct cctl_islist_data *islist; struct cctl_islist_conn *cur_conn; islist = (struct cctl_islist_data *)user_data; cur_conn = islist->cur_conn; islist->level++; if ((u_int)islist->level >= (sizeof(islist->cur_sb) / sizeof(islist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(islist->cur_sb) / sizeof(islist->cur_sb[0])); islist->cur_sb[islist->level] = sbuf_new_auto(); if (islist->cur_sb[islist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); if (strcmp(name, "connection") == 0) { if (cur_conn != NULL) errx(1, "%s: improper connection element nesting", __func__); cur_conn = calloc(1, sizeof(*cur_conn)); if (cur_conn == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_conn)); islist->num_conns++; islist->cur_conn = cur_conn; STAILQ_INSERT_TAIL(&islist->conn_list, cur_conn, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_conn->connection_id = strtoull(attr[i+1], NULL, 0); } else { errx(1, "%s: invalid connection attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_islist_end_element(void *user_data, const char *name) { struct cctl_islist_data *islist; struct cctl_islist_conn *cur_conn; char *str; islist = (struct cctl_islist_data *)user_data; cur_conn = islist->cur_conn; if ((cur_conn == NULL) && (strcmp(name, "ctlislist") != 0)) errx(1, "%s: cur_conn == NULL! (name = %s)", __func__, name); if (islist->cur_sb[islist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, islist->level, name); sbuf_finish(islist->cur_sb[islist->level]); str = strdup(sbuf_data(islist->cur_sb[islist->level])); if (str == NULL) err(1, "%s: can't allocate %zd bytes for string", __func__, sbuf_len(islist->cur_sb[islist->level])); sbuf_delete(islist->cur_sb[islist->level]); islist->cur_sb[islist->level] = NULL; islist->level--; if (strcmp(name, "initiator") == 0) { cur_conn->initiator = str; str = NULL; } else if (strcmp(name, "initiator_addr") == 0) { cur_conn->initiator_addr = str; str = NULL; } else if (strcmp(name, "initiator_alias") == 0) { cur_conn->initiator_alias = str; str = NULL; } else if (strcmp(name, "target") == 0) { cur_conn->target = str; str = NULL; } else if (strcmp(name, "target_alias") == 0) { cur_conn->target_alias = str; str = NULL; } else if (strcmp(name, "header_digest") == 0) { cur_conn->header_digest = str; str = NULL; } else if (strcmp(name, "data_digest") == 0) { cur_conn->data_digest = str; str = NULL; } else if (strcmp(name, "max_data_segment_length") == 0) { cur_conn->max_data_segment_length = str; str = NULL; } else if (strcmp(name, "immediate_data") == 0) { cur_conn->immediate_data = atoi(str); } else if (strcmp(name, "iser") == 0) { cur_conn->iser = atoi(str); } else if (strcmp(name, "connection") == 0) { islist->cur_conn = NULL; } else if (strcmp(name, "ctlislist") == 0) { } else errx(1, "unknown element %s", name); free(str); } static void cctl_islist_char_handler(void *user_data, const XML_Char *str, int len) { struct cctl_islist_data *islist; islist = (struct cctl_islist_data *)user_data; sbuf_bcat(islist->cur_sb[islist->level], str, len); } static int cctl_islist(int fd, int argc,
char **argv, char *combinedopt) { struct ctl_iscsi req; struct cctl_islist_data islist; struct cctl_islist_conn *conn; XML_Parser parser; char *conn_str; int conn_len; int dump_xml = 0; int c, retval, verbose = 0; retval = 0; conn_len = 4096; bzero(&islist, sizeof(islist)); STAILQ_INIT(&islist.conn_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'v': verbose = 1; break; case 'x': dump_xml = 1; break; default: break; } } retry: conn_str = malloc(conn_len); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_LIST; req.data.list.alloc_len = conn_len; req.data.list.conn_xml = conn_str; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status == CTL_ISCSI_ERROR) { warnx("%s: error returned from CTL_ISCSI ioctl:\n%s", __func__, req.error_str); } else if (req.status == CTL_ISCSI_LIST_NEED_MORE_SPACE) { conn_len = conn_len << 1; goto retry; } if (dump_xml != 0) { printf("%s", conn_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &islist); XML_SetElementHandler(parser, cctl_islist_start_element, cctl_islist_end_element); XML_SetCharacterDataHandler(parser, cctl_islist_char_handler); retval = XML_Parse(parser, conn_str, strlen(conn_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; goto bailout; } XML_ParserFree(parser); if (verbose != 0) { STAILQ_FOREACH(conn, &islist.conn_list, links) { printf("Session ID: %d\n", conn->connection_id); printf("Initiator name: %s\n", conn->initiator); printf("Initiator portal: %s\n", conn->initiator_addr); printf("Initiator alias: %s\n", conn->initiator_alias); printf("Target name: %s\n", conn->target); printf("Target alias: %s\n", conn->target_alias); printf("Header digest: %s\n", conn->header_digest); printf("Data digest: %s\n", conn->data_digest); printf("DataSegmentLen: %s\n", conn->max_data_segment_length); printf("ImmediateData: %s\n", conn->immediate_data ? "Yes" : "No"); printf("iSER (RDMA): %s\n", conn->iser ? 
"Yes" : "No"); printf("\n"); } } else { printf("%4s %-16s %-36s %-36s\n", "ID", "Portal", "Initiator name", "Target name"); STAILQ_FOREACH(conn, &islist.conn_list, links) { printf("%4u %-16s %-36s %-36s\n", conn->connection_id, conn->initiator_addr, conn->initiator, conn->target); } } bailout: free(conn_str); return (retval); } static int cctl_islogout(int fd, int argc, char **argv, char *combinedopt) { struct ctl_iscsi req; int retval = 0, c; int all = 0, connection_id = -1, nargs = 0; char *initiator_name = NULL, *initiator_addr = NULL; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': all = 1; nargs++; break; case 'c': connection_id = strtoul(optarg, NULL, 0); nargs++; break; case 'i': initiator_name = strdup(optarg); if (initiator_name == NULL) err(1, "%s: strdup", __func__); nargs++; break; case 'p': initiator_addr = strdup(optarg); if (initiator_addr == NULL) err(1, "%s: strdup", __func__); nargs++; break; default: break; } } if (nargs == 0) errx(1, "%s: either -a, -c, -i, or -p must be specified", __func__); if (nargs > 1) errx(1, "%s: only one of -a, -c, -i, or -p may be specified", __func__); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_LOGOUT; req.data.logout.connection_id = connection_id; if (initiator_addr != NULL) strlcpy(req.data.logout.initiator_addr, initiator_addr, sizeof(req.data.logout.initiator_addr)); if (initiator_name != NULL) strlcpy(req.data.logout.initiator_name, initiator_name, sizeof(req.data.logout.initiator_name)); if (all != 0) req.data.logout.all = 1; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status != CTL_ISCSI_OK) { warnx("%s: error returned from CTL iSCSI logout request:\n%s", __func__, req.error_str); retval = 1; goto bailout; } printf("iSCSI logout requests submitted\n"); bailout: return (retval); } static int cctl_isterminate(int fd, int argc, char **argv, char *combinedopt) { struct ctl_iscsi req; int retval = 0, c; int all = 0, connection_id = -1, nargs = 0; char *initiator_name = NULL, *initiator_addr = NULL; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': all = 1; nargs++; break; case 'c': connection_id = strtoul(optarg, NULL, 0); nargs++; break; case 'i': initiator_name = strdup(optarg); if (initiator_name == NULL) err(1, "%s: strdup", __func__); nargs++; break; case 'p': initiator_addr = strdup(optarg); if (initiator_addr == NULL) err(1, "%s: strdup", __func__); nargs++; break; default: break; } } if (nargs == 0) errx(1, "%s: either -a, -c, -i, or -p must be specified", __func__); if (nargs > 1) errx(1, "%s: only one of -a, -c, -i, or -p may be specified", __func__); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_TERMINATE; req.data.terminate.connection_id = connection_id; if (initiator_addr != NULL) strlcpy(req.data.terminate.initiator_addr, initiator_addr, sizeof(req.data.terminate.initiator_addr)); if (initiator_name != NULL) strlcpy(req.data.terminate.initiator_name, initiator_name, sizeof(req.data.terminate.initiator_name)); if (all != 0) req.data.terminate.all = 1; if (ioctl(fd, CTL_ISCSI, &req) == -1) { warn("%s: error issuing CTL_ISCSI ioctl", __func__); retval = 1; goto bailout; } if (req.status != CTL_ISCSI_OK) { warnx("%s: error returned from CTL iSCSI connection " "termination request:\n%s", __func__, req.error_str); retval = 1; goto bailout; } printf("iSCSI connections terminated\n"); bailout: return (retval); } /* * Name/value pair used for per-LUN attributes. 
*/ struct cctl_lun_nv { char *name; char *value; STAILQ_ENTRY(cctl_lun_nv) links; }; /* * Backend LUN information. */ struct cctl_lun { uint64_t lun_id; char *backend_type; uint64_t size_blocks; uint32_t blocksize; char *serial_number; char *device_id; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_lun) links; }; struct cctl_devlist_data { int num_luns; STAILQ_HEAD(,cctl_lun) lun_list; struct cctl_lun *cur_lun; int level; struct sbuf *cur_sb[32]; }; static void cctl_start_element(void *user_data, const char *name, const char **attr) { int i; struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; devlist->level++; if ((u_int)devlist->level >= (sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0])); devlist->cur_sb[devlist->level] = sbuf_new_auto(); if (devlist->cur_sb[devlist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); if (strcmp(name, "lun") == 0) { if (cur_lun != NULL) errx(1, "%s: improper lun element nesting", __func__); cur_lun = calloc(1, sizeof(*cur_lun)); if (cur_lun == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_lun)); devlist->num_luns++; devlist->cur_lun = cur_lun; STAILQ_INIT(&cur_lun->attr_list); STAILQ_INSERT_TAIL(&devlist->lun_list, cur_lun, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_lun->lun_id = strtoull(attr[i+1], NULL, 0); } else { errx(1, "%s: invalid LUN attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_end_element(void *user_data, const char *name) { struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; char *str; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; if ((cur_lun == NULL) && (strcmp(name, "ctllunlist") != 0)) errx(1, "%s: cur_lun == NULL! (name = %s)", __func__, name); if (devlist->cur_sb[devlist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, devlist->level, name); if (sbuf_finish(devlist->cur_sb[devlist->level]) != 0) err(1, "%s: sbuf_finish", __func__); str = strdup(sbuf_data(devlist->cur_sb[devlist->level])); if (str == NULL) err(1, "%s can't allocate %zd bytes for string", __func__, sbuf_len(devlist->cur_sb[devlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(devlist->cur_sb[devlist->level]); devlist->cur_sb[devlist->level] = NULL; devlist->level--; if (strcmp(name, "backend_type") == 0) { cur_lun->backend_type = str; str = NULL; } else if (strcmp(name, "size") == 0) { cur_lun->size_blocks = strtoull(str, NULL, 0); } else if (strcmp(name, "blocksize") == 0) { cur_lun->blocksize = strtoul(str, NULL, 0); } else if (strcmp(name, "serial_number") == 0) { cur_lun->serial_number = str; str = NULL; } else if (strcmp(name, "device_id") == 0) { cur_lun->device_id = str; str = NULL; } else if (strcmp(name, "lun") == 0) { devlist->cur_lun = NULL; } else if (strcmp(name, "ctllunlist") == 0) { /* Nothing. 
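* No per-LUN state needs to be torn down when the list wrapper * element closes.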
*/ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); nv->name = strdup(name); if (nv->name == NULL) err(1, "%s: can't allocate %zd bytes for string", __func__, strlen(name)); nv->value = str; str = NULL; STAILQ_INSERT_TAIL(&cur_lun->attr_list, nv, links); } free(str); } static void cctl_char_handler(void *user_data, const XML_Char *str, int len) { struct cctl_devlist_data *devlist; devlist = (struct cctl_devlist_data *)user_data; sbuf_bcat(devlist->cur_sb[devlist->level], str, len); } static int cctl_devlist(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_list list; struct cctl_devlist_data devlist; struct cctl_lun *lun; XML_Parser parser; char *lun_str; int lun_len; int dump_xml = 0; int retval, c; char *backend = NULL; int verbose = 0; retval = 0; lun_len = 4096; bzero(&devlist, sizeof(devlist)); STAILQ_INIT(&devlist.lun_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': backend = strdup(optarg); break; case 'v': verbose++; break; case 'x': dump_xml = 1; break; default: break; } } retry: lun_str = malloc(lun_len); bzero(&list, sizeof(list)); list.alloc_len = lun_len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = lun_str; if (ioctl(fd, CTL_LUN_LIST, &list) == -1) { warn("%s: error issuing CTL_LUN_LIST ioctl", __func__); retval = 1; goto bailout; } if (list.status == CTL_LUN_LIST_ERROR) { warnx("%s: error returned from CTL_LUN_LIST ioctl:\n%s", __func__, list.error_str); } else if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { lun_len = lun_len << 1; goto retry; } if (dump_xml != 0) { printf("%s", lun_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &devlist); XML_SetElementHandler(parser, cctl_start_element, cctl_end_element); XML_SetCharacterDataHandler(parser, cctl_char_handler); retval = XML_Parse(parser, lun_str, strlen(lun_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; goto bailout; } XML_ParserFree(parser); printf("LUN Backend %18s %4s %-16s %-16s\n", "Size (Blocks)", "BS", "Serial Number", "Device ID"); STAILQ_FOREACH(lun, &devlist.lun_list, links) { struct cctl_lun_nv *nv; if ((backend != NULL) && (strcmp(lun->backend_type, backend) != 0)) continue; printf("%3ju %-8s %18ju %4u %-16s %-16s\n", (uintmax_t)lun->lun_id, lun->backend_type, (uintmax_t)lun->size_blocks, lun->blocksize, lun->serial_number, lun->device_id); if (verbose == 0) continue; STAILQ_FOREACH(nv, &lun->attr_list, links) { printf(" %s=%s\n", nv->name, nv->value); } } bailout: free(lun_str); return (retval); } /* * Port information.
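* Besides the identity fields, each port carries an optional LUN map * and a list of per-port <lun> mappings parsed from the kernel's XML, * parallel to the init_list of initiators.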
*/ struct cctl_port { uint64_t port_id; char *online; char *frontend_type; char *name; int pp, vp; - char *target, *port; + char *target, *port, *lun_map; STAILQ_HEAD(,cctl_lun_nv) init_list; + STAILQ_HEAD(,cctl_lun_nv) lun_list; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_port) links; }; struct cctl_portlist_data { int num_ports; STAILQ_HEAD(,cctl_port) port_list; struct cctl_port *cur_port; int level; uint64_t cur_id; struct sbuf *cur_sb[32]; }; static void cctl_start_pelement(void *user_data, const char *name, const char **attr) { int i; struct cctl_portlist_data *portlist; struct cctl_port *cur_port; portlist = (struct cctl_portlist_data *)user_data; cur_port = portlist->cur_port; portlist->level++; if ((u_int)portlist->level >= (sizeof(portlist->cur_sb) / sizeof(portlist->cur_sb[0]))) errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(portlist->cur_sb) / sizeof(portlist->cur_sb[0])); portlist->cur_sb[portlist->level] = sbuf_new_auto(); if (portlist->cur_sb[portlist->level] == NULL) err(1, "%s: Unable to allocate sbuf", __func__); portlist->cur_id = 0; for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { portlist->cur_id = strtoull(attr[i+1], NULL, 0); break; } } if (strcmp(name, "targ_port") == 0) { if (cur_port != NULL) errx(1, "%s: improper port element nesting", __func__); cur_port = calloc(1, sizeof(*cur_port)); if (cur_port == NULL) err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_port)); portlist->num_ports++; portlist->cur_port = cur_port; STAILQ_INIT(&cur_port->init_list); + STAILQ_INIT(&cur_port->lun_list); STAILQ_INIT(&cur_port->attr_list); cur_port->port_id = portlist->cur_id; STAILQ_INSERT_TAIL(&portlist->port_list, cur_port, links); } } static void cctl_end_pelement(void *user_data, const char *name) { struct cctl_portlist_data *portlist; struct cctl_port *cur_port; char *str; portlist = (struct cctl_portlist_data *)user_data; cur_port = portlist->cur_port; if ((cur_port == NULL) && (strcmp(name, "ctlportlist") != 0)) errx(1, "%s: cur_port == NULL! (name = %s)", __func__, name); if (portlist->cur_sb[portlist->level] == NULL) errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, portlist->level, name); if (sbuf_finish(portlist->cur_sb[portlist->level]) != 0) err(1, "%s: sbuf_finish", __func__); str = strdup(sbuf_data(portlist->cur_sb[portlist->level])); if (str == NULL) err(1, "%s can't allocate %zd bytes for string", __func__, sbuf_len(portlist->cur_sb[portlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(portlist->cur_sb[portlist->level]); portlist->cur_sb[portlist->level] = NULL; portlist->level--; if (strcmp(name, "frontend_type") == 0) { cur_port->frontend_type = str; str = NULL; } else if (strcmp(name, "port_name") == 0) { cur_port->name = str; str = NULL; } else if (strcmp(name, "online") == 0) { cur_port->online = str; str = NULL; } else if (strcmp(name, "physical_port") == 0) { cur_port->pp = strtoull(str, NULL, 0); } else if (strcmp(name, "virtual_port") == 0) { cur_port->vp = strtoull(str, NULL, 0); } else if (strcmp(name, "target") == 0) { cur_port->target = str; str = NULL; } else if (strcmp(name, "port") == 0) { cur_port->port = str; str = NULL; + } else if (strcmp(name, "lun_map") == 0) { + cur_port->lun_map = str; + str = NULL; } else if (strcmp(name, "targ_port") == 0) { portlist->cur_port = NULL; } else if (strcmp(name, "ctlportlist") == 0) { /* Nothing. 
*/ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); - if (strcmp(name, "initiator") == 0) + if (strcmp(name, "initiator") == 0 || + strcmp(name, "lun") == 0) asprintf(&nv->name, "%ju", portlist->cur_id); else nv->name = strdup(name); if (nv->name == NULL) err(1, "%s: can't allocate %zd bytes for string", __func__, strlen(name)); nv->value = str; str = NULL; if (strcmp(name, "initiator") == 0) STAILQ_INSERT_TAIL(&cur_port->init_list, nv, links); + else if (strcmp(name, "lun") == 0) + STAILQ_INSERT_TAIL(&cur_port->lun_list, nv, links); else STAILQ_INSERT_TAIL(&cur_port->attr_list, nv, links); } free(str); } static void cctl_char_phandler(void *user_data, const XML_Char *str, int len) { struct cctl_portlist_data *portlist; portlist = (struct cctl_portlist_data *)user_data; sbuf_bcat(portlist->cur_sb[portlist->level], str, len); } static int cctl_portlist(int fd, int argc, char **argv, char *combinedopt) { struct ctl_lun_list list; struct cctl_portlist_data portlist; struct cctl_port *port; XML_Parser parser; char *port_str; int port_len; int dump_xml = 0; int retval, c; char *frontend = NULL; uint64_t portarg = UINT64_MAX; - int verbose = 0, init = 0, quiet = 0; + int verbose = 0, init = 0, lun = 0, quiet = 0; retval = 0; port_len = 4096; bzero(&portlist, sizeof(portlist)); STAILQ_INIT(&portlist.port_list); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'f': frontend = strdup(optarg); break; case 'i': init++; break; + case 'l': + lun++; + break; case 'p': portarg = strtoll(optarg, NULL, 0); break; case 'q': quiet++; break; case 'v': verbose++; break; case 'x': dump_xml = 1; break; default: break; } } retry: port_str = malloc(port_len); bzero(&list, sizeof(list)); list.alloc_len = port_len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = port_str; if (ioctl(fd, CTL_PORT_LIST, &list) == -1) { warn("%s: error issuing CTL_PORT_LIST ioctl", __func__); retval = 1; goto bailout; } if (list.status == CTL_LUN_LIST_ERROR) { warnx("%s: error returned from CTL_PORT_LIST ioctl:\n%s", __func__, list.error_str); } else if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { port_len = port_len << 1; free(port_str); goto retry; } if (dump_xml != 0) { printf("%s", port_str); goto bailout; } parser = XML_ParserCreate(NULL); if (parser == NULL) { warn("%s: Unable to create XML parser", __func__); retval = 1; goto bailout; } XML_SetUserData(parser, &portlist); XML_SetElementHandler(parser, cctl_start_pelement, cctl_end_pelement); XML_SetCharacterDataHandler(parser, cctl_char_phandler); retval = XML_Parse(parser, port_str, strlen(port_str), 1); if (retval != 1) { warnx("%s: Unable to parse XML: Error %d", __func__, XML_GetErrorCode(parser)); XML_ParserFree(parser); retval = 1; goto bailout; } XML_ParserFree(parser); if (quiet == 0) printf("Port Online Frontend Name pp vp\n"); STAILQ_FOREACH(port, &portlist.port_list, links) { struct cctl_lun_nv *nv; if ((frontend != NULL) && (strcmp(port->frontend_type, frontend) != 0)) continue; if ((portarg != UINT64_MAX) && (portarg != port->port_id)) continue; printf("%-4ju %-6s %-8s %-8s %-2d %-2d %s\n", (uintmax_t)port->port_id, port->online, port->frontend_type, port->name, port->pp, port->vp, port->port ?
port->port : ""); if (init || verbose) { if (port->target) printf(" Target: %s\n", port->target); STAILQ_FOREACH(nv, &port->init_list, links) { printf(" Initiator %s: %s\n", nv->name, nv->value); } } + if (lun || verbose) { + if (port->lun_map) { + STAILQ_FOREACH(nv, &port->lun_list, links) + printf(" LUN %s: %s\n", + nv->name, nv->value); + if (STAILQ_EMPTY(&port->lun_list)) + printf(" No LUNs mapped\n"); + } else + printf(" All LUNs mapped\n"); + } + if (verbose) { STAILQ_FOREACH(nv, &port->attr_list, links) { printf(" %s=%s\n", nv->name, nv->value); } } } bailout: free(port_str); return (retval); } +static int +cctl_lunmap(int fd, int argc, char **argv, char *combinedopt) +{ + struct ctl_lun_map lm; + int retval = 0, c; + + retval = 0; + lm.port = UINT32_MAX; + lm.plun = UINT32_MAX; + lm.lun = UINT32_MAX; + + while ((c = getopt(argc, argv, combinedopt)) != -1) { + switch (c) { + case 'p': + lm.port = strtoll(optarg, NULL, 0); + break; + case 'l': + lm.plun = strtoll(optarg, NULL, 0); + break; + case 'L': + lm.lun = strtoll(optarg, NULL, 0); + break; + default: + break; + } + } + + if (ioctl(fd, CTL_LUN_MAP, &lm) == -1) { + warn("%s: error issuing CTL_LUN_MAP ioctl", __func__); + retval = 1; + } + + return (retval); +} + void usage(int error) { fprintf(error ? stderr : stdout, "Usage:\n" "Primary commands:\n" " ctladm tur [dev_id][general options]\n" " ctladm inquiry [dev_id][general options]\n" " ctladm devid [dev_id][general options]\n" " ctladm reqsense [dev_id][general options]\n" " ctladm reportluns [dev_id][general options]\n" " ctladm read [dev_id][general options] <-l lba> <-d len>\n" " <-f file|-> <-b blocksize> [-c cdbsize][-N]\n" " ctladm write [dev_id][general options] <-l lba> <-d len>\n" " <-f file|-> <-b blocksize> [-c cdbsize][-N]\n" " ctladm readcap [dev_id][general options] [-c cdbsize]\n" " ctladm modesense [dev_id][general options] <-m page|-l> [-P pc]\n" " [-d] [-S subpage] [-c cdbsize]\n" " ctladm prin [dev_id][general options] <-a action>\n" " ctladm prout [dev_id][general options] <-a action>\n" " <-r restype] [-k key] [-s sa_key]\n" " ctladm rtpg [dev_id][general options]\n" " ctladm start [dev_id][general options] [-i] [-o]\n" " ctladm stop [dev_id][general options] [-i] [-o]\n" " ctladm synccache [dev_id][general options] [-l lba]\n" " [-b blockcount] [-r] [-i] [-c cdbsize]\n" " ctladm create <-b backend> [-B blocksize] [-d device_id]\n" " [-l lun_id] [-o name=value] [-s size_bytes]\n" " [-S serial_num] [-t dev_type]\n" " ctladm remove <-b backend> <-l lun_id> [-o name=value]\n" " ctladm modify <-b backend> <-l lun_id> <-s size_bytes>\n" " ctladm devlist [-b backend] [-v] [-x]\n" " ctladm shutdown\n" " ctladm startup\n" " ctladm hardstop\n" " ctladm hardstart\n" " ctladm lunlist\n" +" ctladm lunmap -p targ_port [-l pLUN] [-L cLUN]\n" " ctladm bbrread [dev_id] <-l lba> <-d datalen>\n" " ctladm delay [dev_id] <-l datamove|done> [-T oneshot|cont]\n" " [-t secs]\n" " ctladm realsync \n" " ctladm setsync [dev_id] <-i interval>\n" " ctladm getsync [dev_id]\n" " ctladm inject [dev_id] <-i action> <-p pattern> [-r lba,len]\n" " [-s len fmt [args]] [-c] [-d delete_id]\n" " ctladm port <-l | -o | [-w wwnn][-W wwpn]>\n" " [-p targ_port] [-t port_type] [-q] [-x]\n" " ctladm portlist [-f frontend] [-i] [-p targ_port] [-q] [-v] [-x]\n" " ctladm islist [-v | -x]\n" " ctladm islogout <-a | -c connection-id | -i name | -p portal>\n" " ctladm isterminate <-a | -c connection-id | -i name | -p portal>\n" " ctladm dumpooa\n" " ctladm dumpstructs\n" " ctladm help\n" "General 
Options:\n" "-I intiator_id : defaults to 7, used to change the initiator id\n" "-C retries : specify the number of times to retry this command\n" "-D devicename : specify the device to operate on\n" " : (default is %s)\n" "read/write options:\n" "-l lba : logical block address\n" "-d len : read/write length, in blocks\n" "-f file|- : write/read data to/from file or stdout/stdin\n" "-b blocksize : block size, in bytes\n" "-c cdbsize : specify minimum cdb size: 6, 10, 12 or 16\n" "-N : do not copy data to/from userland\n" "readcapacity options:\n" "-c cdbsize : specify minimum cdb size: 10 or 16\n" "modesense options:\n" "-m page : specify the mode page to view\n" "-l : request a list of supported pages\n" "-P pc : specify the page control value: 0-3 (current,\n" " changeable, default, saved, respectively)\n" "-d : disable block descriptors for mode sense\n" "-S subpage : specify a subpage\n" "-c cdbsize : specify minimum cdb size: 6 or 10\n" "persistent reserve in options:\n" "-a action : specify the action value: 0-2 (read key, read\n" " reservation, read capabilities, respectively)\n" "persistent reserve out options:\n" "-a action : specify the action value: 0-5 (register, reserve,\n" " release, clear, preempt, register and ignore)\n" "-k key : key value\n" "-s sa_key : service action value\n" "-r restype : specify the reservation type: 0-5(wr ex, ex ac,\n" " wr ex ro, ex ac ro, wr ex ar, ex ac ar)\n" "start/stop options:\n" "-i : set the immediate bit (CTL does not support this)\n" "-o : set the on/offline bit\n" "synccache options:\n" "-l lba : set the starting LBA\n" "-b blockcount : set the length to sync in blocks\n" "-r : set the relative addressing bit\n" "-i : set the immediate bit\n" "-c cdbsize : specify minimum cdb size: 10 or 16\n" "create options:\n" "-b backend : backend name (\"block\", \"ramdisk\", etc.)\n" "-B blocksize : LUN blocksize in bytes (some backends)\n" "-d device_id : SCSI VPD page 0x83 ID\n" "-l lun_id : requested LUN number\n" "-o name=value : backend-specific options, multiple allowed\n" "-s size_bytes : LUN size in bytes (some backends)\n" "-S serial_num : SCSI VPD page 0x80 serial number\n" "-t dev_type : SCSI device type (0=disk, 3=processor)\n" "remove options:\n" "-b backend : backend name (\"block\", \"ramdisk\", etc.)\n" "-l lun_id : LUN number to delete\n" "-o name=value : backend-specific options, multiple allowed\n" "devlist options:\n" "-b backend : list devices from specified backend only\n" "-v : be verbose, show backend attributes\n" "-x : dump raw XML\n" "delay options:\n" "-l datamove|done : delay command at datamove or done phase\n" "-T oneshot : delay one command, then resume normal completion\n" "-T cont : delay all commands\n" "-t secs : number of seconds to delay\n" "inject options:\n" "-i error_action : action to perform\n" "-p pattern : command pattern to look for\n" "-r lba,len : LBA range for pattern\n" "-s len fmt [args] : sense data for custom sense action\n" "-c : continuous operation\n" "-d delete_id : error id to delete\n" "port options:\n" "-l : list frontend ports\n" "-o on|off : turn frontend ports on or off\n" "-w wwnn : set WWNN for one frontend\n" "-W wwpn : set WWPN for one frontend\n" "-t port_type : specify fc, scsi, ioctl, internal frontend type\n" "-p targ_port : specify target port number\n" "-q : omit header in list output\n" "-x : output port list in XML format\n" "portlist options:\n" "-f fronetnd : specify frontend type\n" "-i : report target and initiators addresses\n" +"-l : report LUN mapping\n" "-p targ_port 
: specify target port number\n" "-q : omit header in list output\n" "-v : verbose output (report all port options)\n" "-x : output port list in XML format\n" +"lunmap options:\n" +"-p targ_port : specify target port number\n" +"-l pLUN : specify port-visible LUN\n" +"-L cLUN : specify CTL LUN\n" "bbrread options:\n" "-l lba : starting LBA\n" "-d datalen : length, in bytes, to read\n", CTL_DEFAULT_DEV); }
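main() below opens with a long comment explaining why ctladm has to run getopt(3) twice over the same argv, giving each pass the concatenation of the generic and subcommand option strings. A stripped-down sketch of that two-pass technique, using hypothetical flags -D (generic) and -b (subcommand), may make it concrete:

#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	const char *mainopt = "D:";	/* generic options */
	const char *subopt = "b:";	/* subcommand options */
	char combined[256];
	int c;

	snprintf(combined, sizeof(combined), "%s%s", mainopt, subopt);

	/* Pass one: handle generic options, silently skip the rest. */
	while ((c = getopt(argc, argv, combined)) != -1) {
		if (c == 'D')
			printf("device: %s\n", optarg);
	}

	/* Rewind, then let the "subcommand" pass walk the same argv. */
	optind = 1;
	optreset = 1;	/* BSD extension; required after rewinding */
	while ((c = getopt(argc, argv, combined)) != -1) {
		if (c == 'b')
			printf("backend: %s\n", optarg);
	}
	return (0);
}

int main(int argc, char **argv) { int c; ctladm_cmdfunction command; ctladm_cmdargs cmdargs; ctladm_optret optreturn; char *device; const char *mainopt = "C:D:I:"; const char *subopt = NULL; char combinedopt[256]; int target, lun; int optstart = 2; int retval, fd; int retries; int initid; int saved_errno; retval = 0; cmdargs = CTLADM_ARG_NONE; command = CTLADM_CMD_HELP; device = NULL; fd = -1; retries = 0; target = 0; lun = 0; initid = 7; if (argc < 2) { usage(1); retval = 1; goto bailout; } /* * Get the base option. */ optreturn = getoption(option_table,argv[1], &command, &cmdargs,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } if (cmdargs & CTLADM_ARG_NEED_TL) { if ((argc < 3) || (!isdigit(argv[2][0]))) { warnx("option %s requires a target:lun argument", argv[1]); usage(0); exit(1); } retval = cctl_parse_tl(argv[2], &target, &lun); if (retval != 0) errx(1, "invalid target:lun argument %s", argv[2]); cmdargs |= CTLADM_ARG_TARG_LUN; optstart++; } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. * * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expected options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!)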
command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) sprintf(combinedopt, "%s%s", mainopt, subopt); else sprintf(combinedopt, "%s", mainopt); /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt))!= -1){ switch (c) { case 'C': cmdargs |= CTLADM_ARG_RETRIES; retries = strtol(optarg, NULL, 0); break; case 'D': device = strdup(optarg); cmdargs |= CTLADM_ARG_DEVICE; break; case 'I': cmdargs |= CTLADM_ARG_INITIATOR; initid = strtol(optarg, NULL, 0); break; default: break; } } if ((cmdargs & CTLADM_ARG_INITIATOR) == 0) initid = 7; optind = optstart; optreset = 1; /* * Default to opening the CTL device for now. */ if (((cmdargs & CTLADM_ARG_DEVICE) == 0) && (command != CTLADM_CMD_HELP)) { device = strdup(CTL_DEFAULT_DEV); cmdargs |= CTLADM_ARG_DEVICE; } if ((cmdargs & CTLADM_ARG_DEVICE) && (command != CTLADM_CMD_HELP)) { fd = open(device, O_RDWR); if (fd == -1 && errno == ENOENT) { saved_errno = errno; retval = kldload("ctl"); if (retval != -1) fd = open(device, O_RDWR); else errno = saved_errno; } if (fd == -1) { fprintf(stderr, "%s: error opening %s: %s\n", argv[0], device, strerror(errno)); retval = 1; goto bailout; } } else if ((command != CTLADM_CMD_HELP) && ((cmdargs & CTLADM_ARG_DEVICE) == 0)) { fprintf(stderr, "%s: you must specify a device with the " "--device argument for this command\n", argv[0]); command = CTLADM_CMD_HELP; retval = 1; } switch (command) { case CTLADM_CMD_TUR: retval = cctl_tur(fd, target, lun, initid, retries); break; case CTLADM_CMD_INQUIRY: retval = cctl_inquiry(fd, target, lun, initid, retries); break; case CTLADM_CMD_REQ_SENSE: retval = cctl_req_sense(fd, target, lun, initid, retries); break; case CTLADM_CMD_REPORT_LUNS: retval = cctl_report_luns(fd, target, lun, initid, retries); break; case CTLADM_CMD_CREATE: retval = cctl_create_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_RM: retval = cctl_rm_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_DEVLIST: retval = cctl_devlist(fd, argc, argv, combinedopt); break; case CTLADM_CMD_READ: case CTLADM_CMD_WRITE: retval = cctl_read_write(fd, target, lun, initid, retries, argc, argv, combinedopt, command); break; case CTLADM_CMD_PORT: retval = cctl_port(fd, argc, argv, combinedopt); break; case CTLADM_CMD_PORTLIST: retval = cctl_portlist(fd, argc, argv, combinedopt); + break; + case CTLADM_CMD_LUNMAP: + retval = cctl_lunmap(fd, argc, argv, combinedopt); break; case CTLADM_CMD_READCAPACITY: retval = cctl_read_capacity(fd, target, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_MODESENSE: retval = cctl_mode_sense(fd, target, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_START: case CTLADM_CMD_STOP: retval = cctl_start_stop(fd, target, lun, initid, retries, (command == CTLADM_CMD_START) ? 
1 : 0, argc, argv, combinedopt); break; case CTLADM_CMD_SYNC_CACHE: retval = cctl_sync_cache(fd, target, lun, initid, retries, argc, argv, combinedopt); break; case CTLADM_CMD_SHUTDOWN: case CTLADM_CMD_STARTUP: retval = cctl_startup_shutdown(fd, target, lun, initid, command); break; case CTLADM_CMD_HARDSTOP: case CTLADM_CMD_HARDSTART: retval = cctl_hardstopstart(fd, command); break; case CTLADM_CMD_BBRREAD: retval = cctl_bbrread(fd, target, lun, initid, argc, argv, combinedopt); break; case CTLADM_CMD_LUNLIST: retval = cctl_lunlist(fd); break; case CTLADM_CMD_DELAY: retval = cctl_delay(fd, target, lun, argc, argv, combinedopt); break; case CTLADM_CMD_REALSYNC: retval = cctl_realsync(fd, argc, argv); break; case CTLADM_CMD_SETSYNC: case CTLADM_CMD_GETSYNC: retval = cctl_getsetsync(fd, target, lun, command, argc, argv, combinedopt); break; case CTLADM_CMD_ERR_INJECT: retval = cctl_error_inject(fd, target, lun, argc, argv, combinedopt); break; case CTLADM_CMD_DUMPOOA: retval = cctl_dump_ooa(fd, argc, argv); break; case CTLADM_CMD_DUMPSTRUCTS: retval = cctl_dump_structs(fd, cmdargs); break; case CTLADM_CMD_PRES_IN: retval = cctl_persistent_reserve_in(fd, target, lun, initid, argc, argv, combinedopt, retries); break; case CTLADM_CMD_PRES_OUT: retval = cctl_persistent_reserve_out(fd, target, lun, initid, argc, argv, combinedopt, retries); break; case CTLADM_CMD_INQ_VPD_DEVID: retval = cctl_inquiry_vpd_devid(fd, target, lun, initid); break; case CTLADM_CMD_RTPG: retval = cctl_report_target_port_group(fd, target, lun, initid); break; case CTLADM_CMD_MODIFY: retval = cctl_modify_lun(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISLIST: retval = cctl_islist(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISLOGOUT: retval = cctl_islogout(fd, argc, argv, combinedopt); break; case CTLADM_CMD_ISTERMINATE: retval = cctl_isterminate(fd, argc, argv, combinedopt); break; case CTLADM_CMD_HELP: default: usage(retval); break; } bailout: if (fd != -1) close(fd); exit (retval); } /* * vim: ts=8 */ Index: stable/10/usr.sbin/ctld/ctl.conf.5 =================================================================== --- stable/10/usr.sbin/ctld/ctl.conf.5 (revision 279001) +++ stable/10/usr.sbin/ctld/ctl.conf.5 (revision 279002) @@ -1,419 +1,432 @@ .\" Copyright (c) 2012 The FreeBSD Foundation .\" All rights reserved. .\" .\" This software was developed by Edward Tomasz Napierala under sponsorship .\" from the FreeBSD Foundation. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd November 24, 2014 +.Dd February 1, 2015 .Dt CTL.CONF 5 .Os .Sh NAME .Nm ctl.conf .Nd CAM Target Layer / iSCSI target daemon configuration file .Sh DESCRIPTION The .Nm configuration file is used by the .Xr ctld 8 daemon. Lines starting with .Ql # are interpreted as comments. The general syntax of the .Nm file is: .Bd -literal -offset indent .No pidfile Ar path .No auth-group Ar name No { .Dl chap Ar user Ar secret .Dl ... } .No portal-group Ar name No { .Dl listen Ar address .\".Dl listen-iser Ar address .Dl discovery-auth-group Ar name .Dl ... } +.No lun Ar name No { +.Dl path Ar path +} + .No target Ar name No { .Dl auth-group Ar name .Dl portal-group Ar name +.Dl lun Ar number Ar name .Dl lun Ar number No { .Dl path Ar path .Dl } .Dl ... } .Ed .Ss Global Context .Bl -tag -width indent .It Ic auth-group Ar name Create an .Sy auth-group configuration context, defining a new auth-group, which can then be assigned to any number of targets. .It Ic debug Ar level The debug verbosity level. The default is 0. .It Ic maxproc Ar number The limit for concurrently running child processes handling incoming connections. The default is 30. A setting of 0 disables the limit. .It Ic pidfile Ar path The path to the pidfile. The default is .Pa /var/run/ctld.pid . .It Ic portal-group Ar name Create a .Sy portal-group configuration context, defining a new portal-group, which can then be assigned to any number of targets. +.It Ic lun Ar name +Create a +.Sy lun +configuration context, defining a LUN that can be exported by one or more targets. .It Ic target Ar name Create a .Sy target configuration context, which can contain one or more .Sy lun contexts. .It Ic timeout Ar seconds The timeout for login sessions, after which the connection will be forcibly terminated. The default is 60. A setting of 0 disables the timeout. .It Ic isns-server Ar address An IPv4 or IPv6 address, and optionally a port, of the iSNS server to register with. .It Ic isns-period Ar seconds iSNS registration period. A registered Network Entity that is not updated within this period will be unregistered. The default is 900. .It Ic isns-timeout Ar seconds Timeout for iSNS requests. The default is 5. .El .Ss auth-group Context .Bl -tag -width indent .It Ic auth-type Ar type Sets the authentication type. Type can be either .Qq Ar none , .Qq Ar deny , .Qq Ar chap , or .Qq Ar chap-mutual . In most cases it is not necessary to set the type using this clause; it is usually used to disable authentication for a given .Sy auth-group . .It Ic chap Ar user Ar secret A set of CHAP authentication credentials. Note that for any .Sy auth-group , the configuration may only contain either .Sy chap or .Sy chap-mutual entries; it is an error to mix them. .It Ic chap-mutual Ar user Ar secret Ar mutualuser Ar mutualsecret A set of mutual CHAP authentication credentials. Note that for any .Sy auth-group , the configuration may only contain either .Sy chap or .Sy chap-mutual entries; it is an error to mix them.
.It Ic initiator-name Ar initiator-name An iSCSI initiator name. Only initiators with a name matching one of the defined names will be allowed to connect. If not defined, there will be no restrictions based on initiator name. .It Ic initiator-portal Ar address Ns Op / Ns Ar prefixlen An iSCSI initiator portal: an IPv4 or IPv6 address, optionally followed by a literal slash and a prefix length. Only initiators with an address matching one of the defined addresses will be allowed to connect. If not defined, there will be no restrictions based on initiator address. .El .Ss portal-group Context .Bl -tag -width indent .It Ic discovery-auth-group Ar name Assign a previously defined authentication group to the portal group, to be used for target discovery. By default, portal groups are assigned predefined .Sy auth-group .Qq Ar default , which denies discovery. Another predefined .Sy auth-group , .Qq Ar no-authentication , may be used to permit discovery without authentication. .It Ic discovery-filter Ar filter Determines which targets are returned during discovery. Filter can be either .Qq Ar none , .Qq Ar portal , .Qq Ar portal-name , or .Qq Ar portal-name-auth . When set to .Qq Ar none , discovery will return all targets assigned to that portal group. When set to .Qq Ar portal , discovery will not return targets that cannot be accessed by the initiator because of their .Sy initiator-portal . When set to .Qq Ar portal-name , the check will include both .Sy initiator-portal and .Sy initiator-name . When set to .Qq Ar portal-name-auth , the check will include .Sy initiator-portal , .Sy initiator-name , and authentication credentials. The target is returned if it does not require CHAP authentication, or if the CHAP user and secret used during discovery match those used by the target. Note that when using .Qq Ar portal-name-auth , targets that require CHAP authentication will only be returned if .Sy discovery-auth-group requires CHAP. The default is .Qq Ar none . .It Ic listen Ar address An IPv4 or IPv6 address and port to listen on for incoming connections. .\".It Ic listen-iser Ar address .\"An IPv4 or IPv6 address and port to listen on for incoming connections .\"using iSER (iSCSI over RDMA) protocol. .It Ic redirect Aq Ar address IPv4 or IPv6 address to redirect initiators to. When configured, all initiators attempting to connect to portal belonging to this .Sy portal-group will get redirected using "Target moved temporarily" login response. Redirection happens before authentication and any .Sy initiator-name or .Sy initiator-portal checks are skipped. .El .Ss target Context .Bl -tag -width indent .It Ic alias Ar text Assign a human-readable description to the target. There is no default. .It Ic auth-group Ar name Assign a previously defined authentication group to the target. By default, targets that do not specify their own auth settings, using clauses such as .Sy chap or .Sy initiator-name , are assigned predefined .Sy auth-group .Qq Ar default , which denies all access. Another predefined .Sy auth-group , .Qq Ar no-authentication , may be used to permit access without authentication. Note that targets must only use one of .Sy auth-group , chap , No or Sy chap-mutual ; it is a configuration error to mix multiple types in one target. .It Ic auth-type Ar type Sets the authentication type. Type can be either .Qq Ar none , .Qq Ar deny , .Qq Ar chap , or .Qq Ar chap-mutual . 
In most cases it is not necessary to set the type using this clause; it is usually used to disable authentication for a given .Sy target . This clause is mutually exclusive with .Sy auth-group ; one cannot use both in a single target. .It Ic chap Ar user Ar secret A set of CHAP authentication credentials. Note that targets must only use one of .Sy auth-group , chap , No or Sy chap-mutual ; it is a configuration error to mix multiple types in one target. .It Ic chap-mutual Ar user Ar secret Ar mutualuser Ar mutualsecret A set of mutual CHAP authentication credentials. Note that targets must only use one of .Sy auth-group , chap , No or Sy chap-mutual ; it is a configuration error to mix multiple types in one target. .It Ic initiator-name Ar initiator-name An iSCSI initiator name. Only initiators with a name matching one of the defined names will be allowed to connect. If not defined, there will be no restrictions based on initiator name. This clause is mutually exclusive with .Sy auth-group ; one cannot use both in a single target. .It Ic initiator-portal Ar address Ns Op / Ns Ar prefixlen An iSCSI initiator portal: an IPv4 or IPv6 address, optionally followed by a literal slash and a prefix length. Only initiators with an address matching one of the defined addresses will be allowed to connect. If not defined, there will be no restrictions based on initiator address. This clause is mutually exclusive with .Sy auth-group ; one cannot use both in a single target. .It Ic portal-group Ar name Assign a previously defined portal group to the target. The default portal group is .Qq Ar default , which makes the target available on TCP port 3260 on all configured IPv4 and IPv6 addresses. .It Ic redirect Aq Ar address IPv4 or IPv6 address to redirect initiators to. When configured, all initiators attempting to connect to this target will get redirected using "Target moved temporarily" login response. Redirection happens after successful authentication. +.It Ic lun Ar number Ar name +Export a previously defined +.Sy lun +from the parent target. .It Ic lun Ar number Create a .Sy lun configuration context, defining a LUN exported by the parent target. .El .Ss lun Context .Bl -tag -width indent .It Ic backend Ar block No | Ar ramdisk The CTL backend to use for a given LUN. Valid choices are .Qq Ar block and .Qq Ar ramdisk ; block is used for LUNs backed by files or disk device nodes; ramdisk is a bitsink device, used mostly for testing. The default backend is block. .It Ic blocksize Ar size The blocksize visible to the initiator. The default blocksize is 512. .It Ic device-id Ar string The SCSI Device Identification string presented to the initiator. .It Ic option Ar name Ar value The CTL-specific options passed to the kernel. All CTL-specific options are documented in the .Sx OPTIONS section of .Xr ctladm 8 . .It Ic path Ar path The path to the file or device node used to back the LUN. .It Ic serial Ar string The SCSI serial number presented to the initiator. .It Ic size Ar size The LUN size, in bytes. .El .Sh FILES .Bl -tag -width ".Pa /etc/ctl.conf" -compact .It Pa /etc/ctl.conf The default location of the .Xr ctld 8 configuration file.
.El .Sh EXAMPLES .Bd -literal auth-group ag0 { chap-mutual "user" "secret" "mutualuser" "mutualsecret" chap-mutual "user2" "secret2" "mutualuser" "mutualsecret" } auth-group ag1 { auth-type none initiator-name "iqn.2012-06.com.example:initiatorhost1" initiator-name "iqn.2012-06.com.example:initiatorhost2" initiator-portal 192.168.1.1/24 initiator-portal [2001:db8::de:ef] } portal-group pg0 { discovery-auth-group no-authentication listen 0.0.0.0:3260 listen [::]:3260 listen [fe80::be:ef]:3261 } target iqn.2012-06.com.example:target0 { alias "Example target" auth-group no-authentication lun 0 { path /dev/zvol/tank/example_0 blocksize 4096 size 4G } } +lun example_1 { + path /dev/zvol/tank/example_1 +} + target iqn.2012-06.com.example:target1 { chap chapuser chapsecret - lun 0 { - path /dev/zvol/tank/example_1 - } + lun 0 example_1 } target iqn.2012-06.com.example:target2 { auth-group ag0 portal-group pg0 - lun 0 { - path /dev/zvol/tank/example2_0 - } + lun 0 example_1 lun 1 { - path /dev/zvol/tank/example2_1 + path /dev/zvol/tank/example_2 option foo bar } } .Ed .Sh SEE ALSO .Xr ctl 4 , .Xr ctladm 8 , .Xr ctld 8 .Sh AUTHORS The .Nm configuration file functionality for .Xr ctld 8 was developed by .An Edward Tomasz Napierala Aq trasz@FreeBSD.org under sponsorship from the FreeBSD Foundation. Index: stable/10/usr.sbin/ctld/ctld.c =================================================================== --- stable/10/usr.sbin/ctld/ctld.c (revision 279001) +++ stable/10/usr.sbin/ctld/ctld.c (revision 279002) @@ -1,2423 +1,2407 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
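* */

The ctld.c changes below implement the named-LUN model described in the manual page above: LUNs move from per-target lists to a single conf-wide list keyed by name, and each target just points at the LUNs it exports. A schematic of the resulting ownership, with details trimmed; the field names follow the diff, but the MAX_LUNS value is illustrative and the real definitions live in ctld.h:

#include <sys/queue.h>

#define MAX_LUNS	1024		/* illustrative; the real value is in ctld.h */

struct conf;

struct lun {
	char			*l_name;	/* config-file name, unique per conf */
	struct conf		*l_conf;	/* back pointer to the owning conf */
	TAILQ_ENTRY(lun)	 l_next;	/* linkage on conf->conf_luns */
};

struct target {
	char			*t_name;
	struct lun		*t_luns[MAX_LUNS]; /* slot i = LUN number i, or NULL */
	TAILQ_ENTRY(target)	 t_next;
};

struct conf {
	TAILQ_HEAD(, lun)	 conf_luns;	/* all LUNs, shareable by targets */
	TAILQ_HEAD(, target)	 conf_targets;
};

Deleting a LUN therefore has to walk every target and clear any t_luns slot pointing at it, which is exactly what the new lun_delete() below does instead of unlinking it from a single per-target list.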
#include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ctld.h" #include "isns.h" bool proxy_mode = false; static volatile bool sighup_received = false; static volatile bool sigterm_received = false; static volatile bool sigalrm_received = false; static int nchildren = 0; static void usage(void) { fprintf(stderr, "usage: ctld [-d][-f config-file]\n"); exit(1); } char * checked_strdup(const char *s) { char *c; c = strdup(s); if (c == NULL) log_err(1, "strdup"); return (c); } struct conf * conf_new(void) { struct conf *conf; conf = calloc(1, sizeof(*conf)); if (conf == NULL) log_err(1, "calloc"); + TAILQ_INIT(&conf->conf_luns); TAILQ_INIT(&conf->conf_targets); TAILQ_INIT(&conf->conf_auth_groups); TAILQ_INIT(&conf->conf_portal_groups); TAILQ_INIT(&conf->conf_isns); conf->conf_isns_period = 900; conf->conf_isns_timeout = 5; conf->conf_debug = 0; conf->conf_timeout = 60; conf->conf_maxproc = 30; return (conf); } void conf_delete(struct conf *conf) { + struct lun *lun, *ltmp; struct target *targ, *tmp; struct auth_group *ag, *cagtmp; struct portal_group *pg, *cpgtmp; struct isns *is, *istmp; assert(conf->conf_pidfh == NULL); + TAILQ_FOREACH_SAFE(lun, &conf->conf_luns, l_next, ltmp) + lun_delete(lun); TAILQ_FOREACH_SAFE(targ, &conf->conf_targets, t_next, tmp) target_delete(targ); TAILQ_FOREACH_SAFE(ag, &conf->conf_auth_groups, ag_next, cagtmp) auth_group_delete(ag); TAILQ_FOREACH_SAFE(pg, &conf->conf_portal_groups, pg_next, cpgtmp) portal_group_delete(pg); TAILQ_FOREACH_SAFE(is, &conf->conf_isns, i_next, istmp) isns_delete(is); free(conf->conf_pidfile_path); free(conf); } static struct auth * auth_new(struct auth_group *ag) { struct auth *auth; auth = calloc(1, sizeof(*auth)); if (auth == NULL) log_err(1, "calloc"); auth->a_auth_group = ag; TAILQ_INSERT_TAIL(&ag->ag_auths, auth, a_next); return (auth); } static void auth_delete(struct auth *auth) { TAILQ_REMOVE(&auth->a_auth_group->ag_auths, auth, a_next); free(auth->a_user); free(auth->a_secret); free(auth->a_mutual_user); free(auth->a_mutual_secret); free(auth); } const struct auth * auth_find(const struct auth_group *ag, const char *user) { const struct auth *auth; TAILQ_FOREACH(auth, &ag->ag_auths, a_next) { if (strcmp(auth->a_user, user) == 0) return (auth); } return (NULL); } static void auth_check_secret_length(struct auth *auth) { size_t len; len = strlen(auth->a_secret); if (len > 16) { if (auth->a_auth_group->ag_name != NULL) log_warnx("secret for user \"%s\", auth-group \"%s\", " "is too long; it should be at most 16 characters " "long", auth->a_user, auth->a_auth_group->ag_name); else log_warnx("secret for user \"%s\", target \"%s\", " "is too long; it should be at most 16 characters " "long", auth->a_user, auth->a_auth_group->ag_target->t_name); } if (len < 12) { if (auth->a_auth_group->ag_name != NULL) log_warnx("secret for user \"%s\", auth-group \"%s\", " "is too short; it should be at least 12 characters " "long", auth->a_user, auth->a_auth_group->ag_name); else log_warnx("secret for user \"%s\", target \"%s\", " "is too short; it should be at least 12 characters " "long", auth->a_user, auth->a_auth_group->ag_target->t_name); } if (auth->a_mutual_secret != NULL) { len = strlen(auth->a_mutual_secret); if (len > 16) { if (auth->a_auth_group->ag_name != NULL) log_warnx("mutual secret for user \"%s\", " "auth-group \"%s\", is too long; it should " "be at most 16 characters long", auth->a_user,
auth->a_auth_group->ag_name); else log_warnx("mutual secret for user \"%s\", " "target \"%s\", is too long; it should " "be at most 16 characters long", auth->a_user, auth->a_auth_group->ag_target->t_name); } if (len < 12) { if (auth->a_auth_group->ag_name != NULL) log_warnx("mutual secret for user \"%s\", " "auth-group \"%s\", is too short; it " "should be at least 12 characters long", auth->a_user, auth->a_auth_group->ag_name); else log_warnx("mutual secret for user \"%s\", " "target \"%s\", is too short; it should be " "at least 12 characters long", auth->a_user, auth->a_auth_group->ag_target->t_name); } } } const struct auth * auth_new_chap(struct auth_group *ag, const char *user, const char *secret) { struct auth *auth; if (ag->ag_type == AG_TYPE_UNKNOWN) ag->ag_type = AG_TYPE_CHAP; if (ag->ag_type != AG_TYPE_CHAP) { if (ag->ag_name != NULL) log_warnx("cannot mix \"chap\" authentication with " "other types for auth-group \"%s\"", ag->ag_name); else log_warnx("cannot mix \"chap\" authentication with " "other types for target \"%s\"", ag->ag_target->t_name); return (NULL); } auth = auth_new(ag); auth->a_user = checked_strdup(user); auth->a_secret = checked_strdup(secret); auth_check_secret_length(auth); return (auth); } const struct auth * auth_new_chap_mutual(struct auth_group *ag, const char *user, const char *secret, const char *user2, const char *secret2) { struct auth *auth; if (ag->ag_type == AG_TYPE_UNKNOWN) ag->ag_type = AG_TYPE_CHAP_MUTUAL; if (ag->ag_type != AG_TYPE_CHAP_MUTUAL) { if (ag->ag_name != NULL) log_warnx("cannot mix \"chap-mutual\" authentication " "with other types for auth-group \"%s\"", ag->ag_name); else log_warnx("cannot mix \"chap-mutual\" authentication " "with other types for target \"%s\"", ag->ag_target->t_name); return (NULL); } auth = auth_new(ag); auth->a_user = checked_strdup(user); auth->a_secret = checked_strdup(secret); auth->a_mutual_user = checked_strdup(user2); auth->a_mutual_secret = checked_strdup(secret2); auth_check_secret_length(auth); return (auth); } const struct auth_name * auth_name_new(struct auth_group *ag, const char *name) { struct auth_name *an; an = calloc(1, sizeof(*an)); if (an == NULL) log_err(1, "calloc"); an->an_auth_group = ag; an->an_initator_name = checked_strdup(name); TAILQ_INSERT_TAIL(&ag->ag_names, an, an_next); return (an); } static void auth_name_delete(struct auth_name *an) { TAILQ_REMOVE(&an->an_auth_group->ag_names, an, an_next); free(an->an_initator_name); free(an); } bool auth_name_defined(const struct auth_group *ag) { if (TAILQ_EMPTY(&ag->ag_names)) return (false); return (true); } const struct auth_name * auth_name_find(const struct auth_group *ag, const char *name) { const struct auth_name *auth_name; TAILQ_FOREACH(auth_name, &ag->ag_names, an_next) { if (strcmp(auth_name->an_initator_name, name) == 0) return (auth_name); } return (NULL); } int auth_name_check(const struct auth_group *ag, const char *initiator_name) { if (!auth_name_defined(ag)) return (0); if (auth_name_find(ag, initiator_name) == NULL) return (1); return (0); }
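auth_portal_new() below parses the address[/prefixlen] form into a sockaddr plus a prefix length, and auth_portal_find() later matches a connecting initiator by comparing whole address bytes first and masking the leftover bits. The core of that comparison, lifted into a standalone helper over raw address bytes (prefix_match is my name, not ctld's):

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true if the first "masklen" bits of addresses a and b match.
 * Works the same for IPv4 (4 address bytes) and IPv6 (16 address bytes).
 */
static bool
prefix_match(const uint8_t *a, const uint8_t *b, int masklen)
{
	int i;
	uint8_t bmask;

	for (i = 0; i < masklen / 8; i++)	/* whole bytes first */
		if (a[i] != b[i])
			return (false);
	if (masklen % 8) {			/* then the leftover bits */
		bmask = 0xff << (8 - (masklen % 8));
		if ((a[i] & bmask) != (b[i] & bmask))
			return (false);
	}
	return (true);
}

const struct auth_portal * auth_portal_new(struct auth_group *ag, const char *portal) { struct auth_portal *ap; char *net, *mask, *str, *tmp; int len, dm, m; ap = calloc(1, sizeof(*ap)); if (ap == NULL) log_err(1, "calloc"); ap->ap_auth_group = ag; ap->ap_initator_portal = checked_strdup(portal); mask = str = checked_strdup(portal); net = strsep(&mask, "/"); if (net[0] == '[') net++; len = strlen(net); if (len == 0) goto error; if (net[len - 1] == ']') net[len - 1] = 0; if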
(strchr(net, ':') != NULL) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ap->ap_sa; sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; if (inet_pton(AF_INET6, net, &sin6->sin6_addr) <= 0) goto error; dm = 128; } else { struct sockaddr_in *sin = (struct sockaddr_in *)&ap->ap_sa; sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; if (inet_pton(AF_INET, net, &sin->sin_addr) <= 0) goto error; dm = 32; } if (mask != NULL) { m = strtol(mask, &tmp, 0); if (m < 0 || m > dm || tmp[0] != 0) goto error; } else m = dm; ap->ap_mask = m; free(str); TAILQ_INSERT_TAIL(&ag->ag_portals, ap, ap_next); return (ap); error: log_errx(1, "Incorrect initiator portal '%s'", portal); return (NULL); } static void auth_portal_delete(struct auth_portal *ap) { TAILQ_REMOVE(&ap->ap_auth_group->ag_portals, ap, ap_next); free(ap->ap_initator_portal); free(ap); } bool auth_portal_defined(const struct auth_group *ag) { if (TAILQ_EMPTY(&ag->ag_portals)) return (false); return (true); } const struct auth_portal * auth_portal_find(const struct auth_group *ag, const struct sockaddr_storage *ss) { const struct auth_portal *ap; const uint8_t *a, *b; int i; uint8_t bmask; TAILQ_FOREACH(ap, &ag->ag_portals, ap_next) { if (ap->ap_sa.ss_family != ss->ss_family) continue; if (ss->ss_family == AF_INET) { a = (const uint8_t *) &((const struct sockaddr_in *)ss)->sin_addr; b = (const uint8_t *) &((const struct sockaddr_in *)&ap->ap_sa)->sin_addr; } else { a = (const uint8_t *) &((const struct sockaddr_in6 *)ss)->sin6_addr; b = (const uint8_t *) &((const struct sockaddr_in6 *)&ap->ap_sa)->sin6_addr; } for (i = 0; i < ap->ap_mask / 8; i++) { if (a[i] != b[i]) goto next; } if (ap->ap_mask % 8) { bmask = 0xff << (8 - (ap->ap_mask % 8)); if ((a[i] & bmask) != (b[i] & bmask)) goto next; } return (ap); next: ; } return (NULL); } int auth_portal_check(const struct auth_group *ag, const struct sockaddr_storage *sa) { if (!auth_portal_defined(ag)) return (0); if (auth_portal_find(ag, sa) == NULL) return (1); return (0); } struct auth_group * auth_group_new(struct conf *conf, const char *name) { struct auth_group *ag; if (name != NULL) { ag = auth_group_find(conf, name); if (ag != NULL) { log_warnx("duplicated auth-group \"%s\"", name); return (NULL); } } ag = calloc(1, sizeof(*ag)); if (ag == NULL) log_err(1, "calloc"); if (name != NULL) ag->ag_name = checked_strdup(name); TAILQ_INIT(&ag->ag_auths); TAILQ_INIT(&ag->ag_names); TAILQ_INIT(&ag->ag_portals); ag->ag_conf = conf; TAILQ_INSERT_TAIL(&conf->conf_auth_groups, ag, ag_next); return (ag); } void auth_group_delete(struct auth_group *ag) { struct auth *auth, *auth_tmp; struct auth_name *auth_name, *auth_name_tmp; struct auth_portal *auth_portal, *auth_portal_tmp; TAILQ_REMOVE(&ag->ag_conf->conf_auth_groups, ag, ag_next); TAILQ_FOREACH_SAFE(auth, &ag->ag_auths, a_next, auth_tmp) auth_delete(auth); TAILQ_FOREACH_SAFE(auth_name, &ag->ag_names, an_next, auth_name_tmp) auth_name_delete(auth_name); TAILQ_FOREACH_SAFE(auth_portal, &ag->ag_portals, ap_next, auth_portal_tmp) auth_portal_delete(auth_portal); free(ag->ag_name); free(ag); } struct auth_group * auth_group_find(const struct conf *conf, const char *name) { struct auth_group *ag; TAILQ_FOREACH(ag, &conf->conf_auth_groups, ag_next) { if (ag->ag_name != NULL && strcmp(ag->ag_name, name) == 0) return (ag); } return (NULL); } int auth_group_set_type(struct auth_group *ag, const char *str) { int type; if (strcmp(str, "none") == 0) { type = AG_TYPE_NO_AUTHENTICATION; } else if (strcmp(str, "deny") == 0) { type = AG_TYPE_DENY; } 
else if (strcmp(str, "chap") == 0) { type = AG_TYPE_CHAP; } else if (strcmp(str, "chap-mutual") == 0) { type = AG_TYPE_CHAP_MUTUAL; } else { if (ag->ag_name != NULL) log_warnx("invalid auth-type \"%s\" for auth-group " "\"%s\"", str, ag->ag_name); else log_warnx("invalid auth-type \"%s\" for target " "\"%s\"", str, ag->ag_target->t_name); return (1); } if (ag->ag_type != AG_TYPE_UNKNOWN && ag->ag_type != type) { if (ag->ag_name != NULL) { log_warnx("cannot set auth-type to \"%s\" for " "auth-group \"%s\"; already has a different " "type", str, ag->ag_name); } else { log_warnx("cannot set auth-type to \"%s\" for target " "\"%s\"; already has a different type", str, ag->ag_target->t_name); } return (1); } ag->ag_type = type; return (0); } static struct portal * portal_new(struct portal_group *pg) { struct portal *portal; portal = calloc(1, sizeof(*portal)); if (portal == NULL) log_err(1, "calloc"); TAILQ_INIT(&portal->p_targets); portal->p_portal_group = pg; TAILQ_INSERT_TAIL(&pg->pg_portals, portal, p_next); return (portal); } static void portal_delete(struct portal *portal) { TAILQ_REMOVE(&portal->p_portal_group->pg_portals, portal, p_next); if (portal->p_ai != NULL) freeaddrinfo(portal->p_ai); free(portal->p_listen); free(portal); } struct portal_group * portal_group_new(struct conf *conf, const char *name) { struct portal_group *pg; pg = portal_group_find(conf, name); if (pg != NULL) { log_warnx("duplicated portal-group \"%s\"", name); return (NULL); } pg = calloc(1, sizeof(*pg)); if (pg == NULL) log_err(1, "calloc"); pg->pg_name = checked_strdup(name); TAILQ_INIT(&pg->pg_portals); pg->pg_conf = conf; conf->conf_last_portal_group_tag++; pg->pg_tag = conf->conf_last_portal_group_tag; TAILQ_INSERT_TAIL(&conf->conf_portal_groups, pg, pg_next); return (pg); } void portal_group_delete(struct portal_group *pg) { struct portal *portal, *tmp; TAILQ_REMOVE(&pg->pg_conf->conf_portal_groups, pg, pg_next); TAILQ_FOREACH_SAFE(portal, &pg->pg_portals, p_next, tmp) portal_delete(portal); free(pg->pg_name); free(pg->pg_redirection); free(pg); } struct portal_group * portal_group_find(const struct conf *conf, const char *name) { struct portal_group *pg; TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { if (strcmp(pg->pg_name, name) == 0) return (pg); } return (NULL); } static int parse_addr_port(char *arg, const char *def_port, struct addrinfo **ai) { struct addrinfo hints; char *str, *addr, *ch; const char *port; int error, colons = 0; str = arg = strdup(arg); if (arg[0] == '[') { /* * IPv6 address in square brackets, perhaps with port. */ arg++; addr = strsep(&arg, "]"); if (arg == NULL) { free(str); return (1); } if (arg[0] == '\0') { port = def_port; } else if (arg[0] == ':') { port = arg + 1; } else { free(str); return (1); } } else { /* * Either IPv6 address without brackets - and without * a port - or IPv4 address. Just count the colons. */ for (ch = arg; *ch != '\0'; ch++) { if (*ch == ':') colons++; } if (colons > 1) { addr = arg; port = def_port; } else { addr = strsep(&arg, ":"); if (arg == NULL) port = def_port; else port = arg; } } memset(&hints, 0, sizeof(hints)); hints.ai_family = PF_UNSPEC; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_PASSIVE; error = getaddrinfo(addr, port, &hints, ai); free(str); return ((error != 0) ?
1 : 0); } int portal_group_add_listen(struct portal_group *pg, const char *value, bool iser) { struct portal *portal; portal = portal_new(pg); portal->p_listen = checked_strdup(value); portal->p_iser = iser; if (parse_addr_port(portal->p_listen, "3260", &portal->p_ai)) { log_warnx("invalid listen address %s", portal->p_listen); portal_delete(portal); return (1); } /* * XXX: getaddrinfo(3) may return multiple addresses; we should turn * those into multiple portals. */ return (0); } int isns_new(struct conf *conf, const char *addr) { struct isns *isns; isns = calloc(1, sizeof(*isns)); if (isns == NULL) log_err(1, "calloc"); isns->i_conf = conf; TAILQ_INSERT_TAIL(&conf->conf_isns, isns, i_next); isns->i_addr = checked_strdup(addr); if (parse_addr_port(isns->i_addr, "3205", &isns->i_ai)) { log_warnx("invalid iSNS address %s", isns->i_addr); isns_delete(isns); return (1); } /* * XXX: getaddrinfo(3) may return multiple addresses; we should turn * those into multiple servers. */ return (0); } void isns_delete(struct isns *isns) { TAILQ_REMOVE(&isns->i_conf->conf_isns, isns, i_next); free(isns->i_addr); if (isns->i_ai != NULL) freeaddrinfo(isns->i_ai); free(isns); } static int isns_do_connect(struct isns *isns) { int s; s = socket(isns->i_ai->ai_family, isns->i_ai->ai_socktype, isns->i_ai->ai_protocol); if (s < 0) { log_warn("socket(2) failed for %s", isns->i_addr); return (-1); } if (connect(s, isns->i_ai->ai_addr, isns->i_ai->ai_addrlen)) { log_warn("connect(2) failed for %s", isns->i_addr); close(s); return (-1); } return(s); } static int isns_do_register(struct isns *isns, int s, const char *hostname) { struct conf *conf = isns->i_conf; struct target *target; struct portal *portal; struct portal_group *pg; struct isns_req *req; int res = 0; uint32_t error; req = isns_req_create(ISNS_FUNC_DEVATTRREG, ISNS_FLAG_CLIENT); isns_req_add_str(req, 32, TAILQ_FIRST(&conf->conf_targets)->t_name); isns_req_add_delim(req); isns_req_add_str(req, 1, hostname); isns_req_add_32(req, 2, 2); /* 2 -- iSCSI */ isns_req_add_32(req, 6, conf->conf_isns_period); TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { if (pg->pg_unassigned) continue; TAILQ_FOREACH(portal, &pg->pg_portals, p_next) { isns_req_add_addr(req, 16, portal->p_ai); isns_req_add_port(req, 17, portal->p_ai); } } TAILQ_FOREACH(target, &conf->conf_targets, t_next) { isns_req_add_str(req, 32, target->t_name); isns_req_add_32(req, 33, 1); /* 1 -- Target*/ if (target->t_alias != NULL) isns_req_add_str(req, 34, target->t_alias); pg = target->t_portal_group; isns_req_add_32(req, 51, pg->pg_tag); TAILQ_FOREACH(portal, &pg->pg_portals, p_next) { isns_req_add_addr(req, 49, portal->p_ai); isns_req_add_port(req, 50, portal->p_ai); } } res = isns_req_send(s, req); if (res < 0) { log_warn("send(2) failed for %s", isns->i_addr); goto quit; } res = isns_req_receive(s, req); if (res < 0) { log_warn("receive(2) failed for %s", isns->i_addr); goto quit; } error = isns_req_get_status(req); if (error != 0) { log_warnx("iSNS register error %d for %s", error, isns->i_addr); res = -1; } quit: isns_req_free(req); return (res); } static int isns_do_check(struct isns *isns, int s, const char *hostname) { struct conf *conf = isns->i_conf; struct isns_req *req; int res = 0; uint32_t error; req = isns_req_create(ISNS_FUNC_DEVATTRQRY, ISNS_FLAG_CLIENT); isns_req_add_str(req, 32, TAILQ_FIRST(&conf->conf_targets)->t_name); isns_req_add_str(req, 1, hostname); isns_req_add_delim(req); isns_req_add(req, 2, 0, NULL); res = isns_req_send(s, req); if (res < 0) { log_warn("send(2) 
failed for %s", isns->i_addr); goto quit; } res = isns_req_receive(s, req); if (res < 0) { log_warn("receive(2) failed for %s", isns->i_addr); goto quit; } error = isns_req_get_status(req); if (error != 0) { log_warnx("iSNS check error %d for %s", error, isns->i_addr); res = -1; } quit: isns_req_free(req); return (res); } static int isns_do_deregister(struct isns *isns, int s, const char *hostname) { struct conf *conf = isns->i_conf; struct isns_req *req; int res = 0; uint32_t error; req = isns_req_create(ISNS_FUNC_DEVDEREG, ISNS_FLAG_CLIENT); isns_req_add_str(req, 32, TAILQ_FIRST(&conf->conf_targets)->t_name); isns_req_add_delim(req); isns_req_add_str(req, 1, hostname); res = isns_req_send(s, req); if (res < 0) { log_warn("send(2) failed for %s", isns->i_addr); goto quit; } res = isns_req_receive(s, req); if (res < 0) { log_warn("receive(2) failed for %s", isns->i_addr); goto quit; } error = isns_req_get_status(req); if (error != 0) { log_warnx("iSNS deregister error %d for %s", error, isns->i_addr); res = -1; } quit: isns_req_free(req); return (res); } void isns_register(struct isns *isns, struct isns *oldisns) { struct conf *conf = isns->i_conf; int s; char hostname[256]; if (TAILQ_EMPTY(&conf->conf_targets) || TAILQ_EMPTY(&conf->conf_portal_groups)) return; set_timeout(conf->conf_isns_timeout, false); s = isns_do_connect(isns); if (s < 0) { set_timeout(0, false); return; } gethostname(hostname, sizeof(hostname)); if (oldisns == NULL || TAILQ_EMPTY(&oldisns->i_conf->conf_targets)) oldisns = isns; isns_do_deregister(oldisns, s, hostname); isns_do_register(isns, s, hostname); close(s); set_timeout(0, false); } void isns_check(struct isns *isns) { struct conf *conf = isns->i_conf; int s, res; char hostname[256]; if (TAILQ_EMPTY(&conf->conf_targets) || TAILQ_EMPTY(&conf->conf_portal_groups)) return; set_timeout(conf->conf_isns_timeout, false); s = isns_do_connect(isns); if (s < 0) { set_timeout(0, false); return; } gethostname(hostname, sizeof(hostname)); res = isns_do_check(isns, s, hostname); if (res < 0) { isns_do_deregister(isns, s, hostname); isns_do_register(isns, s, hostname); } close(s); set_timeout(0, false); } void isns_deregister(struct isns *isns) { struct conf *conf = isns->i_conf; int s; char hostname[256]; if (TAILQ_EMPTY(&conf->conf_targets) || TAILQ_EMPTY(&conf->conf_portal_groups)) return; set_timeout(conf->conf_isns_timeout, false); s = isns_do_connect(isns); if (s < 0) return; gethostname(hostname, sizeof(hostname)); isns_do_deregister(isns, s, hostname); close(s); set_timeout(0, false); } int portal_group_set_filter(struct portal_group *pg, const char *str) { int filter; if (strcmp(str, "none") == 0) { filter = PG_FILTER_NONE; } else if (strcmp(str, "portal") == 0) { filter = PG_FILTER_PORTAL; } else if (strcmp(str, "portal-name") == 0) { filter = PG_FILTER_PORTAL_NAME; } else if (strcmp(str, "portal-name-auth") == 0) { filter = PG_FILTER_PORTAL_NAME_AUTH; } else { log_warnx("invalid discovery-filter \"%s\" for portal-group " "\"%s\"; valid values are \"none\", \"portal\", " "\"portal-name\", and \"portal-name-auth\"", str, pg->pg_name); return (1); } if (pg->pg_discovery_filter != PG_FILTER_UNKNOWN && pg->pg_discovery_filter != filter) { log_warnx("cannot set discovery-filter to \"%s\" for " "portal-group \"%s\"; already has a different " "value", str, pg->pg_name); return (1); } pg->pg_discovery_filter = filter; return (0); } int portal_group_set_redirection(struct portal_group *pg, const char *addr) { if (pg->pg_redirection != NULL) { log_warnx("cannot set 
redirection to \"%s\" for " "portal-group \"%s\"; already defined", addr, pg->pg_name); return (1); } pg->pg_redirection = checked_strdup(addr); return (0); } static bool valid_hex(const char ch) { switch (ch) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'A': case 'b': case 'B': case 'c': case 'C': case 'd': case 'D': case 'e': case 'E': case 'f': case 'F': return (true); default: return (false); } } bool valid_iscsi_name(const char *name) { int i; if (strlen(name) >= MAX_NAME_LEN) { log_warnx("overlong name for target \"%s\"; max length allowed " "by iSCSI specification is %d characters", name, MAX_NAME_LEN); return (false); } /* * In the cases below, we don't return an error, just in case the admin * was right, and we're wrong. */ if (strncasecmp(name, "iqn.", strlen("iqn.")) == 0) { for (i = strlen("iqn."); name[i] != '\0'; i++) { /* * XXX: We should verify UTF-8 normalisation, as defined * by 3.2.6.2: iSCSI Name Encoding. */ if (isalnum(name[i])) continue; if (name[i] == '-' || name[i] == '.' || name[i] == ':') continue; log_warnx("invalid character \"%c\" in target name " "\"%s\"; allowed characters are letters, digits, " "'-', '.', and ':'", name[i], name); break; } /* * XXX: Check more stuff: valid date and a valid reversed domain. */ } else if (strncasecmp(name, "eui.", strlen("eui.")) == 0) { if (strlen(name) != strlen("eui.") + 16) log_warnx("invalid target name \"%s\"; the \"eui.\" " "should be followed by exactly 16 hexadecimal " "digits", name); for (i = strlen("eui."); name[i] != '\0'; i++) { if (!valid_hex(name[i])) { log_warnx("invalid character \"%c\" in target " "name \"%s\"; allowed characters are 1-9 " "and A-F", name[i], name); break; } } } else if (strncasecmp(name, "naa.", strlen("naa.")) == 0) { if (strlen(name) > strlen("naa.") + 32) log_warnx("invalid target name \"%s\"; the \"naa.\" " "should be followed by at most 32 hexadecimal " "digits", name); for (i = strlen("naa."); name[i] != '\0'; i++) { if (!valid_hex(name[i])) { log_warnx("invalid character \"%c\" in target " "name \"%s\"; allowed characters are 1-9 " "and A-F", name[i], name); break; } } } else { log_warnx("invalid target name \"%s\"; should start with " "either \".iqn\", \"eui.\", or \"naa.\"", name); } return (true); } struct target * target_new(struct conf *conf, const char *name) { struct target *targ; int i, len; targ = target_find(conf, name); if (targ != NULL) { log_warnx("duplicated target \"%s\"", name); return (NULL); } if (valid_iscsi_name(name) == false) { log_warnx("target name \"%s\" is invalid", name); return (NULL); } targ = calloc(1, sizeof(*targ)); if (targ == NULL) log_err(1, "calloc"); targ->t_name = checked_strdup(name); /* * RFC 3722 requires us to normalize the name to lowercase. 
*/ len = strlen(name); for (i = 0; i < len; i++) targ->t_name[i] = tolower(targ->t_name[i]); - TAILQ_INIT(&targ->t_luns); targ->t_conf = conf; TAILQ_INSERT_TAIL(&conf->conf_targets, targ, t_next); return (targ); } void target_delete(struct target *targ) { - struct lun *lun, *tmp; TAILQ_REMOVE(&targ->t_conf->conf_targets, targ, t_next); - TAILQ_FOREACH_SAFE(lun, &targ->t_luns, l_next, tmp) - lun_delete(lun); free(targ->t_name); free(targ->t_redirection); free(targ); } struct target * target_find(struct conf *conf, const char *name) { struct target *targ; TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { if (strcasecmp(targ->t_name, name) == 0) return (targ); } return (NULL); } int target_set_redirection(struct target *target, const char *addr) { if (target->t_redirection != NULL) { log_warnx("cannot set redirection to \"%s\" for " "target \"%s\"; already defined", addr, target->t_name); return (1); } target->t_redirection = checked_strdup(addr); return (0); } +void +target_set_ctl_port(struct target *target, uint32_t value) +{ + + target->t_ctl_port = value; +} + struct lun * -lun_new(struct target *targ, int lun_id) +lun_new(struct conf *conf, const char *name) { struct lun *lun; - lun = lun_find(targ, lun_id); + lun = lun_find(conf, name); if (lun != NULL) { - log_warnx("duplicated lun %d for target \"%s\"", - lun_id, targ->t_name); + log_warnx("duplicated lun \"%s\"", name); return (NULL); } lun = calloc(1, sizeof(*lun)); if (lun == NULL) log_err(1, "calloc"); - lun->l_lun = lun_id; + lun->l_conf = conf; + lun->l_name = checked_strdup(name); TAILQ_INIT(&lun->l_options); - lun->l_target = targ; - TAILQ_INSERT_TAIL(&targ->t_luns, lun, l_next); + TAILQ_INSERT_TAIL(&conf->conf_luns, lun, l_next); return (lun); } void lun_delete(struct lun *lun) { + struct target *targ; struct lun_option *lo, *tmp; + int i; - TAILQ_REMOVE(&lun->l_target->t_luns, lun, l_next); + TAILQ_FOREACH(targ, &lun->l_conf->conf_targets, t_next) { + for (i = 0; i < MAX_LUNS; i++) { + if (targ->t_luns[i] == lun) + targ->t_luns[i] = NULL; + } + } + TAILQ_REMOVE(&lun->l_conf->conf_luns, lun, l_next); TAILQ_FOREACH_SAFE(lo, &lun->l_options, lo_next, tmp) lun_option_delete(lo); + free(lun->l_name); free(lun->l_backend); free(lun->l_device_id); free(lun->l_path); + free(lun->l_scsiname); free(lun->l_serial); free(lun); } struct lun * -lun_find(const struct target *targ, int lun_id) +lun_find(const struct conf *conf, const char *name) { struct lun *lun; - TAILQ_FOREACH(lun, &targ->t_luns, l_next) { - if (lun->l_lun == lun_id) + TAILQ_FOREACH(lun, &conf->conf_luns, l_next) { + if (strcmp(lun->l_name, name) == 0) return (lun); } return (NULL); } void lun_set_backend(struct lun *lun, const char *value) { free(lun->l_backend); lun->l_backend = checked_strdup(value); } void lun_set_blocksize(struct lun *lun, size_t value) { lun->l_blocksize = value; } void lun_set_device_id(struct lun *lun, const char *value) { free(lun->l_device_id); lun->l_device_id = checked_strdup(value); } void lun_set_path(struct lun *lun, const char *value) { free(lun->l_path); lun->l_path = checked_strdup(value); } void +lun_set_scsiname(struct lun *lun, const char *value) +{ + free(lun->l_scsiname); + lun->l_scsiname = checked_strdup(value); +} + +void lun_set_serial(struct lun *lun, const char *value) { free(lun->l_serial); lun->l_serial = checked_strdup(value); } void lun_set_size(struct lun *lun, size_t value) { lun->l_size = value; } void lun_set_ctl_lun(struct lun *lun, uint32_t value) { lun->l_ctl_lun = value; } struct lun_option * 
lun_option_new(struct lun *lun, const char *name, const char *value) { struct lun_option *lo; lo = lun_option_find(lun, name); if (lo != NULL) { - log_warnx("duplicated lun option %s for lun %d, target \"%s\"", - name, lun->l_lun, lun->l_target->t_name); + log_warnx("duplicated lun option \"%s\" for lun \"%s\"", + name, lun->l_name); return (NULL); } lo = calloc(1, sizeof(*lo)); if (lo == NULL) log_err(1, "calloc"); lo->lo_name = checked_strdup(name); lo->lo_value = checked_strdup(value); lo->lo_lun = lun; TAILQ_INSERT_TAIL(&lun->l_options, lo, lo_next); return (lo); } void lun_option_delete(struct lun_option *lo) { TAILQ_REMOVE(&lo->lo_lun->l_options, lo, lo_next); free(lo->lo_name); free(lo->lo_value); free(lo); } struct lun_option * lun_option_find(const struct lun *lun, const char *name) { struct lun_option *lo; TAILQ_FOREACH(lo, &lun->l_options, lo_next) { if (strcmp(lo->lo_name, name) == 0) return (lo); } return (NULL); } void lun_option_set(struct lun_option *lo, const char *value) { free(lo->lo_value); lo->lo_value = checked_strdup(value); } static struct connection * connection_new(struct portal *portal, int fd, const char *host, const struct sockaddr *client_sa) { struct connection *conn; conn = calloc(1, sizeof(*conn)); if (conn == NULL) log_err(1, "calloc"); conn->conn_portal = portal; conn->conn_socket = fd; conn->conn_initiator_addr = checked_strdup(host); memcpy(&conn->conn_initiator_sa, client_sa, client_sa->sa_len); /* * Default values, from RFC 3720, section 12. */ conn->conn_max_data_segment_length = 8192; conn->conn_max_burst_length = 262144; conn->conn_immediate_data = true; return (conn); } #if 0 static void conf_print(struct conf *conf) { struct auth_group *ag; struct auth *auth; struct auth_name *auth_name; struct auth_portal *auth_portal; struct portal_group *pg; struct portal *portal; struct target *targ; struct lun *lun; struct lun_option *lo; TAILQ_FOREACH(ag, &conf->conf_auth_groups, ag_next) { fprintf(stderr, "auth-group %s {\n", ag->ag_name); TAILQ_FOREACH(auth, &ag->ag_auths, a_next) fprintf(stderr, "\t chap-mutual %s %s %s %s\n", auth->a_user, auth->a_secret, auth->a_mutual_user, auth->a_mutual_secret); TAILQ_FOREACH(auth_name, &ag->ag_names, an_next) fprintf(stderr, "\t initiator-name %s\n", auth_name->an_initator_name); TAILQ_FOREACH(auth_portal, &ag->ag_portals, an_next) fprintf(stderr, "\t initiator-portal %s\n", auth_portal->an_initator_portal); fprintf(stderr, "}\n"); } TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { fprintf(stderr, "portal-group %s {\n", pg->pg_name); TAILQ_FOREACH(portal, &pg->pg_portals, p_next) fprintf(stderr, "\t listen %s\n", portal->p_listen); fprintf(stderr, "}\n"); } + TAILQ_FOREACH(lun, &conf->conf_luns, l_next) { + fprintf(stderr, "\tlun %s {\n", lun->l_name); + fprintf(stderr, "\t\tpath %s\n", lun->l_path); + TAILQ_FOREACH(lo, &lun->l_options, lo_next) + fprintf(stderr, "\t\toption %s %s\n", + lo->lo_name, lo->lo_value); + fprintf(stderr, "\t}\n"); + } TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { fprintf(stderr, "target %s {\n", targ->t_name); if (targ->t_alias != NULL) fprintf(stderr, "\t alias %s\n", targ->t_alias); - TAILQ_FOREACH(lun, &targ->t_luns, l_next) { - fprintf(stderr, "\tlun %d {\n", lun->l_lun); - fprintf(stderr, "\t\tpath %s\n", lun->l_path); - TAILQ_FOREACH(lo, &lun->l_options, lo_next) - fprintf(stderr, "\t\toption %s %s\n", - lo->lo_name, lo->lo_value); - fprintf(stderr, "\t}\n"); - } fprintf(stderr, "}\n"); } } #endif static int conf_verify_lun(struct lun *lun) { const struct lun *lun2; - 
const struct target *targ2; if (lun->l_backend == NULL) lun_set_backend(lun, "block"); if (strcmp(lun->l_backend, "block") == 0) { if (lun->l_path == NULL) { - log_warnx("missing path for lun %d, target \"%s\"", - lun->l_lun, lun->l_target->t_name); + log_warnx("missing path for lun \"%s\"", + lun->l_name); return (1); } } else if (strcmp(lun->l_backend, "ramdisk") == 0) { if (lun->l_size == 0) { - log_warnx("missing size for ramdisk-backed lun %d, " - "target \"%s\"", lun->l_lun, lun->l_target->t_name); + log_warnx("missing size for ramdisk-backed lun \"%s\"", + lun->l_name); return (1); } if (lun->l_path != NULL) { log_warnx("path must not be specified " - "for ramdisk-backed lun %d, target \"%s\"", - lun->l_lun, lun->l_target->t_name); + "for ramdisk-backed lun \"%s\"", + lun->l_name); return (1); } } - if (lun->l_lun < 0 || lun->l_lun > 255) { - log_warnx("invalid lun number for lun %d, target \"%s\"; " - "must be between 0 and 255", lun->l_lun, - lun->l_target->t_name); - return (1); - } if (lun->l_blocksize == 0) { lun_set_blocksize(lun, DEFAULT_BLOCKSIZE); } else if (lun->l_blocksize < 0) { - log_warnx("invalid blocksize for lun %d, target \"%s\"; " - "must be larger than 0", lun->l_lun, lun->l_target->t_name); + log_warnx("invalid blocksize for lun \"%s\"; " + "must be larger than 0", lun->l_name); return (1); } if (lun->l_size != 0 && lun->l_size % lun->l_blocksize != 0) { - log_warnx("invalid size for lun %d, target \"%s\"; " - "must be multiple of blocksize", lun->l_lun, - lun->l_target->t_name); + log_warnx("invalid size for lun \"%s\"; " + "must be multiple of blocksize", lun->l_name); return (1); } - TAILQ_FOREACH(targ2, &lun->l_target->t_conf->conf_targets, t_next) { - TAILQ_FOREACH(lun2, &targ2->t_luns, l_next) { - if (lun == lun2) - continue; - if (lun->l_path != NULL && lun2->l_path != NULL && - strcmp(lun->l_path, lun2->l_path) == 0) { - log_debugx("WARNING: path \"%s\" duplicated " - "between lun %d, target \"%s\", and " - "lun %d, target \"%s\"", lun->l_path, - lun->l_lun, lun->l_target->t_name, - lun2->l_lun, lun2->l_target->t_name); - } + TAILQ_FOREACH(lun2, &lun->l_conf->conf_luns, l_next) { + if (lun == lun2) + continue; + if (lun->l_path != NULL && lun2->l_path != NULL && + strcmp(lun->l_path, lun2->l_path) == 0) { + log_debugx("WARNING: path \"%s\" duplicated " + "between lun \"%s\", and " + "lun \"%s\"", lun->l_path, + lun->l_name, lun2->l_name); } } return (0); } int conf_verify(struct conf *conf) { struct auth_group *ag; struct portal_group *pg; struct target *targ; struct lun *lun; bool found; - int error; + int error, i; if (conf->conf_pidfile_path == NULL) conf->conf_pidfile_path = checked_strdup(DEFAULT_PIDFILE); + TAILQ_FOREACH(lun, &conf->conf_luns, l_next) { + error = conf_verify_lun(lun); + if (error != 0) + return (error); + } TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { if (targ->t_auth_group == NULL) { targ->t_auth_group = auth_group_find(conf, "default"); assert(targ->t_auth_group != NULL); } if (targ->t_portal_group == NULL) { targ->t_portal_group = portal_group_find(conf, "default"); assert(targ->t_portal_group != NULL); } found = false; - TAILQ_FOREACH(lun, &targ->t_luns, l_next) { - error = conf_verify_lun(lun); - if (error != 0) - return (error); - found = true; + for (i = 0; i < MAX_LUNS; i++) { + if (targ->t_luns[i] != NULL) + found = true; } if (!found && targ->t_redirection == NULL) { log_warnx("no LUNs defined for target \"%s\"", targ->t_name); } if (found && targ->t_redirection != NULL) { log_debugx("target \"%s\" contains luns, " 
" but configured for redirection", targ->t_name); } } TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { assert(pg->pg_name != NULL); if (pg->pg_discovery_auth_group == NULL) { pg->pg_discovery_auth_group = auth_group_find(conf, "default"); assert(pg->pg_discovery_auth_group != NULL); } if (pg->pg_discovery_filter == PG_FILTER_UNKNOWN) pg->pg_discovery_filter = PG_FILTER_NONE; TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { if (targ->t_portal_group == pg) break; } if (pg->pg_redirection != NULL) { if (targ != NULL) { log_debugx("portal-group \"%s\" assigned " "to target \"%s\", but configured " "for redirection", pg->pg_name, targ->t_name); } pg->pg_unassigned = false; } else if (targ != NULL) { pg->pg_unassigned = false; } else { if (strcmp(pg->pg_name, "default") != 0) log_warnx("portal-group \"%s\" not assigned " "to any target", pg->pg_name); pg->pg_unassigned = true; } } TAILQ_FOREACH(ag, &conf->conf_auth_groups, ag_next) { if (ag->ag_name == NULL) assert(ag->ag_target != NULL); else assert(ag->ag_target == NULL); found = false; TAILQ_FOREACH(targ, &conf->conf_targets, t_next) { if (targ->t_auth_group == ag) { found = true; break; } } TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { if (pg->pg_discovery_auth_group == ag) { found = true; break; } } if (!found && ag->ag_name != NULL && strcmp(ag->ag_name, "default") != 0 && strcmp(ag->ag_name, "no-authentication") != 0 && strcmp(ag->ag_name, "no-access") != 0) { log_warnx("auth-group \"%s\" not assigned " "to any target", ag->ag_name); } } return (0); } static int conf_apply(struct conf *oldconf, struct conf *newconf) { struct target *oldtarg, *newtarg, *tmptarg; struct lun *oldlun, *newlun, *tmplun; struct portal_group *oldpg, *newpg; struct portal *oldp, *newp; struct isns *oldns, *newns; pid_t otherpid; int changed, cumulated_error = 0, error, sockbuf; int one = 1; if (oldconf->conf_debug != newconf->conf_debug) { log_debugx("changing debug level to %d", newconf->conf_debug); log_init(newconf->conf_debug); } if (oldconf->conf_pidfh != NULL) { assert(oldconf->conf_pidfile_path != NULL); if (newconf->conf_pidfile_path != NULL && strcmp(oldconf->conf_pidfile_path, newconf->conf_pidfile_path) == 0) { newconf->conf_pidfh = oldconf->conf_pidfh; oldconf->conf_pidfh = NULL; } else { log_debugx("removing pidfile %s", oldconf->conf_pidfile_path); pidfile_remove(oldconf->conf_pidfh); oldconf->conf_pidfh = NULL; } } if (newconf->conf_pidfh == NULL && newconf->conf_pidfile_path != NULL) { log_debugx("opening pidfile %s", newconf->conf_pidfile_path); newconf->conf_pidfh = pidfile_open(newconf->conf_pidfile_path, 0600, &otherpid); if (newconf->conf_pidfh == NULL) { if (errno == EEXIST) log_errx(1, "daemon already running, pid: %jd.", (intmax_t)otherpid); log_err(1, "cannot open or create pidfile \"%s\"", newconf->conf_pidfile_path); } } /* Deregister on removed iSNS servers. */ TAILQ_FOREACH(oldns, &oldconf->conf_isns, i_next) { TAILQ_FOREACH(newns, &newconf->conf_isns, i_next) { if (strcmp(oldns->i_addr, newns->i_addr) == 0) break; } if (newns == NULL) isns_deregister(oldns); } /* * XXX: If target or lun removal fails, we should somehow "move" * the old lun or target into newconf, so that subsequent * conf_apply() would try to remove them again. That would * be somewhat hairy, though, and lun deletion failures don't * really happen, so leave it as it is for now. */ + /* + * First, remove any targets present in the old configuration + * and missing in the new one. 
+ */ TAILQ_FOREACH_SAFE(oldtarg, &oldconf->conf_targets, t_next, tmptarg) { - /* - * First, remove any targets present in the old configuration - * and missing in the new one. - */ newtarg = target_find(newconf, oldtarg->t_name); - if (newtarg == NULL) { - error = kernel_port_remove(oldtarg); + if (newtarg != NULL) + continue; + error = kernel_port_remove(oldtarg); + if (error != 0) { + log_warnx("failed to remove target %s", + oldtarg->t_name); + /* + * XXX: Uncomment after fixing the root cause. + * + * cumulated_error++; + */ + } + } + + /* + * Second, remove any LUNs present in the old configuration + * and missing in the new one. + */ + TAILQ_FOREACH_SAFE(oldlun, &oldconf->conf_luns, l_next, tmplun) { + newlun = lun_find(newconf, oldlun->l_name); + if (newlun == NULL) { + log_debugx("lun \"%s\", CTL lun %d " + "not found in new configuration; " + "removing", oldlun->l_name, oldlun->l_ctl_lun); + error = kernel_lun_remove(oldlun); if (error != 0) { - log_warnx("failed to remove target %s", - oldtarg->t_name); - /* - * XXX: Uncomment after fixing the root cause. - * - * cumulated_error++; - */ + log_warnx("failed to remove lun \"%s\", " + "CTL lun %d", + oldlun->l_name, oldlun->l_ctl_lun); + cumulated_error++; } - TAILQ_FOREACH_SAFE(oldlun, &oldtarg->t_luns, l_next, - tmplun) { - log_debugx("target %s not found in new " - "configuration; removing its lun %d, " - "backed by CTL lun %d", - oldtarg->t_name, oldlun->l_lun, - oldlun->l_ctl_lun); - error = kernel_lun_remove(oldlun); - if (error != 0) { - log_warnx("failed to remove lun %d, " - "target %s, CTL lun %d", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - cumulated_error++; - } - } continue; } /* - * Second, remove any LUNs present in the old target - * and missing in the new one. + * Also remove the LUNs changed by more than size. 
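+ * ("Changed by more than size" means a different backend, blocksize, + * device-id, path, or serial; a plain size change is handled in + * place later, by resizing the CTL lun.)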
*/ - TAILQ_FOREACH_SAFE(oldlun, &oldtarg->t_luns, l_next, tmplun) { - newlun = lun_find(newtarg, oldlun->l_lun); - if (newlun == NULL) { - log_debugx("lun %d, target %s, CTL lun %d " - "not found in new configuration; " - "removing", oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - error = kernel_lun_remove(oldlun); - if (error != 0) { - log_warnx("failed to remove lun %d, " - "target %s, CTL lun %d", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - cumulated_error++; - } - continue; + changed = 0; + assert(oldlun->l_backend != NULL); + assert(newlun->l_backend != NULL); + if (strcmp(newlun->l_backend, oldlun->l_backend) != 0) { + log_debugx("backend for lun \"%s\", " + "CTL lun %d changed; removing", + oldlun->l_name, oldlun->l_ctl_lun); + changed = 1; + } + if (oldlun->l_blocksize != newlun->l_blocksize) { + log_debugx("blocksize for lun \"%s\", " + "CTL lun %d changed; removing", + oldlun->l_name, oldlun->l_ctl_lun); + changed = 1; + } + if (newlun->l_device_id != NULL && + (oldlun->l_device_id == NULL || + strcmp(oldlun->l_device_id, newlun->l_device_id) != + 0)) { + log_debugx("device-id for lun \"%s\", " + "CTL lun %d changed; removing", + oldlun->l_name, oldlun->l_ctl_lun); + changed = 1; + } + if (newlun->l_path != NULL && + (oldlun->l_path == NULL || + strcmp(oldlun->l_path, newlun->l_path) != 0)) { + log_debugx("path for lun \"%s\", " + "CTL lun %d, changed; removing", + oldlun->l_name, oldlun->l_ctl_lun); + changed = 1; + } + if (newlun->l_serial != NULL && + (oldlun->l_serial == NULL || + strcmp(oldlun->l_serial, newlun->l_serial) != 0)) { + log_debugx("serial for lun \"%s\", " + "CTL lun %d changed; removing", + oldlun->l_name, oldlun->l_ctl_lun); + changed = 1; + } + if (changed) { + error = kernel_lun_remove(oldlun); + if (error != 0) { + log_warnx("failed to remove lun \"%s\", " + "CTL lun %d", + oldlun->l_name, oldlun->l_ctl_lun); + cumulated_error++; } + lun_delete(oldlun); + continue; + } - /* - * Also remove the LUNs changed by more than size. 
- */ - changed = 0; - assert(oldlun->l_backend != NULL); - assert(newlun->l_backend != NULL); - if (strcmp(newlun->l_backend, oldlun->l_backend) != 0) { - log_debugx("backend for lun %d, target %s, " - "CTL lun %d changed; removing", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - changed = 1; - } - if (oldlun->l_blocksize != newlun->l_blocksize) { - log_debugx("blocksize for lun %d, target %s, " - "CTL lun %d changed; removing", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - changed = 1; - } - if (newlun->l_device_id != NULL && - (oldlun->l_device_id == NULL || - strcmp(oldlun->l_device_id, newlun->l_device_id) != - 0)) { - log_debugx("device-id for lun %d, target %s, " - "CTL lun %d changed; removing", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - changed = 1; - } - if (newlun->l_path != NULL && - (oldlun->l_path == NULL || - strcmp(oldlun->l_path, newlun->l_path) != 0)) { - log_debugx("path for lun %d, target %s, " - "CTL lun %d, changed; removing", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - changed = 1; - } - if (newlun->l_serial != NULL && - (oldlun->l_serial == NULL || - strcmp(oldlun->l_serial, newlun->l_serial) != 0)) { - log_debugx("serial for lun %d, target %s, " - "CTL lun %d changed; removing", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); - changed = 1; - } - if (changed) { - error = kernel_lun_remove(oldlun); + lun_set_ctl_lun(newlun, oldlun->l_ctl_lun); + } + + TAILQ_FOREACH_SAFE(newlun, &newconf->conf_luns, l_next, tmplun) { + oldlun = lun_find(oldconf, newlun->l_name); + if (oldlun != NULL) { + if (newlun->l_size != oldlun->l_size || + newlun->l_size == 0) { + log_debugx("resizing lun \"%s\", CTL lun %d", + newlun->l_name, newlun->l_ctl_lun); + error = kernel_lun_resize(newlun); if (error != 0) { - log_warnx("failed to remove lun %d, " - "target %s, CTL lun %d", - oldlun->l_lun, oldtarg->t_name, - oldlun->l_ctl_lun); + log_warnx("failed to " + "resize lun \"%s\", CTL lun %d", + newlun->l_name, + newlun->l_ctl_lun); cumulated_error++; } - lun_delete(oldlun); - continue; } - - lun_set_ctl_lun(newlun, oldlun->l_ctl_lun); + continue; } + log_debugx("adding lun \"%s\"", newlun->l_name); + error = kernel_lun_add(newlun); + if (error != 0) { + log_warnx("failed to add lun \"%s\"", newlun->l_name); + lun_delete(newlun); + cumulated_error++; + } } /* * Now add new targets or modify existing ones. 
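 * (An existing target now keeps its CTL port: the port number is * carried over with target_set_ctl_port() and the port is refreshed * via kernel_port_update() instead of being removed and re-added.)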
*/ TAILQ_FOREACH(newtarg, &newconf->conf_targets, t_next) { oldtarg = target_find(oldconf, newtarg->t_name); - TAILQ_FOREACH_SAFE(newlun, &newtarg->t_luns, l_next, tmplun) { - if (oldtarg != NULL) { - oldlun = lun_find(oldtarg, newlun->l_lun); - if (oldlun != NULL) { - if (newlun->l_size != oldlun->l_size || - newlun->l_size == 0) { - log_debugx("resizing lun %d, " - "target %s, CTL lun %d", - newlun->l_lun, - newtarg->t_name, - newlun->l_ctl_lun); - error = - kernel_lun_resize(newlun); - if (error != 0) { - log_warnx("failed to " - "resize lun %d, " - "target %s, " - "CTL lun %d", - newlun->l_lun, - newtarg->t_name, - newlun->l_lun); - cumulated_error++; - } - } - continue; - } - } - log_debugx("adding lun %d, target %s", - newlun->l_lun, newtarg->t_name); - error = kernel_lun_add(newlun); - if (error != 0) { - log_warnx("failed to add lun %d, target %s", - newlun->l_lun, newtarg->t_name); - lun_delete(newlun); - cumulated_error++; - } - } - if (oldtarg == NULL) { + if (oldtarg == NULL) error = kernel_port_add(newtarg); - if (error != 0) { - log_warnx("failed to add target %s", - newtarg->t_name); - /* - * XXX: Uncomment after fixing the root cause. - * - * cumulated_error++; - */ - } + else { + target_set_ctl_port(newtarg, oldtarg->t_ctl_port); + error = kernel_port_update(newtarg); + } + if (error != 0) { + log_warnx("failed to %s target %s", + (oldtarg == NULL) ? "add" : "update", + newtarg->t_name); + /* + * XXX: Uncomment after fixing the root cause. + * + * cumulated_error++; + */ } } /* * Go through the new portals, opening the sockets as necessary. */ TAILQ_FOREACH(newpg, &newconf->conf_portal_groups, pg_next) { if (newpg->pg_unassigned) { log_debugx("not listening on portal-group \"%s\", " "not assigned to any target", newpg->pg_name); continue; } TAILQ_FOREACH(newp, &newpg->pg_portals, p_next) { /* * Try to find an already open portal and reuse * its listening socket. We don't care which * portal or portal group it belonged to; what * matters is the listening address. */ TAILQ_FOREACH(oldpg, &oldconf->conf_portal_groups, pg_next) { TAILQ_FOREACH(oldp, &oldpg->pg_portals, p_next) { if (strcmp(newp->p_listen, oldp->p_listen) == 0 && oldp->p_socket > 0) { newp->p_socket = oldp->p_socket; oldp->p_socket = 0; break; } } } if (newp->p_socket > 0) { /* * We're done with this portal. 
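 * The listening socket was inherited from the old configuration * above, so there is no need for another socket/bind/listen sequence.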
*/ continue; } #ifdef ICL_KERNEL_PROXY if (proxy_mode) { newpg->pg_conf->conf_portal_id++; newp->p_id = newpg->pg_conf->conf_portal_id; log_debugx("listening on %s, portal-group " "\"%s\", portal id %d, using ICL proxy", newp->p_listen, newpg->pg_name, newp->p_id); kernel_listen(newp->p_ai, newp->p_iser, newp->p_id); continue; } #endif assert(proxy_mode == false); assert(newp->p_iser == false); log_debugx("listening on %s, portal-group \"%s\"", newp->p_listen, newpg->pg_name); newp->p_socket = socket(newp->p_ai->ai_family, newp->p_ai->ai_socktype, newp->p_ai->ai_protocol); if (newp->p_socket < 0) { log_warn("socket(2) failed for %s", newp->p_listen); cumulated_error++; continue; } sockbuf = SOCKBUF_SIZE; if (setsockopt(newp->p_socket, SOL_SOCKET, SO_RCVBUF, &sockbuf, sizeof(sockbuf)) == -1) log_warn("setsockopt(SO_RCVBUF) failed " "for %s", newp->p_listen); sockbuf = SOCKBUF_SIZE; if (setsockopt(newp->p_socket, SOL_SOCKET, SO_SNDBUF, &sockbuf, sizeof(sockbuf)) == -1) log_warn("setsockopt(SO_SNDBUF) failed " "for %s", newp->p_listen); error = setsockopt(newp->p_socket, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)); if (error != 0) { log_warn("setsockopt(SO_REUSEADDR) failed " "for %s", newp->p_listen); close(newp->p_socket); newp->p_socket = 0; cumulated_error++; continue; } error = bind(newp->p_socket, newp->p_ai->ai_addr, newp->p_ai->ai_addrlen); if (error != 0) { log_warn("bind(2) failed for %s", newp->p_listen); close(newp->p_socket); newp->p_socket = 0; cumulated_error++; continue; } error = listen(newp->p_socket, -1); if (error != 0) { log_warn("listen(2) failed for %s", newp->p_listen); close(newp->p_socket); newp->p_socket = 0; cumulated_error++; continue; } } } /* * Go through the no longer used sockets, closing them. */ TAILQ_FOREACH(oldpg, &oldconf->conf_portal_groups, pg_next) { TAILQ_FOREACH(oldp, &oldpg->pg_portals, p_next) { if (oldp->p_socket <= 0) continue; log_debugx("closing socket for %s, portal-group \"%s\"", oldp->p_listen, oldpg->pg_name); close(oldp->p_socket); oldp->p_socket = 0; } } /* (Re-)Register on remaining/new iSNS servers. */ TAILQ_FOREACH(newns, &newconf->conf_isns, i_next) { TAILQ_FOREACH(oldns, &oldconf->conf_isns, i_next) { if (strcmp(oldns->i_addr, newns->i_addr) == 0) break; } isns_register(newns, oldns); } /* Schedule iSNS update */ if (!TAILQ_EMPTY(&newconf->conf_isns)) set_timeout((newconf->conf_isns_period + 2) / 3, false); return (cumulated_error); } bool timed_out(void) { return (sigalrm_received); } static void sigalrm_handler_fatal(int dummy __unused) { /* * It would be easiest to just log an error and exit. We can't * do this, though, because log_errx() is not signal safe, since * it calls syslog(3). Instead, set a flag checked by pdu_send() * and pdu_receive(), to call log_errx() there. Should they fail * to notice, we'll exit here one second later. */ if (sigalrm_received) { /* * Oh well. Just give up and quit. 
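 * Note that _exit(2) is async-signal-safe, unlike exit(3) * or log_errx().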
_exit(2); } sigalrm_received = true; } static void sigalrm_handler(int dummy __unused) { sigalrm_received = true; } void set_timeout(int timeout, int fatal) { struct sigaction sa; struct itimerval itv; int error; if (timeout <= 0) { log_debugx("session timeout disabled"); bzero(&itv, sizeof(itv)); error = setitimer(ITIMER_REAL, &itv, NULL); if (error != 0) log_err(1, "setitimer"); sigalrm_received = false; return; } sigalrm_received = false; bzero(&sa, sizeof(sa)); if (fatal) sa.sa_handler = sigalrm_handler_fatal; else sa.sa_handler = sigalrm_handler; sigfillset(&sa.sa_mask); error = sigaction(SIGALRM, &sa, NULL); if (error != 0) log_err(1, "sigaction"); /* * First SIGALRM will arrive after timeout seconds. * If we do nothing, another one will arrive a second later. */ log_debugx("setting session timeout to %d seconds", timeout); bzero(&itv, sizeof(itv)); itv.it_interval.tv_sec = 1; itv.it_value.tv_sec = timeout; error = setitimer(ITIMER_REAL, &itv, NULL); if (error != 0) log_err(1, "setitimer"); } static int wait_for_children(bool block) { pid_t pid; int status; int num = 0; for (;;) { /* * If "block" is true, wait for at least one process. */ if (block && num == 0) pid = wait4(-1, &status, 0, NULL); else pid = wait4(-1, &status, WNOHANG, NULL); if (pid <= 0) break; if (WIFSIGNALED(status)) { log_warnx("child process %d terminated with signal %d", pid, WTERMSIG(status)); } else if (WEXITSTATUS(status) != 0) { log_warnx("child process %d terminated with exit status %d", pid, WEXITSTATUS(status)); } else { log_debugx("child process %d terminated gracefully", pid); } num++; } return (num); } static void handle_connection(struct portal *portal, int fd, const struct sockaddr *client_sa, bool dont_fork) { struct connection *conn; int error; pid_t pid; char host[NI_MAXHOST + 1]; struct conf *conf; conf = portal->p_portal_group->pg_conf; if (dont_fork) { log_debugx("incoming connection; not forking due to -d flag"); } else { nchildren -= wait_for_children(false); assert(nchildren >= 0); while (conf->conf_maxproc > 0 && nchildren >= conf->conf_maxproc) { log_debugx("maxproc limit of %d child processes hit; " "waiting for child process to exit", conf->conf_maxproc); nchildren -= wait_for_children(true); assert(nchildren >= 0); } log_debugx("incoming connection; forking child process #%d", nchildren); nchildren++; pid = fork(); if (pid < 0) log_err(1, "fork"); if (pid > 0) { close(fd); return; } } pidfile_close(conf->conf_pidfh); error = getnameinfo(client_sa, client_sa->sa_len, host, sizeof(host), NULL, 0, NI_NUMERICHOST); if (error != 0) log_errx(1, "getnameinfo: %s", gai_strerror(error)); log_debugx("accepted connection from %s; portal group \"%s\"", host, portal->p_portal_group->pg_name); log_set_peer_addr(host); setproctitle("%s", host); conn = connection_new(portal, fd, host, client_sa); set_timeout(conf->conf_timeout, true); kernel_capsicate(); login(conn); if (conn->conn_session_type == CONN_SESSION_TYPE_NORMAL) { kernel_handoff(conn); log_debugx("connection handed off to the kernel"); } else { assert(conn->conn_session_type == CONN_SESSION_TYPE_DISCOVERY); discovery(conn); } log_debugx("nothing more to do; exiting"); exit(0); } static int fd_add(int fd, fd_set *fdset, int nfds) { /* * Skip sockets which we failed to bind. 
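 * The return value tracks the highest descriptor seen so far; * main_loop() passes it, plus one, as the first argument to select(2).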
*/ if (fd <= 0) return (nfds); FD_SET(fd, fdset); if (fd > nfds) nfds = fd; return (nfds); } static void main_loop(struct conf *conf, bool dont_fork) { struct portal_group *pg; struct portal *portal; struct sockaddr_storage client_sa; socklen_t client_salen; #ifdef ICL_KERNEL_PROXY int connection_id; int portal_id; #endif fd_set fdset; int error, nfds, client_fd; pidfile_write(conf->conf_pidfh); for (;;) { if (sighup_received || sigterm_received || timed_out()) return; #ifdef ICL_KERNEL_PROXY if (proxy_mode) { client_salen = sizeof(client_sa); kernel_accept(&connection_id, &portal_id, (struct sockaddr *)&client_sa, &client_salen); assert(client_salen >= client_sa.ss_len); log_debugx("incoming connection, id %d, portal id %d", connection_id, portal_id); TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { TAILQ_FOREACH(portal, &pg->pg_portals, p_next) { if (portal->p_id == portal_id) { goto found; } } } log_errx(1, "kernel returned invalid portal_id %d", portal_id); found: handle_connection(portal, connection_id, (struct sockaddr *)&client_sa, dont_fork); } else { #endif assert(proxy_mode == false); FD_ZERO(&fdset); nfds = 0; TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { TAILQ_FOREACH(portal, &pg->pg_portals, p_next) nfds = fd_add(portal->p_socket, &fdset, nfds); } error = select(nfds + 1, &fdset, NULL, NULL, NULL); if (error <= 0) { if (errno == EINTR) return; log_err(1, "select"); } TAILQ_FOREACH(pg, &conf->conf_portal_groups, pg_next) { TAILQ_FOREACH(portal, &pg->pg_portals, p_next) { if (!FD_ISSET(portal->p_socket, &fdset)) continue; client_salen = sizeof(client_sa); client_fd = accept(portal->p_socket, (struct sockaddr *)&client_sa, &client_salen); if (client_fd < 0) log_err(1, "accept"); assert(client_salen >= client_sa.ss_len); handle_connection(portal, client_fd, (struct sockaddr *)&client_sa, dont_fork); break; } } #ifdef ICL_KERNEL_PROXY } #endif } } static void sighup_handler(int dummy __unused) { sighup_received = true; } static void sigterm_handler(int dummy __unused) { sigterm_received = true; } static void sigchld_handler(int dummy __unused) { /* * The only purpose of this handler is to make SIGCHLD * interrupt the ISCSIDWAIT ioctl(2), so we can call * wait_for_children(). 
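 * An empty handler is required here: if SIGCHLD were simply set * to SIG_IGN, it would not interrupt system calls with EINTR.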
*/ } static void register_signals(void) { struct sigaction sa; int error; bzero(&sa, sizeof(sa)); sa.sa_handler = sighup_handler; sigfillset(&sa.sa_mask); error = sigaction(SIGHUP, &sa, NULL); if (error != 0) log_err(1, "sigaction"); sa.sa_handler = sigterm_handler; error = sigaction(SIGTERM, &sa, NULL); if (error != 0) log_err(1, "sigaction"); sa.sa_handler = sigterm_handler; error = sigaction(SIGINT, &sa, NULL); if (error != 0) log_err(1, "sigaction"); sa.sa_handler = sigchld_handler; error = sigaction(SIGCHLD, &sa, NULL); if (error != 0) log_err(1, "sigaction"); } int main(int argc, char **argv) { struct conf *oldconf, *newconf, *tmpconf; struct isns *newns; const char *config_path = DEFAULT_CONFIG_PATH; int debug = 0, ch, error; bool dont_daemonize = false; while ((ch = getopt(argc, argv, "df:R")) != -1) { switch (ch) { case 'd': dont_daemonize = true; debug++; break; case 'f': config_path = optarg; break; case 'R': #ifndef ICL_KERNEL_PROXY log_errx(1, "ctld(8) compiled without ICL_KERNEL_PROXY " "does not support iSER protocol"); #endif proxy_mode = true; break; case '?': default: usage(); } } argc -= optind; if (argc != 0) usage(); log_init(debug); kernel_init(); oldconf = conf_new_from_kernel(); newconf = conf_new_from_file(config_path); if (newconf == NULL) log_errx(1, "configuration error; exiting"); if (debug > 0) { oldconf->conf_debug = debug; newconf->conf_debug = debug; } error = conf_apply(oldconf, newconf); if (error != 0) log_errx(1, "failed to apply configuration; exiting"); conf_delete(oldconf); oldconf = NULL; register_signals(); if (dont_daemonize == false) { log_debugx("daemonizing"); if (daemon(0, 0) == -1) { log_warn("cannot daemonize"); pidfile_remove(newconf->conf_pidfh); exit(1); } } /* Schedule iSNS update */ if (!TAILQ_EMPTY(&newconf->conf_isns)) set_timeout((newconf->conf_isns_period + 2) / 3, false); for (;;) { main_loop(newconf, dont_daemonize); if (sighup_received) { sighup_received = false; log_debugx("received SIGHUP, reloading configuration"); tmpconf = conf_new_from_file(config_path); if (tmpconf == NULL) { log_warnx("configuration error, " "continuing with old configuration"); } else { if (debug > 0) tmpconf->conf_debug = debug; oldconf = newconf; newconf = tmpconf; error = conf_apply(oldconf, newconf); if (error != 0) log_warnx("failed to reload " "configuration"); conf_delete(oldconf); oldconf = NULL; } } else if (sigterm_received) { log_debugx("exiting on signal; " "reloading empty configuration"); log_debugx("disabling CTL iSCSI port " "and terminating all connections"); oldconf = newconf; newconf = conf_new(); if (debug > 0) newconf->conf_debug = debug; error = conf_apply(oldconf, newconf); if (error != 0) log_warnx("failed to apply configuration"); conf_delete(oldconf); oldconf = NULL; log_warnx("exiting on signal"); exit(0); } else { nchildren -= wait_for_children(false); assert(nchildren >= 0); if (timed_out()) { set_timeout(0, false); TAILQ_FOREACH(newns, &newconf->conf_isns, i_next) isns_check(newns); /* Schedule iSNS update */ if (!TAILQ_EMPTY(&newconf->conf_isns)) { set_timeout((newconf->conf_isns_period + 2) / 3, false); } } } } /* NOTREACHED */ } Index: stable/10/usr.sbin/ctld/ctld.h =================================================================== --- stable/10/usr.sbin/ctld/ctld.h (revision 279001) +++ stable/10/usr.sbin/ctld/ctld.h (revision 279002) @@ -1,398 +1,407 @@ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. 
* * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef CTLD_H #define CTLD_H #include #ifdef ICL_KERNEL_PROXY #include #endif #include #include #include #include #define DEFAULT_CONFIG_PATH "/etc/ctl.conf" #define DEFAULT_PIDFILE "/var/run/ctld.pid" #define DEFAULT_BLOCKSIZE 512 +#define MAX_LUNS 1024 #define MAX_NAME_LEN 223 #define MAX_DATA_SEGMENT_LENGTH (128 * 1024) #define MAX_BURST_LENGTH 16776192 #define SOCKBUF_SIZE 1048576 struct auth { TAILQ_ENTRY(auth) a_next; struct auth_group *a_auth_group; char *a_user; char *a_secret; char *a_mutual_user; char *a_mutual_secret; }; struct auth_name { TAILQ_ENTRY(auth_name) an_next; struct auth_group *an_auth_group; char *an_initator_name; }; struct auth_portal { TAILQ_ENTRY(auth_portal) ap_next; struct auth_group *ap_auth_group; char *ap_initator_portal; struct sockaddr_storage ap_sa; int ap_mask; }; #define AG_TYPE_UNKNOWN 0 #define AG_TYPE_DENY 1 #define AG_TYPE_NO_AUTHENTICATION 2 #define AG_TYPE_CHAP 3 #define AG_TYPE_CHAP_MUTUAL 4 struct auth_group { TAILQ_ENTRY(auth_group) ag_next; struct conf *ag_conf; char *ag_name; struct target *ag_target; int ag_type; TAILQ_HEAD(, auth) ag_auths; TAILQ_HEAD(, auth_name) ag_names; TAILQ_HEAD(, auth_portal) ag_portals; }; struct portal { TAILQ_ENTRY(portal) p_next; struct portal_group *p_portal_group; bool p_iser; char *p_listen; struct addrinfo *p_ai; #ifdef ICL_KERNEL_PROXY int p_id; #endif TAILQ_HEAD(, target) p_targets; int p_socket; }; #define PG_FILTER_UNKNOWN 0 #define PG_FILTER_NONE 1 #define PG_FILTER_PORTAL 2 #define PG_FILTER_PORTAL_NAME 3 #define PG_FILTER_PORTAL_NAME_AUTH 4 struct portal_group { TAILQ_ENTRY(portal_group) pg_next; struct conf *pg_conf; char *pg_name; struct auth_group *pg_discovery_auth_group; int pg_discovery_filter; bool pg_unassigned; TAILQ_HEAD(, portal) pg_portals; char *pg_redirection; uint16_t pg_tag; }; struct lun_option { TAILQ_ENTRY(lun_option) lo_next; struct lun *lo_lun; char *lo_name; char *lo_value; }; struct lun { TAILQ_ENTRY(lun) l_next; + struct conf *l_conf; TAILQ_HEAD(, lun_option) l_options; - struct target *l_target; - int l_lun; + char *l_name; char *l_backend; int l_blocksize; char *l_device_id; char *l_path; + char *l_scsiname; char 
*l_serial; int64_t l_size; int l_ctl_lun; }; struct target { TAILQ_ENTRY(target) t_next; - TAILQ_HEAD(, lun) t_luns; struct conf *t_conf; + struct lun *t_luns[MAX_LUNS]; struct auth_group *t_auth_group; struct portal_group *t_portal_group; char *t_name; char *t_alias; char *t_redirection; + + uint32_t t_ctl_port; }; struct isns { TAILQ_ENTRY(isns) i_next; struct conf *i_conf; char *i_addr; struct addrinfo *i_ai; }; struct conf { char *conf_pidfile_path; + TAILQ_HEAD(, lun) conf_luns; TAILQ_HEAD(, target) conf_targets; TAILQ_HEAD(, auth_group) conf_auth_groups; TAILQ_HEAD(, portal_group) conf_portal_groups; TAILQ_HEAD(, isns) conf_isns; int conf_isns_period; int conf_isns_timeout; int conf_debug; int conf_timeout; int conf_maxproc; uint16_t conf_last_portal_group_tag; #ifdef ICL_KERNEL_PROXY int conf_portal_id; #endif struct pidfh *conf_pidfh; bool conf_default_pg_defined; bool conf_default_ag_defined; bool conf_kernel_port_on; }; #define CONN_SESSION_TYPE_NONE 0 #define CONN_SESSION_TYPE_DISCOVERY 1 #define CONN_SESSION_TYPE_NORMAL 2 #define CONN_DIGEST_NONE 0 #define CONN_DIGEST_CRC32C 1 struct connection { struct portal *conn_portal; struct target *conn_target; int conn_socket; int conn_session_type; char *conn_initiator_name; char *conn_initiator_addr; char *conn_initiator_alias; uint8_t conn_initiator_isid[6]; struct sockaddr_storage conn_initiator_sa; uint32_t conn_cmdsn; uint32_t conn_statsn; size_t conn_max_data_segment_length; size_t conn_max_burst_length; int conn_immediate_data; int conn_header_digest; int conn_data_digest; const char *conn_user; struct chap *conn_chap; }; struct pdu { struct connection *pdu_connection; struct iscsi_bhs *pdu_bhs; char *pdu_data; size_t pdu_data_len; }; #define KEYS_MAX 1024 struct keys { char *keys_names[KEYS_MAX]; char *keys_values[KEYS_MAX]; char *keys_data; size_t keys_data_len; }; #define CHAP_CHALLENGE_LEN 1024 struct chap { unsigned char chap_id; char chap_challenge[CHAP_CHALLENGE_LEN]; char chap_response[MD5_DIGEST_LENGTH]; }; struct rchap { char *rchap_secret; unsigned char rchap_id; void *rchap_challenge; size_t rchap_challenge_len; }; struct chap *chap_new(void); char *chap_get_id(const struct chap *chap); char *chap_get_challenge(const struct chap *chap); int chap_receive(struct chap *chap, const char *response); int chap_authenticate(struct chap *chap, const char *secret); void chap_delete(struct chap *chap); struct rchap *rchap_new(const char *secret); int rchap_receive(struct rchap *rchap, const char *id, const char *challenge); char *rchap_get_response(struct rchap *rchap); void rchap_delete(struct rchap *rchap); struct conf *conf_new(void); struct conf *conf_new_from_file(const char *path); struct conf *conf_new_from_kernel(void); void conf_delete(struct conf *conf); int conf_verify(struct conf *conf); struct auth_group *auth_group_new(struct conf *conf, const char *name); void auth_group_delete(struct auth_group *ag); struct auth_group *auth_group_find(const struct conf *conf, const char *name); int auth_group_set_type(struct auth_group *ag, const char *type); const struct auth *auth_new_chap(struct auth_group *ag, const char *user, const char *secret); const struct auth *auth_new_chap_mutual(struct auth_group *ag, const char *user, const char *secret, const char *user2, const char *secret2); const struct auth *auth_find(const struct auth_group *ag, const char *user); const struct auth_name *auth_name_new(struct auth_group *ag, const char *initiator_name); bool auth_name_defined(const struct auth_group *ag); const struct 
auth_name *auth_name_find(const struct auth_group *ag, const char *initiator_name); int auth_name_check(const struct auth_group *ag, const char *initiator_name); const struct auth_portal *auth_portal_new(struct auth_group *ag, const char *initiator_portal); bool auth_portal_defined(const struct auth_group *ag); const struct auth_portal *auth_portal_find(const struct auth_group *ag, const struct sockaddr_storage *sa); int auth_portal_check(const struct auth_group *ag, const struct sockaddr_storage *sa); struct portal_group *portal_group_new(struct conf *conf, const char *name); void portal_group_delete(struct portal_group *pg); struct portal_group *portal_group_find(const struct conf *conf, const char *name); int portal_group_add_listen(struct portal_group *pg, const char *listen, bool iser); int portal_group_set_filter(struct portal_group *pg, const char *filter); int portal_group_set_redirection(struct portal_group *pg, const char *addr); int isns_new(struct conf *conf, const char *addr); void isns_delete(struct isns *is); void isns_register(struct isns *isns, struct isns *oldisns); void isns_check(struct isns *isns); void isns_deregister(struct isns *isns); struct target *target_new(struct conf *conf, const char *name); void target_delete(struct target *target); struct target *target_find(struct conf *conf, const char *name); int target_set_redirection(struct target *target, const char *addr); +void target_set_ctl_port(struct target *target, + uint32_t value); -struct lun *lun_new(struct target *target, int lun_id); +struct lun *lun_new(struct conf *conf, const char *name); void lun_delete(struct lun *lun); -struct lun *lun_find(const struct target *target, int lun_id); +struct lun *lun_find(const struct conf *conf, const char *name); void lun_set_backend(struct lun *lun, const char *value); void lun_set_blocksize(struct lun *lun, size_t value); void lun_set_device_id(struct lun *lun, const char *value); void lun_set_path(struct lun *lun, const char *value); +void lun_set_scsiname(struct lun *lun, const char *value); void lun_set_serial(struct lun *lun, const char *value); void lun_set_size(struct lun *lun, size_t value); void lun_set_ctl_lun(struct lun *lun, uint32_t value); struct lun_option *lun_option_new(struct lun *lun, const char *name, const char *value); void lun_option_delete(struct lun_option *clo); struct lun_option *lun_option_find(const struct lun *lun, const char *name); void lun_option_set(struct lun_option *clo, const char *value); void kernel_init(void); int kernel_lun_add(struct lun *lun); int kernel_lun_resize(struct lun *lun); int kernel_lun_remove(struct lun *lun); void kernel_handoff(struct connection *conn); int kernel_port_add(struct target *targ); +int kernel_port_update(struct target *targ); int kernel_port_remove(struct target *targ); void kernel_capsicate(void); #ifdef ICL_KERNEL_PROXY void kernel_listen(struct addrinfo *ai, bool iser, int portal_id); void kernel_accept(int *connection_id, int *portal_id, struct sockaddr *client_sa, socklen_t *client_salen); void kernel_send(struct pdu *pdu); void kernel_receive(struct pdu *pdu); #endif struct keys *keys_new(void); void keys_delete(struct keys *keys); void keys_load(struct keys *keys, const struct pdu *pdu); void keys_save(struct keys *keys, struct pdu *pdu); const char *keys_find(struct keys *keys, const char *name); int keys_find_int(struct keys *keys, const char *name); void keys_add(struct keys *keys, const char *name, const char *value); void keys_add_int(struct keys *keys, const char *name, int value); 
struct pdu *pdu_new(struct connection *conn); struct pdu *pdu_new_response(struct pdu *request); void pdu_delete(struct pdu *pdu); void pdu_receive(struct pdu *request); void pdu_send(struct pdu *response); void login(struct connection *conn); void discovery(struct connection *conn); void log_init(int level); void log_set_peer_name(const char *name); void log_set_peer_addr(const char *addr); void log_err(int, const char *, ...) __dead2 __printflike(2, 3); void log_errx(int, const char *, ...) __dead2 __printflike(2, 3); void log_warn(const char *, ...) __printflike(1, 2); void log_warnx(const char *, ...) __printflike(1, 2); void log_debugx(const char *, ...) __printflike(1, 2); char *checked_strdup(const char *); bool valid_iscsi_name(const char *name); void set_timeout(int timeout, int fatal); bool timed_out(void); #endif /* !CTLD_H */ Index: stable/10/usr.sbin/ctld/kernel.c =================================================================== --- stable/10/usr.sbin/ctld/kernel.c (revision 279001) +++ stable/10/usr.sbin/ctld/kernel.c (revision 279002) @@ -1,1080 +1,1081 @@ /*- * Copyright (c) 2003, 2004 Silicon Graphics International Corp. * Copyright (c) 1997-2007 Kenneth D. Merry * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Edward Tomasz Napierala * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ctld.h" #ifdef ICL_KERNEL_PROXY #include #endif extern bool proxy_mode; static int ctl_fd = 0; void kernel_init(void) { int retval, saved_errno; ctl_fd = open(CTL_DEFAULT_DEV, O_RDWR); if (ctl_fd < 0 && errno == ENOENT) { saved_errno = errno; retval = kldload("ctl"); if (retval != -1) ctl_fd = open(CTL_DEFAULT_DEV, O_RDWR); else errno = saved_errno; } if (ctl_fd < 0) log_err(1, "failed to open %s", CTL_DEFAULT_DEV); } /* * Name/value pair used for per-LUN attributes. 
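 * Any XML element the devlist parser does not recognize explicitly * ends up here; see the fallback case at the end of cctl_end_element().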
*/ struct cctl_lun_nv { char *name; char *value; STAILQ_ENTRY(cctl_lun_nv) links; }; /* * Backend LUN information. */ struct cctl_lun { uint64_t lun_id; char *backend_type; uint64_t size_blocks; uint32_t blocksize; char *serial_number; char *device_id; - char *cfiscsi_target; - int cfiscsi_lun; + char *ctld_name; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_lun) links; }; struct cctl_port { uint32_t port_id; int cfiscsi_status; char *cfiscsi_target; uint16_t cfiscsi_portal_group_tag; STAILQ_HEAD(,cctl_lun_nv) attr_list; STAILQ_ENTRY(cctl_port) links; }; struct cctl_devlist_data { int num_luns; STAILQ_HEAD(,cctl_lun) lun_list; struct cctl_lun *cur_lun; int num_ports; STAILQ_HEAD(,cctl_port) port_list; struct cctl_port *cur_port; int level; struct sbuf *cur_sb[32]; }; static void cctl_start_element(void *user_data, const char *name, const char **attr) { int i; struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; devlist->level++; if ((u_int)devlist->level >= (sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0]))) log_errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0])); devlist->cur_sb[devlist->level] = sbuf_new_auto(); if (devlist->cur_sb[devlist->level] == NULL) log_err(1, "%s: unable to allocate sbuf", __func__); if (strcmp(name, "lun") == 0) { if (cur_lun != NULL) log_errx(1, "%s: improper lun element nesting", __func__); cur_lun = calloc(1, sizeof(*cur_lun)); if (cur_lun == NULL) log_err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_lun)); devlist->num_luns++; devlist->cur_lun = cur_lun; STAILQ_INIT(&cur_lun->attr_list); STAILQ_INSERT_TAIL(&devlist->lun_list, cur_lun, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_lun->lun_id = strtoull(attr[i+1], NULL, 0); } else { log_errx(1, "%s: invalid LUN attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_end_element(void *user_data, const char *name) { struct cctl_devlist_data *devlist; struct cctl_lun *cur_lun; char *str; devlist = (struct cctl_devlist_data *)user_data; cur_lun = devlist->cur_lun; if ((cur_lun == NULL) && (strcmp(name, "ctllunlist") != 0)) log_errx(1, "%s: cur_lun == NULL! (name = %s)", __func__, name); if (devlist->cur_sb[devlist->level] == NULL) log_errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, devlist->level, name); sbuf_finish(devlist->cur_sb[devlist->level]); str = checked_strdup(sbuf_data(devlist->cur_sb[devlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(devlist->cur_sb[devlist->level]); devlist->cur_sb[devlist->level] = NULL; devlist->level--; if (strcmp(name, "backend_type") == 0) { cur_lun->backend_type = str; str = NULL; } else if (strcmp(name, "size") == 0) { cur_lun->size_blocks = strtoull(str, NULL, 0); } else if (strcmp(name, "blocksize") == 0) { cur_lun->blocksize = strtoul(str, NULL, 0); } else if (strcmp(name, "serial_number") == 0) { cur_lun->serial_number = str; str = NULL; } else if (strcmp(name, "device_id") == 0) { cur_lun->device_id = str; str = NULL; - } else if (strcmp(name, "cfiscsi_target") == 0) { - cur_lun->cfiscsi_target = str; + } else if (strcmp(name, "ctld_name") == 0) { + cur_lun->ctld_name = str; str = NULL; - } else if (strcmp(name, "cfiscsi_lun") == 0) { - cur_lun->cfiscsi_lun = strtoul(str, NULL, 0); } else if (strcmp(name, "lun") == 0) { devlist->cur_lun = NULL; } else if (strcmp(name, "ctllunlist") == 0) { /* Nothing. 
*/ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) log_err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); nv->name = checked_strdup(name); nv->value = str; str = NULL; STAILQ_INSERT_TAIL(&cur_lun->attr_list, nv, links); } free(str); } static void cctl_start_pelement(void *user_data, const char *name, const char **attr) { int i; struct cctl_devlist_data *devlist; struct cctl_port *cur_port; devlist = (struct cctl_devlist_data *)user_data; cur_port = devlist->cur_port; devlist->level++; if ((u_int)devlist->level >= (sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0]))) log_errx(1, "%s: too many nesting levels, %zd max", __func__, sizeof(devlist->cur_sb) / sizeof(devlist->cur_sb[0])); devlist->cur_sb[devlist->level] = sbuf_new_auto(); if (devlist->cur_sb[devlist->level] == NULL) log_err(1, "%s: unable to allocate sbuf", __func__); if (strcmp(name, "targ_port") == 0) { if (cur_port != NULL) log_errx(1, "%s: improper port element nesting (%s)", __func__, name); cur_port = calloc(1, sizeof(*cur_port)); if (cur_port == NULL) log_err(1, "%s: cannot allocate %zd bytes", __func__, sizeof(*cur_port)); devlist->num_ports++; devlist->cur_port = cur_port; STAILQ_INIT(&cur_port->attr_list); STAILQ_INSERT_TAIL(&devlist->port_list, cur_port, links); for (i = 0; attr[i] != NULL; i += 2) { if (strcmp(attr[i], "id") == 0) { cur_port->port_id = strtoul(attr[i+1], NULL, 0); } else { log_errx(1, "%s: invalid LUN attribute %s = %s", __func__, attr[i], attr[i+1]); } } } } static void cctl_end_pelement(void *user_data, const char *name) { struct cctl_devlist_data *devlist; struct cctl_port *cur_port; char *str; devlist = (struct cctl_devlist_data *)user_data; cur_port = devlist->cur_port; if ((cur_port == NULL) && (strcmp(name, "ctlportlist") != 0)) log_errx(1, "%s: cur_port == NULL! (name = %s)", __func__, name); if (devlist->cur_sb[devlist->level] == NULL) log_errx(1, "%s: no valid sbuf at level %d (name %s)", __func__, devlist->level, name); sbuf_finish(devlist->cur_sb[devlist->level]); str = checked_strdup(sbuf_data(devlist->cur_sb[devlist->level])); if (strlen(str) == 0) { free(str); str = NULL; } sbuf_delete(devlist->cur_sb[devlist->level]); devlist->cur_sb[devlist->level] = NULL; devlist->level--; if (strcmp(name, "cfiscsi_target") == 0) { cur_port->cfiscsi_target = str; str = NULL; } else if (strcmp(name, "cfiscsi_status") == 0) { cur_port->cfiscsi_status = strtoul(str, NULL, 0); } else if (strcmp(name, "cfiscsi_portal_group_tag") == 0) { cur_port->cfiscsi_portal_group_tag = strtoul(str, NULL, 0); } else if (strcmp(name, "targ_port") == 0) { devlist->cur_port = NULL; } else if (strcmp(name, "ctlportlist") == 0) { /* Nothing. 
*/ } else { struct cctl_lun_nv *nv; nv = calloc(1, sizeof(*nv)); if (nv == NULL) log_err(1, "%s: can't allocate %zd bytes for nv pair", __func__, sizeof(*nv)); nv->name = checked_strdup(name); nv->value = str; str = NULL; STAILQ_INSERT_TAIL(&cur_port->attr_list, nv, links); } free(str); } static void cctl_char_handler(void *user_data, const XML_Char *str, int len) { struct cctl_devlist_data *devlist; devlist = (struct cctl_devlist_data *)user_data; sbuf_bcat(devlist->cur_sb[devlist->level], str, len); } struct conf * conf_new_from_kernel(void) { struct conf *conf = NULL; struct target *targ; struct lun *cl; struct lun_option *lo; struct ctl_lun_list list; struct cctl_devlist_data devlist; struct cctl_lun *lun; struct cctl_port *port; XML_Parser parser; char *str; int len, retval; bzero(&devlist, sizeof(devlist)); STAILQ_INIT(&devlist.lun_list); STAILQ_INIT(&devlist.port_list); log_debugx("obtaining previously configured CTL luns from the kernel"); str = NULL; len = 4096; retry: str = realloc(str, len); if (str == NULL) log_err(1, "realloc"); bzero(&list, sizeof(list)); list.alloc_len = len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = str; if (ioctl(ctl_fd, CTL_LUN_LIST, &list) == -1) { log_warn("error issuing CTL_LUN_LIST ioctl"); free(str); return (NULL); } if (list.status == CTL_LUN_LIST_ERROR) { log_warnx("error returned from CTL_LUN_LIST ioctl: %s", list.error_str); free(str); return (NULL); } if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { len = len << 1; goto retry; } parser = XML_ParserCreate(NULL); if (parser == NULL) { log_warnx("unable to create XML parser"); free(str); return (NULL); } XML_SetUserData(parser, &devlist); XML_SetElementHandler(parser, cctl_start_element, cctl_end_element); XML_SetCharacterDataHandler(parser, cctl_char_handler); retval = XML_Parse(parser, str, strlen(str), 1); XML_ParserFree(parser); free(str); if (retval != 1) { log_warnx("XML_Parse failed"); return (NULL); } str = NULL; len = 4096; retry_port: str = realloc(str, len); if (str == NULL) log_err(1, "realloc"); bzero(&list, sizeof(list)); list.alloc_len = len; list.status = CTL_LUN_LIST_NONE; list.lun_xml = str; if (ioctl(ctl_fd, CTL_PORT_LIST, &list) == -1) { log_warn("error issuing CTL_PORT_LIST ioctl"); free(str); return (NULL); } if (list.status == CTL_PORT_LIST_ERROR) { log_warnx("error returned from CTL_PORT_LIST ioctl: %s", list.error_str); free(str); return (NULL); } if (list.status == CTL_LUN_LIST_NEED_MORE_SPACE) { len = len << 1; goto retry_port; } parser = XML_ParserCreate(NULL); if (parser == NULL) { log_warnx("unable to create XML parser"); free(str); return (NULL); } XML_SetUserData(parser, &devlist); XML_SetElementHandler(parser, cctl_start_pelement, cctl_end_pelement); XML_SetCharacterDataHandler(parser, cctl_char_handler); retval = XML_Parse(parser, str, strlen(str), 1); XML_ParserFree(parser); free(str); if (retval != 1) { log_warnx("XML_Parse failed"); return (NULL); } conf = conf_new(); STAILQ_FOREACH(port, &devlist.port_list, links) { if (port->cfiscsi_target == NULL) { log_debugx("CTL port %ju wasn't managed by ctld; " "ignoring", (uintmax_t)port->port_id); continue; } if (port->cfiscsi_status != 1) { log_debugx("CTL port %ju is not active (%d); ignoring", (uintmax_t)port->port_id, port->cfiscsi_status); continue; } targ = target_find(conf, port->cfiscsi_target); if (targ == NULL) { #if 0 log_debugx("found new kernel target %s for CTL port %ld", port->cfiscsi_target, port->port_id); #endif targ = target_new(conf, port->cfiscsi_target); if (targ == NULL) { 
log_warnx("target_new failed"); continue; } } } STAILQ_FOREACH(lun, &devlist.lun_list, links) { struct cctl_lun_nv *nv; - if (lun->cfiscsi_target == NULL) { + if (lun->ctld_name == NULL) { log_debugx("CTL lun %ju wasn't managed by ctld; " "ignoring", (uintmax_t)lun->lun_id); continue; } - targ = target_find(conf, lun->cfiscsi_target); - if (targ == NULL) { -#if 0 - log_debugx("found new kernel target %s for CTL lun %ld", - lun->cfiscsi_target, lun->lun_id); -#endif - targ = target_new(conf, lun->cfiscsi_target); - if (targ == NULL) { - log_warnx("target_new failed"); - continue; - } - } - - cl = lun_find(targ, lun->cfiscsi_lun); + cl = lun_find(conf, lun->ctld_name); if (cl != NULL) { - log_warnx("found CTL lun %ju, backing lun %d, target " - "%s, also backed by CTL lun %d; ignoring", - (uintmax_t) lun->lun_id, cl->l_lun, - cl->l_target->t_name, cl->l_ctl_lun); + log_warnx("found CTL lun %ju \"%s\", " + "also backed by CTL lun %d; ignoring", + (uintmax_t)lun->lun_id, lun->ctld_name, + cl->l_ctl_lun); continue; } - log_debugx("found CTL lun %ju, backing lun %d, target %s", - (uintmax_t)lun->lun_id, lun->cfiscsi_lun, lun->cfiscsi_target); + log_debugx("found CTL lun %ju \"%s\"", + (uintmax_t)lun->lun_id, lun->ctld_name); - cl = lun_new(targ, lun->cfiscsi_lun); + cl = lun_new(conf, lun->ctld_name); if (cl == NULL) { log_warnx("lun_new failed"); continue; } lun_set_backend(cl, lun->backend_type); lun_set_blocksize(cl, lun->blocksize); lun_set_device_id(cl, lun->device_id); lun_set_serial(cl, lun->serial_number); lun_set_size(cl, lun->size_blocks * cl->l_blocksize); lun_set_ctl_lun(cl, lun->lun_id); STAILQ_FOREACH(nv, &lun->attr_list, links) { if (strcmp(nv->name, "file") == 0 || strcmp(nv->name, "dev") == 0) { lun_set_path(cl, nv->value); continue; } lo = lun_option_new(cl, nv->name, nv->value); if (lo == NULL) log_warnx("unable to add CTL lun option %s " - "for CTL lun %ju for lun %d, target %s", + "for CTL lun %ju \"%s\"", nv->name, (uintmax_t) lun->lun_id, - cl->l_lun, cl->l_target->t_name); + cl->l_name); } } return (conf); } static void str_arg(struct ctl_be_arg *arg, const char *name, const char *value) { arg->namelen = strlen(name) + 1; arg->name = __DECONST(char *, name); arg->vallen = strlen(value) + 1; arg->value = __DECONST(char *, value); arg->flags = CTL_BEARG_ASCII | CTL_BEARG_RD; } int kernel_lun_add(struct lun *lun) { struct lun_option *lo; struct ctl_lun_req req; - char *tmp; int error, i, num_options; bzero(&req, sizeof(req)); strlcpy(req.backend, lun->l_backend, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_CREATE; req.reqdata.create.blocksize_bytes = lun->l_blocksize; if (lun->l_size != 0) req.reqdata.create.lun_size_bytes = lun->l_size; req.reqdata.create.flags |= CTL_LUN_FLAG_DEV_TYPE; req.reqdata.create.device_type = T_DIRECT; if (lun->l_serial != NULL) { strncpy(req.reqdata.create.serial_num, lun->l_serial, sizeof(req.reqdata.create.serial_num)); req.reqdata.create.flags |= CTL_LUN_FLAG_SERIAL_NUM; } if (lun->l_device_id != NULL) { strncpy(req.reqdata.create.device_id, lun->l_device_id, sizeof(req.reqdata.create.device_id)); req.reqdata.create.flags |= CTL_LUN_FLAG_DEVID; } if (lun->l_path != NULL) { lo = lun_option_find(lun, "file"); if (lo != NULL) { lun_option_set(lo, lun->l_path); } else { lo = lun_option_new(lun, "file", lun->l_path); assert(lo != NULL); } } - lo = lun_option_find(lun, "cfiscsi_target"); + lo = lun_option_find(lun, "ctld_name"); if (lo != NULL) { - lun_option_set(lo, lun->l_target->t_name); + lun_option_set(lo, lun->l_name); } else { - lo = 
lun_option_new(lun, "cfiscsi_target", - lun->l_target->t_name); + lo = lun_option_new(lun, "ctld_name", lun->l_name); assert(lo != NULL); } - asprintf(&tmp, "%d", lun->l_lun); - if (tmp == NULL) - log_errx(1, "asprintf"); - lo = lun_option_find(lun, "cfiscsi_lun"); - if (lo != NULL) { - lun_option_set(lo, tmp); - free(tmp); - } else { - lo = lun_option_new(lun, "cfiscsi_lun", tmp); - free(tmp); - assert(lo != NULL); - } - - asprintf(&tmp, "%s,lun,%d", lun->l_target->t_name, lun->l_lun); - if (tmp == NULL) - log_errx(1, "asprintf"); lo = lun_option_find(lun, "scsiname"); - if (lo != NULL) { - lun_option_set(lo, tmp); - free(tmp); - } else { - lo = lun_option_new(lun, "scsiname", tmp); - free(tmp); + if (lo == NULL && lun->l_scsiname != NULL) { + lo = lun_option_new(lun, "scsiname", lun->l_scsiname); assert(lo != NULL); } num_options = 0; TAILQ_FOREACH(lo, &lun->l_options, lo_next) num_options++; req.num_be_args = num_options; if (num_options > 0) { req.be_args = malloc(num_options * sizeof(*req.be_args)); if (req.be_args == NULL) { log_warn("error allocating %zd bytes", num_options * sizeof(*req.be_args)); return (1); } i = 0; TAILQ_FOREACH(lo, &lun->l_options, lo_next) { str_arg(&req.be_args[i], lo->lo_name, lo->lo_value); i++; } assert(i == num_options); } error = ioctl(ctl_fd, CTL_LUN_REQ, &req); free(req.be_args); if (error != 0) { log_warn("error issuing CTL_LUN_REQ ioctl"); return (1); } switch (req.status) { case CTL_LUN_ERROR: log_warnx("LUN creation error: %s", req.error_str); return (1); case CTL_LUN_WARNING: log_warnx("LUN creation warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: log_warnx("unknown LUN creation status: %d", req.status); return (1); } lun_set_ctl_lun(lun, req.reqdata.create.req_lun_id); return (0); } int kernel_lun_resize(struct lun *lun) { struct ctl_lun_req req; bzero(&req, sizeof(req)); strlcpy(req.backend, lun->l_backend, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_MODIFY; req.reqdata.modify.lun_id = lun->l_ctl_lun; req.reqdata.modify.lun_size_bytes = lun->l_size; if (ioctl(ctl_fd, CTL_LUN_REQ, &req) == -1) { log_warn("error issuing CTL_LUN_REQ ioctl"); return (1); } switch (req.status) { case CTL_LUN_ERROR: log_warnx("LUN modification error: %s", req.error_str); return (1); case CTL_LUN_WARNING: log_warnx("LUN modification warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: log_warnx("unknown LUN modification status: %d", req.status); return (1); } return (0); } int kernel_lun_remove(struct lun *lun) { struct ctl_lun_req req; bzero(&req, sizeof(req)); strlcpy(req.backend, lun->l_backend, sizeof(req.backend)); req.reqtype = CTL_LUNREQ_RM; req.reqdata.rm.lun_id = lun->l_ctl_lun; if (ioctl(ctl_fd, CTL_LUN_REQ, &req) == -1) { log_warn("error issuing CTL_LUN_REQ ioctl"); return (1); } switch (req.status) { case CTL_LUN_ERROR: log_warnx("LUN removal error: %s", req.error_str); return (1); case CTL_LUN_WARNING: log_warnx("LUN removal warning: %s", req.error_str); break; case CTL_LUN_OK: break; default: log_warnx("unknown LUN removal status: %d", req.status); return (1); } return (0); } void kernel_handoff(struct connection *conn) { struct ctl_iscsi req; bzero(&req, sizeof(req)); req.type = CTL_ISCSI_HANDOFF; strlcpy(req.data.handoff.initiator_name, conn->conn_initiator_name, sizeof(req.data.handoff.initiator_name)); strlcpy(req.data.handoff.initiator_addr, conn->conn_initiator_addr, sizeof(req.data.handoff.initiator_addr)); if (conn->conn_initiator_alias != NULL) { strlcpy(req.data.handoff.initiator_alias, 
conn->conn_initiator_alias, sizeof(req.data.handoff.initiator_alias)); } memcpy(req.data.handoff.initiator_isid, conn->conn_initiator_isid, sizeof(req.data.handoff.initiator_isid)); strlcpy(req.data.handoff.target_name, conn->conn_target->t_name, sizeof(req.data.handoff.target_name)); #ifdef ICL_KERNEL_PROXY if (proxy_mode) req.data.handoff.connection_id = conn->conn_socket; else req.data.handoff.socket = conn->conn_socket; #else req.data.handoff.socket = conn->conn_socket; #endif req.data.handoff.portal_group_tag = conn->conn_portal->p_portal_group->pg_tag; if (conn->conn_header_digest == CONN_DIGEST_CRC32C) req.data.handoff.header_digest = CTL_ISCSI_DIGEST_CRC32C; if (conn->conn_data_digest == CONN_DIGEST_CRC32C) req.data.handoff.data_digest = CTL_ISCSI_DIGEST_CRC32C; req.data.handoff.cmdsn = conn->conn_cmdsn; req.data.handoff.statsn = conn->conn_statsn; req.data.handoff.max_recv_data_segment_length = conn->conn_max_data_segment_length; req.data.handoff.max_burst_length = conn->conn_max_burst_length; req.data.handoff.immediate_data = conn->conn_immediate_data; if (ioctl(ctl_fd, CTL_ISCSI, &req) == -1) { log_err(1, "error issuing CTL_ISCSI ioctl; " "dropping connection"); } if (req.status != CTL_ISCSI_OK) { log_errx(1, "error returned from CTL iSCSI handoff request: " "%s; dropping connection", req.error_str); } } int kernel_port_add(struct target *targ) { struct ctl_port_entry entry; struct ctl_req req; + struct ctl_lun_map lm; char tagstr[16]; - int error; - uint32_t port_id = -1; + int error, i; + /* Create iSCSI port. */ bzero(&req, sizeof(req)); strlcpy(req.driver, "iscsi", sizeof(req.driver)); req.reqtype = CTL_REQ_CREATE; req.num_args = 4; req.args = malloc(req.num_args * sizeof(*req.args)); req.args[0].namelen = sizeof("port_id"); req.args[0].name = __DECONST(char *, "port_id"); - req.args[0].vallen = sizeof(port_id); - req.args[0].value = &port_id; + req.args[0].vallen = sizeof(targ->t_ctl_port); + req.args[0].value = &targ->t_ctl_port; req.args[0].flags = CTL_BEARG_WR; str_arg(&req.args[1], "cfiscsi_target", targ->t_name); snprintf(tagstr, sizeof(tagstr), "%d", targ->t_portal_group->pg_tag); str_arg(&req.args[2], "cfiscsi_portal_group_tag", tagstr); if (targ->t_alias) str_arg(&req.args[3], "cfiscsi_target_alias", targ->t_alias); else req.num_args--; - error = ioctl(ctl_fd, CTL_PORT_REQ, &req); free(req.args); if (error != 0) { log_warn("error issuing CTL_PORT_REQ ioctl"); return (1); } - if (req.status == CTL_LUN_ERROR) { log_warnx("error returned from port creation request: %s", req.error_str); return (1); } - if (req.status != CTL_LUN_OK) { log_warnx("unknown port creation request status %d", req.status); return (1); } - bzero(&entry, sizeof(entry)); - entry.targ_port = port_id; + /* Explicitly enable mapping to block any access except allowed. 
*/ + lm.port = targ->t_ctl_port; + lm.plun = UINT32_MAX; + lm.lun = 0; + error = ioctl(ctl_fd, CTL_LUN_MAP, &lm); + if (error != 0) + log_warn("CTL_LUN_MAP ioctl failed"); + /* Map configured LUNs */ + for (i = 0; i < MAX_LUNS; i++) { + if (targ->t_luns[i] == NULL) + continue; + lm.port = targ->t_ctl_port; + lm.plun = i; + lm.lun = targ->t_luns[i]->l_ctl_lun; + error = ioctl(ctl_fd, CTL_LUN_MAP, &lm); + if (error != 0) + log_warn("CTL_LUN_MAP ioctl failed"); + } + + /* Enable port */ + bzero(&entry, sizeof(entry)); + entry.targ_port = targ->t_ctl_port; error = ioctl(ctl_fd, CTL_ENABLE_PORT, &entry); if (error != 0) { log_warn("CTL_ENABLE_PORT ioctl failed"); return (-1); } + return (0); +} + +int +kernel_port_update(struct target *targ) +{ + struct ctl_lun_map lm; + int error, i; + + /* Map configured LUNs and unmap others */ + for (i = 0; i < MAX_LUNS; i++) { + lm.port = targ->t_ctl_port; + lm.plun = i; + if (targ->t_luns[i] == NULL) + lm.lun = UINT32_MAX; + else + lm.lun = targ->t_luns[i]->l_ctl_lun; + error = ioctl(ctl_fd, CTL_LUN_MAP, &lm); + if (error != 0) + log_warn("CTL_LUN_MAP ioctl failed"); + } return (0); } int kernel_port_remove(struct target *targ) { struct ctl_req req; char tagstr[16]; int error; bzero(&req, sizeof(req)); strlcpy(req.driver, "iscsi", sizeof(req.driver)); req.reqtype = CTL_REQ_REMOVE; req.num_args = 2; req.args = malloc(req.num_args * sizeof(*req.args)); str_arg(&req.args[0], "cfiscsi_target", targ->t_name); if (targ->t_portal_group) { snprintf(tagstr, sizeof(tagstr), "%d", targ->t_portal_group->pg_tag); str_arg(&req.args[1], "cfiscsi_portal_group_tag", tagstr); } else req.num_args--; error = ioctl(ctl_fd, CTL_PORT_REQ, &req); free(req.args); if (error != 0) { log_warn("error issuing CTL_PORT_REQ ioctl"); return (1); } if (req.status == CTL_LUN_ERROR) { log_warnx("error returned from port removal request: %s", req.error_str); return (1); } if (req.status != CTL_LUN_OK) { log_warnx("unknown port removal request status %d", req.status); return (1); } return (0); } #ifdef ICL_KERNEL_PROXY void kernel_listen(struct addrinfo *ai, bool iser, int portal_id) { struct ctl_iscsi req; bzero(&req, sizeof(req)); req.type = CTL_ISCSI_LISTEN; req.data.listen.iser = iser; req.data.listen.domain = ai->ai_family; req.data.listen.socktype = ai->ai_socktype; req.data.listen.protocol = ai->ai_protocol; req.data.listen.addr = ai->ai_addr; req.data.listen.addrlen = ai->ai_addrlen; req.data.listen.portal_id = portal_id; if (ioctl(ctl_fd, CTL_ISCSI, &req) == -1) log_err(1, "error issuing CTL_ISCSI ioctl"); if (req.status != CTL_ISCSI_OK) { log_errx(1, "error returned from CTL iSCSI listen: %s", req.error_str); } } void kernel_accept(int *connection_id, int *portal_id, struct sockaddr *client_sa, socklen_t *client_salen) { struct ctl_iscsi req; struct sockaddr_storage ss; bzero(&req, sizeof(req)); req.type = CTL_ISCSI_ACCEPT; req.data.accept.initiator_addr = (struct sockaddr *)&ss; if (ioctl(ctl_fd, CTL_ISCSI, &req) == -1) log_err(1, "error issuing CTL_ISCSI ioctl"); if (req.status != CTL_ISCSI_OK) { log_errx(1, "error returned from CTL iSCSI accept: %s", req.error_str); } *connection_id = req.data.accept.connection_id; *portal_id = req.data.accept.portal_id; *client_salen = req.data.accept.initiator_addrlen; memcpy(client_sa, &ss, *client_salen); } void kernel_send(struct pdu *pdu) { struct ctl_iscsi req; bzero(&req, sizeof(req)); req.type = CTL_ISCSI_SEND; req.data.send.connection_id = pdu->pdu_connection->conn_socket; req.data.send.bhs = pdu->pdu_bhs; req.data.send.data_segment_len 
= pdu->pdu_data_len; req.data.send.data_segment = pdu->pdu_data; if (ioctl(ctl_fd, CTL_ISCSI, &req) == -1) { log_err(1, "error issuing CTL_ISCSI ioctl; " "dropping connection"); } if (req.status != CTL_ISCSI_OK) { log_errx(1, "error returned from CTL iSCSI send: " "%s; dropping connection", req.error_str); } } void kernel_receive(struct pdu *pdu) { struct ctl_iscsi req; pdu->pdu_data = malloc(MAX_DATA_SEGMENT_LENGTH); if (pdu->pdu_data == NULL) log_err(1, "malloc"); bzero(&req, sizeof(req)); req.type = CTL_ISCSI_RECEIVE; req.data.receive.connection_id = pdu->pdu_connection->conn_socket; req.data.receive.bhs = pdu->pdu_bhs; req.data.receive.data_segment_len = MAX_DATA_SEGMENT_LENGTH; req.data.receive.data_segment = pdu->pdu_data; if (ioctl(ctl_fd, CTL_ISCSI, &req) == -1) { log_err(1, "error issuing CTL_ISCSI ioctl; " "dropping connection"); } if (req.status != CTL_ISCSI_OK) { log_errx(1, "error returned from CTL iSCSI receive: " "%s; dropping connection", req.error_str); } } #endif /* ICL_KERNEL_PROXY */ /* * XXX: I CANT INTO LATIN */ void kernel_capsicate(void) { int error; cap_rights_t rights; const unsigned long cmds[] = { CTL_ISCSI }; cap_rights_init(&rights, CAP_IOCTL); error = cap_rights_limit(ctl_fd, &rights); if (error != 0 && errno != ENOSYS) log_err(1, "cap_rights_limit"); error = cap_ioctls_limit(ctl_fd, cmds, sizeof(cmds) / sizeof(cmds[0])); if (error != 0 && errno != ENOSYS) log_err(1, "cap_ioctls_limit"); error = cap_enter(); if (error != 0 && errno != ENOSYS) log_err(1, "cap_enter"); if (cap_sandboxed()) log_debugx("Capsicum capability mode enabled"); else log_warnx("Capsicum capability mode not supported"); } Index: stable/10/usr.sbin/ctld/parse.y =================================================================== --- stable/10/usr.sbin/ctld/parse.y (revision 279001) +++ stable/10/usr.sbin/ctld/parse.y (revision 279002) @@ -1,927 +1,973 @@ %{ /*- * Copyright (c) 2012 The FreeBSD Foundation * All rights reserved. * * This software was developed by Edward Tomasz Napierala under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include "ctld.h" extern FILE *yyin; extern char *yytext; extern int lineno; static struct conf *conf = NULL; static struct auth_group *auth_group = NULL; static struct portal_group *portal_group = NULL; static struct target *target = NULL; static struct lun *lun = NULL; extern void yyerror(const char *); extern int yylex(void); extern void yyrestart(FILE *); %} %token ALIAS AUTH_GROUP AUTH_TYPE BACKEND BLOCKSIZE CHAP CHAP_MUTUAL %token CLOSING_BRACKET DEBUG DEVICE_ID DISCOVERY_AUTH_GROUP DISCOVERY_FILTER %token INITIATOR_NAME INITIATOR_PORTAL ISNS_SERVER ISNS_PERIOD ISNS_TIMEOUT %token LISTEN LISTEN_ISER LUN MAXPROC OPENING_BRACKET OPTION %token PATH PIDFILE PORTAL_GROUP REDIRECT SEMICOLON SERIAL SIZE STR %token TARGET TIMEOUT %union { char *str; } %token <str> STR %% statements: | statements statement | statements statement SEMICOLON ; statement: debug | timeout | maxproc | pidfile | isns_server | isns_period | isns_timeout | auth_group | portal_group | + lun + | target ; debug: DEBUG STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } conf->conf_debug = tmp; } ; timeout: TIMEOUT STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } conf->conf_timeout = tmp; } ; maxproc: MAXPROC STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } conf->conf_maxproc = tmp; } ; pidfile: PIDFILE STR { if (conf->conf_pidfile_path != NULL) { log_warnx("pidfile specified more than once"); free($2); return (1); } conf->conf_pidfile_path = $2; } ; isns_server: ISNS_SERVER STR { int error; error = isns_new(conf, $2); free($2); if (error != 0) return (1); } ; isns_period: ISNS_PERIOD STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } conf->conf_isns_period = tmp; } ; isns_timeout: ISNS_TIMEOUT STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } conf->conf_isns_timeout = tmp; } ; auth_group: AUTH_GROUP auth_group_name OPENING_BRACKET auth_group_entries CLOSING_BRACKET { auth_group = NULL; } ; auth_group_name: STR { /* * Make it possible to redefine default * auth-group, but only once.
*/ if (strcmp($1, "default") == 0 && conf->conf_default_ag_defined == false) { auth_group = auth_group_find(conf, $1); conf->conf_default_ag_defined = true; } else { auth_group = auth_group_new(conf, $1); } free($1); if (auth_group == NULL) return (1); } ; auth_group_entries: | auth_group_entries auth_group_entry | auth_group_entries auth_group_entry SEMICOLON ; auth_group_entry: auth_group_auth_type | auth_group_chap | auth_group_chap_mutual | auth_group_initiator_name | auth_group_initiator_portal ; auth_group_auth_type: AUTH_TYPE STR { int error; error = auth_group_set_type(auth_group, $2); free($2); if (error != 0) return (1); } ; auth_group_chap: CHAP STR STR { const struct auth *ca; ca = auth_new_chap(auth_group, $2, $3); free($2); free($3); if (ca == NULL) return (1); } ; auth_group_chap_mutual: CHAP_MUTUAL STR STR STR STR { const struct auth *ca; ca = auth_new_chap_mutual(auth_group, $2, $3, $4, $5); free($2); free($3); free($4); free($5); if (ca == NULL) return (1); } ; auth_group_initiator_name: INITIATOR_NAME STR { const struct auth_name *an; an = auth_name_new(auth_group, $2); free($2); if (an == NULL) return (1); } ; auth_group_initiator_portal: INITIATOR_PORTAL STR { const struct auth_portal *ap; ap = auth_portal_new(auth_group, $2); free($2); if (ap == NULL) return (1); } ; portal_group: PORTAL_GROUP portal_group_name OPENING_BRACKET portal_group_entries CLOSING_BRACKET { portal_group = NULL; } ; portal_group_name: STR { /* * Make it possible to redefine default * portal-group, but only once. */ if (strcmp($1, "default") == 0 && conf->conf_default_pg_defined == false) { portal_group = portal_group_find(conf, $1); conf->conf_default_pg_defined = true; } else { portal_group = portal_group_new(conf, $1); } free($1); if (portal_group == NULL) return (1); } ; portal_group_entries: | portal_group_entries portal_group_entry | portal_group_entries portal_group_entry SEMICOLON ; portal_group_entry: portal_group_discovery_auth_group | portal_group_discovery_filter | portal_group_listen | portal_group_listen_iser | portal_group_redirect ; portal_group_discovery_auth_group: DISCOVERY_AUTH_GROUP STR { if (portal_group->pg_discovery_auth_group != NULL) { log_warnx("discovery-auth-group for portal-group " "\"%s\" specified more than once", portal_group->pg_name); return (1); } portal_group->pg_discovery_auth_group = auth_group_find(conf, $2); if (portal_group->pg_discovery_auth_group == NULL) { log_warnx("unknown discovery-auth-group \"%s\" " "for portal-group \"%s\"", $2, portal_group->pg_name); return (1); } free($2); } ; portal_group_discovery_filter: DISCOVERY_FILTER STR { int error; error = portal_group_set_filter(portal_group, $2); free($2); if (error != 0) return (1); } ; portal_group_listen: LISTEN STR { int error; error = portal_group_add_listen(portal_group, $2, false); free($2); if (error != 0) return (1); } ; portal_group_listen_iser: LISTEN_ISER STR { int error; error = portal_group_add_listen(portal_group, $2, true); free($2); if (error != 0) return (1); } ; portal_group_redirect: REDIRECT STR { int error; error = portal_group_set_redirection(portal_group, $2); free($2); if (error != 0) return (1); } ; +lun: LUN lun_name + OPENING_BRACKET lun_entries CLOSING_BRACKET + { + lun = NULL; + } + ; + +lun_name: STR + { + lun = lun_new(conf, $1); + free($1); + if (lun == NULL) + return (1); + } + ; + target: TARGET target_name OPENING_BRACKET target_entries CLOSING_BRACKET { target = NULL; } ; target_name: STR { target = target_new(conf, $1); free($1); if (target == NULL) return (1);
} ; target_entries: | target_entries target_entry | target_entries target_entry SEMICOLON ; target_entry: target_alias | target_auth_group | target_auth_type | target_chap | target_chap_mutual | target_initiator_name | target_initiator_portal | target_portal_group | target_redirect | target_lun + | + target_lun_ref ; target_alias: ALIAS STR { if (target->t_alias != NULL) { log_warnx("alias for target \"%s\" " "specified more than once", target->t_name); return (1); } target->t_alias = $2; } ; target_auth_group: AUTH_GROUP STR { if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) log_warnx("auth-group for target \"%s\" " "specified more than once", target->t_name); else log_warnx("cannot use both auth-group and explicit " "authorisations for target \"%s\"", target->t_name); return (1); } target->t_auth_group = auth_group_find(conf, $2); if (target->t_auth_group == NULL) { log_warnx("unknown auth-group \"%s\" for target " "\"%s\"", $2, target->t_name); return (1); } free($2); } ; target_auth_type: AUTH_TYPE STR { int error; if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) { log_warnx("cannot use both auth-group and " "auth-type for target \"%s\"", target->t_name); return (1); } } else { target->t_auth_group = auth_group_new(conf, NULL); if (target->t_auth_group == NULL) { free($2); return (1); } target->t_auth_group->ag_target = target; } error = auth_group_set_type(target->t_auth_group, $2); free($2); if (error != 0) return (1); } ; target_chap: CHAP STR STR { const struct auth *ca; if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) { log_warnx("cannot use both auth-group and " "chap for target \"%s\"", target->t_name); free($2); free($3); return (1); } } else { target->t_auth_group = auth_group_new(conf, NULL); if (target->t_auth_group == NULL) { free($2); free($3); return (1); } target->t_auth_group->ag_target = target; } ca = auth_new_chap(target->t_auth_group, $2, $3); free($2); free($3); if (ca == NULL) return (1); } ; target_chap_mutual: CHAP_MUTUAL STR STR STR STR { const struct auth *ca; if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) { log_warnx("cannot use both auth-group and " "chap-mutual for target \"%s\"", target->t_name); free($2); free($3); free($4); free($5); return (1); } } else { target->t_auth_group = auth_group_new(conf, NULL); if (target->t_auth_group == NULL) { free($2); free($3); free($4); free($5); return (1); } target->t_auth_group->ag_target = target; } ca = auth_new_chap_mutual(target->t_auth_group, $2, $3, $4, $5); free($2); free($3); free($4); free($5); if (ca == NULL) return (1); } ; target_initiator_name: INITIATOR_NAME STR { const struct auth_name *an; if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) { log_warnx("cannot use both auth-group and " "initiator-name for target \"%s\"", target->t_name); free($2); return (1); } } else { target->t_auth_group = auth_group_new(conf, NULL); if (target->t_auth_group == NULL) { free($2); return (1); } target->t_auth_group->ag_target = target; } an = auth_name_new(target->t_auth_group, $2); free($2); if (an == NULL) return (1); } ; target_initiator_portal: INITIATOR_PORTAL STR { const struct auth_portal *ap; if (target->t_auth_group != NULL) { if (target->t_auth_group->ag_name != NULL) { log_warnx("cannot use both auth-group and " "initiator-portal for target \"%s\"", target->t_name); free($2); return (1); } } else { target->t_auth_group = auth_group_new(conf, NULL); if 
(target->t_auth_group == NULL) { free($2); return (1); } target->t_auth_group->ag_target = target; } ap = auth_portal_new(target->t_auth_group, $2); free($2); if (ap == NULL) return (1); } ; target_portal_group: PORTAL_GROUP STR { if (target->t_portal_group != NULL) { log_warnx("portal-group for target \"%s\" " "specified more than once", target->t_name); free($2); return (1); } target->t_portal_group = portal_group_find(conf, $2); if (target->t_portal_group == NULL) { log_warnx("unknown portal-group \"%s\" for target " "\"%s\"", $2, target->t_name); free($2); return (1); } free($2); } ; target_redirect: REDIRECT STR { int error; error = target_set_redirection(target, $2); free($2); if (error != 0) return (1); } ; target_lun: LUN lun_number OPENING_BRACKET lun_entries CLOSING_BRACKET { lun = NULL; } ; lun_number: STR { uint64_t tmp; + char *name; if (expand_number($1, &tmp) != 0) { yyerror("invalid numeric value"); free($1); return (1); } - lun = lun_new(target, tmp); + asprintf(&name, "%s,lun,%ju", target->t_name, tmp); + lun = lun_new(conf, name); if (lun == NULL) return (1); + + lun_set_scsiname(lun, name); + target->t_luns[tmp] = lun; } ; +target_lun_ref: LUN STR STR + { + uint64_t tmp; + + if (expand_number($2, &tmp) != 0) { + yyerror("invalid numeric value"); + free($2); + free($3); + return (1); + } + free($2); + + lun = lun_find(conf, $3); + free($3); + if (lun == NULL) + return (1); + + target->t_luns[tmp] = lun; + } + ; + lun_entries: | lun_entries lun_entry | lun_entries lun_entry SEMICOLON ; lun_entry: lun_backend | lun_blocksize | lun_device_id | lun_option | lun_path | lun_serial | lun_size ; lun_backend: BACKEND STR { if (lun->l_backend != NULL) { - log_warnx("backend for lun %d, target \"%s\" " + log_warnx("backend for lun \"%s\" " "specified more than once", - lun->l_lun, target->t_name); + lun->l_name); free($2); return (1); } lun_set_backend(lun, $2); free($2); } ; lun_blocksize: BLOCKSIZE STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } if (lun->l_blocksize != 0) { - log_warnx("blocksize for lun %d, target \"%s\" " + log_warnx("blocksize for lun \"%s\" " "specified more than once", - lun->l_lun, target->t_name); + lun->l_name); return (1); } lun_set_blocksize(lun, tmp); } ; lun_device_id: DEVICE_ID STR { if (lun->l_device_id != NULL) { - log_warnx("device_id for lun %d, target \"%s\" " + log_warnx("device_id for lun \"%s\" " "specified more than once", - lun->l_lun, target->t_name); + lun->l_name); free($2); return (1); } lun_set_device_id(lun, $2); free($2); } ; lun_option: OPTION STR STR { struct lun_option *clo; clo = lun_option_new(lun, $2, $3); free($2); free($3); if (clo == NULL) return (1); } ; lun_path: PATH STR { if (lun->l_path != NULL) { - log_warnx("path for lun %d, target \"%s\" " + log_warnx("path for lun \"%s\" " "specified more than once", - lun->l_lun, target->t_name); + lun->l_name); free($2); return (1); } lun_set_path(lun, $2); free($2); } ; lun_serial: SERIAL STR { if (lun->l_serial != NULL) { - log_warnx("serial for lun %d, target \"%s\" " + log_warnx("serial for lun \"%s\" " "specified more than once", - lun->l_lun, target->t_name); + lun->l_name); free($2); return (1); } lun_set_serial(lun, $2); free($2); } ; lun_size: SIZE STR { uint64_t tmp; if (expand_number($2, &tmp) != 0) { yyerror("invalid numeric value"); free($2); return (1); } if (lun->l_size != 0) { - log_warnx("size for lun %d, target \"%s\" " + log_warnx("size for lun \"%s\" " "specified more than once", - lun->l_lun, 
target->t_name); + lun->l_name); return (1); } lun_set_size(lun, tmp); } ; %% void yyerror(const char *str) { log_warnx("error in configuration file at line %d near '%s': %s", lineno, yytext, str); } static void check_perms(const char *path) { struct stat sb; int error; error = stat(path, &sb); if (error != 0) { log_warn("stat"); return; } if (sb.st_mode & S_IWOTH) { log_warnx("%s is world-writable", path); } else if (sb.st_mode & S_IROTH) { log_warnx("%s is world-readable", path); } else if (sb.st_mode & S_IXOTH) { /* * Ok, this one doesn't matter, but still do it, * just for consistency. */ log_warnx("%s is world-executable", path); } /* * XXX: Should we also check for owner != 0? */ } struct conf * conf_new_from_file(const char *path) { struct auth_group *ag; struct portal_group *pg; int error; log_debugx("obtaining configuration from %s", path); conf = conf_new(); ag = auth_group_new(conf, "default"); assert(ag != NULL); ag = auth_group_new(conf, "no-authentication"); assert(ag != NULL); ag->ag_type = AG_TYPE_NO_AUTHENTICATION; ag = auth_group_new(conf, "no-access"); assert(ag != NULL); ag->ag_type = AG_TYPE_DENY; pg = portal_group_new(conf, "default"); assert(pg != NULL); yyin = fopen(path, "r"); if (yyin == NULL) { log_warn("unable to open configuration file %s", path); conf_delete(conf); return (NULL); } check_perms(path); lineno = 1; yyrestart(yyin); error = yyparse(); auth_group = NULL; portal_group = NULL; target = NULL; lun = NULL; fclose(yyin); if (error != 0) { conf_delete(conf); return (NULL); } if (conf->conf_default_ag_defined == false) { log_debugx("auth-group \"default\" not defined; " "going with defaults"); ag = auth_group_find(conf, "default"); assert(ag != NULL); ag->ag_type = AG_TYPE_DENY; } if (conf->conf_default_pg_defined == false) { log_debugx("portal-group \"default\" not defined; " "going with defaults"); pg = portal_group_find(conf, "default"); assert(pg != NULL); portal_group_add_listen(pg, "0.0.0.0:3260", false); portal_group_add_listen(pg, "[::]:3260", false); } conf->conf_kernel_port_on = true; error = conf_verify(conf); if (error != 0) { conf_delete(conf); return (NULL); } return (conf); } Index: stable/10 =================================================================== --- stable/10 (revision 279001) +++ stable/10 (revision 279002) Property changes on: stable/10 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r278037
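A few illustrative sketches of the mechanisms this merge relies on follow; every identifier not present in the patch itself is hypothetical. First, the devlist parsing (cctl_start_element() and friends): expat delivers element text in arbitrary chunks through a character-data callback, so the handlers keep one accumulation buffer per nesting level and only interpret the text once the closing tag arrives. This self-contained sketch shows the same idiom with plain malloc'ed buffers standing in for sbuf(9); the sample XML is illustrative (compile with -lexpat).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <expat.h>

#define MAX_LEVELS	32

struct parse_state {
	char	*text[MAX_LEVELS];	/* accumulated cdata, one per level */
	int	 level;
};

static void
start_handler(void *arg, const XML_Char *name, const XML_Char **attr)
{
	struct parse_state *ps = arg;

	(void)name; (void)attr;
	if (++ps->level >= MAX_LEVELS)
		exit(1);		/* too many nesting levels */
	ps->text[ps->level] = calloc(1, 1);	/* start with "" */
	if (ps->text[ps->level] == NULL)
		exit(1);
}

static void
char_handler(void *arg, const XML_Char *str, int len)
{
	struct parse_state *ps = arg;
	char *nbuf;
	size_t old;

	if (ps->level < 0)
		return;			/* text outside any element */
	old = strlen(ps->text[ps->level]);
	nbuf = realloc(ps->text[ps->level], old + len + 1);
	if (nbuf == NULL)
		exit(1);
	memcpy(nbuf + old, str, len);
	nbuf[old + len] = '\0';
	ps->text[ps->level] = nbuf;
}

static void
end_handler(void *arg, const XML_Char *name)
{
	struct parse_state *ps = arg;
	char *str = ps->text[ps->level];

	if (str[0] != '\0')		/* leaf element carrying text */
		printf("%s = \"%s\"\n", name, str);
	free(str);
	ps->text[ps->level] = NULL;
	ps->level--;
}

int
main(void)
{
	const char xml[] = "<ctllunlist><lun id=\"0\">"
	    "<backend_type>block</backend_type>"
	    "<size>204800</size></lun></ctllunlist>";
	struct parse_state ps = { .level = -1 };
	XML_Parser parser = XML_ParserCreate(NULL);

	if (parser == NULL)
		return (1);
	XML_SetUserData(parser, &ps);
	XML_SetElementHandler(parser, start_handler, end_handler);
	XML_SetCharacterDataHandler(parser, char_handler);
	if (XML_Parse(parser, xml, (int)strlen(xml), 1) != XML_STATUS_OK)
		fprintf(stderr, "parse error\n");
	XML_ParserFree(parser);
	return (0);
}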
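Second, the buffer-sizing loop: conf_new_from_kernel() issues CTL_LUN_LIST (and then CTL_PORT_LIST) with a guessed buffer size and doubles it whenever the kernel answers CTL_LUN_LIST_NEED_MORE_SPACE, since the kernel cannot predict the XML size up front. A sketch of that loop as a hypothetical stand-alone helper, with error handling reduced to returning NULL:

#include <sys/ioctl.h>
#include <stdlib.h>
#include <string.h>

#include <cam/ctl/ctl_ioctl.h>

static char *
fetch_lun_xml(int fd)
{
	struct ctl_lun_list list;
	char *str = NULL, *nstr;
	int len = 4096;

	for (;;) {
		nstr = realloc(str, len);
		if (nstr == NULL) {
			free(str);
			return (NULL);
		}
		str = nstr;
		memset(&list, 0, sizeof(list));
		list.alloc_len = len;
		list.status = CTL_LUN_LIST_NONE;
		list.lun_xml = str;
		if (ioctl(fd, CTL_LUN_LIST, &list) == -1 ||
		    list.status == CTL_LUN_LIST_ERROR) {
			free(str);
			return (NULL);
		}
		if (list.status != CTL_LUN_LIST_NEED_MORE_SPACE)
			return (str);	/* NUL-terminated XML document */
		len <<= 1;		/* guess was too small; try again */
	}
}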
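Third, the heart of the change: LUNs move from per-target numeric addressing (lun_find(targ, number)) to one configuration-wide namespace keyed by name (lun_find(conf, name)). An inline LUN defined inside a target is registered under the synthesized name "<target>,lun,<number>", so both spellings land in the same table and duplicates can be detected uniformly. A simplified sketch of such a name-keyed registry; the structures are stand-ins, not ctld's real struct conf and struct lun:

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lun {
	char			*l_name;
	TAILQ_ENTRY(lun)	 l_next;
};

struct conf {
	TAILQ_HEAD(, lun)	 conf_luns;
};

static struct lun *
lun_find(struct conf *conf, const char *name)
{
	struct lun *lun;

	TAILQ_FOREACH(lun, &conf->conf_luns, l_next)
		if (strcmp(lun->l_name, name) == 0)
			return (lun);
	return (NULL);
}

static struct lun *
lun_new(struct conf *conf, const char *name)
{
	struct lun *lun;

	if (lun_find(conf, name) != NULL)
		return (NULL);		/* names must be unique conf-wide */
	lun = calloc(1, sizeof(*lun));
	if (lun == NULL)
		return (NULL);
	lun->l_name = strdup(name);
	if (lun->l_name == NULL) {
		free(lun);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&conf->conf_luns, lun, l_next);
	return (lun);
}

int
main(void)
{
	struct conf conf;
	char *name;

	TAILQ_INIT(&conf.conf_luns);
	lun_new(&conf, "example0");	/* named, top-level LUN */
	/* An inline "lun 2 { ... }" in a target gets a synthesized name: */
	asprintf(&name, "%s,lun,%d", "iqn.2012-06.com.example:t0", 2);
	lun_new(&conf, name);
	printf("%s\n", lun_find(&conf, "example0") != NULL ?
	    "example0 found" : "example0 missing");
	free(name);
	return (0);
}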
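In configuration terms, the new lun and target_lun_ref grammar rules let ctl.conf define a LUN once at the top level and attach it to one or more targets by name, while the old inline form keeps working. A hypothetical example under the grammar above (all names, paths, and sizes invented):

lun "example0" {
	path /dev/zvol/tank/example0
	blocksize 4096
	size 4G
}

target iqn.2012-06.com.example:target0 {
	portal-group default
	# Reference by name; parsed by the new target_lun_ref rule.
	lun 0 "example0"
	# Inline definition; registered as
	# "iqn.2012-06.com.example:target0,lun,1".
	lun 1 {
		path /dev/zvol/tank/scratch
	}
}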
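Fourth, LUN mapping: kernel_port_add() and kernel_port_update() drive the new CTL_LUN_MAP ioctl. Judging from the patch comment, mapping the wildcard entry (plun == UINT32_MAX) first flips the port to default-deny, after which each explicit entry exposes exactly one configured LUN. A sketch of that protocol as a hypothetical helper; MAX_LUNS and the header path are assumptions:

#include <sys/ioctl.h>
#include <stdint.h>

#include <cam/ctl/ctl_ioctl.h>

#define MAX_LUNS	1024		/* assumed compile-time limit */

static int
port_map_luns(int fd, uint32_t port, const uint32_t *ctl_lun, int nluns)
{
	struct ctl_lun_map lm;
	int error, i;

	/* Default-deny: with the wildcard mapped, unlisted LUNs vanish. */
	lm.port = port;
	lm.plun = UINT32_MAX;
	lm.lun = 0;
	error = ioctl(fd, CTL_LUN_MAP, &lm);
	if (error != 0)
		return (error);

	/* Expose exactly the configured LUNs. */
	for (i = 0; i < nluns && i < MAX_LUNS; i++) {
		if (ctl_lun[i] == UINT32_MAX)
			continue;	/* slot not configured */
		lm.port = port;
		lm.plun = i;
		lm.lun = ctl_lun[i];
		error = ioctl(fd, CTL_LUN_MAP, &lm);
		if (error != 0)
			return (error);
	}
	return (0);
}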
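Finally, kernel_capsicate() follows the standard Capsicum recipe: restrict the already-open CTL descriptor to CAP_IOCTL, whitelist the specific ioctl commands, then enter capability mode, tolerating ENOSYS so the same binary still runs on kernels built without Capsicum. A minimal sketch; the header is <sys/capsicum.h> on newer FreeBSD, while older releases used <sys/capability.h>:

#include <sys/capsicum.h>
#include <errno.h>
#include <err.h>

static void
sandbox_ctl_fd(int fd, const unsigned long *cmds, size_t ncmds)
{
	cap_rights_t rights;

	/* Allow nothing but ioctl(2) on this descriptor... */
	cap_rights_init(&rights, CAP_IOCTL);
	if (cap_rights_limit(fd, &rights) != 0 && errno != ENOSYS)
		err(1, "cap_rights_limit");
	/* ...and only the whitelisted ioctl commands. */
	if (cap_ioctls_limit(fd, cmds, ncmds) != 0 && errno != ENOSYS)
		err(1, "cap_ioctls_limit");
	/* From here on the process cannot open new global resources. */
	if (cap_enter() != 0 && errno != ENOSYS)
		err(1, "cap_enter");
}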