Index: head/sbin/camcontrol/camcontrol.8 =================================================================== --- head/sbin/camcontrol/camcontrol.8 (revision 299370) +++ head/sbin/camcontrol/camcontrol.8 (revision 299371) @@ -1,2376 +1,2391 @@ .\" .\" Copyright (c) 1998, 1999, 2000, 2002, 2005, 2006, 2007 Kenneth D. Merry. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" .\" $FreeBSD$ .\" -.Dd August 6, 2015 +.Dd April 26, 2016 .Dt CAMCONTROL 8 .Os .Sh NAME .Nm camcontrol .Nd CAM control program .Sh SYNOPSIS .Nm .Aq Ar command .Op device id .Op generic args .Op command args .Nm .Ic devlist .Op Fl b .Op Fl v .Nm .Ic periphlist .Op device id .Op Fl n Ar dev_name .Op Fl u Ar unit_number .Nm .Ic tur .Op device id .Op generic args .Nm .Ic inquiry .Op device id .Op generic args .Op Fl D .Op Fl S .Op Fl R .Nm .Ic identify .Op device id .Op generic args .Op Fl v .Nm .Ic reportluns .Op device id .Op generic args .Op Fl c .Op Fl l .Op Fl r Ar reporttype .Nm .Ic readcap .Op device id .Op generic args .Op Fl b .Op Fl h .Op Fl H .Op Fl N .Op Fl q .Op Fl s .Nm .Ic start .Op device id .Op generic args .Nm .Ic stop .Op device id .Op generic args .Nm .Ic load .Op device id .Op generic args .Nm .Ic eject .Op device id .Op generic args .Nm +.Ic reprobe +.Op device id +.Nm .Ic rescan .Aq all | bus Ns Op :target:lun .Nm .Ic reset .Aq all | bus Ns Op :target:lun .Nm .Ic defects .Op device id .Op generic args .Aq Fl f Ar format .Op Fl P .Op Fl G .Op Fl q .Op Fl s .Op Fl S Ar offset .Op Fl X .Nm .Ic modepage .Op device id .Op generic args .Aq Fl m Ar page | Fl l .Op Fl P Ar pgctl .Op Fl b | Fl e .Op Fl d .Nm .Ic cmd .Op device id .Op generic args .Aq Fl a Ar cmd Op args .Aq Fl c Ar cmd Op args .Op Fl d .Op Fl f .Op Fl i Ar len Ar fmt .Bk -words .Op Fl o Ar len Ar fmt Op args .Op Fl r Ar fmt .Ek .Nm .Ic smpcmd .Op device id .Op generic args .Aq Fl r Ar len Ar fmt Op args .Aq Fl R Ar len Ar fmt Op args .Nm .Ic smprg .Op device id .Op generic args .Op Fl l .Nm .Ic smppc .Op device id .Op generic args .Aq Fl p Ar phy .Op Fl l .Op Fl o Ar operation .Op Fl d Ar name .Op Fl m Ar rate .Op Fl M Ar rate .Op Fl T Ar pp_timeout .Op Fl a Ar enable|disable .Op Fl A Ar enable|disable .Op Fl s Ar enable|disable .Op Fl S Ar enable|disable .Nm .Ic smpphylist .Op device id .Op generic args .Op Fl l .Op Fl q .Nm .Ic smpmaninfo .Op device id .Op generic args .Op Fl l .Nm .Ic debug .Op Fl I .Op Fl P .Op Fl T .Op Fl S .Op Fl X .Op Fl c .Op Fl p .Aq all|off|bus Ns Op :target Ns Op :lun .Nm .Ic tags .Op device id .Op generic args .Op Fl N Ar tags .Op Fl q .Op Fl v .Nm .Ic negotiate .Op device id .Op generic args .Op Fl c .Op Fl D Ar enable|disable .Op Fl M Ar mode .Op Fl O Ar offset .Op Fl q .Op Fl R Ar syncrate .Op Fl T Ar enable|disable .Op Fl U .Op Fl W Ar bus_width .Op Fl v .Nm .Ic format .Op device id .Op generic args .Op Fl q .Op Fl r .Op Fl w .Op Fl y .Nm .Ic sanitize .Op device id .Op generic args .Aq Fl a Ar overwrite | block | crypto | exitfailure .Op Fl c Ar passes .Op Fl I .Op Fl P Ar pattern .Op Fl q .Op Fl U .Op Fl r .Op Fl w .Op Fl y .Nm .Ic idle .Op device id .Op generic args .Op Fl t Ar time .Nm .Ic standby .Op device id .Op generic args .Op Fl t Ar time .Nm .Ic sleep .Op device id .Op generic args .Nm .Ic apm .Op device id .Op generic args .Op Fl l Ar level .Nm .Ic aam .Op device id .Op generic args .Op Fl l Ar level .Nm .Ic fwdownload .Op device id .Op generic args .Aq Fl f Ar fw_image .Op Fl q .Op Fl s .Op Fl y .Nm .Ic security .Op device id .Op generic args .Op Fl d Ar pwd .Op Fl e Ar pwd .Op Fl f .Op Fl h Ar pwd .Op Fl k Ar pwd .Op Fl l Ar high|maximum .Op Fl q .Op Fl s Ar pwd .Op Fl T Ar timeout .Op Fl U Ar user|master .Op Fl y .Nm .Ic hpa .Op device id .Op generic args .Op Fl f .Op Fl l .Op Fl P .Op Fl p Ar pwd .Op Fl q .Op Fl s Ar max_sectors .Op Fl U Ar pwd .Op Fl y .Nm .Ic persist .Op device id .Op generic args .Aq Fl i Ar action | Fl o Ar action .Op Fl a .Op Fl I Ar 
trans_id .Op Fl k Ar key .Op Fl K Ar sa_key .Op Fl p .Op Fl R Ar rel_tgt_port .Op Fl s Ar scope .Op Fl S .Op Fl T Ar res_type .Op Fl U .Nm .Ic attrib .Op device id .Op generic args .Aq Fl r Ar action | Fl w Ar attrib .Op Fl a Ar attr_num .Op Fl c .Op Fl e Ar elem_addr .Op Fl F Ar form1,form2 .Op Fl p Ar part .Op Fl s Ar start_addr .Op Fl T Ar elem_type .Op Fl V Ar lv_num .Nm .Ic opcodes .Op device id .Op generic args .Op Fl o Ar opcode .Op Fl s Ar service_action .Op Fl N .Op Fl T .Nm .Ic help .Sh DESCRIPTION The .Nm utility is designed to provide a way for users to access and control the .Fx CAM subsystem. .Pp The .Nm utility can cause a loss of data and/or system crashes if used improperly. Even expert users are encouraged to exercise caution when using this command. Novice users should stay away from this utility. .Pp The .Nm utility has a number of primary functions, many of which support an optional device identifier. A device identifier can take one of three forms: .Bl -tag -width 14n .It deviceUNIT Specify a device name and unit number combination, like "da5" or "cd3". .It bus:target Specify a bus number and target id. The bus number can be determined from the output of .Dq camcontrol devlist . The lun defaults to 0. .It bus:target:lun Specify the bus, target and lun for a device. (e.g.\& 1:2:0) .El .Pp The device identifier, if it is specified, .Em must come immediately after the function name, and before any generic or function-specific arguments. Note that the .Fl n and .Fl u arguments described below will override any device name or unit number specified beforehand. The .Fl n and .Fl u arguments will .Em not override a specified bus:target or bus:target:lun, however. .Pp Most of the .Nm primary functions support these generic arguments: .Bl -tag -width 14n .It Fl C Ar count SCSI command retry count. In order for this to work, error recovery .Pq Fl E must be turned on. .It Fl E Instruct the kernel to perform generic SCSI error recovery for the given command. This is needed in order for the retry count .Pq Fl C to be honored. Other than retrying commands, the generic error recovery in the code will generally attempt to spin up drives that are not spinning. It may take some other actions, depending upon the sense code returned from the command. .It Fl n Ar dev_name Specify the device type to operate on, e.g.\& "da", "cd". .It Fl t Ar timeout SCSI command timeout in seconds. This overrides the default timeout for any given command. .It Fl u Ar unit_number Specify the device unit number, e.g.\& "1", "5". .It Fl v Be verbose, print out sense information for failed SCSI commands. .El .Pp Primary command functions: .Bl -tag -width periphlist .It Ic devlist List all physical devices (logical units) attached to the CAM subsystem. This also includes a list of peripheral drivers attached to each device. With the .Fl v argument, SCSI bus number, adapter name and unit numbers are printed as well. On the other hand, with the .Fl b argument, only the bus, adapter, and unit information will be printed, and device information will be omitted. .It Ic periphlist List all peripheral drivers attached to a given physical device (logical unit). .It Ic tur Send the SCSI test unit ready (0x00) command to the given device. The .Nm utility will report whether the device is ready or not. .It Ic inquiry Send a SCSI inquiry command (0x12) to a device. By default, .Nm will print out the standard inquiry data, device serial number, and transfer rate information.
The user can specify that only certain types of inquiry data be printed: .Bl -tag -width 4n .It Fl D Get the standard inquiry data. .It Fl S Print out the serial number. If this flag is the only one specified, .Nm will not print out "Serial Number" before the value returned by the drive. This is to aid in script writing. .It Fl R Print out transfer rate information. .El .It Ic identify Send an ATA identify command (0xec) to a device. .It Ic reportluns Send the SCSI REPORT LUNS (0xA0) command to the given device. By default, .Nm will print out the list of logical units (LUNs) supported by the target device. There are a couple of options to modify the output: .Bl -tag -width 14n .It Fl c Just print out a count of LUNs, not the actual LUN numbers. .It Fl l Just print out the LUNs, and do not print out the count. .It Fl r Ar reporttype Specify the type of report to request from the target: .Bl -tag -width 012345678 .It default Return the default report. This is the .Nm default. Most targets will support this report if they support the REPORT LUNS command. .It wellknown Return only well known LUNs. .It all Return all available LUNs. .El .El .Pp .Nm will try to print out LUN numbers in a reasonable format. It can understand the peripheral, flat, LUN and extended LUN formats. .It Ic readcap Send the SCSI READ CAPACITY command to the given device and display the results. If the device is larger than 2TB, the SCSI READ CAPACITY (16) service action will be sent to obtain the full size of the device. By default, .Nm will print out the last logical block of the device, and the blocksize of the device in bytes. To modify the output format, use the following options: .Bl -tag -width 5n .It Fl b Just print out the blocksize, not the last block or device size. This cannot be used with .Fl N or .Fl s . .It Fl h Print out the device size in human readable (base 2, 1K == 1024) format. This implies .Fl N and cannot be used with .Fl q or .Fl b . .It Fl H Print out the device size in human readable (base 10, 1K == 1000) format. .It Fl N Print out the number of blocks in the device instead of the last logical block. .It Fl q Quiet, print out the numbers only (separated by a comma if .Fl b or .Fl s are not specified). .It Fl s Print out the last logical block or the size of the device only, and omit the blocksize. .El +.Pp +Note that this command only displays the information; it does not update +the kernel data structures. +Use the +.Nm +reprobe subcommand to do that. .It Ic start Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit set. .It Ic stop Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit cleared. .It Ic load Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit set and the load/eject bit set. .It Ic eject Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit cleared and the load/eject bit set. .It Ic rescan Tell the kernel to scan all busses in the system (with the .Ar all argument), the given bus (XPT_SCAN_BUS), or bus:target:lun (XPT_SCAN_LUN) for new devices or devices that have gone away. The user may specify a scan of all busses, a single bus, or a lun. Scanning all luns on a target is not supported. +.It Ic reprobe +Tell the kernel to refresh the information about the device and +notify the upper layer, +.Xr GEOM 4 . +This includes sending the SCSI READ CAPACITY command and updating +the disk size visible to the rest of the system.
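+.Pp
+For example, after a virtual disk or RAID volume has been grown, the new
+capacity might be propagated to the rest of the system with a command such
+as the following (the device name da1 is only illustrative):
+.Pp
+.Dl camcontrol reprobe da1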
.It Ic reset Tell the kernel to reset all busses in the system (with the .Ar all argument) or the given bus (XPT_RESET_BUS) by issuing a SCSI bus reset for that bus, or to reset the given bus:target:lun (XPT_RESET_DEV), typically by issuing a BUS DEVICE RESET message after connecting to that device. Note that this can have a destructive impact on the system. .It Ic defects Send the .Tn SCSI READ DEFECT DATA (10) command (0x37) or the .Tn SCSI READ DEFECT DATA (12) command (0xB7) to the given device, and print out any combination of: the total number of defects, the primary defect list (PLIST), and the grown defect list (GLIST). .Bl -tag -width 11n .It Fl f Ar format Specify the requested format of the defect list. The format argument is required. Most drives support the physical sector format. Some drives support the logical block format. Many drives, if they do not support the requested format, return the data in an alternate format, along with sense information indicating that the requested data format is not supported. The .Nm utility attempts to detect this, and print out whatever format the drive returns. If the drive uses a non-standard sense code to report that it does not support the requested format, .Nm will probably see the error as a failure to complete the request. .Pp The format options are: .Bl -tag -width 9n .It block Print out the list as logical blocks. This is limited to 32-bit block sizes, and isn't supported by many modern drives. .It longblock Print out the list as logical blocks. This option uses a 64-bit block size. .It bfi Print out the list in bytes from index format. .It extbfi Print out the list in extended bytes from index format. The extended format allows for ranges of blocks to be printed. .It phys Print out the list in physical sector format. Most drives support this format. .It extphys Print out the list in extended physical sector format. The extended format allows for ranges of blocks to be printed. .El .It Fl G Print out the grown defect list. This is a list of bad blocks that have been remapped since the disk left the factory. .It Fl P Print out the primary defect list. This is the list of defects that were present in the factory. .It Fl q When printing status information with .Fl s , only print the number of defects. .It Fl s Just print the number of defects, not the list of defects. .It Fl S Ar offset Specify the starting offset into the defect list. This implies using the .Tn SCSI READ DEFECT DATA (12) command, as the 10 byte version of the command doesn't support the address descriptor index field. Not all drives support the 12 byte command, and some drives that support the 12 byte command don't support the address descriptor index field. .It Fl X Print out defects in hexadecimal (base 16) form instead of base 10 form. .El .Pp If neither .Fl P nor .Fl G is specified, .Nm will print out the number of defects given in the READ DEFECT DATA header returned from the drive. Some drives will report 0 defects if neither the primary nor the grown defect lists are requested. .It Ic modepage Allows the user to display and optionally edit a SCSI mode page. The mode page formats are located in .Pa /usr/share/misc/scsi_modes . This can be overridden by specifying a different file in the .Ev SCSI_MODES environment variable. The .Ic modepage command takes several arguments: .Bl -tag -width 12n .It Fl d Disable block descriptors for mode sense. .It Fl b Displays mode page data in binary format. .It Fl e This flag allows the user to edit values in the mode page.
The user may either edit mode page values with the text editor pointed to by his .Ev EDITOR environment variable, or supply mode page values via standard input, using the same format that .Nm uses to display mode page values. The editor will be invoked if .Nm detects that standard input is a terminal. .It Fl l Lists all available mode pages. .It Fl m Ar mode_page This specifies the number of the mode page the user would like to view and/or edit. This argument is mandatory unless .Fl l is specified. .It Fl P Ar pgctl This allows the user to specify the page control field. Possible values are: .Bl -tag -width xxx -compact .It 0 Current values .It 1 Changeable values .It 2 Default values .It 3 Saved values .El .El .It Ic cmd Allows the user to send an arbitrary ATA or SCSI CDB to any device. The .Ic cmd function requires the .Fl c argument to specify the SCSI CDB, or the .Fl a argument to specify ATA Command Block register values. Other arguments are optional, depending on the command type. The command and data specification syntax is documented in .Xr cam_cdbparse 3 . NOTE: If the CDB specified causes data to be transferred to or from the SCSI device in question, you MUST specify either .Fl i or .Fl o . .Bl -tag -width 17n .It Fl a Ar cmd Op args This specifies the content of 12 ATA Command Block registers (command, features, lba_low, lba_mid, lba_high, device, lba_low_exp, lba_mid_exp, lba_high_exp, features_exp, sector_count, sector_count_exp). .It Fl c Ar cmd Op args This specifies the SCSI CDB. SCSI CDBs may be 6, 10, 12 or 16 bytes. .It Fl d Specifies the DMA protocol to be used for the ATA command. .It Fl f Specifies the FPDMA (NCQ) protocol to be used for the ATA command. .It Fl i Ar len Ar fmt This specifies the amount of data to read, and how it should be displayed. If the format is .Sq - , .Ar len bytes of data will be read from the device and written to standard output. .It Fl o Ar len Ar fmt Op args This specifies the amount of data to be written to a device, and the data that is to be written. If the format is .Sq - , .Ar len bytes of data will be read from standard input and written to the device. .It Fl r Ar fmt This specifies that 11 result ATA Command Block registers should be displayed (status, error, lba_low, lba_mid, lba_high, device, lba_low_exp, lba_mid_exp, lba_high_exp, sector_count, sector_count_exp), and how. If the format is .Sq - , 11 result registers will be written to standard output in hex. .El .It Ic smpcmd Allows the user to send an arbitrary Serial Management Protocol (SMP) command to a device. The .Ic smpcmd function requires the .Fl r argument to specify the SMP request to be sent, and the .Fl R argument to specify the format of the SMP response. The syntax for the SMP request and response arguments is documented in .Xr cam_cdbparse 3 . .Pp Note that SAS adapters that support SMP passthrough (at least the currently known adapters) do not accept CRC bytes from the user in the request and do not pass CRC bytes back to the user in the response. Therefore users should not include the CRC bytes in the length of the request and not expect CRC bytes to be returned in the response. .Bl -tag -width 17n .It Fl r Ar len Ar fmt Op args This specifies the size of the SMP request, without the CRC bytes, and the SMP request format. If the format is .Sq - , .Ar len bytes of data will be read from standard input and written as the SMP request. .It Fl R Ar len Ar fmt Op args This specifies the size of the buffer allocated for the SMP response, and the SMP response format.
If the format is .Sq - , .Ar len bytes of data will be allocated for the response and the response will be written to standard output. .El .It Ic smprg Allows the user to send the Serial Management Protocol (SMP) Report General command to a device. .Nm will display the data returned by the Report General command. If the SMP target supports the long response format, the additional data will be requested and displayed automatically. .Bl -tag -width 8n .It Fl l Request the long response format only. Not all SMP targets support the long response format. This option causes .Nm to skip sending the initial report general request without the long bit set and only issue a report general request with the long bit set. .El .It Ic smppc Allows the user to issue the Serial Management Protocol (SMP) PHY Control command to a device. This function should be used with some caution, as it can render devices inaccessible, and could potentially cause data corruption as well. The .Fl p argument is required to specify the PHY to operate on. .Bl -tag -width 17n .It Fl p Ar phy Specify the PHY to operate on. This argument is required. .It Fl l Request the long request/response format. Not all SMP targets support the long response format. For the PHY Control command, this currently only affects whether the request length is set to a value other than 0. .It Fl o Ar operation Specify a PHY control operation. Only one .Fl o operation may be specified. The operation may be specified numerically (in decimal, hexadecimal, or octal) or one of the following operation names may be specified: .Bl -tag -width 16n .It nop No operation. It is not necessary to specify this argument. .It linkreset Send the LINK RESET command to the phy. .It hardreset Send the HARD RESET command to the phy. .It disable Send the DISABLE command to the phy. Note that the LINK RESET or HARD RESET commands should re-enable the phy. .It clearerrlog Send the CLEAR ERROR LOG command. This clears the error log counters for the specified phy. .It clearaffiliation Send the CLEAR AFFILIATION command. This clears the affiliation from the STP initiator port with the same SAS address as the SMP initiator that requests the clear operation. .It sataportsel Send the TRANSMIT SATA PORT SELECTION SIGNAL command to the phy. This will cause a SATA port selector to use the given phy as its active phy and make the other phy inactive. .It clearitnl Send the CLEAR STP I_T NEXUS LOSS command to the PHY. .It setdevname Send the SET ATTACHED DEVICE NAME command to the PHY. This requires the .Fl d argument to specify the device name. .El .It Fl d Ar name Specify the attached device name. This option is needed with the .Fl o Ar setdevname phy operation. The name is a 64-bit number, and can be specified in decimal, hexadecimal or octal format. .It Fl m Ar rate Set the minimum physical link rate for the phy. This is a numeric argument. Currently known link rates are: .Bl -tag -width 5n .It 0x0 Do not change current value. .It 0x8 1.5 Gbps .It 0x9 3 Gbps .It 0xa 6 Gbps .El .Pp Other values may be specified for newer physical link rates. .It Fl M Ar rate Set the maximum physical link rate for the phy. This is a numeric argument. See the .Fl m argument description for known link rate arguments. .It Fl T Ar pp_timeout Set the partial pathway timeout value, in microseconds. See the .Tn ANSI .Tn SAS Protocol Layer (SPL) specification for more information on this field. .It Fl a Ar enable|disable Enable or disable SATA slumber phy power conditions. 
.It Fl A Ar enable|disable Enable or disable SATA partial power conditions. .It Fl s Ar enable|disable Enable or disable SAS slumber phy power conditions. .It Fl S Ar enable|disable Enable or disable SAS partial phy power conditions. .El .It Ic smpphylist List phys attached to a SAS expander, the address of the end device attached to the phy, and the inquiry data for that device and peripheral devices attached to that device. The inquiry data and peripheral devices are displayed if available. .Bl -tag -width 5n .It Fl l Turn on the long response format for the underlying SMP commands used for this command. .It Fl q Only print out phys that are attached to a device in the CAM EDT (Existing Device Table). .El .It Ic smpmaninfo Send the SMP Report Manufacturer Information command to the device and display the response. .Bl -tag -width 5n .It Fl l Turn on the long response format for the underlying SMP commands used for this command. .El .It Ic debug Turn on CAM debugging printfs in the kernel. This requires options CAMDEBUG in your kernel config file. WARNING: enabling debugging printfs currently causes an EXTREME number of kernel printfs. You may have difficulty turning off the debugging printfs once they start, since the kernel will be busy printing messages and unable to service other requests quickly. The .Ic debug function takes a number of arguments: .Bl -tag -width 18n .It Fl I Enable CAM_DEBUG_INFO printfs. .It Fl P Enable CAM_DEBUG_PERIPH printfs. .It Fl T Enable CAM_DEBUG_TRACE printfs. .It Fl S Enable CAM_DEBUG_SUBTRACE printfs. .It Fl X Enable CAM_DEBUG_XPT printfs. .It Fl c Enable CAM_DEBUG_CDB printfs. This will cause the kernel to print out the SCSI CDBs sent to the specified device(s). .It Fl p Enable CAM_DEBUG_PROBE printfs. .It all Enable debugging for all devices. .It off Turn off debugging for all devices. .It bus Ns Op :target Ns Op :lun Turn on debugging for the given bus, target or lun. If the lun or target and lun are not specified, they are wildcarded. (i.e., just specifying a bus turns on debugging printfs for all devices on that bus.) .El .It Ic tags Show or set the number of "tagged openings" or simultaneous transactions we attempt to queue to a particular device. By default, the .Ic tags command, with no command-specific arguments (i.e., only generic arguments) prints out the "soft" maximum number of transactions that can be queued to the device in question. For more detailed information, use the .Fl v argument described below. .Bl -tag -width 7n .It Fl N Ar tags Set the number of tags for the given device. This must be between the minimum and maximum number set in the kernel quirk table. The default for most devices that support tagged queueing is a minimum of 2 and a maximum of 255. The minimum and maximum values for a given device may be determined by using the .Fl v switch. The meaning of the .Fl v switch for this .Nm subcommand is described below. .It Fl q Be quiet, and do not report the number of tags. This is generally used when setting the number of tags. .It Fl v The verbose flag has special functionality for the .Em tags argument. It causes .Nm to print out the tagged queueing related fields of the XPT_GDEV_TYPE CCB: .Bl -tag -width 13n .It dev_openings This is the amount of capacity for transactions queued to a given device. .It dev_active This is the number of transactions currently queued to a device. .It devq_openings This is the kernel queue space for transactions.
This count usually mirrors dev_openings except during error recovery operations when the device queue is frozen (device is not allowed to receive commands), the number of dev_openings is reduced, or transaction replay is occurring. .It devq_queued This is the number of transactions waiting in the kernel queue for capacity on the device. This number is usually zero unless error recovery is in progress. .It held The held count is the number of CCBs held by peripheral drivers that have either just been completed or are about to be released to the transport layer for service by a device. Held CCBs reserve capacity on a given device. .It mintags This is the current "hard" minimum number of transactions that can be queued to a device at once. The .Ar dev_openings value above cannot go below this number. The default value for .Ar mintags is 2, although it may be set higher or lower for various devices. .It maxtags This is the "hard" maximum number of transactions that can be queued to a device at one time. The .Ar dev_openings value cannot go above this number. The default value for .Ar maxtags is 255, although it may be set higher or lower for various devices. .El .El .It Ic negotiate Show or negotiate various communication parameters. Some controllers may not support setting or changing some of these values. For instance, the Adaptec 174x controllers do not support changing a device's sync rate or offset. The .Nm utility will not attempt to set the parameter if the controller indicates that it does not support setting the parameter. To find out what the controller supports, use the .Fl v flag. The meaning of the .Fl v flag for the .Ic negotiate command is described below. Also, some controller drivers do not support setting negotiation parameters, even if the underlying controller supports negotiation changes. Some controllers, such as the Advansys wide controllers, support enabling and disabling synchronous negotiation for a device, but do not support setting the synchronous negotiation rate. .Bl -tag -width 17n .It Fl a Attempt to make the negotiation settings take effect immediately by sending a Test Unit Ready command to the device. .It Fl c Show or set current negotiation settings. This is the default. .It Fl D Ar enable|disable Enable or disable disconnection. .It Fl M Ar mode Set ATA mode. .It Fl O Ar offset Set the command delay offset. .It Fl q Be quiet, do not print anything. This is generally useful when you want to set a parameter, but do not want any status information. .It Fl R Ar syncrate Change the synchronization rate for a device. The sync rate is a floating point value specified in MHz. So, for instance, .Sq 20.000 is a legal value, as is .Sq 20 . .It Fl T Ar enable|disable Enable or disable tagged queueing for a device. .It Fl U Show or set user negotiation settings. The default is to show or set current negotiation settings. .It Fl v The verbose switch has special meaning for the .Ic negotiate subcommand. It causes .Nm to print out the contents of a Path Inquiry (XPT_PATH_INQ) CCB sent to the controller driver. .It Fl W Ar bus_width Specify the bus width to negotiate with a device. The bus width is specified in bits. The only useful values to specify are 8, 16, and 32 bits. The controller must support the bus width in question in order for the setting to take effect. .El .Pp In general, sync rate and offset settings will not take effect for a device until a command has been sent to the device. 
The .Fl a switch above will automatically send a Test Unit Ready to the device so negotiation parameters will take effect. .It Ic format Issue the .Tn SCSI FORMAT UNIT command to the named device. .Pp .Em WARNING! WARNING! WARNING! .Pp Low level formatting a disk will destroy ALL data on the disk. Use extreme caution when issuing this command. Many users low-level format disks that do not really need to be low-level formatted. There are relatively few scenarios that call for low-level formatting a disk. One reason for low-level formatting a disk is to initialize the disk after changing its physical sector size. Another reason for low-level formatting a disk is to revive the disk if you are getting "medium format corrupted" errors from the disk in response to read and write requests. .Pp Some disks take longer than others to format. Users should specify a timeout long enough to allow the format to complete. The default format timeout is 3 hours, which should be long enough for most disks. Some hard disks will complete a format operation in a very short period of time (on the order of 5 minutes or less). This is often because the drive does not really support the FORMAT UNIT command -- it just accepts the command, waits a few minutes and then returns it. .Pp The .Sq format subcommand takes several arguments that modify its default behavior. The .Fl q and .Fl y arguments can be useful for scripts. .Bl -tag -width 6n .It Fl q Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl r Run in .Dq report only mode. This will report status on a format that is already running on the drive. .It Fl w Issue a non-immediate format command. By default, .Nm issues the FORMAT UNIT command with the immediate bit set. This tells the device to immediately return the format command, before the format has actually completed. Then, .Nm gathers .Tn SCSI sense information from the device every second to determine how far along in the format process it is. If the .Fl w argument is specified, .Nm will issue a non-immediate format command, and will be unable to print any information to let the user know what percentage of the disk has been formatted. .It Fl y Do not ask any questions. By default, .Nm will ask the user if he/she really wants to format the disk in question, and also if the default format command timeout is acceptable. The user will not be asked about the timeout if a timeout is specified on the command line. .El .It Ic sanitize Issue the .Tn SCSI SANITIZE command to the named device. .Pp .Em WARNING! WARNING! WARNING! .Pp ALL data in the cache and on the disk will be destroyed or made inaccessible. Recovery of the data is not possible. Use extreme caution when issuing this command. .Pp The .Sq sanitize subcommand takes several arguments that modify its default behavior. The .Fl q and .Fl y arguments can be useful for scripts. .Bl -tag -width 6n .It Fl a Ar operation Specify the sanitize operation to perform. .Bl -tag -width 16n .It overwrite Perform an overwrite operation by writing a user supplied data pattern to the device one or more times. The pattern is given by the .Fl P argument. The number of times is given by the .Fl c argument. .It block Perform a block erase operation. All the device's blocks are set to a vendor defined value, typically zero. .It crypto Perform a cryptographic erase operation. The encryption keys are changed to prevent the decryption of the data. 
.It exitfailure Exits a previously failed sanitize operation. A failed sanitize operation can only be exited if it was run in the unrestricted completion mode, as provided by the .Fl U argument. .El .It Fl c Ar passes The number of passes when performing an .Sq overwrite operation. Valid values are between 1 and 31. The default is 1. .It Fl I When performing an .Sq overwrite operation, the pattern is inverted between consecutive passes. .It Fl P Ar pattern Path to the file containing the pattern to use when performing an .Sq overwrite operation. The pattern is repeated as needed to fill each block. .It Fl q Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl U Perform the sanitize in the unrestricted completion mode. If the operation fails, it can later be exited with the .Sq exitfailure operation. .It Fl r Run in .Dq report only mode. This will report status on a sanitize that is already running on the drive. .It Fl w Issue a non-immediate sanitize command. By default, .Nm issues the SANITIZE command with the immediate bit set. This tells the device to immediately return the sanitize command, before the sanitize has actually completed. Then, .Nm gathers .Tn SCSI sense information from the device every second to determine how far along in the sanitize process it is. If the .Fl w argument is specified, .Nm will issue a non-immediate sanitize command, and will be unable to print any information to let the user know what percentage of the disk has been sanitized. .It Fl y Do not ask any questions. By default, .Nm will ask the user if he/she really wants to sanitize the disk in question, and also if the default sanitize command timeout is acceptable. The user will not be asked about the timeout if a timeout is specified on the command line. .El .It Ic idle Put ATA device into IDLE state. The optional parameter .Pq Fl t specifies the automatic standby timer value in seconds. A value of 0 disables the timer. .It Ic standby Put ATA device into STANDBY state. The optional parameter .Pq Fl t specifies the automatic standby timer value in seconds. A value of 0 disables the timer. .It Ic sleep Put ATA device into SLEEP state. Note that the only way to get the device out of this state may be a reset. .It Ic apm If the optional parameter .Pq Fl l is specified, this enables and sets the advanced power management level, where 1 -- minimum power, 127 -- maximum performance with standby, 128 -- minimum power without standby, 254 -- maximum performance. If not specified, APM is disabled. .It Ic aam If the optional parameter .Pq Fl l is specified, this enables and sets the automatic acoustic management level, where 1 -- minimum noise, 254 -- maximum performance. If not specified, AAM is disabled. .It Ic security Update or report security settings, using an ATA identify command (0xec). By default, .Nm will print out the security support and associated settings of the device. The .Ic security command takes several arguments: .Bl -tag -width 0n .It Fl d Ar pwd .Pp Disable device security using the given password for the selected user according to the device's configured security level. .It Fl e Ar pwd .Pp Erase the device using the given password for the selected user. .Pp .Em WARNING! WARNING! WARNING! .Pp Issuing a secure erase will .Em ERASE ALL user data on the device and may take several hours to complete. .Pp When this command is used against an SSD drive, all its cells will be marked as empty, restoring it to factory default write performance.
For SSDs, this action usually takes just a few seconds. .It Fl f .Pp Freeze the security configuration of the specified device. .Pp After command completion, any other commands that update the device lock mode shall be command aborted. Frozen mode is disabled by power-off or hardware reset. .It Fl h Ar pwd .Pp Enhanced erase the device using the given password for the selected user. .Pp .Em WARNING! WARNING! WARNING! .Pp Issuing an enhanced secure erase will .Em ERASE ALL user data on the device and may take several hours to complete. .Pp An enhanced erase writes predetermined data patterns to all user data areas; all previously written user data shall be overwritten, including sectors that are no longer in use due to reallocation. .It Fl k Ar pwd .Pp Unlock the device using the given password for the selected user according to the device's configured security level. .It Fl l Ar high|maximum .Pp Specifies which security level to set when issuing a .Fl s Ar pwd command. The security level determines device behavior when the master password is used to unlock the device. When the security level is set to high, the device requires the unlock command and the master password to unlock. When the security level is set to maximum, the device requires a secure erase with the master password to unlock. .Pp This option must be used in conjunction with one of the security action commands. .Pp Defaults to .Em high . .It Fl q .Pp Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl s Ar pwd .Pp Password the device (enable security) using the given password for the selected user. This option can be combined with other options such as .Fl e Em pwd . .Pp A master password may be set in addition to the user password. The purpose of the master password is to allow an administrator to establish a password that is kept secret from the user, and which may be used to unlock the device if the user password is lost. .Pp .Em Note: Setting the master password does not enable device security. .Pp If the master password is set and the drive supports a Master Revision Code feature, the Master Password Revision Code will be decremented. .It Fl T Ar timeout .Pp Overrides the default timeout, specified in seconds, used for both .Fl e and .Fl h . This is useful if your system has problems processing long timeouts correctly. .Pp Usually the timeout is calculated from the information stored on the drive if present; otherwise it defaults to 2 hours. .It Fl U Ar user|master .Pp Specifies which user to set or use for the requested action command; valid values are user and master. .Pp This option must be used in conjunction with one of the security action commands. .Pp Defaults to .Em master . .It Fl y .Pp Confirm yes to dangerous options such as .Fl e without prompting for confirmation. .El .Pp If the password specified for any action command does not match the configured password for the specified user, the command will fail. .Pp The password in all cases is limited to 32 characters; longer passwords will fail. .It Ic hpa Update or report Host Protected Area details. By default, .Nm will print out the HPA support and associated settings of the device. The .Ic hpa command takes several optional arguments: .Bl -tag -width 0n .It Fl f .Pp Freeze the HPA configuration of the specified device. .Pp After command completion, any other commands that update the HPA configuration shall be command aborted.
Frozen mode is disabled by power-off or hardware reset. .It Fl l .Pp Lock the HPA configuration of the device until a successful call to unlock or the next power-on reset occurs. .It Fl P .Pp Make the HPA max sectors persist across power-on reset or a hardware reset. This must be used in combination with .Fl s Ar max_sectors . .It Fl p Ar pwd .Pp Set the HPA configuration password required for unlock calls. .It Fl q .Pp Be quiet, do not print any status messages. This option will not disable the questions. To disable questions, use the .Fl y argument, below. .It Fl s Ar max_sectors .Pp Configures the maximum user accessible sectors of the device. This will change the number of sectors the device reports. .Pp .Em WARNING! WARNING! WARNING! .Pp Changing the max sectors of a device using this option will make the data on the device beyond the specified value inaccessible. .Pp Only one successful .Fl s Ar max_sectors call can be made without a power-on reset or a hardware reset of the device. .It Fl U Ar pwd .Pp Unlock the HPA configuration of the specified device using the given password. If the password specified does not match the password configured via .Fl p Ar pwd , the command will fail. .Pp After 5 failed unlock calls, due to a password mismatch, the device will refuse additional unlock calls until after a power-on reset. .It Fl y .Pp Confirm yes to dangerous options such as .Fl s without prompting for confirmation. .El .Pp The password for all HPA commands is limited to 32 characters; longer passwords will fail. .It Ic fwdownload Program firmware of the named .Tn SCSI or ATA device using the image file provided. .Pp If the device is a .Tn SCSI device and it provides a recommended timeout for the WRITE BUFFER command (see the .Nm opcodes subcommand), that timeout will be used for the firmware download. The drive-recommended timeout value may be overridden on the command line with the .Fl t option. .Pp Current list of supported vendors for SCSI/SAS drives: .Bl -tag -width 10n .It HGST Tested with 4TB SAS drives, model number HUS724040ALS640. .It HITACHI .It HP .It IBM Tested with LTO-5 (ULTRIUM-HH5) and LTO-6 (ULTRIUM-HH6) tape drives. There is a separate table entry for hard drives, because the update method for hard drives is different than the method for tape drives. .It PLEXTOR .It QUALSTAR .It QUANTUM .It SAMSUNG Tested with SM1625 SSDs. .It SEAGATE Tested with Constellation ES (ST32000444SS), ES.2 (ST33000651SS) and ES.3 (ST1000NM0023) drives. .It SmrtStor Tested with 400GB Optimus SSDs (TXA2D20400GA6001). .El .Pp .Em WARNING! WARNING! WARNING! .Pp Little testing has been done to make sure that different device models from each vendor work correctly with the fwdownload command. A vendor name appearing in the supported list means only that firmware of at least one device type from that vendor has successfully been programmed with the fwdownload command. Extra caution should be taken when using this command since there is no guarantee it will not break a device from the listed vendors. Ensure that you have a recent backup of the data on the device before performing a firmware update. .Pp Note that unknown .Tn SCSI protocol devices will not be programmed, since there is little chance of the firmware download succeeding. .Pp .Nm will currently attempt a firmware download to any .Tn ATA or .Tn SATA device, since the standard .Tn ATA DOWNLOAD MICROCODE command may work.
Firmware downloads to .Tn ATA and .Tn SATA devices are supported for devices connected to standard .Tn ATA and .Tn SATA controllers, and devices connected to SAS controllers with .Tn SCSI to .Tn ATA translation capability. In the latter case, .Nm uses the .Tn SCSI .Tn ATA PASS-THROUGH command to send the .Tn ATA DOWNLOAD MICROCODE command to the drive. Some .Tn SCSI to .Tn ATA translation implementations don't work fully when translating .Tn SCSI WRITE BUFFER commands to .Tn ATA DOWNLOAD MICROCODE commands, but do support .Tn ATA passthrough well enough to do a firmware download. .Bl -tag -width 11n .It Fl f Ar fw_image Path to the firmware image file to be downloaded to the specified device. .It Fl q Do not print informational messages, only print errors. This option should be used with the .Fl y option to suppress all output. .It Fl s Run in simulation mode. Device checks are run and the confirmation dialog is shown, but no firmware download will occur. .It Fl v Show .Tn SCSI or .Tn ATA errors in the event of a failure. .Pp In simulation mode, print out the .Tn SCSI CDB or .Tn ATA register values that would be used for the firmware download command. .It Fl y Do not ask for confirmation. .El .It Ic persist Persistent reservation support. Persistent reservations are a way to reserve a particular .Tn SCSI LUN for use by one or more .Tn SCSI initiators. If the .Fl i option is specified, .Nm will issue the .Tn SCSI PERSISTENT RESERVE IN command using the requested service action. If the .Fl o option is specified, .Nm will issue the .Tn SCSI PERSISTENT RESERVE OUT command using the requested service action. One of those two options is required. .Pp Persistent reservations are complex, and fully explaining them is outside the scope of this manual. Please visit http://www.t10.org and download the latest SPC spec for a full explanation of persistent reservations. .Bl -tag -width 8n .It Fl i Ar mode Specify the service action for the PERSISTENT RESERVE IN command. Supported service actions: .Bl -tag -width 19n .It read_keys Report the current persistent reservation generation (PRgeneration) and any registered keys. .It read_reservation Report the persistent reservation, if any. .It report_capabilities Report the persistent reservation capabilities of the LUN. .It read_full_status Report the full status of persistent reservations on the LUN. .El .It Fl o Ar mode Specify the service action for the PERSISTENT RESERVE OUT command. For service actions like register that are components of other service action names, the entire name must be specified. Otherwise, enough of the service action name must be specified to distinguish it from other possible service actions. Supported service actions: .Bl -tag -width 15n .It register Register a reservation key with the LUN or unregister a reservation key. To register a key, specify the requested key as the Service Action Reservation Key. To unregister a key, specify the previously registered key as the Reservation Key. To change a key, specify the old key as the Reservation Key and the new key as the Service Action Reservation Key. .It register_ignore This is similar to the register subcommand, except that the Reservation Key is ignored. The Service Action Reservation Key will overwrite any previous key registered for the initiator. .It reserve Create a reservation. A key must be registered with the LUN before the LUN can be reserved, and it must be specified as the Reservation Key. The type of reservation must also be specified. 
The scope defaults to LUN scope (LU_SCOPE), but may be changed. .It release Release a reservation. The Reservation Key must be specified. .It clear Release a reservation and remove all keys from the device. The Reservation Key must be specified. .It preempt Remove a reservation belonging to another initiator. The Reservation Key must be specified. The Service Action Reservation Key may be specified, depending on the operation being performed. .It preempt_abort Remove a reservation belonging to another initiator and abort all outstanding commands from that initiator. The Reservation Key must be specified. The Service Action Reservation Key may be specified, depending on the operation being performed. .It register_move Register another initiator with the LUN, and establish a reservation on the LUN for that initiator. The Reservation Key and Service Action Reservation Key must be specified. .It replace_lost Replace Lost Reservation information. .El .It Fl a Set the All Target Ports (ALL_TG_PT) bit. This requests that the key registration be applied to all target ports and not just the particular target port that receives the command. This only applies to the register and register_ignore actions. .It Fl I Ar tid Specify a Transport ID. This only applies to the Register and Register and Move service actions for Persistent Reserve Out. Multiple Transport IDs may be specified with multiple .Fl I arguments. With the Register service action, specifying one or more Transport IDs implicitly enables the .Fl S option which turns on the SPEC_I_PT bit. Transport IDs generally have the format protocol,id. .Bl -tag -width 5n .It SAS A SAS Transport ID consists of .Dq sas, followed by a 64-bit SAS address. For example: .Pp .Dl sas,0x1234567812345678 .It FC A Fibre Channel Transport ID consists of .Dq fcp, followed by a 64-bit Fibre Channel World Wide Name. For example: .Pp .Dl fcp,0x1234567812345678 .It SPI A Parallel SCSI address consists of .Dq spi, followed by a SCSI target ID and a relative target port identifier. For example: .Pp .Dl spi,4,1 .It 1394 An IEEE 1394 (Firewire) Transport ID consists of .Dq sbp, followed by a 64-bit EUI-64 IEEE 1394 node unique identifier. For example: .Pp .Dl sbp,0x1234567812345678 .It RDMA A SCSI over RDMA Transport ID consists of .Dq srp, followed by a 128-bit RDMA initiator port identifier. The port identifier must be exactly 32 or 34 (if the leading 0x is included) hexadecimal digits. Only hexadecimal (base 16) numbers are supported. For example: .Pp .Dl srp,0x12345678123456781234567812345678 .It iSCSI An iSCSI Transport ID consists of an iSCSI name and optionally a separator and iSCSI session ID. For example, if only the iSCSI name is specified: .Pp .Dl iqn.2012-06.com.example:target0 .Pp If the iSCSI separator and initiator session ID are specified: .Pp .Dl iqn.2012-06.com.example:target0,i,0x123 .It PCIe A SCSI over PCIe Transport ID consists of .Dq sop, followed by a PCIe Routing ID. The Routing ID consists of a bus, device and function or in the alternate form, a bus and function. The bus must be in the range of 0 to 255 inclusive and the device must be in the range of 0 to 31 inclusive. The function must be in the range of 0 to 7 inclusive if the standard form is used, and in the range of 0 to 255 inclusive if the alternate form is used. For example, if a bus, device and function are specified for the standard Routing ID form: .Pp .Dl sop,4,5,1 .Pp If the alternate Routing ID form is used: .Pp .Dl sop,4,1 .El .It Fl k Ar key Specify the Reservation Key.
This may be in decimal, octal or hexadecimal format. The value is zero by default if not otherwise specified. The value must be between 0 and 2^64 - 1, inclusive. .It Fl K Ar key Specify the Service Action Reservation Key. This may be in decimal, octal or hexadecimal format. The value is zero by default if not otherwise specified. The value must be between 0 and 2^64 - 1, inclusive. .It Fl p Enable the Activate Persist Through Power Loss bit. This is only used for the register and register_ignore actions. This requests that the reservation persist across power loss events. .It Fl s Ar scope Specify the scope of the reservation. The scope may be specified by name or by number. The scope is ignored for register, register_ignore and clear. If the desired scope isn't available by name, you may specify the number. .Bl -tag -width 7n .It lun LUN scope (0x00). This encompasses the entire LUN. .It extent Extent scope (0x01). .It element Element scope (0x02). .El .It Fl R Ar rtp Specify the Relative Target Port. This only applies to the Register and Move service action of the Persistent Reserve Out command. .It Fl S Enable the SPEC_I_PT bit. This only applies to the Register service action of Persistent Reserve Out. You must also specify at least one Transport ID with .Fl I if this option is set. If you specify a Transport ID, this option is automatically set. It is an error to specify this option for any service action other than Register. .It Fl T Ar type Specify the reservation type. The reservation type may be specified by name or by number. If the desired reservation type isn't available by name, you may specify the number. Supported reservation type names: .Bl -tag -width 11n .It read_shared Read Shared mode. .It wr_ex Write Exclusive mode. May also be specified as .Dq write_exclusive . .It rd_ex Read Exclusive mode. May also be specified as .Dq read_exclusive . .It ex_ac Exclusive access mode. May also be specified as .Dq exclusive_access . .It wr_ex_ro Write Exclusive Registrants Only mode. May also be specified as .Dq write_exclusive_reg_only . .It ex_ac_ro Exclusive Access Registrants Only mode. May also be specified as .Dq exclusive_access_reg_only . .It wr_ex_ar Write Exclusive All Registrants mode. May also be specified as .Dq write_exclusive_all_regs . .It ex_ac_ar Exclusive Access All Registrants mode. May also be specified as .Dq exclusive_access_all_regs . .El .It Fl U Specify that the target should unregister the initiator that sent the Register and Move request. By default, the target will not unregister the initiator that sends the Register and Move request. This option only applies to the Register and Move service action of the Persistent Reserve Out command. .El .It Ic attrib Issue the .Tn SCSI READ or WRITE ATTRIBUTE commands. These commands are used to read and write attributes in Medium Auxiliary Memory (MAM). The most common place Medium Auxiliary Memory is found is in small flash chips included in tape cartridges. For instance, .Tn LTO tapes have MAM. Either the .Fl r option or the .Fl w option must be specified. .Bl -tag -width 14n .It Fl r Ar action Specify the READ ATTRIBUTE service action. .Bl -tag -width 11n .It attr_values Issue the ATTRIBUTE VALUES service action. Read and decode the available attributes and their values. .It attr_list Issue the ATTRIBUTE LIST service action. List the attributes that are available to read and write. .It lv_list Issue the LOGICAL VOLUME LIST service action. List the available logical volumes in the MAM.
.It part_list Issue the PARTITION LIST service action. List the available partitions in the MAM. .It supp_attr Issue the SUPPORTED ATTRIBUTES service action. List attributes that are supported for reading or writing. These attributes may or may not be currently present in the MAM. .El .It Fl w Ar attr Specify an attribute to write to the MAM. This option is not yet implemented. .It Fl a Ar num Specify the attribute number to display. This option only works with the attr_values, attr_list and supp_attr arguments to .Fl r . .It Fl c Display cached attributes. If the device supports this flag, it allows displaying attributes for the last piece of media loaded in the drive. .It Fl e Ar num Specify the element address. This is used for specifying which element number in a medium changer to access when reading attributes. The element number could be for a picker, portal, slot or drive. .It Fl F Ar form1,form2 Specify the output format for the attribute values (attr_values) display as a comma separated list of options. The default output is currently set to field_all,nonascii_trim,text_raw. Once this code is ported to FreeBSD 10, any text fields will be converted from their codeset to the user's native codeset with .Xr iconv 3 . .Pp The text options are mutually exclusive; if you specify more than one, you will get unpredictable results. The nonascii options are also mutually exclusive. Most of the field options may be logically ORed together. .Bl -tag -width 12n .It text_esc Print text fields with non-ASCII characters escaped. .It text_raw Print text fields natively, with no codeset conversion. .It nonascii_esc If any non-ASCII characters occur in fields that are supposed to be ASCII, escape the non-ASCII characters. .It nonascii_trim If any non-ASCII characters occur in fields that are supposed to be ASCII, omit the non-ASCII characters. .It nonascii_raw If any non-ASCII characters occur in fields that are supposed to be ASCII, print them as they are. .It field_all Print all of the prefix fields: description, attribute number, attribute size, and the attribute's readonly status. If field_all is specified, specifying any other field options will not have an effect. .It field_none Print none of the prefix fields, and only print out the attribute value. If field_none is specified, specifying any other field options will result in those fields being printed. .It field_desc Print out the attribute description. .It field_num Print out the attribute number. .It field_size Print out the attribute size. .It field_rw Print out the attribute's readonly status. .El .It Fl p Ar part Specify the partition. When the media has multiple partitions, specifying different partition numbers allows seeing the values for each individual partition. .It Fl s Ar start_num Specify the starting attribute number. This requests that the target device return attribute information starting at the given number. .It Fl T Ar elem_type Specify the element type. For medium changer devices, this allows specifying the type of the element referenced by the element address ( .Fl e ) . Valid types are: .Dq all , .Dq picker , .Dq slot , .Dq portal , and .Dq drive . .It Fl V Ar vol_num Specify the number of the logical volume to operate on. If the media has multiple logical volumes, this will allow displaying or writing attributes on the given logical volume. .El .It Ic opcodes Issue the REPORT SUPPORTED OPCODES service action of the .Tn SCSI MAINTENANCE IN command.
Without arguments, this command will return a list of all .Tn SCSI commands supported by the device, including service actions of commands that support service actions. It will also include the .Tn SCSI CDB (Command Descriptor Block) length for each command, and the description of each command if it is known. .Bl -tag -width 18n .It Fl o Ar opcode Request information on a specific opcode instead of the list of supported commands. If supported, the target will return a CDB-like structure that indicates the opcode, service action (if any), and a mask of bits that are supported in that CDB. .It Fl s Ar service_action For commands that support a service action, specify the service action to query. .It Fl N If a service action is specified for a given opcode, and the device does not support the given service action, the device should not return a .Tn SCSI error, but rather indicate in the returned parameter data that the command is not supported. By default, if a service action is specified for an opcode, and service actions are not supported for the opcode in question, the device will return an error. .It Fl T Include timeout values. This option works with the default display, which includes all commands supported by the device, and with the .Fl o and .Fl s options, which request information on a specific command and service action. This requests that the device report Nominal and Recommended timeout values for the given command or commands. The timeout values are in seconds. The timeout descriptor also includes a command-specific field; its meaning depends on the command in question. .El .It Ic help Print out verbose usage information. .El .Sh ENVIRONMENT The .Ev SCSI_MODES variable allows the user to specify an alternate mode page format file. .Pp The .Ev EDITOR variable determines which text editor .Nm starts when editing mode pages. .Sh FILES .Bl -tag -width /usr/share/misc/scsi_modes -compact .It Pa /usr/share/misc/scsi_modes is the SCSI mode format database. .It Pa /dev/xpt0 is the transport layer device. .It Pa /dev/pass* are the CAM application passthrough devices. .El .Sh EXAMPLES .Dl camcontrol eject -n cd -u 1 -v .Pp Eject the CD from cd1, and print SCSI sense information if the command fails. .Pp .Dl camcontrol tur da0 .Pp Send the SCSI test unit ready command to da0. The .Nm utility will report whether the disk is ready, but will not display sense information if the command fails since the .Fl v switch was not specified. .Bd -literal -offset indent camcontrol tur da1 -E -C 4 -t 50 -v .Ed .Pp Send a test unit ready command to da1. Enable kernel error recovery. Specify a retry count of 4, and a timeout of 50 seconds. Enable sense printing (with the .Fl v flag) if the command fails. Since error recovery is turned on, the disk will be spun up if it is not currently spinning. The .Nm utility will report whether the disk is ready. .Bd -literal -offset indent camcontrol cmd -n cd -u 1 -v -c "3C 00 00 00 00 00 00 00 0e 00" \e -i 0xe "s1 i3 i1 i1 i1 i1 i1 i1 i1 i1 i1 i1" .Ed .Pp Issue a READ BUFFER command (0x3C) to cd1. Display the buffer size of cd1, and display the first 10 bytes from the cache on cd1. Display SCSI sense information if the command fails. .Bd -literal -offset indent camcontrol cmd -n cd -u 1 -v -c "3B 00 00 00 00 00 00 00 0e 00" \e -o 14 "00 00 00 00 1 2 3 4 5 6 v v v v" 7 8 9 8 .Ed .Pp Issue a WRITE BUFFER (0x3B) command to cd1. Write out 10 bytes of data, not including the (reserved) 4 byte header. Print out sense information if the command fails. Be very careful with this command, improper use may cause data corruption.
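.Pp On a device that supports the REPORT SUPPORTED OPCODES service action, the opcodes subcommand described above can be exercised in a similar fashion; for example: .Bd -literal -offset indent camcontrol opcodes da0 -o 0x28 -T .Ed .Pp Request information from da0 about the READ(10) command (opcode 0x28), including its Nominal and Recommended timeout values. READ(10) is used here only as a common example; any supported opcode may be queried this way.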
.Bd -literal -offset indent camcontrol modepage da3 -m 1 -e -P 3 .Ed .Pp Edit mode page 1 (the Read-Write Error Recovery page) for da3, and save the settings on the drive. Mode page 1 contains a disk drive's auto read and write reallocation settings, among other things. .Pp .Dl camcontrol rescan all .Pp Rescan all SCSI busses in the system for devices that have been added, removed or changed. .Pp .Dl camcontrol rescan 0 .Pp Rescan SCSI bus 0 for devices that have been added, removed or changed. .Pp .Dl camcontrol rescan 0:1:0 .Pp Rescan SCSI bus 0, target 1, lun 0 to see if it has been added, removed, or changed. .Pp .Dl camcontrol tags da5 -N 24 .Pp Set the number of concurrent transactions for da5 to 24. .Bd -literal -offset indent camcontrol negotiate -n da -u 4 -T disable .Ed .Pp Disable tagged queueing for da4. .Bd -literal -offset indent camcontrol negotiate -n da -u 3 -R 20.000 -O 15 -a .Ed .Pp Negotiate a sync rate of 20MHz and an offset of 15 with da3. Then send a Test Unit Ready command to make the settings take effect. .Bd -literal -offset indent camcontrol smpcmd ses0 -v -r 4 "40 0 00 0" -R 1020 "s9 i1" .Ed .Pp Send the SMP REPORT GENERAL command to ses0, and display the number of PHYs it contains. Display SMP errors if the command fails. .Bd -literal -offset indent camcontrol security ada0 .Ed .Pp Report security support and settings for ada0. .Bd -literal -offset indent camcontrol security ada0 -U user -s MyPass .Ed .Pp Enable security on device ada0 with the password MyPass. .Bd -literal -offset indent camcontrol security ada0 -U user -e MyPass .Ed .Pp Secure erase ada0, which has had security enabled with user password MyPass. .Pp .Em WARNING! WARNING! WARNING! .Pp This will .Em ERASE ALL data from the device, so back up your data before using! .Pp This command can be used against an SSD drive to restore it to factory default write performance. .Bd -literal -offset indent camcontrol hpa ada0 .Ed .Pp Report HPA support and settings for ada0 (also reported via identify). .Bd -literal -offset indent camcontrol hpa ada0 -s 10240 .Ed .Pp Enable HPA on ada0, setting the maximum reported sectors to 10240. .Pp .Em WARNING! WARNING! WARNING! .Pp This will .Em PREVENT ACCESS to all data on the device beyond this limit until HPA is disabled by setting HPA to the native max sectors of the device, which can only be done after a power-on or hardware reset! .Pp .Em DO NOT use this on a device which has an active filesystem! .Bd -literal -offset indent camcontrol persist da0 -v -i read_keys .Ed .Pp This will read any persistent reservation keys registered with da0, and display any errors encountered when sending the PERSISTENT RESERVE IN .Tn SCSI command. .Bd -literal -offset indent camcontrol persist da0 -v -o register -a -K 0x12345678 .Ed .Pp This will register the persistent reservation key 0x12345678 with da0, apply that registration to all ports on da0, and display any errors that occur when sending the PERSISTENT RESERVE OUT command. .Bd -literal -offset indent camcontrol persist da0 -v -o reserve -s lun -k 0x12345678 -T ex_ac .Ed .Pp This will reserve da0 for the exclusive use of the initiator issuing the command. The scope of the reservation is the entire LUN. Any errors sending the PERSISTENT RESERVE OUT command will be displayed. .Bd -literal -offset indent camcontrol persist da0 -v -i read_full .Ed .Pp This will display the full status of all reservations on da0 and display any errors that occur.
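.Pp The reservation itself, rather than the full status, should be readable in much the same way; for example: .Bd -literal -offset indent camcontrol persist da0 -v -i read_reservation .Ed .Pp This will display the reservation currently held on da0, if one exists, and display any errors encountered when sending the PERSISTENT RESERVE IN .Tn SCSI command.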
.Bd -literal -offset indent camcontrol persist da0 -v -o release -k 0x12345678 -T ex_ac .Ed .Pp This will release a reservation on da0 of the type ex_ac (Exclusive Access). The Reservation Key for this registration is 0x12345678. Any errors that occur will be displayed. .Bd -literal -offset indent camcontrol persist da0 -v -o register -K 0x12345678 -S \e -I sas,0x1234567812345678 -I sas,0x8765432187654321 .Ed .Pp This will register the key 0x12345678 with da0, specifying that it applies to the SAS initiators with SAS addresses 0x1234567812345678 and 0x8765432187654321. .Bd -literal -offset indent camcontrol persist da0 -v -o register_move -k 0x87654321 \e -K 0x12345678 -U -p -R 2 -I fcp,0x1234567812345678 .Ed .Pp This will move the registration from the current initiator, whose Registration Key is 0x87654321, to the Fibre Channel initiator with the Fibre Channel World Wide Node Name 0x1234567812345678. A new registration key, 0x12345678, will be registered for the initiator with the Fibre Channel World Wide Node Name 0x1234567812345678, and the current initiator will be unregistered from the target. The reservation will be moved to relative target port 2 on the target device. The registration will persist across power losses. .Bd -literal -offset indent camcontrol attrib sa0 -v -r attr_values -p 1 .Ed .Pp This will read and decode the attribute values from partition 1 on the tape in tape drive sa0, and will display any .Tn SCSI errors that result. .Sh SEE ALSO .Xr cam 3 , .Xr cam_cdbparse 3 , .Xr cam 4 , .Xr pass 4 , .Xr xpt 4 .Sh HISTORY The .Nm utility first appeared in .Fx 3.0 . .Pp The mode page editing code and arbitrary SCSI command code are based upon code in the old .Xr scsi 8 utility and .Xr scsi 3 library, written by Julian Elischer and Peter Dufault. The .Xr scsi 8 program first appeared in .Bx 386 0.1.2.4 , and first appeared in .Fx in .Fx 2.0.5 . .Sh AUTHORS .An Kenneth Merry Aq Mt ken@FreeBSD.org .Sh BUGS The code that parses the generic command line arguments does not know that some of the subcommands take multiple arguments. So if, for instance, you tried something like this: .Bd -literal -offset indent camcontrol cmd -n da -u 1 -c "00 00 00 00 00 v" 0x00 -v .Ed .Pp The sense information from the test unit ready command would not get printed out, since the first .Xr getopt 3 call in .Nm bails out when it sees the second argument to .Fl c (0x00), above. Fixing this behavior would take some gross code, or changes to the .Xr getopt 3 interface. The best way to circumvent this problem is to always make sure to specify generic .Nm arguments before any command-specific arguments. Index: head/sbin/camcontrol/camcontrol.c =================================================================== --- head/sbin/camcontrol/camcontrol.c (revision 299370) +++ head/sbin/camcontrol/camcontrol.c (revision 299371) @@ -1,9313 +1,9358 @@ /* * Copyright (c) 1997-2007 Kenneth D. Merry * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MINIMALISTIC #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "camcontrol.h" typedef enum { CAM_CMD_NONE = 0x00000000, CAM_CMD_DEVLIST = 0x00000001, CAM_CMD_TUR = 0x00000002, CAM_CMD_INQUIRY = 0x00000003, CAM_CMD_STARTSTOP = 0x00000004, CAM_CMD_RESCAN = 0x00000005, CAM_CMD_READ_DEFECTS = 0x00000006, CAM_CMD_MODE_PAGE = 0x00000007, CAM_CMD_SCSI_CMD = 0x00000008, CAM_CMD_DEVTREE = 0x00000009, CAM_CMD_USAGE = 0x0000000a, CAM_CMD_DEBUG = 0x0000000b, CAM_CMD_RESET = 0x0000000c, CAM_CMD_FORMAT = 0x0000000d, CAM_CMD_TAG = 0x0000000e, CAM_CMD_RATE = 0x0000000f, CAM_CMD_DETACH = 0x00000010, CAM_CMD_REPORTLUNS = 0x00000011, CAM_CMD_READCAP = 0x00000012, CAM_CMD_IDENTIFY = 0x00000013, CAM_CMD_IDLE = 0x00000014, CAM_CMD_STANDBY = 0x00000015, CAM_CMD_SLEEP = 0x00000016, CAM_CMD_SMP_CMD = 0x00000017, CAM_CMD_SMP_RG = 0x00000018, CAM_CMD_SMP_PC = 0x00000019, CAM_CMD_SMP_PHYLIST = 0x0000001a, CAM_CMD_SMP_MANINFO = 0x0000001b, CAM_CMD_DOWNLOAD_FW = 0x0000001c, CAM_CMD_SECURITY = 0x0000001d, CAM_CMD_HPA = 0x0000001e, CAM_CMD_SANITIZE = 0x0000001f, CAM_CMD_PERSIST = 0x00000020, CAM_CMD_APM = 0x00000021, CAM_CMD_AAM = 0x00000022, CAM_CMD_ATTRIB = 0x00000023, - CAM_CMD_OPCODES = 0x00000024 + CAM_CMD_OPCODES = 0x00000024, + CAM_CMD_REPROBE = 0x00000025 } cam_cmdmask; typedef enum { CAM_ARG_NONE = 0x00000000, CAM_ARG_VERBOSE = 0x00000001, CAM_ARG_DEVICE = 0x00000002, CAM_ARG_BUS = 0x00000004, CAM_ARG_TARGET = 0x00000008, CAM_ARG_LUN = 0x00000010, CAM_ARG_EJECT = 0x00000020, CAM_ARG_UNIT = 0x00000040, CAM_ARG_FORMAT_BLOCK = 0x00000080, CAM_ARG_FORMAT_BFI = 0x00000100, CAM_ARG_FORMAT_PHYS = 0x00000200, CAM_ARG_PLIST = 0x00000400, CAM_ARG_GLIST = 0x00000800, CAM_ARG_GET_SERIAL = 0x00001000, CAM_ARG_GET_STDINQ = 0x00002000, CAM_ARG_GET_XFERRATE = 0x00004000, CAM_ARG_INQ_MASK = 0x00007000, CAM_ARG_MODE_EDIT = 0x00008000, CAM_ARG_PAGE_CNTL = 0x00010000, CAM_ARG_TIMEOUT = 0x00020000, CAM_ARG_CMD_IN = 0x00040000, CAM_ARG_CMD_OUT = 0x00080000, CAM_ARG_DBD = 0x00100000, CAM_ARG_ERR_RECOVER = 0x00200000, CAM_ARG_RETRIES = 0x00400000, CAM_ARG_START_UNIT = 0x00800000, CAM_ARG_DEBUG_INFO = 0x01000000, CAM_ARG_DEBUG_TRACE = 0x02000000, CAM_ARG_DEBUG_SUBTRACE = 0x04000000, CAM_ARG_DEBUG_CDB = 0x08000000, CAM_ARG_DEBUG_XPT = 0x10000000, CAM_ARG_DEBUG_PERIPH = 0x20000000, CAM_ARG_DEBUG_PROBE = 0x40000000, } cam_argmask; struct camcontrol_opts { const char *optname; uint32_t cmdnum; 
cam_argmask argnum; const char *subopt; }; #ifndef MINIMALISTIC struct ata_res_pass16 { u_int16_t reserved[5]; u_int8_t flags; u_int8_t error; u_int8_t sector_count_exp; u_int8_t sector_count; u_int8_t lba_low_exp; u_int8_t lba_low; u_int8_t lba_mid_exp; u_int8_t lba_mid; u_int8_t lba_high_exp; u_int8_t lba_high; u_int8_t device; u_int8_t status; }; struct ata_set_max_pwd { u_int16_t reserved1; u_int8_t password[32]; u_int16_t reserved2[239]; }; static const char scsicmd_opts[] = "a:c:dfi:o:r"; static const char readdefect_opts[] = "f:GPqsS:X"; static const char negotiate_opts[] = "acD:M:O:qR:T:UW:"; static const char smprg_opts[] = "l"; static const char smppc_opts[] = "a:A:d:lm:M:o:p:s:S:T:"; static const char smpphylist_opts[] = "lq"; static char pwd_opt; #endif static struct camcontrol_opts option_table[] = { #ifndef MINIMALISTIC {"tur", CAM_CMD_TUR, CAM_ARG_NONE, NULL}, {"inquiry", CAM_CMD_INQUIRY, CAM_ARG_NONE, "DSR"}, {"identify", CAM_CMD_IDENTIFY, CAM_ARG_NONE, NULL}, {"start", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT, NULL}, {"stop", CAM_CMD_STARTSTOP, CAM_ARG_NONE, NULL}, {"load", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT | CAM_ARG_EJECT, NULL}, {"eject", CAM_CMD_STARTSTOP, CAM_ARG_EJECT, NULL}, {"reportluns", CAM_CMD_REPORTLUNS, CAM_ARG_NONE, "clr:"}, {"readcapacity", CAM_CMD_READCAP, CAM_ARG_NONE, "bhHNqs"}, + {"reprobe", CAM_CMD_REPROBE, CAM_ARG_NONE, NULL}, #endif /* MINIMALISTIC */ {"rescan", CAM_CMD_RESCAN, CAM_ARG_NONE, NULL}, {"reset", CAM_CMD_RESET, CAM_ARG_NONE, NULL}, #ifndef MINIMALISTIC {"cmd", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"command", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"smpcmd", CAM_CMD_SMP_CMD, CAM_ARG_NONE, "r:R:"}, {"smprg", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smpreportgeneral", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smppc", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpphycontrol", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpplist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpphylist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpmaninfo", CAM_CMD_SMP_MANINFO, CAM_ARG_NONE, "l"}, {"defects", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, {"defectlist", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, #endif /* MINIMALISTIC */ {"devlist", CAM_CMD_DEVTREE, CAM_ARG_NONE, "-b"}, #ifndef MINIMALISTIC {"periphlist", CAM_CMD_DEVLIST, CAM_ARG_NONE, NULL}, {"modepage", CAM_CMD_MODE_PAGE, CAM_ARG_NONE, "bdelm:P:"}, {"tags", CAM_CMD_TAG, CAM_ARG_NONE, "N:q"}, {"negotiate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"rate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"debug", CAM_CMD_DEBUG, CAM_ARG_NONE, "IPTSXcp"}, {"format", CAM_CMD_FORMAT, CAM_ARG_NONE, "qrwy"}, {"sanitize", CAM_CMD_SANITIZE, CAM_ARG_NONE, "a:c:IP:qrUwy"}, {"idle", CAM_CMD_IDLE, CAM_ARG_NONE, "t:"}, {"standby", CAM_CMD_STANDBY, CAM_ARG_NONE, "t:"}, {"sleep", CAM_CMD_SLEEP, CAM_ARG_NONE, ""}, {"apm", CAM_CMD_APM, CAM_ARG_NONE, "l:"}, {"aam", CAM_CMD_AAM, CAM_ARG_NONE, "l:"}, {"fwdownload", CAM_CMD_DOWNLOAD_FW, CAM_ARG_NONE, "f:qsy"}, {"security", CAM_CMD_SECURITY, CAM_ARG_NONE, "d:e:fh:k:l:qs:T:U:y"}, {"hpa", CAM_CMD_HPA, CAM_ARG_NONE, "Pflp:qs:U:y"}, {"persist", CAM_CMD_PERSIST, CAM_ARG_NONE, "ai:I:k:K:o:ps:ST:U"}, {"attrib", CAM_CMD_ATTRIB, CAM_ARG_NONE, "a:ce:F:p:r:s:T:w:V:"}, {"opcodes", CAM_CMD_OPCODES, CAM_ARG_NONE, "No:s:T"}, #endif /* MINIMALISTIC */ {"help", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-?", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-h", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; struct 
cam_devitem { struct device_match_result dev_match; int num_periphs; struct periph_match_result *periph_matches; struct scsi_vpd_device_id *device_id; int device_id_len; STAILQ_ENTRY(cam_devitem) links; }; struct cam_devlist { STAILQ_HEAD(, cam_devitem) dev_queue; path_id_t path_id; }; static cam_cmdmask cmdlist; static cam_argmask arglist; camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt); #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device); #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt); #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet); static int scsistart(struct cam_device *device, int startstop, int loadeject, int retry_count, int timeout); static int scsiinquiry(struct cam_device *device, int retry_count, int timeout); static int scsiserial(struct cam_device *device, int retry_count, int timeout); #endif /* MINIMALISTIC */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst); static int dorescan_or_reset(int argc, char **argv, int rescan); static int rescan_or_reset_bus(path_id_t bus, int rescan); static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan); #ifndef MINIMALISTIC static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int getdevid(struct cam_devitem *item); static int buildbusdevlist(struct cam_devlist *devlist); static void freebusdevlist(struct cam_devlist *devlist); static struct cam_devitem *findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr); static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt); static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts); static void cpi_print(struct ccb_pathinq *cpi); static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi); static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd); static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts); static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsireportluns(struct cam_device *device, int argc, char **argv, char 
*combinedopt, int retry_count, int timeout); static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len); static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len); static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbose); +static int scsireprobe(struct cam_device *device); #endif /* MINIMALISTIC */ #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #ifndef max #define max(a,b) (((a)>(b))?(a):(b)) #endif camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt) { struct camcontrol_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (++num_matches > 1) return(CC_OR_AMBIGUOUS); } } if (num_matches > 0) return(CC_OR_FOUND); else return(CC_OR_NOT_FOUND); } #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device) { union ccb *ccb; char status[32]; int error = 0; ccb = cam_getccb(device); ccb->ccb_h.func_code = XPT_GDEVLIST; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { if (cam_send_ccb(device, ccb) < 0) { perror("error getting device list"); cam_freeccb(ccb); return(1); } status[0] = '\0'; switch (ccb->cgdl.status) { case CAM_GDEVLIST_MORE_DEVS: strcpy(status, "MORE"); break; case CAM_GDEVLIST_LAST_DEVICE: strcpy(status, "LAST"); break; case CAM_GDEVLIST_LIST_CHANGED: strcpy(status, "CHANGED"); break; case CAM_GDEVLIST_ERROR: strcpy(status, "ERROR"); error = 1; break; } fprintf(stdout, "%s%d: generation: %d index: %d status: %s\n", ccb->cgdl.periph_name, ccb->cgdl.unit_number, ccb->cgdl.generation, ccb->cgdl.index, status); /* * If the list has changed, we need to start over from the * beginning. 
*/ if (ccb->cgdl.status == CAM_GDEVLIST_LIST_CHANGED) ccb->cgdl.index = 0; } cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt) { union ccb ccb; int bufsize, fd; unsigned int i; int need_close = 0; int error = 0; int skip_device = 0; int busonly = 0; int c; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': if ((arglist & CAM_ARG_VERBOSE) == 0) busonly = 1; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; /* * We fetch all nodes, since we display most of them in the default * case, and all in the verbose case. */ ccb.cdm.num_patterns = 0; ccb.cdm.pattern_buf_len = 0; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); error = 1; break; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); error = 1; break; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_BUS: { struct bus_match_result *bus_result; /* * Only print the bus information if the * user turns on the verbose flag. */ if ((busonly == 0) && (arglist & CAM_ARG_VERBOSE) == 0) break; bus_result = &ccb.cdm.matches[i].result.bus_result; if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "scbus%d on %s%d bus %d%s\n", bus_result->path_id, bus_result->dev_name, bus_result->unit_number, bus_result->bus_id, (busonly ? 
"" : ":")); break; } case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; char vendor[16], product[48], revision[16]; char fw[5], tmpstr[256]; if (busonly == 1) break; dev_result = &ccb.cdm.matches[i].result.device_result; if ((dev_result->flags & DEV_RESULT_UNCONFIGURED) && ((arglist & CAM_ARG_VERBOSE) == 0)) { skip_device = 1; break; } else skip_device = 0; if (dev_result->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_result->inq_data.vendor, sizeof(dev_result->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_result->inq_data.product, sizeof(dev_result->inq_data.product), sizeof(product)); cam_strvis(revision, dev_result->inq_data.revision, sizeof(dev_result->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if (dev_result->protocol == PROTO_ATA || dev_result->protocol == PROTO_SATAPM) { cam_strvis(product, dev_result->ident_data.model, sizeof(dev_result->ident_data.model), sizeof(product)); cam_strvis(revision, dev_result->ident_data.revision, sizeof(dev_result->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else if (dev_result->protocol == PROTO_SEMB) { struct sep_identify_data *sid; sid = (struct sep_identify_data *) &dev_result->ident_data; cam_strvis(vendor, sid->vendor_id, sizeof(sid->vendor_id), sizeof(vendor)); cam_strvis(product, sid->product_id, sizeof(sid->product_id), sizeof(product)); cam_strvis(revision, sid->product_rev, sizeof(sid->product_rev), sizeof(revision)); cam_strvis(fw, sid->firmware_rev, sizeof(sid->firmware_rev), sizeof(fw)); sprintf(tmpstr, "<%s %s %s %s>", vendor, product, revision, fw); } else { sprintf(tmpstr, "<>"); } if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "%-33s at scbus%d " "target %d lun %jx (", tmpstr, dev_result->path_id, dev_result->target_id, (uintmax_t)dev_result->target_lun); need_close = 1; break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (busonly || skip_device != 0) break; if (need_close > 1) fprintf(stdout, ","); fprintf(stdout, "%s%d", periph_result->periph_name, periph_result->unit_number); need_close++; break; } default: fprintf(stdout, "unknown match type\n"); break; } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); if (need_close) fprintf(stdout, ")\n"); close(fd); return(error); } #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet) { int error = 0; union ccb *ccb; ccb = cam_getccb(device); scsi_test_unit_ready(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet == 0) perror("error sending test unit ready"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { if (quiet == 0) fprintf(stdout, "Unit is ready\n"); } else { if (quiet == 0) fprintf(stdout, "Unit is not ready\n"); error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } static int scsistart(struct cam_device *device, int startstop, int loadeject, int retry_count, int timeout) { union ccb *ccb; int error = 0; ccb = cam_getccb(device); /* * If we're stopping, send an ordered tag so the drive in question * will finish any previously queued writes before stopping. If * the device isn't capable of tagged queueing, or if tagged * queueing is turned off, the tag action is a no-op. */ scsi_start_stop(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ startstop ? MSG_SIMPLE_Q_TAG : MSG_ORDERED_Q_TAG, /* start/stop */ startstop, /* load_eject */ loadeject, /* immediate */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 120000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending start unit"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { if (startstop) { fprintf(stdout, "Unit started successfully"); if (loadeject) fprintf(stdout,", Media loaded\n"); else fprintf(stdout,"\n"); } else { fprintf(stdout, "Unit stopped successfully"); if (loadeject) fprintf(stdout, ", Media ejected\n"); else fprintf(stdout, "\n"); } } else { error = 1; if (startstop) fprintf(stdout, "Error received from start unit command\n"); else fprintf(stdout, "Error received from stop unit command\n"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } int scsidoinquiry(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c; int error = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'D': arglist |= CAM_ARG_GET_STDINQ; break; case 'R': arglist |= CAM_ARG_GET_XFERRATE; break; case 'S': arglist |= CAM_ARG_GET_SERIAL; break; default: break; } } /* * If the user didn't specify any inquiry options, he wants all of * them.
*/ if ((arglist & CAM_ARG_INQ_MASK) == 0) arglist |= CAM_ARG_INQ_MASK; if (arglist & CAM_ARG_GET_STDINQ) error = scsiinquiry(device, retry_count, timeout); if (error != 0) return(error); if (arglist & CAM_ARG_GET_SERIAL) scsiserial(device, retry_count, timeout); if (arglist & CAM_ARG_GET_XFERRATE) error = camxferrate(device); return(error); } static int scsiinquiry(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_inquiry_data *inq_buf; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); inq_buf = (struct scsi_inquiry_data *)malloc( sizeof(struct scsi_inquiry_data)); if (inq_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for inquiry\n"); return(1); } bzero(inq_buf, sizeof(*inq_buf)); /* * Note that although the size of the inquiry buffer is the full * 256 bytes specified in the SCSI spec, we only tell the device * that we have allocated SHORT_INQUIRY_LENGTH bytes. There are * two reasons for this: * * - The SCSI spec says that when a length field is only 1 byte, * a value of 0 will be interpreted as 256. Therefore * scsi_inquiry() will convert an inq_len (which is passed in as * a u_int32_t, but the field in the CDB is only 1 byte) of 256 * to 0. Evidently, very few devices meet the spec in that * regard. Some devices, like many Seagate disks, take the 0 as * 0, and don't return any data. One Pioneer DVD-R drive * returns more data than the command asked for. * * So, since there are numerous devices that just don't work * right with the full inquiry size, we don't send the full size. * * - The second reason not to use the full inquiry data length is * that we don't need it here. The only reason we issue a * standard inquiry is to get the vendor name, device name, * and revision so scsi_print_inquiry() can print them. * * If, at some point in the future, more inquiry data is needed for * some reason, this code should use a procedure similar to the * probe code. i.e., issue a short inquiry, and determine from * the additional length passed back from the device how much * inquiry data the device supports. Once the amount the device * supports is determined, issue an inquiry for that amount and no * more. * * KDM, 2/18/2000 */ scsi_inquiry(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)inq_buf, /* inq_len */ SHORT_INQUIRY_LENGTH, /* evpd */ 0, /* page_code */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending SCSI inquiry"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(inq_buf); return(error); } fprintf(stdout, "%s%d: ", device->device_name, device->dev_unit_num); scsi_print_inquiry(inq_buf); free(inq_buf); return(0); } static int scsiserial(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_vpd_unit_serial_number *serial_buf; char serial_num[SVPD_SERIAL_NUM_SIZE + 1]; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf)); if (serial_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for serial number"); return(1); } scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)serial_buf, /* inq_len */ sizeof(*serial_buf), /* evpd */ 1, /* page_code */ SVPD_UNIT_SERIAL_NUMBER, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error getting serial number"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); free(serial_buf); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(serial_buf); return(error); } bcopy(serial_buf->serial_num, serial_num, serial_buf->length); serial_num[serial_buf->length] = '\0'; if ((arglist & CAM_ARG_GET_STDINQ) || (arglist & CAM_ARG_GET_XFERRATE)) fprintf(stdout, "%s%d: Serial Number ", device->device_name, device->dev_unit_num); fprintf(stdout, "%.60s\n", serial_num); free(serial_buf); return(0); } int camxferrate(struct cam_device *device) { struct ccb_pathinq cpi; u_int32_t freq = 0; u_int32_t speed = 0; union ccb *ccb; u_int mb; int retval = 0; if ((retval = get_cpi(device, &cpi)) != 0) return (1); ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char error_string[] = "error getting transfer settings"; if (retval < 0) warn(error_string); else warnx(error_string); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto xferrate_bailout; } speed = cpi.base_transfer_speed; freq = 0; if (ccb->cts.transport == XPORT_SPI) { struct 
ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { freq = scsi_calc_syncsrate(spi->sync_period); speed = freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { speed *= (0x01 << spi->bus_width); } } else if (ccb->cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &ccb->cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) speed = fc->bitrate; } else if (ccb->cts.transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) speed = sas->bitrate; } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) speed = ata_mode2speed(pata->mode); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) speed = ata_revision2speed(sata->revision); } mb = speed / 1000; if (mb > 0) { fprintf(stdout, "%s%d: %d.%03dMB/s transfers", device->device_name, device->dev_unit_num, mb, speed % 1000); } else { fprintf(stdout, "%s%d: %dKB/s transfers", device->device_name, device->dev_unit_num, speed); } if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) fprintf(stdout, " (%d.%03dMHz, offset %d", freq / 1000, freq % 1000, spi->sync_offset); if (((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) && (spi->bus_width > 0)) { if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ", "); } else { fprintf(stdout, " ("); } fprintf(stdout, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ")"); } } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_SATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } if (ccb->cts.protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { fprintf(stdout, ", Command Queueing Enabled"); } } } fprintf(stdout, "\n"); xferrate_bailout: cam_freeccb(ccb); return(retval); } static void atahpa_print(struct ata_params *parm, u_int64_t hpasize, int header) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); if (header) { printf("\nFeature " "Support Enabled 
Value\n"); } printf("Host Protected Area (HPA) "); if (parm->support.command1 & ATA_SUPPORT_PROTECTED) { u_int64_t lba = lbasize48 ? lbasize48 : lbasize; printf("yes %s %ju/%ju\n", (hpasize > lba) ? "yes" : "no ", lba, hpasize); printf("HPA - Security "); if (parm->support.command1 & ATA_SUPPORT_MAXSECURITY) printf("yes\n"); else printf("no\n"); } else { printf("no\n"); } } static int atasata(struct ata_params *parm) { if (parm->satacapabilities != 0xffff && parm->satacapabilities != 0x0000) return 1; return 0; } static void atacapprint(struct ata_params *parm) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); printf("\n"); printf("protocol "); printf("ATA/ATAPI-%d", ata_version(parm->version_major)); if (parm->satacapabilities && parm->satacapabilities != 0xffff) { if (parm->satacapabilities & ATA_SATA_GEN3) printf(" SATA 3.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN2) printf(" SATA 2.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN1) printf(" SATA 1.x\n"); else printf(" SATA\n"); } else printf("\n"); printf("device model %.40s\n", parm->model); printf("firmware revision %.8s\n", parm->revision); printf("serial number %.20s\n", parm->serial); if (parm->enabled.extension & ATA_SUPPORT_64BITWWN) { printf("WWN %04x%04x%04x%04x\n", parm->wwn[0], parm->wwn[1], parm->wwn[2], parm->wwn[3]); } if (parm->enabled.extension & ATA_SUPPORT_MEDIASN) { printf("media serial number %.30s\n", parm->media_serial); } printf("cylinders %d\n", parm->cylinders); printf("heads %d\n", parm->heads); printf("sectors/track %d\n", parm->sectors); printf("sector size logical %u, physical %lu, offset %lu\n", ata_logical_sector_size(parm), (unsigned long)ata_physical_sector_size(parm), (unsigned long)ata_logical_sector_offset(parm)); if (parm->config == ATA_PROTO_CFA || (parm->support.command2 & ATA_SUPPORT_CFA)) printf("CFA supported\n"); printf("LBA%ssupported ", parm->capabilities1 & ATA_SUPPORT_LBA ? " " : " not "); if (lbasize) printf("%d sectors\n", lbasize); else printf("\n"); printf("LBA48%ssupported ", parm->support.command2 & ATA_SUPPORT_ADDRESS48 ? " " : " not "); if (lbasize48) printf("%ju sectors\n", (uintmax_t)lbasize48); else printf("\n"); printf("PIO supported PIO"); switch (ata_max_pmode(parm)) { case ATA_PIO4: printf("4"); break; case ATA_PIO3: printf("3"); break; case ATA_PIO2: printf("2"); break; case ATA_PIO1: printf("1"); break; default: printf("0"); } if ((parm->capabilities1 & ATA_SUPPORT_IORDY) == 0) printf(" w/o IORDY"); printf("\n"); printf("DMA%ssupported ", parm->capabilities1 & ATA_SUPPORT_DMA ? 
" " : " not "); if (parm->capabilities1 & ATA_SUPPORT_DMA) { if (parm->mwdmamodes & 0xff) { printf("WDMA"); if (parm->mwdmamodes & 0x04) printf("2"); else if (parm->mwdmamodes & 0x02) printf("1"); else if (parm->mwdmamodes & 0x01) printf("0"); printf(" "); } if ((parm->atavalid & ATA_FLAG_88) && (parm->udmamodes & 0xff)) { printf("UDMA"); if (parm->udmamodes & 0x40) printf("6"); else if (parm->udmamodes & 0x20) printf("5"); else if (parm->udmamodes & 0x10) printf("4"); else if (parm->udmamodes & 0x08) printf("3"); else if (parm->udmamodes & 0x04) printf("2"); else if (parm->udmamodes & 0x02) printf("1"); else if (parm->udmamodes & 0x01) printf("0"); printf(" "); } } printf("\n"); if (parm->media_rotation_rate == 1) { printf("media RPM non-rotating\n"); } else if (parm->media_rotation_rate >= 0x0401 && parm->media_rotation_rate <= 0xFFFE) { printf("media RPM %d\n", parm->media_rotation_rate); } printf("\nFeature " "Support Enabled Value Vendor\n"); printf("read ahead %s %s\n", parm->support.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no"); printf("write cache %s %s\n", parm->support.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no"); printf("flush cache %s %s\n", parm->support.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no"); printf("overlap %s\n", parm->capabilities1 & ATA_SUPPORT_OVERLAP ? "yes" : "no"); printf("Tagged Command Queuing (TCQ) %s %s", parm->support.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_QUEUED) { printf(" %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("\n"); printf("Native Command Queuing (NCQ) "); if (parm->satacapabilities != 0xffff && (parm->satacapabilities & ATA_SUPPORT_NCQ)) { printf("yes %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("no\n"); printf("NCQ Queue Management %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_QMANAGEMENT ? "yes" : "no"); printf("NCQ Streaming %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_STREAM ? "yes" : "no"); printf("Receive & Send FPDMA Queued %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED ? "yes" : "no"); printf("SMART %s %s\n", parm->support.command1 & ATA_SUPPORT_SMART ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SMART ? "yes" : "no"); printf("microcode download %s %s\n", parm->support.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no"); printf("security %s %s\n", parm->support.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no"); printf("power management %s %s\n", parm->support.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no"); printf("advanced power management %s %s", parm->support.command2 & ATA_SUPPORT_APM ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_APM ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_APM) { printf(" %d/0x%02X\n", parm->apm_value & 0xff, parm->apm_value & 0xff); } else printf("\n"); printf("automatic acoustic management %s %s", parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC ? "yes" :"no", parm->enabled.command2 & ATA_SUPPORT_AUTOACOUSTIC ? 
"yes" :"no"); if (parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC) { printf(" %d/0x%02X %d/0x%02X\n", ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic)); } else printf("\n"); printf("media status notification %s %s\n", parm->support.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no"); printf("power-up in Standby %s %s\n", parm->support.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no"); printf("write-read-verify %s %s", parm->support2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no"); if (parm->support2 & ATA_SUPPORT_WRITEREADVERIFY) { printf(" %d/0x%x\n", parm->wrv_mode, parm->wrv_mode); } else printf("\n"); printf("unload %s %s\n", parm->support.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no"); printf("general purpose logging %s %s\n", parm->support.extension & ATA_SUPPORT_GENLOG ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_GENLOG ? "yes" : "no"); printf("free-fall %s %s\n", parm->support2 & ATA_SUPPORT_FREEFALL ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_FREEFALL ? "yes" : "no"); printf("Data Set Management (DSM/TRIM) "); if (parm->support_dsm & ATA_SUPPORT_DSM_TRIM) { printf("yes\n"); printf("DSM - max 512byte blocks "); if (parm->max_dsm_blocks == 0x00) printf("yes not specified\n"); else printf("yes %d\n", parm->max_dsm_blocks); printf("DSM - deterministic read "); if (parm->support3 & ATA_SUPPORT_DRAT) { if (parm->support3 & ATA_SUPPORT_RZAT) printf("yes zeroed\n"); else printf("yes any value\n"); } else { printf("no\n"); } } else { printf("no\n"); } } static int scsi_cam_pass_16_send(struct cam_device *device, union ccb *ccb, int quiet) { struct ata_pass_16 *ata_pass_16; struct ata_cmd ata_cmd; ata_pass_16 = (struct ata_pass_16 *)ccb->csio.cdb_io.cdb_bytes; ata_cmd.command = ata_pass_16->command; ata_cmd.control = ata_pass_16->control; ata_cmd.features = ata_pass_16->features; if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s via pass_16 with timeout of %u msecs", ata_op_string(&ata_cmd), ccb->csio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s via pass_16", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if (!(ata_pass_16->flags & AP_FLAG_CHK_COND) && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s via pass_16 failed", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_cam_send(struct cam_device *device, union ccb *ccb, int quiet) { if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s with timeout of %u msecs", ata_op_string(&(ccb->ataio.cmd)), ccb->ataio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s", 
ata_op_string(&(ccb->ataio.cmd))); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s failed: %d", ata_op_string(&(ccb->ataio.cmd)), quiet); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_do_pass_16(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { if (data_ptr != NULL) { ata_flags |= AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT; if (flags & CAM_DIR_OUT) ata_flags |= AP_FLAG_TDIR_TO_DEV; else ata_flags |= AP_FLAG_TDIR_FROM_DEV; } else { ata_flags |= AP_FLAG_TLEN_NO_DATA; } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_ata_pass_16(&ccb->csio, retries, NULL, flags, tag_action, protocol, ata_flags, features, sector_count, lba, command, /*control*/0, data_ptr, dxfer_len, /*sense_len*/SSD_FULL_SIZE, timeout); return scsi_cam_pass_16_send(device, ccb, quiet); } static int ata_try_pass_16(struct cam_device *device) { struct ccb_pathinq cpi; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } if (cpi.protocol == PROTO_SCSI) { /* possibly compatible with pass_16 */ return (1); } /* likely not compatible with pass_16 */ return (0); } static int ata_do_28bit_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int32_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { switch (ata_try_pass_16(device)) { case -1: return (1); case 1: /* Try using SCSI Passthrough */ return ata_do_pass_16(device, ccb, retries, flags, protocol, 0, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, quiet); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - sizeof(struct ccb_hdr)); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); return ata_cam_send(device, ccb, quiet); } static int ata_do_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int force48bit) { int retval; retval = ata_try_pass_16(device); if (retval == -1) return (1); if (retval == 1) { int error; /* Try using SCSI Passthrough */ error = ata_do_pass_16(device, ccb, retries, flags, protocol, ata_flags, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, 0); if (ata_flags & AP_FLAG_CHK_COND) { /* Decode ata_res from sense data */ struct ata_res_pass16 *res_pass16; struct ata_res *res; u_int i; u_int16_t *ptr; /* sense_data is 4 byte aligned */ ptr = (uint16_t*)(uintptr_t)&ccb->csio.sense_data; for (i = 0; i < sizeof(*res_pass16) / 2; i++) ptr[i] = le16toh(ptr[i]); /* sense_data is 4 byte aligned */ res_pass16 = (struct ata_res_pass16 *)(uintptr_t) &ccb->csio.sense_data; res = &ccb->ataio.res; res->flags = res_pass16->flags; res->status = res_pass16->status; res->error = 
res_pass16->error; res->lba_low = res_pass16->lba_low; res->lba_mid = res_pass16->lba_mid; res->lba_high = res_pass16->lba_high; res->device = res_pass16->device; res->lba_low_exp = res_pass16->lba_low_exp; res->lba_mid_exp = res_pass16->lba_mid_exp; res->lba_high_exp = res_pass16->lba_high_exp; res->sector_count = res_pass16->sector_count; res->sector_count_exp = res_pass16->sector_count_exp; } return (error); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - sizeof(struct ccb_hdr)); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); if (force48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; return ata_cam_send(device, ccb, 0); } static void dump_data(uint16_t *ptr, uint32_t len) { u_int i; for (i = 0; i < len / 2; i++) { if ((i % 8) == 0) printf(" %3d: ", i); printf("%04hx ", ptr[i]); if ((i % 8) == 7) printf("\n"); } if ((i % 8) != 7) printf("\n"); } static int atahpa_proc_resp(struct cam_device *device, union ccb *ccb, int is48bit, u_int64_t *hpasize) { struct ata_res *res; res = &ccb->ataio.res; if (res->status & ATA_STATUS_ERROR) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); printf("error = 0x%02x, sector_count = 0x%04x, " "device = 0x%02x, status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (res->error & ATA_ERROR_ID_NOT_FOUND) { warnx("Max address has already been set since " "last power-on or hardware reset"); } return (1); } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw native max data:\n", device->device_name, device->dev_unit_num); /* res is 4 byte aligned */ dump_data((uint16_t*)(uintptr_t)res, sizeof(struct ata_res)); printf("error = 0x%02x, sector_count = 0x%04x, device = 0x%02x, " "status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (hpasize != NULL) { if (is48bit) { *hpasize = (((u_int64_t)((res->lba_high_exp << 16) | (res->lba_mid_exp << 8) | res->lba_low_exp) << 24) | ((res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low)) + 1; } else { *hpasize = (((res->device & 0x0f) << 24) | (res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low) + 1; } } return (0); } static int ata_read_native_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, struct ata_params *parm, u_int64_t *hpasize) { int error; u_int cmd, is48bit; u_int8_t protocol; is48bit = parm->support.command2 & ATA_SUPPORT_ADDRESS48; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_READ_NATIVE_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_READ_NATIVE_MAX_ADDRESS; } error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, hpasize); } static int atahpa_set_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, u_int64_t maxsize, int persist) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_SET_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_SET_MAX_ADDRESS; } /* lba's are zero indexed so the max lba is requested max - 1 */ if (maxsize) maxsize--; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_MAX_ADDR, /*lba*/maxsize, /*sector_count*/persist, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_password(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_SET_PWD, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_LOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_unlock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_UNLOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_freeze_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_FREEZE, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } int ata_do_identify(struct cam_device *device, int retry_count, int timeout, union ccb *ccb, struct ata_params** ident_bufp) { struct ata_params *ident_buf; struct ccb_pathinq cpi; struct ccb_getdev cgd; u_int i, error; u_int16_t *ptr; u_int8_t command, retry_command; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } /* Neither PROTO_ATAPI nor PROTO_SATAPM is used in cpi.protocol */ if (cpi.protocol == PROTO_ATA) { if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (-1); } command = (cgd.protocol == PROTO_ATA) ? ATA_ATA_IDENTIFY : ATA_ATAPI_IDENTIFY; retry_command = 0; } else { /* We don't know which for sure, so try both */ command = ATA_ATA_IDENTIFY; retry_command = ATA_ATAPI_IDENTIFY; } ptr = (uint16_t *)calloc(1, sizeof(struct ata_params)); if (ptr == NULL) { warnx("can't calloc memory for identify"); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t)sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); if (error != 0) { if (retry_command == 0) { free(ptr); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/retry_command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t) sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/0); if (error != 0) { free(ptr); return (1); } } error = 1; for (i = 0; i < sizeof(struct ata_params) / 2; i++) { ptr[i] = le16toh(ptr[i]); if (ptr[i] != 0) error = 0; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw identify data:\n", device->device_name, device->dev_unit_num); dump_data(ptr, sizeof(struct ata_params)); } /* check for invalid (all zero) response */ if (error != 0) { warnx("Invalid identify response detected"); free(ptr); return (error); } ident_buf = (struct ata_params *)ptr; if (strncmp(ident_buf->model, "FX", 2) && strncmp(ident_buf->model, "NEC", 3) && strncmp(ident_buf->model, "Pioneer", 7) && strncmp(ident_buf->model, "SHARP", 5)) { ata_bswap(ident_buf->model, sizeof(ident_buf->model)); ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); ata_bswap(ident_buf->media_serial, sizeof(ident_buf->media_serial)); } ata_btrim(ident_buf->model, sizeof(ident_buf->model)); ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); ata_btrim(ident_buf->media_serial, sizeof(ident_buf->media_serial)); ata_bpack(ident_buf->media_serial, ident_buf->media_serial, sizeof(ident_buf->media_serial)); *ident_bufp = ident_buf; return (0); } static int ataidentify(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct ata_params *ident_buf; u_int64_t hpasize; if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } if
(ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); return (1); } if (ident_buf->support.command1 & ATA_SUPPORT_PROTECTED) { if (ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize) != 0) { cam_freeccb(ccb); return (1); } } else { hpasize = 0; } printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); atacapprint(ident_buf); atahpa_print(ident_buf, hpasize, 0); free(ident_buf); cam_freeccb(ccb); return (0); } #endif /* MINIMALISTIC */ #ifndef MINIMALISTIC enum { ATA_SECURITY_ACTION_PRINT, ATA_SECURITY_ACTION_FREEZE, ATA_SECURITY_ACTION_UNLOCK, ATA_SECURITY_ACTION_DISABLE, ATA_SECURITY_ACTION_ERASE, ATA_SECURITY_ACTION_ERASE_ENHANCED, ATA_SECURITY_ACTION_SET_PASSWORD }; static void atasecurity_print_time(u_int16_t tw) { if (tw == 0) printf("unspecified"); else if (tw >= 255) printf("> 508 min"); else printf("%i min", 2 * tw); } static u_int32_t atasecurity_erase_timeout_msecs(u_int16_t timeout) { if (timeout == 0) return 2 * 3600 * 1000; /* default: two hours */ else if (timeout > 255) return (508 + 60) * 60 * 1000; /* spec says > 508 minutes */ return ((2 * timeout) + 5) * 60 * 1000; /* add a 5min margin */ } static void atasecurity_notify(u_int8_t command, struct ata_security_password *pwd) { struct ata_cmd cmd; bzero(&cmd, sizeof(cmd)); cmd.command = command; printf("Issuing %s", ata_op_string(&cmd)); if (pwd != NULL) { char pass[sizeof(pwd->password)+1]; /* pwd->password may not be null terminated */ pass[sizeof(pwd->password)] = '\0'; strncpy(pass, pwd->password, sizeof(pwd->password)); printf(" password='%s', user='%s'", pass, (pwd->ctrl & ATA_SECURITY_PASSWORD_MASTER) ? "master" : "user"); if (command == ATA_SECURITY_SET_PASSWORD) { printf(", mode='%s'", (pwd->ctrl & ATA_SECURITY_LEVEL_MAXIMUM) ? 
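/* at security level maximum the master password can erase the drive but not unlock it */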
"maximum" : "high"); } } printf("\n"); } static int atasecurity_freeze(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_FREEZE_LOCK, NULL); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_FREEZE_LOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); } static int atasecurity_unlock(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_UNLOCK, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_UNLOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_disable(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_DISABLE_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_DISABLE_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_erase_confirm(struct cam_device *device, struct ata_params* ident_buf) { printf("\nYou are about to ERASE ALL DATA from the following" " device:\n%s%d,%s%d: ", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to ERASE ALL DATA? 
(yes/no) "); if (fgets(str, sizeof(str), stdin) != NULL) { if (strncasecmp(str, "yes", 3) == 0) { return (1); } else if (strncasecmp(str, "no", 2) == 0) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atasecurity_erase(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, u_int32_t erase_timeout, struct ata_security_password *pwd, int quiet) { int error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_PREPARE, NULL); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_PREPARE, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); if (error != 0) return error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_UNIT, pwd); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_UNIT, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/erase_timeout, /*quiet*/0); if (error == 0 && quiet == 0) printf("\nErase Complete\n"); return error; } static int atasecurity_set_password(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_SET_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_SET_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static void atasecurity_print(struct ata_params *parm) { printf("\nSecurity Option Value\n"); if (arglist & CAM_ARG_VERBOSE) { printf("status %04x\n", parm->security_status); } printf("supported %s\n", parm->security_status & ATA_SECURITY_SUPPORTED ? "yes" : "no"); if (!(parm->security_status & ATA_SECURITY_SUPPORTED)) return; printf("enabled %s\n", parm->security_status & ATA_SECURITY_ENABLED ? "yes" : "no"); printf("drive locked %s\n", parm->security_status & ATA_SECURITY_LOCKED ? "yes" : "no"); printf("security config frozen %s\n", parm->security_status & ATA_SECURITY_FROZEN ? "yes" : "no"); printf("count expired %s\n", parm->security_status & ATA_SECURITY_COUNT_EXP ? "yes" : "no"); printf("security level %s\n", parm->security_status & ATA_SECURITY_LEVEL ? "maximum" : "high"); printf("enhanced erase supported %s\n", parm->security_status & ATA_SECURITY_ENH_SUPP ? "yes" : "no"); printf("erase time "); atasecurity_print_time(parm->erase_time); printf("\n"); printf("enhanced erase time "); atasecurity_print_time(parm->enhanced_erase_time); printf("\n"); printf("master password rev %04x%s\n", parm->master_passwd_revision, parm->master_passwd_revision == 0x0000 || parm->master_passwd_revision == 0xFFFF ? " (unsupported)" : ""); } /* * Validates and copies the password in optarg to the passed buffer. * If the password in optarg is the same length as the buffer then * the data will still be copied but no null termination will occur. 
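 * The file-scope pwd_opt latch (set on success below) records which * option supplied the password, so a second, conflicting password * option can be reported against the first.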
*/ static int ata_getpwd(u_int8_t *passwd, int max, char opt) { int len; len = strlen(optarg); if (len > max) { warnx("-%c password is too long", opt); return (1); } else if (len == 0) { warnx("-%c password is missing", opt); return (1); } else if (optarg[0] == '-'){ warnx("-%c password starts with '-' (generic arg?)", opt); return (1); } else if (strlen(passwd) != 0 && strcmp(passwd, optarg) != 0) { warnx("-%c password conflicts with existing password from -%c", opt, pwd_opt); return (1); } /* Callers pass in a buffer which does NOT need to be terminated */ strncpy(passwd, optarg, max); pwd_opt = opt; return (0); } enum { ATA_HPA_ACTION_PRINT, ATA_HPA_ACTION_SET_MAX, ATA_HPA_ACTION_SET_PWD, ATA_HPA_ACTION_LOCK, ATA_HPA_ACTION_UNLOCK, ATA_HPA_ACTION_FREEZE_LOCK }; static int atahpa_set_confirm(struct cam_device *device, struct ata_params* ident_buf, u_int64_t maxsize, int persist) { printf("\nYou are about to configure HPA to limit the user accessible\n" "sectors to %ju %s on the device:\n%s%d,%s%d: ", maxsize, persist ? "persistently" : "temporarily", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to configure HPA? (yes/no) "); if (NULL != fgets(str, sizeof(str), stdin)) { if (0 == strncasecmp(str, "yes", 3)) { return (1); } else if (0 == strncasecmp(str, "no", 2)) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; struct ccb_getdev cgd; struct ata_set_max_pwd pwd; int error, confirm, quiet, c, action, actions, persist; int security, is48bit, pwdsize; u_int64_t hpasize, maxsize; actions = 0; confirm = 0; quiet = 0; maxsize = 0; persist = 0; security = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print hpa information */ action = ATA_HPA_ACTION_PRINT; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 's': action = ATA_HPA_ACTION_SET_MAX; maxsize = strtoumax(optarg, NULL, 0); actions++; break; case 'p': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_SET_PWD; security = 1; actions++; break; case 'l': action = ATA_HPA_ACTION_LOCK; security = 1; actions++; break; case 'U': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_UNLOCK; security = 1; actions++; break; case 'f': action = ATA_HPA_ACTION_FREEZE_LOCK; security = 1; actions++; break; case 'P': persist = 1; break; case 'y': confirm++; break; case 'q': quiet++; break; } } if (actions > 1) { warnx("too many hpa actions specified"); return (1); } if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_HPA_ACTION_PRINT) { error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) atahpa_print(ident_buf, hpasize, 1); cam_freeccb(ccb); free(ident_buf); return (error); } if (!(ident_buf->support.command1 & ATA_SUPPORT_PROTECTED)) { warnx("HPA is not 
supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } if (security && !(ident_buf->support.command1 & ATA_SUPPORT_MAXSECURITY)) { warnx("HPA Security is not supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } is48bit = ident_buf->support.command2 & ATA_SUPPORT_ADDRESS48; /* * The ATA spec requires: * 1. Read native max addr is called directly before set max addr * 2. Read native max addr is NOT called before any other set max call */ switch(action) { case ATA_HPA_ACTION_SET_MAX: if (confirm == 0 && atahpa_set_confirm(device, ident_buf, maxsize, persist) == 0) { cam_freeccb(ccb); free(ident_buf); return (1); } error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) { error = atahpa_set_max(device, retry_count, timeout, ccb, is48bit, maxsize, persist); if (error == 0) { /* redo identify to get the new LBA values */ error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); atahpa_print(ident_buf, hpasize, 1); } } break; case ATA_HPA_ACTION_SET_PWD: error = atahpa_password(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA password has been set\n"); break; case ATA_HPA_ACTION_LOCK: error = atahpa_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been locked\n"); break; case ATA_HPA_ACTION_UNLOCK: error = atahpa_unlock(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA has been unlocked\n"); break; case ATA_HPA_ACTION_FREEZE_LOCK: error = atahpa_freeze_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been frozen\n"); break; default: errx(1, "Option currently not supported"); } cam_freeccb(ccb); free(ident_buf); return (error); } static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; int error, confirm, quiet, c, action, actions, setpwd; int security_enabled, erase_timeout, pwdsize; struct ata_security_password pwd; actions = 0; setpwd = 0; erase_timeout = 0; confirm = 0; quiet = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print security information */ action = ATA_SECURITY_ACTION_PRINT; /* default to the master password, as it's safer that way */ pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': action = ATA_SECURITY_ACTION_FREEZE; actions++; break; case 'U': if (strcasecmp(optarg, "user") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_USER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_MASTER; } else if (strcasecmp(optarg, "master") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_USER; } else { warnx("-U argument '%s' is invalid (must be " "'user' or 'master')", optarg); return (1); } break; case 'l': if (strcasecmp(optarg, "high") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_HIGH; pwd.ctrl &= ~ATA_SECURITY_LEVEL_MAXIMUM; } else if (strcasecmp(optarg, "maximum") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_MAXIMUM; pwd.ctrl &= ~ATA_SECURITY_LEVEL_HIGH; } else { warnx("-l argument '%s' is unknown (must be " "'high' or 'maximum')", optarg); return (1); } break; case 'k': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_UNLOCK; actions++; break; case 'd': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_DISABLE; actions++; break; case 'e': if (ata_getpwd(pwd.password,
pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_ERASE; actions++; break; case 'h': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); pwd.ctrl |= ATA_SECURITY_ERASE_ENHANCED; action = ATA_SECURITY_ACTION_ERASE_ENHANCED; actions++; break; case 's': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); setpwd = 1; if (action == ATA_SECURITY_ACTION_PRINT) action = ATA_SECURITY_ACTION_SET_PASSWORD; /* * Don't increment actions, as setting the password * can be combined with the other actions. */ break; case 'y': confirm++; break; case 'q': quiet++; break; case 'T': erase_timeout = atoi(optarg) * 1000; break; } } if (actions > 1) { warnx("too many security actions specified"); return (1); } if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_SECURITY_ACTION_PRINT) { atasecurity_print(ident_buf); free(ident_buf); cam_freeccb(ccb); return (0); } if ((ident_buf->support.command1 & ATA_SUPPORT_SECURITY) == 0) { warnx("Security not supported"); free(ident_buf); cam_freeccb(ccb); return (1); } /* default timeout of 15 seconds, the same as Linux hdparm */ timeout = timeout ? timeout : 15 * 1000; security_enabled = ident_buf->security_status & ATA_SECURITY_ENABLED; /* first set the password if requested */ if (setpwd == 1) { /* confirm that we can erase before setting the password, if erasing */ if (confirm == 0 && (action == ATA_SECURITY_ACTION_ERASE_ENHANCED || action == ATA_SECURITY_ACTION_ERASE) && atasecurity_erase_confirm(device, ident_buf) == 0) { cam_freeccb(ccb); free(ident_buf); return (error); } if (pwd.ctrl & ATA_SECURITY_PASSWORD_MASTER) { pwd.revision = ident_buf->master_passwd_revision; if (pwd.revision != 0 && pwd.revision != 0xffff && --pwd.revision == 0) { pwd.revision = 0xfffe; } } error = atasecurity_set_password(device, ccb, retry_count, timeout, &pwd, quiet); if (error != 0) { cam_freeccb(ccb); free(ident_buf); return (error); } security_enabled = 1; } switch(action) { case ATA_SECURITY_ACTION_FREEZE: error = atasecurity_freeze(device, ccb, retry_count, timeout, quiet); break; case ATA_SECURITY_ACTION_UNLOCK: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } else { warnx("Can't unlock, drive is not locked"); error = 1; } } else { warnx("Can't unlock, security is disabled"); error = 1; } break; case ATA_SECURITY_ACTION_DISABLE: if (security_enabled) { /* First unlock the drive if it's locked */ if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } if (error == 0) { error = atasecurity_disable(device, ccb, retry_count, timeout, &pwd, quiet); } } else { warnx("Can't disable security (already disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE: if (security_enabled) { if (erase_timeout == 0) { erase_timeout = atasecurity_erase_timeout_msecs( ident_buf->erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Can't secure erase (security is disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE_ENHANCED: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_ENH_SUPP) { if (erase_timeout == 0) { erase_timeout =
atasecurity_erase_timeout_msecs( ident_buf->enhanced_erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Enhanced erase is not supported"); error = 1; } } else { warnx("Can't secure erase (enhanced), " "security is disabled"); error = 1; } break; } cam_freeccb(ccb); free(ident_buf); return (error); } #endif /* MINIMALISTIC */ /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst) { char *tmpstr; int convs = 0; while (isspace(*tstr) && (*tstr != '\0')) tstr++; tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_BUS; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_TARGET; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_LUN; convs++; } } } return convs; } static int dorescan_or_reset(int argc, char **argv, int rescan) { static const char must[] = "you must specify \"all\", a bus, or a bus:target:lun to %s"; int rv, error = 0; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr; if (argc < 3) { warnx(must, rescan ? "rescan" : "reset"); return(1); } tstr = argv[optind]; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncasecmp(tstr, "all", strlen("all")) == 0) arglist |= CAM_ARG_BUS; else { rv = parse_btl(argv[optind], &bus, &target, &lun, &arglist); if (rv != 1 && rv != 3) { warnx(must, rescan ? "rescan" : "reset"); return(1); } } if ((arglist & CAM_ARG_BUS) && (arglist & CAM_ARG_TARGET) && (arglist & CAM_ARG_LUN)) error = scanlun_or_reset_dev(bus, target, lun, rescan); else error = rescan_or_reset_bus(bus, rescan); return(error); } static int rescan_or_reset_bus(path_id_t bus, int rescan) { union ccb ccb, matchccb; int fd, retval; int bufsize; retval = 0; /* Clear the CCBs so no stack garbage is handed to the kernel. */ bzero(&ccb, sizeof(union ccb)); bzero(&matchccb, sizeof(union ccb)); if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } if (bus != CAM_BUS_WILDCARD) { ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan ? "Re-scan" : "Reset", bus); } else { fprintf(stdout, "%s of bus %d returned error %#x\n", rescan ? "Re-scan" : "Reset", bus, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } close(fd); return(retval); } /* * The right way to handle this is to modify the xpt so that it can * handle a wildcarded bus in a rescan or reset CCB. At the moment * that isn't implemented, so instead we enumerate the busses and * send the rescan or reset to those busses in the case where the * given bus is -1 (wildcard). We don't send a rescan or reset * to the xpt bus; sending a rescan to the xpt bus is effectively a * no-op, sending a reset to the xpt bus would result in a status of * CAM_REQ_INVALID.
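 * The code below therefore issues an XPT_DEV_MATCH CCB with a single * BUS_MATCH_ANY pattern to enumerate the busses, and then sends one * XPT_SCAN_BUS or XPT_RESET_BUS CCB for each bus that comes back.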
*/ bzero(&(&matchccb.ccb_h)[1], sizeof(struct ccb_dev_match) - sizeof(struct ccb_hdr)); matchccb.ccb_h.func_code = XPT_DEV_MATCH; matchccb.ccb_h.path_id = CAM_BUS_WILDCARD; bufsize = sizeof(struct dev_match_result) * 20; matchccb.cdm.match_buf_len = bufsize; matchccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (matchccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); retval = 1; goto bailout; } matchccb.cdm.num_matches = 0; matchccb.cdm.num_patterns = 1; matchccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern); matchccb.cdm.patterns = (struct dev_match_pattern *)malloc( matchccb.cdm.pattern_buf_len); if (matchccb.cdm.patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } matchccb.cdm.patterns[0].type = DEV_MATCH_BUS; matchccb.cdm.patterns[0].pattern.bus_pattern.flags = BUS_MATCH_ANY; do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &matchccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((matchccb.ccb_h.status != CAM_REQ_CMP) || ((matchccb.cdm.status != CAM_DEV_MATCH_LAST) && (matchccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d", matchccb.ccb_h.status, matchccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < matchccb.cdm.num_matches; i++) { struct bus_match_result *bus_result; /* This shouldn't happen. */ if (matchccb.cdm.matches[i].type != DEV_MATCH_BUS) continue; bus_result = &matchccb.cdm.matches[i].result.bus_result; /* * We don't want to rescan or reset the xpt bus. * See above. */ if (bus_result->path_id == CAM_XPT_PATH_ID) continue; ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus_result->path_id; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan ? "Re-scan" : "Reset", bus_result->path_id); } else { /* * Don't bail out just yet, maybe the other * rescan or reset commands will complete * successfully. */ fprintf(stderr, "%s of bus %d returned error " "%#x\n", rescan ? "Re-scan" : "Reset", bus_result->path_id, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } } } while ((matchccb.ccb_h.status == CAM_REQ_CMP) && (matchccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); if (matchccb.cdm.patterns != NULL) free(matchccb.cdm.patterns); if (matchccb.cdm.matches != NULL) free(matchccb.cdm.matches); return(retval); } static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan) { union ccb ccb; struct cam_device *device; int fd; device = NULL; if (bus == CAM_BUS_WILDCARD) { warnx("invalid bus number %d", bus); return(1); } if (target == CAM_TARGET_WILDCARD) { warnx("invalid target number %d", target); return(1); } if (lun == CAM_LUN_WILDCARD) { warnx("invalid lun number %jx", (uintmax_t)lun); return(1); } fd = -1; bzero(&ccb, sizeof(union ccb)); if (scan) { if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } } else { device = cam_open_btl(bus, target, lun, O_RDWR, NULL); if (device == NULL) { warnx("%s", cam_errbuf); return(1); } } ccb.ccb_h.func_code = (scan) ?
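/* XPT_SCAN_LUN probes a single LUN; XPT_RESET_DEV sends a BDR to the device */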
XPT_SCAN_LUN : XPT_RESET_DEV; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; ccb.ccb_h.timeout = 5000; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (scan) { if (ioctl(fd, CAMIOCOMMAND, &ccb) < 0) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } } else { if (cam_send_ccb(device, &ccb) < 0) { warn("error sending XPT_RESET_DEV CCB"); cam_close_device(device); return(1); } } if (scan) close(fd); else cam_close_device(device); /* * An error code of CAM_BDR_SENT is normal for a BDR request. */ if (((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) || ((!scan) && ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_BDR_SENT))) { fprintf(stdout, "%s of %d:%d:%jx was successful\n", scan ? "Re-scan" : "Reset", bus, target, (uintmax_t)lun); return(0); } else { fprintf(stdout, "%s of %d:%d:%jx returned error %#x\n", scan ? "Re-scan" : "Reset", bus, target, (uintmax_t)lun, ccb.ccb_h.status & CAM_STATUS_MASK); return(1); } } #ifndef MINIMALISTIC static struct scsi_nv defect_list_type_map[] = { { "block", SRDD10_BLOCK_FORMAT }, { "extbfi", SRDD10_EXT_BFI_FORMAT }, { "extphys", SRDD10_EXT_PHYS_FORMAT }, { "longblock", SRDD10_LONG_BLOCK_FORMAT }, { "bfi", SRDD10_BYTES_FROM_INDEX_FORMAT }, { "phys", SRDD10_PHYSICAL_SECTOR_FORMAT } }; static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb = NULL; struct scsi_read_defect_data_hdr_10 *hdr10 = NULL; struct scsi_read_defect_data_hdr_12 *hdr12 = NULL; size_t hdr_size = 0, entry_size = 0; int use_12byte = 0; int hex_format = 0; u_int8_t *defect_list = NULL; u_int8_t list_format = 0; int list_type_set = 0; u_int32_t dlist_length = 0; u_int32_t returned_length = 0, valid_len = 0; u_int32_t num_returned = 0, num_valid = 0; u_int32_t max_possible_size = 0, hdr_max = 0; u_int32_t starting_offset = 0; u_int8_t returned_format, returned_type; unsigned int i; int summary = 0, quiet = 0; int c, error = 0; int lists_specified = 0; int get_length = 1, first_pass = 1; int mads = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': { scsi_nv_status status; int entry_num = 0; status = scsi_get_nv(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) { list_format = defect_list_type_map[ entry_num].value; list_type_set = 1; } else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", "defect list type", optarg); error = 1; goto defect_bailout; } break; } case 'G': arglist |= CAM_ARG_GLIST; break; case 'P': arglist |= CAM_ARG_PLIST; break; case 'q': quiet = 1; break; case 's': summary = 1; break; case 'S': { char *endptr; starting_offset = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { error = 1; warnx("invalid starting offset %s", optarg); goto defect_bailout; } break; } case 'X': hex_format = 1; break; default: break; } } if (list_type_set == 0) { error = 1; warnx("no defect list format specified"); goto defect_bailout; } if (arglist & CAM_ARG_PLIST) { list_format |= SRDD10_PLIST; lists_specified++; } if (arglist & CAM_ARG_GLIST) { list_format |= SRDD10_GLIST; lists_specified++; } /* * This implies a summary, and was the previous behavior. */ if (lists_specified == 0) summary = 1; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); error = 1; goto defect_bailout; } retry_12byte: /* * We start off asking for just the header to determine how much * defect data is available.
Some Hitachi drives return an error * if you ask for more data than the drive has. Once we know the * length, we retry the command with the returned length. */ if (use_12byte == 0) dlist_length = sizeof(*hdr10); else dlist_length = sizeof(*hdr12); retry: if (defect_list != NULL) { free(defect_list); defect_list = NULL; } defect_list = malloc(dlist_length); if (defect_list == NULL) { warnx("can't malloc memory for defect list"); error = 1; goto defect_bailout; } next_batch: bzero(defect_list, dlist_length); /* * cam_getccb() zeros the CCB header only. So we need to zero the * payload portion of the ccb. */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_read_defects(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*list_format*/ list_format, /*addr_desc_index*/ starting_offset, /*data_ptr*/ defect_list, /*dxfer_len*/ dlist_length, /*minimum_cmd_size*/ use_12byte ? 12 : 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(device, ccb) < 0) { perror("error reading defect list"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto defect_bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (use_12byte == 0) { hdr10 = (struct scsi_read_defect_data_hdr_10 *)defect_list; hdr_size = sizeof(*hdr10); hdr_max = SRDDH10_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_2btoul(hdr10->length); returned_format = hdr10->format; } else { returned_length = 0; returned_format = 0; } } else { hdr12 = (struct scsi_read_defect_data_hdr_12 *)defect_list; hdr_size = sizeof(*hdr12); hdr_max = SRDDH12_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_4btoul(hdr12->length); returned_format = hdr12->format; } else { returned_length = 0; returned_format = 0; } } returned_type = returned_format & SRDDH10_DLIST_FORMAT_MASK; switch (returned_type) { case SRDD10_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_block); break; case SRDD10_LONG_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_long_block); break; case SRDD10_EXT_PHYS_FORMAT: case SRDD10_PHYSICAL_SECTOR_FORMAT: entry_size = sizeof(struct scsi_defect_desc_phys_sector); break; case SRDD10_EXT_BFI_FORMAT: case SRDD10_BYTES_FROM_INDEX_FORMAT: entry_size = sizeof(struct scsi_defect_desc_bytes_from_index); break; default: warnx("Unknown defect format 0x%x", returned_type); error = 1; goto defect_bailout; break; } max_possible_size = (hdr_max / entry_size) * entry_size; num_returned = returned_length / entry_size; num_valid = min(returned_length, valid_len - hdr_size); num_valid /= entry_size; if (get_length != 0) { get_length = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * If the drive is reporting that it just doesn't * support the defect list format, go ahead and use * the length it reported. Otherwise, the length * may not be valid, so use the maximum.
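 * The cases below key off the sense data: ASC 0x1c (DEFECT LIST NOT * FOUND), ASC 0x1f (PARTIAL DEFECT LIST TRANSFER) and ASC 0x24 * (INVALID FIELD IN CDB), each with ASCQ 0x00.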
*/ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c) && (ascq == 0x00) && (returned_length > 0)) { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1f) && (ascq == 0x00) && (returned_length > 0)) { /* Partial defect list transfer */ /* * Hitachi drives return this error * along with a partial defect list if they * have more defects than the 10 byte * command can support. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_ILLEGAL_REQUEST) && (asc == 0x24) && (ascq == 0x00)) { /* Invalid field in CDB */ /* * SBC-3 says that if the drive has more * defects than can be reported with the * 10 byte command, it should return this * error and no data. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else { /* * If we got a SCSI error and no valid length, * just use the 10 byte maximum. The 12 * byte maximum is too large. */ if (returned_length == 0) dlist_length = SRDD10_MAX_LENGTH; else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP){ error = 1; warnx("Error reading defect header"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } if (summary != 0) { fprintf(stdout, "%u", num_returned); if (quiet == 0) { fprintf(stdout, " defect%s", (num_returned != 1) ? "s" : ""); } fprintf(stdout, "\n"); goto defect_bailout; } /* * We always limit the list length to the 10-byte maximum * length (0xffff). The reason is that some controllers * can't handle larger I/Os, and we can transfer the entire * 10 byte list in one shot. For drives that support the 12 * byte read defects command, we'll step through the list * by specifying a starting offset. For drives that don't * support the 12 byte command's starting offset, we'll * just display the first 64K. */ dlist_length = min(dlist_length, SRDD10_MAX_LENGTH); goto retry; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI spec, if the disk doesn't support * the requested format, it will generally return a sense * key of RECOVERED ERROR, and an additional sense code * of "DEFECT LIST NOT FOUND". HGST drives also return * Primary/Grown defect list not found errors. So just * check for an ASC of 0x1c. 
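 * (ASCQ 0x01 is PRIMARY DEFECT LIST NOT FOUND and ASCQ 0x02 is GROWN * DEFECT LIST NOT FOUND; matching on the ASC alone covers all three.)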
if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c)) { const char *format_str; format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), list_format & SRDD10_DLIST_FORMAT_MASK); warnx("requested defect format %s not available", format_str ? format_str : "unknown"); format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), returned_type); if (format_str != NULL) { warnx("Device returned %s format", format_str); } else { error = 1; warnx("Device returned unknown defect" " data format %#x", returned_type); goto defect_bailout; } } else { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } if (first_pass != 0) { fprintf(stderr, "Got %d defect", num_returned); if ((lists_specified == 0) || (num_returned == 0)) { fprintf(stderr, "s.\n"); goto defect_bailout; } else if (num_returned == 1) fprintf(stderr, ":\n"); else fprintf(stderr, "s:\n"); first_pass = 0; } /* * XXX KDM I should probably clean up the printout format for the * disk defects. */ switch (returned_type) { case SRDD10_PHYSICAL_SECTOR_FORMAT: case SRDD10_EXT_PHYS_FORMAT: { struct scsi_defect_desc_phys_sector *dlist; dlist = (struct scsi_defect_desc_phys_sector *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t sector; sector = scsi_4btoul(dlist[i].sector); if (returned_type == SRDD10_EXT_PHYS_FORMAT) { mads = (sector & SDD_EXT_PHYS_MADS) ? 0 : 1; sector &= ~SDD_EXT_PHYS_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, sector, mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, sector, mads ? " - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_BYTES_FROM_INDEX_FORMAT: case SRDD10_EXT_BFI_FORMAT: { struct scsi_defect_desc_bytes_from_index *dlist; dlist = (struct scsi_defect_desc_bytes_from_index *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t bfi; bfi = scsi_4btoul(dlist[i].bytes_from_index); if (returned_type == SRDD10_EXT_BFI_FORMAT) { mads = (bfi & SDD_EXT_BFI_MADS) ? 1 : 0; bfi &= ~SDD_EXT_BFI_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, bfi, mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, bfi, mads ?
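/* when MADS is set, this defect continues in the next descriptor, so join with " - " rather than a newline */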
" - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDDH10_BLOCK_FORMAT: { struct scsi_defect_desc_block *dlist; dlist = (struct scsi_defect_desc_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%u\n", scsi_4btoul(dlist[i].address)); else fprintf(stdout, "0x%x\n", scsi_4btoul(dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_LONG_BLOCK_FORMAT: { struct scsi_defect_desc_long_block *dlist; dlist = (struct scsi_defect_desc_long_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%ju\n", (uintmax_t)scsi_8btou64( dlist[i].address)); else fprintf(stdout, "0x%jx\n", (uintmax_t)scsi_8btou64( dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } default: fprintf(stderr, "Unknown defect format 0x%x\n", returned_type); error = 1; break; } defect_bailout: if (defect_list != NULL) free(defect_list); if (ccb != NULL) cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ #if 0 void reassignblocks(struct cam_device *device, u_int32_t *blocks, int num_blocks) { union ccb *ccb; ccb = cam_getccb(device); cam_freeccb(ccb); } #endif #ifndef MINIMALISTIC void mode_sense(struct cam_device *device, int mode_page, int page_control, int dbd, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_sense: couldn't allocate CCB"); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_mode_sense(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* page_code */ page_control << 6, /* page */ mode_page, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode sense command"); else errx(1, "error sending mode sense command"); } cam_freeccb(ccb); } void mode_select(struct cam_device *device, int save_pages, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_select: couldn't allocate CCB"); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_mode_select(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ save_pages, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode select command"); else errx(1, "error sending mode select command"); } cam_freeccb(ccb); } void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, mode_page = -1, page_control = 0; int binary = 0, list = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': binary = 1; break; case 'd': arglist |= CAM_ARG_DBD; break; case 'e': arglist |= CAM_ARG_MODE_EDIT; break; case 'l': list = 1; break; case 'm': mode_page = strtol(optarg, NULL, 0); if (mode_page < 0) errx(1, "invalid mode page %d", mode_page); break; case 'P': page_control = strtol(optarg, NULL, 0); if ((page_control < 0) || (page_control > 3)) errx(1, "invalid page control field %d", page_control); arglist |= CAM_ARG_PAGE_CNTL; break; default: break; } } if (mode_page == -1 && list == 0) errx(1, "you must specify a mode page!"); if (list) { mode_list(device, page_control, arglist & CAM_ARG_DBD, retry_count, timeout); } else { mode_edit(device, mode_page, page_control, arglist & CAM_ARG_DBD, arglist & CAM_ARG_MODE_EDIT, binary, retry_count, timeout); } } static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int32_t flags = CAM_DIR_NONE; u_int8_t *data_ptr = NULL; u_int8_t cdb[20]; u_int8_t atacmd[12]; struct get_hook hook; int c, data_bytes = 0; int cdb_len = 0; int atacmd_len = 0; int dmacmd = 0; int fpdmacmd = 0; int need_res = 0; char *datastr = NULL, *tstr, *resstr = NULL; int error = 0; int fd_data = 0, fd_res = 0; int retval; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsicmd: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; atacmd_len = buff_encode_visit(atacmd, sizeof(atacmd), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'c': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; cdb_len = buff_encode_visit(cdb, sizeof(cdb), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. 
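 * (The iget hook used above hands successive command line arguments * to the encoder for any placeholder fields in the command string.)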
*/ optind += hook.got; break; case 'd': dmacmd = 1; break; case 'f': fpdmacmd = 1; break; case 'i': if (arglist & CAM_ARG_CMD_OUT) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_IN; flags = CAM_DIR_IN; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of input bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } break; case 'o': if (arglist & CAM_ARG_CMD_IN) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_OUT; flags = CAM_DIR_OUT; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of output bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } bzero(data_ptr, data_bytes); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; else buff_encode_visit(data_ptr, data_bytes, datastr, iget, &hook); optind += hook.got; break; case 'r': need_res = 1; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; resstr = cget(&hook, NULL); if ((resstr != NULL) && (resstr[0] == '-')) fd_res = 1; optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. */ if ((fd_data == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto scsicmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (arglist & CAM_ARG_ERR_RECOVER) flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ flags |= CAM_DEV_QFRZDIS; if (cdb_len) { /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. * The next 5 bits are the command code. * Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((cdb[0] >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* computed by buff_encode_visit */ break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } /* * We should probably use csio_build_visit or something like that * here, but it's easier to encode arguments as you go. The * alternative would be skipping the CDB argument and then encoding * it here, since we've got the data buffer argument by now. 
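 * The vendor specific groups (3, 6 and 7) deliberately fall through * above and keep whatever length buff_encode_visit produced.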
*/ bcopy(cdb, &ccb->csio.cdb_io.cdb_bytes, cdb_len); cam_fill_csio(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*sense_len*/ SSD_FULL_SIZE, /*cdb_len*/ cdb_len, /*timeout*/ timeout ? timeout : 5000); } else { atacmd_len = 12; bcopy(atacmd, &ccb->ataio.cmd.command, atacmd_len); if (need_res) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if (dmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; if (fpdmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*timeout*/ timeout ? timeout : 5000); } if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsicmd_bailout; } if (atacmd_len && need_res) { if (fd_res == 0) { buff_decode_visit(&ccb->ataio.res.status, 11, resstr, arg_put, NULL); fprintf(stdout, "\n"); } else { fprintf(stdout, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", ccb->ataio.res.status, ccb->ataio.res.error, ccb->ataio.res.lba_low, ccb->ataio.res.lba_mid, ccb->ataio.res.lba_high, ccb->ataio.res.device, ccb->ataio.res.lba_low_exp, ccb->ataio.res.lba_mid_exp, ccb->ataio.res.lba_high_exp, ccb->ataio.res.sector_count, ccb->ataio.res.sector_count_exp); fflush(stdout); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (arglist & CAM_ARG_CMD_IN) && (data_bytes > 0)) { if (fd_data == 0) { buff_decode_visit(data_ptr, data_bytes, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(1, buf_ptr, amt_to_write)) > 0;) { amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto scsicmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", data_bytes - amt_to_write, data_bytes); } } } scsicmd_bailout: if ((data_bytes > 0) && (data_ptr != NULL)) free(data_ptr); cam_freeccb(ccb); return(error); } static int camdebug(int argc, char **argv, char *combinedopt) { int c, fd; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr, *tmpstr = NULL; union ccb ccb; int error = 0; bzero(&ccb, sizeof(union ccb)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'I': arglist |= CAM_ARG_DEBUG_INFO; ccb.cdbg.flags |= CAM_DEBUG_INFO; break; case 'P': arglist |= CAM_ARG_DEBUG_PERIPH; ccb.cdbg.flags |= CAM_DEBUG_PERIPH; break; case 'S': arglist |= CAM_ARG_DEBUG_SUBTRACE; ccb.cdbg.flags |= CAM_DEBUG_SUBTRACE; break; case 'T': arglist |= CAM_ARG_DEBUG_TRACE; ccb.cdbg.flags |= CAM_DEBUG_TRACE; break; case 'X': arglist |= CAM_ARG_DEBUG_XPT; ccb.cdbg.flags |= CAM_DEBUG_XPT; break; case 'c': arglist |= CAM_ARG_DEBUG_CDB; ccb.cdbg.flags |= CAM_DEBUG_CDB; break; case 'p': arglist |= CAM_ARG_DEBUG_PROBE; ccb.cdbg.flags |= CAM_DEBUG_PROBE; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } argc -= optind; argv +=
optind; if (argc <= 0) { warnx("you must specify \"off\", \"all\" or a bus,"); warnx("bus:target, or bus:target:lun"); close(fd); return(1); } tstr = *argv; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncmp(tstr, "off", 3) == 0) { ccb.cdbg.flags = CAM_DEBUG_NONE; arglist &= ~(CAM_ARG_DEBUG_INFO|CAM_ARG_DEBUG_PERIPH| CAM_ARG_DEBUG_TRACE|CAM_ARG_DEBUG_SUBTRACE| CAM_ARG_DEBUG_XPT|CAM_ARG_DEBUG_PROBE); } else if (strncmp(tstr, "all", 3) != 0) { tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ bus = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_BUS; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ target = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_TARGET; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ lun = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_LUN; } } } else { error = 1; warnx("you must specify \"all\", \"off\", or a bus,"); warnx("bus:target, or bus:target:lun to debug"); } } if (error == 0) { ccb.ccb_h.func_code = XPT_DEBUG; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); error = 1; } if (error == 0) { if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_FUNC_NOTAVAIL) { warnx("CAM debugging not available"); warnx("you need to put options CAMDEBUG in" " your kernel config file!"); error = 1; } else if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_DEBUG CCB failed with status %#x", ccb.ccb_h.status); error = 1; } else { if (ccb.cdbg.flags == CAM_DEBUG_NONE) { fprintf(stderr, "Debugging turned off\n"); } else { fprintf(stderr, "Debugging enabled for " "%d:%d:%jx\n", bus, target, (uintmax_t)lun); } } } close(fd); } return(error); } static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int numtags = -1; int retval = 0; int quiet = 0; char pathstr[1024]; ccb = cam_getccb(device); if (ccb == NULL) { warnx("tagcontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'N': numtags = strtol(optarg, NULL, 0); if (numtags < 0) { warnx("tag count %d is < 0", numtags); retval = 1; goto tagcontrol_bailout; } break; case 'q': quiet++; break; default: break; } } cam_path_string(device, pathstr, sizeof(pathstr)); if (numtags >= 0) { bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_relsim) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_REL_SIMQ; ccb->ccb_h.flags = CAM_DEV_QFREEZE; ccb->crs.release_flags = RELSIM_ADJUST_OPENINGS; ccb->crs.openings = numtags; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_REL_SIMQ CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_REL_SIMQ CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (quiet == 0) fprintf(stdout, "%stagged openings now %d\n", pathstr, ccb->crs.openings); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_getdevstats) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GDEV_STATS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GDEV_STATS CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GDEV_STATS CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s", 
pathstr); fprintf(stdout, "dev_openings %d\n", ccb->cgds.dev_openings); fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_active %d\n", ccb->cgds.dev_active); fprintf(stdout, "%s", pathstr); fprintf(stdout, "allocated %d\n", ccb->cgds.allocated); fprintf(stdout, "%s", pathstr); fprintf(stdout, "queued %d\n", ccb->cgds.queued); fprintf(stdout, "%s", pathstr); fprintf(stdout, "held %d\n", ccb->cgds.held); fprintf(stdout, "%s", pathstr); fprintf(stdout, "mintags %d\n", ccb->cgds.mintags); fprintf(stdout, "%s", pathstr); fprintf(stdout, "maxtags %d\n", ccb->cgds.maxtags); } else { if (quiet == 0) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "device openings: "); } fprintf(stdout, "%d\n", ccb->cgds.dev_openings + ccb->cgds.dev_active); } tagcontrol_bailout: cam_freeccb(ccb); return(retval); } static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts) { char pathstr[1024]; cam_path_string(device, pathstr, sizeof(pathstr)); if (cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { fprintf(stdout, "%ssync parameter: %d\n", pathstr, spi->sync_period); if (spi->sync_offset != 0) { u_int freq; freq = scsi_calc_syncsrate(spi->sync_period); fprintf(stdout, "%sfrequency: %d.%03dMHz\n", pathstr, freq / 1000, freq % 1000); } } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { fprintf(stdout, "%soffset: %d\n", pathstr, spi->sync_offset); } if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { fprintf(stdout, "%sbus width: %d bits\n", pathstr, (0x01 << spi->bus_width) * 8); } if (spi->valid & CTS_SPI_VALID_DISC) { fprintf(stdout, "%sdisconnection is %s\n", pathstr, (spi->flags & CTS_SPI_FLAGS_DISC_ENB) ? "enabled" : "disabled"); } } if (cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) fprintf(stdout, "%sWWNN: 0x%llx\n", pathstr, (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) fprintf(stdout, "%sWWPN: 0x%llx\n", pathstr, (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) fprintf(stdout, "%sPortID: 0x%x\n", pathstr, fc->port); if (fc->valid & CTS_FC_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, fc->bitrate / 1000, fc->bitrate % 1000); } if (cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, sas->bitrate / 1000, sas->bitrate % 1000); } if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if ((pata->valid & CTS_ATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(pata->mode)); } if ((pata->valid & CTS_ATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, pata->atapi); } if ((pata->valid & CTS_ATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, pata->bytecount); } } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if ((sata->valid & CTS_SATA_VALID_REVISION) != 0) { fprintf(stdout, "%sSATA revision: %d.x\n", pathstr, sata->revision); } if ((sata->valid & CTS_SATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(sata->mode)); } if ((sata->valid & CTS_SATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, sata->atapi); } if ((sata->valid & CTS_SATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO 
transaction length: %d\n", pathstr, sata->bytecount); } if ((sata->valid & CTS_SATA_VALID_PM) != 0) { fprintf(stdout, "%sPMP presence: %d\n", pathstr, sata->pm_present); } if ((sata->valid & CTS_SATA_VALID_TAGS) != 0) { fprintf(stdout, "%sNumber of tags: %d\n", pathstr, sata->tags); } if ((sata->valid & CTS_SATA_VALID_CAPS) != 0) { fprintf(stdout, "%sSATA capabilities: %08x\n", pathstr, sata->caps); } } if (cts->protocol == PROTO_ATA) { struct ccb_trans_settings_ata *ata = &cts->proto_specific.ata; if (ata->valid & CTS_ATA_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (ata->flags & CTS_ATA_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } if (cts->protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } } /* * Get a path inquiry CCB for the specified device. */ static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cpi: couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { warn("get_cpi: error sending Path Inquiry CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } bcopy(&ccb->cpi, cpi, sizeof(struct ccb_pathinq)); get_cpi_bailout: cam_freeccb(ccb); return(retval); } /* * Get a device type (XPT_GDEV_TYPE) CCB for the specified device. */ static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cgd: couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_getdev) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GDEV_TYPE; if (cam_send_ccb(device, ccb) < 0) { warn("get_cgd: error sending Get Device Type CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev)); get_cgd_bailout: cam_freeccb(ccb); return(retval); } /* * Returns 1 if the device has the VPD page, 0 if it does not, and -1 on an * error.
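* A minimal usage sketch (illustrative; assumes SVPD_BLOCK_LIMITS, the Block Limits VPD page code from scsi_all.h, as the page of interest): if (dev_has_vpd_page(dev, SVPD_BLOCK_LIMITS, 0, 5000, 0) == 1) printf("Block Limits VPD page supported\n");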
*/ int dev_has_vpd_page(struct cam_device *dev, uint8_t page_id, int retry_count, int timeout, int verbosemode) { union ccb *ccb = NULL; struct scsi_vpd_supported_page_list sup_pages; int i; int retval = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warn("Unable to allocate CCB"); retval = -1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); bzero(&sup_pages, sizeof(sup_pages)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)&sup_pages, /* inq_len */ sizeof(sup_pages), /* evpd */ 1, /* page_code */ SVPD_SUPPORTED_PAGE_LIST, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { cam_freeccb(ccb); retval = -1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = -1; goto bailout; } for (i = 0; i < sup_pages.length; i++) { if (sup_pages.list[i] == page_id) { retval = 1; goto bailout; } } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * devtype is filled in with the type of device. * Returns 0 for success, non-zero for failure. */ int get_device_type(struct cam_device *dev, int retry_count, int timeout, int verbosemode, camcontrol_devtype *devtype) { struct ccb_getdev cgd; int retval = 0; retval = get_cgd(dev, &cgd); if (retval != 0) goto bailout; switch (cgd.protocol) { case PROTO_SCSI: break; case PROTO_ATA: case PROTO_ATAPI: case PROTO_SATAPM: *devtype = CC_DT_ATA; goto bailout; break; /*NOTREACHED*/ default: *devtype = CC_DT_UNKNOWN; goto bailout; break; /*NOTREACHED*/ } /* * Check for the ATA Information VPD page (0x89). If this is an * ATA device behind a SCSI to ATA translation layer, this VPD page * should be present. * * If that VPD page isn't present, or we get an error back from the * INQUIRY command, we'll just treat it as a normal SCSI device. 
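* For example: a SATA disk reached through a SAS expander's SCSI/ATA translation typically reports PROTO_SCSI above but does carry VPD page 0x89, so it is classified as CC_DT_ATA_BEHIND_SCSI below; a native SCSI disk without that page is classified as CC_DT_SCSI.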
*/ retval = dev_has_vpd_page(dev, SVPD_ATA_INFORMATION, retry_count, timeout, verbosemode); if (retval == 1) *devtype = CC_DT_ATA_BEHIND_SCSI; else *devtype = CC_DT_SCSI; retval = 0; bailout: return (retval); } void build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, uint16_t features, uint16_t sector_count, uint64_t lba, uint8_t command, uint8_t *data_ptr, uint16_t dxfer_len, uint8_t sense_len, uint32_t timeout, int is48bit, camcontrol_devtype devtype) { if (devtype == CC_DT_ATA) { cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ timeout); if (is48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); } else { if (is48bit || lba > ATA_MAX_28BIT_LBA) protocol |= AP_EXTEND; scsi_ata_pass_16(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features, /*sector_count*/ sector_count, /*lba*/ lba, /*command*/ command, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*sense_len*/ sense_len, /*timeout*/ timeout); } } static void cpi_print(struct ccb_pathinq *cpi) { char adapter_str[1024]; int i; snprintf(adapter_str, sizeof(adapter_str), "%s%d:", cpi->dev_name, cpi->unit_number); fprintf(stdout, "%s SIM/HBA version: %d\n", adapter_str, cpi->version_num); for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_inquiry) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PI_MDP_ABLE: str = "MDP message"; break; case PI_WIDE_32: str = "32 bit wide SCSI"; break; case PI_WIDE_16: str = "16 bit wide SCSI"; break; case PI_SDTR_ABLE: str = "SDTR message"; break; case PI_LINKED_CDB: str = "linked CDBs"; break; case PI_TAG_ABLE: str = "tag queue messages"; break; case PI_SOFT_RST: str = "soft reset alternative"; break; case PI_SATAPM: str = "SATA Port Multiplier"; break; default: str = "unknown PI bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_misc) == 0) continue; fprintf(stdout, "%s ", adapter_str); switch(i) { case PIM_SCANHILO: str = "bus scans from high ID to low ID"; break; case PIM_NOREMOVE: str = "removable devices not included in scan"; break; case PIM_NOINITIATOR: str = "initiator role not supported"; break; case PIM_NOBUSRESET: str = "user has disabled initial BUS RESET or" " controller is in target/mixed mode"; break; case PIM_NO_6_BYTE: str = "do not send 6-byte commands"; break; case PIM_SEQSCAN: str = "scan bus sequentially"; break; default: str = "unknown PIM bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->target_sprt) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PIT_PROCESSOR: str = "target mode processor mode"; break; case PIT_PHASE: str = "target mode phase cog. 
mode"; break; case PIT_DISCONNECT: str = "disconnects in target mode"; break; case PIT_TERM_IO: str = "terminate I/O message in target mode"; break; case PIT_GRP_6: str = "group 6 commands in target mode"; break; case PIT_GRP_7: str = "group 7 commands in target mode"; break; default: str = "unknown PIT bit set"; break; } fprintf(stdout, "%s\n", str); } fprintf(stdout, "%s HBA engine count: %d\n", adapter_str, cpi->hba_eng_cnt); fprintf(stdout, "%s maximum target: %d\n", adapter_str, cpi->max_target); fprintf(stdout, "%s maximum LUN: %d\n", adapter_str, cpi->max_lun); fprintf(stdout, "%s highest path ID in subsystem: %d\n", adapter_str, cpi->hpath_id); fprintf(stdout, "%s initiator ID: %d\n", adapter_str, cpi->initiator_id); fprintf(stdout, "%s SIM vendor: %s\n", adapter_str, cpi->sim_vid); fprintf(stdout, "%s HBA vendor: %s\n", adapter_str, cpi->hba_vid); fprintf(stdout, "%s HBA vendor ID: 0x%04x\n", adapter_str, cpi->hba_vendor); fprintf(stdout, "%s HBA device ID: 0x%04x\n", adapter_str, cpi->hba_device); fprintf(stdout, "%s HBA subvendor ID: 0x%04x\n", adapter_str, cpi->hba_subvendor); fprintf(stdout, "%s HBA subdevice ID: 0x%04x\n", adapter_str, cpi->hba_subdevice); fprintf(stdout, "%s bus ID: %d\n", adapter_str, cpi->bus_id); fprintf(stdout, "%s base transfer speed: ", adapter_str); if (cpi->base_transfer_speed > 1000) fprintf(stdout, "%d.%03dMB/sec\n", cpi->base_transfer_speed / 1000, cpi->base_transfer_speed % 1000); else fprintf(stdout, "%dKB/sec\n", (cpi->base_transfer_speed % 1000) * 1000); fprintf(stdout, "%s maximum transfer size: %u bytes\n", adapter_str, cpi->maxio); } static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts) { int retval; union ccb *ccb; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_print_cts: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; if (user_settings == 0) ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; else ccb->cts.type = CTS_TYPE_USER_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if (quiet == 0) cts_print(device, &ccb->cts); if (cts != NULL) bcopy(&ccb->cts, cts, sizeof(struct ccb_trans_settings)); get_print_cts_bailout: cam_freeccb(ccb); return(retval); } static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int user_settings = 0; int retval = 0; int disc_enable = -1, tag_enable = -1; int mode = -1; int offset = -1; double syncrate = -1; int bus_width = -1; int quiet = 0; int change_settings = 0, send_tur = 0; struct ccb_pathinq cpi; ccb = cam_getccb(device); if (ccb == NULL) { warnx("ratecontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'a': send_tur = 1; break; case 'c': user_settings = 0; break; case 'D': if (strncasecmp(optarg, "enable", 6) == 0) disc_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) disc_enable = 0; else { warnx("-D argument \"%s\" is unknown", 
optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'M': mode = ata_string2mode(optarg); if (mode < 0) { warnx("unknown mode '%s'", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'O': offset = strtol(optarg, NULL, 0); if (offset < 0) { warnx("offset value %d is < 0", offset); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'q': quiet++; break; case 'R': syncrate = atof(optarg); if (syncrate < 0) { warnx("sync rate %f is < 0", syncrate); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'T': if (strncasecmp(optarg, "enable", 6) == 0) tag_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) tag_enable = 0; else { warnx("-T argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'U': user_settings = 1; break; case 'W': bus_width = strtol(optarg, NULL, 0); if (bus_width < 0) { warnx("bus width %d is < 0", bus_width); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; default: break; } } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); /* * Grab path inquiry information, so we can determine whether * or not the initiator is capable of the things that the user * requests. */ ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_PATH_INQ CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_PATH_INQ CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } bcopy(&ccb->cpi, &cpi, sizeof(struct ccb_pathinq)); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); if (quiet == 0) { fprintf(stdout, "%s parameters:\n", user_settings ? 
"User" : "Current"); } retval = get_print_cts(device, user_settings, quiet, &ccb->cts); if (retval != 0) goto ratecontrol_bailout; if (arglist & CAM_ARG_VERBOSE) cpi_print(&cpi); if (change_settings) { int didsettings = 0; struct ccb_trans_settings_spi *spi = NULL; struct ccb_trans_settings_pata *pata = NULL; struct ccb_trans_settings_sata *sata = NULL; struct ccb_trans_settings_ata *ata = NULL; struct ccb_trans_settings_scsi *scsi = NULL; if (ccb->cts.transport == XPORT_SPI) spi = &ccb->cts.xport_specific.spi; if (ccb->cts.transport == XPORT_ATA) pata = &ccb->cts.xport_specific.ata; if (ccb->cts.transport == XPORT_SATA) sata = &ccb->cts.xport_specific.sata; if (ccb->cts.protocol == PROTO_ATA) ata = &ccb->cts.proto_specific.ata; if (ccb->cts.protocol == PROTO_SCSI) scsi = &ccb->cts.proto_specific.scsi; ccb->cts.xport_specific.valid = 0; ccb->cts.proto_specific.valid = 0; if (spi && disc_enable != -1) { spi->valid |= CTS_SPI_VALID_DISC; if (disc_enable == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; else spi->flags |= CTS_SPI_FLAGS_DISC_ENB; didsettings++; } if (tag_enable != -1) { if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0) { warnx("HBA does not support tagged queueing, " "so you cannot modify tag settings"); retval = 1; goto ratecontrol_bailout; } if (ata) { ata->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; else ata->flags |= CTS_ATA_FLAGS_TAG_ENB; didsettings++; } else if (scsi) { scsi->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; didsettings++; } } if (spi && offset != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing offset"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_offset = offset; didsettings++; } if (spi && syncrate != -1) { int prelim_sync_period; if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_RATE; /* * The sync rate the user gives us is in MHz. * We need to translate it into KHz for this * calculation. */ syncrate *= 1000; /* * Next, we calculate a "preliminary" sync period * in tenths of a nanosecond. 
*/ if (syncrate == 0) prelim_sync_period = 0; else prelim_sync_period = 10000000 / syncrate; spi->sync_period = scsi_calc_syncparam(prelim_sync_period); didsettings++; } if (sata && syncrate != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user rate " "settings for SATA"); retval = 1; goto ratecontrol_bailout; } sata->revision = ata_speed2revision(syncrate * 100); if (sata->revision < 0) { warnx("Invalid rate %f", syncrate); retval = 1; goto ratecontrol_bailout; } sata->valid |= CTS_SATA_VALID_REVISION; didsettings++; } if ((pata || sata) && mode != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user mode " "settings for ATA/SATA"); retval = 1; goto ratecontrol_bailout; } if (pata) { pata->mode = mode; pata->valid |= CTS_ATA_VALID_MODE; } else { sata->mode = mode; sata->valid |= CTS_SATA_VALID_MODE; } didsettings++; } /* * The bus_width argument goes like this: * 0 == 8 bit * 1 == 16 bit * 2 == 32 bit * Therefore, if you shift the number of bits given on the * command line right by 4, you should get the correct * number. */ if (spi && bus_width != -1) { /* * We might as well validate things here with a * decipherable error message, rather than what * will probably be an indecipherable error message * by the time it gets back to us. */ if ((bus_width == 16) && ((cpi.hba_inquiry & PI_WIDE_16) == 0)) { warnx("HBA does not support 16 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width == 32) && ((cpi.hba_inquiry & PI_WIDE_32) == 0)) { warnx("HBA does not support 32 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width != 8) && (bus_width != 16) && (bus_width != 32)) { warnx("Invalid bus width %d", bus_width); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width >> 4; didsettings++; } if (didsettings == 0) { goto ratecontrol_bailout; } ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_SET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_SET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } } if (send_tur) { retval = testunitready(device, retry_count, timeout, (arglist & CAM_ARG_VERBOSE) ? 0 : 1); /* * If the TUR didn't succeed, just bail. 
*/ if (retval != 0) { if (quiet == 0) fprintf(stderr, "Test Unit Ready failed\n"); goto ratecontrol_bailout; } } if ((change_settings || send_tur) && !quiet && (ccb->cts.transport == XPORT_ATA || ccb->cts.transport == XPORT_SATA || send_tur)) { fprintf(stdout, "New parameters:\n"); retval = get_print_cts(device, user_settings, 0, NULL); } ratecontrol_bailout: cam_freeccb(ccb); return(retval); } static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; struct format_defect_list_header fh; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsiformat: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'q': quiet++; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsiformat: error sending inquiry"); goto scsiformat_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsiformat_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current format timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. */ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } /* * Keep this outside the if block below to silence any unused * variable warnings. */ bzero(&fh, sizeof(fh)); /* * If we're in immediate mode, we've got to include the format * header */ if (immediate != 0) { fh.byte2 = FU_DLH_IMMED; data_ptr = (u_int8_t *)&fh; dxfer_len = sizeof(fh); byte2 = FU_FMT_DATA; } else if (quiet == 0) { fprintf(stdout, "Formatting..."); fflush(stdout); } scsi_format_unit(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* ileave */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((immediate == 0) && ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP))) { const char errstr[] = "error sending format command"; if (retval < 0) warn(errstr); else warnx(errstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. 
If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Format Complete\n"); } goto scsiformat_bailout; } doreport: do { cam_status status; bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish formatting. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-2 and SCSI-3 specs, a * drive that is in the middle of a format should * return NOT READY with an ASC of "logical unit * not ready, format in progress". The sense key * specific bytes will then be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x04)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rFormatting: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during format:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but format will " "proceed."); warnx("will exit when format is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during format"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nFormat Complete\n"); scsiformat_bailout: cam_freeccb(ccb); return(error); } static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int8_t action = 0; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; int invert = 0; int passes = 0; int ause = 0; int fd = -1; const char *pattern = NULL; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsisanitize: error allocating ccb"); 
return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': if (strcasecmp(optarg, "overwrite") == 0) action = SSZ_SERVICE_ACTION_OVERWRITE; else if (strcasecmp(optarg, "block") == 0) action = SSZ_SERVICE_ACTION_BLOCK_ERASE; else if (strcasecmp(optarg, "crypto") == 0) action = SSZ_SERVICE_ACTION_CRYPTO_ERASE; else if (strcasecmp(optarg, "exitfailure") == 0) action = SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE; else { warnx("invalid service operation \"%s\"", optarg); error = 1; goto scsisanitize_bailout; } break; case 'c': passes = strtol(optarg, NULL, 0); if (passes < 1 || passes > 31) { warnx("invalid passes value %d", passes); error = 1; goto scsisanitize_bailout; } break; case 'I': invert = 1; break; case 'P': pattern = optarg; break; case 'q': quiet++; break; case 'U': ause = 1; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (action == 0) { warnx("an action is required"); error = 1; goto scsisanitize_bailout; } else if (action == SSZ_SERVICE_ACTION_OVERWRITE) { struct scsi_sanitize_parameter_list *pl; struct stat sb; ssize_t sz, amt; if (pattern == NULL) { warnx("overwrite action requires -P argument"); error = 1; goto scsisanitize_bailout; } fd = open(pattern, O_RDONLY); if (fd < 0) { warn("cannot open pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } if (fstat(fd, &sb) < 0) { warn("cannot stat pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } sz = sb.st_size; if (sz > SSZPL_MAX_PATTERN_LENGTH) { warnx("pattern file size exceeds maximum value %d", SSZPL_MAX_PATTERN_LENGTH); error = 1; goto scsisanitize_bailout; } dxfer_len = sizeof(*pl) + sz; data_ptr = calloc(1, dxfer_len); if (data_ptr == NULL) { warnx("cannot allocate parameter list buffer"); error = 1; goto scsisanitize_bailout; } amt = read(fd, data_ptr + sizeof(*pl), sz); if (amt < 0) { warn("cannot read pattern file"); error = 1; goto scsisanitize_bailout; } else if (amt != sz) { warnx("short pattern file read"); error = 1; goto scsisanitize_bailout; } pl = (struct scsi_sanitize_parameter_list *)data_ptr; if (passes == 0) pl->byte1 = 1; else pl->byte1 = passes; if (invert != 0) pl->byte1 |= SSZPL_INVERT; scsi_ulto2b(sz, pl->length); } else { const char *arg; if (passes != 0) arg = "-c"; else if (invert != 0) arg = "-I"; else if (pattern != NULL) arg = "-P"; else arg = NULL; if (arg != NULL) { warnx("%s argument only valid with overwrite " "operation", arg); error = 1; goto scsisanitize_bailout; } } if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsisanitize: error sending inquiry"); goto scsisanitize_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsisanitize_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current sanitize timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. 
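* For example: entering 7200 at the prompt below sets use_timeout to 7200 * 1000 = 7200000 ms, i.e. a two hour sanitize timeout.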
*/ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } byte2 = action; if (ause != 0) byte2 |= SSZ_UNRESTRICTED_EXIT; if (immediate != 0) byte2 |= SSZ_IMMED; scsi_sanitize(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* control */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending sanitize command"); error = 1; goto scsisanitize_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (sense_key == SSD_KEY_ILLEGAL_REQUEST && asc == 0x20 && ascq == 0x00) warnx("sanitize is not supported by " "this device"); else warnx("error sanitizing this device"); } else warnx("error sanitizing this device"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Sanitize Complete\n"); } goto scsisanitize_bailout; } doreport: do { cam_status status; bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish sanitizing. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-3 spec, a drive that is in the * middle of a sanitize should return NOT READY with an * ASC of "logical unit not ready, sanitize in * progress". 
The sense key specific bytes will then * be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x1b)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rSanitizing: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during sanitize:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but sanitize will " "proceed."); warnx("will exit when sanitize is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during sanitize"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nSanitize Complete\n"); scsisanitize_bailout: if (fd >= 0) close(fd); if (data_ptr != NULL) free(data_ptr); cam_freeccb(ccb); return(error); } static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c, countonly, lunsonly; struct scsi_report_luns_data *lundata; int alloc_len; uint8_t report_type; uint32_t list_len, i, j; int retval; retval = 0; lundata = NULL; report_type = RPL_REPORT_DEFAULT; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); countonly = 0; lunsonly = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': countonly++; break; case 'l': lunsonly++; break; case 'r': if (strcasecmp(optarg, "default") == 0) report_type = RPL_REPORT_DEFAULT; else if (strcasecmp(optarg, "wellknown") == 0) report_type = RPL_REPORT_WELLKNOWN; else if (strcasecmp(optarg, "all") == 0) report_type = RPL_REPORT_ALL; else { warnx("%s: invalid report type \"%s\"", __func__, optarg); retval = 1; goto bailout; } break; default: break; } } if ((countonly != 0) && (lunsonly != 0)) { warnx("%s: you can only specify one of -c or -l", __func__); retval = 1; goto bailout; } /* * According to SPC-4, the allocation length must be at least 16 * bytes -- enough for the header and one LUN. */ alloc_len = sizeof(*lundata) + 8; retry: lundata = malloc(alloc_len); if (lundata == NULL) { warn("%s: error mallocing %d bytes", __func__, alloc_len); retval = 1; goto bailout; } scsi_report_luns(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*select_report*/ report_type, /*rpl_buf*/ lundata, /*alloc_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ?
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REPORT LUNS command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } list_len = scsi_4btoul(lundata->length); /* * If we need to list the LUNs, and our allocation * length was too short, reallocate and retry. */ if ((countonly == 0) && (list_len > (alloc_len - sizeof(*lundata)))) { alloc_len = list_len + sizeof(*lundata); free(lundata); goto retry; } if (lunsonly == 0) fprintf(stdout, "%u LUN%s found\n", list_len / 8, ((list_len / 8) > 1) ? "s" : ""); if (countonly != 0) goto bailout; for (i = 0; i < (list_len / 8); i++) { int no_more; no_more = 0; for (j = 0; j < sizeof(lundata->luns[i].lundata); j += 2) { if (j != 0) fprintf(stdout, ","); switch (lundata->luns[i].lundata[j] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK) != 0) fprintf(stdout, "%d:", lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK); else if ((j == 0) && ((lundata->luns[i].lundata[j+2] & RPL_LUNDATA_PERIPH_BUS_MASK) == 0)) no_more = 1; fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); break; case RPL_LUNDATA_ATYP_FLAT: { uint8_t tmplun[2]; tmplun[0] = lundata->luns[i].lundata[j] & RPL_LUNDATA_FLAT_LUN_MASK; tmplun[1] = lundata->luns[i].lundata[j+1]; fprintf(stdout, "%d", scsi_2btoul(tmplun)); no_more = 1; break; } case RPL_LUNDATA_ATYP_LUN: fprintf(stdout, "%d:%d:%d", (lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_BUS_MASK) >> 5, lundata->luns[i].lundata[j] & RPL_LUNDATA_LUN_TARG_MASK, lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_LUN_MASK); break; case RPL_LUNDATA_ATYP_EXTLUN: { int field_len_code, eam_code; eam_code = lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_EAM_MASK; field_len_code = (lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_LEN_MASK) >> 4; if ((eam_code == RPL_LUNDATA_EXT_EAM_WK) && (field_len_code == 0x00)) { fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); } else if ((eam_code == RPL_LUNDATA_EXT_EAM_NOT_SPEC) && (field_len_code == 0x03)) { uint8_t tmp_lun[8]; /* * This format takes up all 8 bytes. * If we aren't starting at offset 0, * that's a bug. */ if (j != 0) { fprintf(stdout, "Invalid " "offset %d for " "Extended LUN not " "specified format", j); no_more = 1; break; } bzero(tmp_lun, sizeof(tmp_lun)); bcopy(&lundata->luns[i].lundata[j+1], &tmp_lun[1], sizeof(tmp_lun) - 1); fprintf(stdout, "%#jx", (intmax_t)scsi_8btou64(tmp_lun)); no_more = 1; } else { fprintf(stderr, "Unknown Extended LUN " "Address method %#x, length " "code %#x", eam_code, field_len_code); no_more = 1; } break; } default: fprintf(stderr, "Unknown LUN address method " "%#x\n", lundata->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); break; } /* * For the flat addressing method, there are no * other levels after it.
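* For example (assuming RPL_LUNDATA_FLAT_LUN_MASK is 0x3f): flat-addressed LUN bytes 0x40 0x05 decode through scsi_2btoul() to LUN 5, and bytes 0x41 0x00 to LUN 256.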
*/ if (no_more != 0) break; } fprintf(stdout, "\n"); } bailout: cam_freeccb(ccb); free(lundata); return (retval); } static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int blocksizeonly, humanize, numblocks, quiet, sizeonly, baseten; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; uint64_t maxsector; uint32_t block_len; int retval; int c; blocksizeonly = 0; humanize = 0; numblocks = 0; quiet = 0; sizeonly = 0; baseten = 0; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': blocksizeonly++; break; case 'h': humanize++; baseten = 0; break; case 'H': humanize++; baseten++; break; case 'N': numblocks++; break; case 'q': quiet++; break; case 's': sizeonly++; break; default: break; } } if ((blocksizeonly != 0) && (numblocks != 0)) { warnx("%s: you can only specify one of -b or -N", __func__); retval = 1; goto bailout; } if ((blocksizeonly != 0) && (sizeonly != 0)) { warnx("%s: you can only specify one of -b or -s", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (quiet != 0)) { warnx("%s: you can only specify one of -h/-H or -q", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (blocksizeonly != 0)) { warnx("%s: you can only specify one of -h/-H or -b", __func__); retval = 1; goto bailout; } scsi_read_capacity(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto do_print; scsi_read_capacity_16(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)&rcaplong, /*rcap_buf_len*/ sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); do_print: if (blocksizeonly == 0) { /* * Humanize implies !quiet, and also implies numblocks. 
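* For example: maxsector = 3907029167 and block_len = 512 give tmpbytes = 3907029168 * 512 = 2000398934016 bytes, which humanize_number() renders as roughly "1.8T" with the default binary divisor (-h) or "2.0T" when HN_DIVISOR_1000 is selected via -H.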
*/ if (humanize != 0) { char tmpstr[6]; int64_t tmpbytes; int ret; tmpbytes = (maxsector + 1) * block_len; ret = humanize_number(tmpstr, sizeof(tmpstr), tmpbytes, "", HN_AUTOSCALE, HN_B | HN_DECIMAL | ((baseten != 0) ? HN_DIVISOR_1000 : 0)); if (ret == -1) { warnx("%s: humanize_number failed!", __func__); retval = 1; goto bailout; } fprintf(stdout, "Device Size: %s%s", tmpstr, (sizeonly == 0) ? ", " : "\n"); } else if (numblocks != 0) { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Blocks: " : "", (uintmax_t)maxsector + 1, (sizeonly == 0) ? ", " : "\n"); } else { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Last Block: " : "", (uintmax_t)maxsector, (sizeonly == 0) ? ", " : "\n"); } } if (sizeonly == 0) fprintf(stdout, "%s%u%s\n", (quiet == 0) ? "Block Length: " : "", block_len, (quiet == 0) ? " bytes" : ""); bailout: cam_freeccb(ccb); return (retval); } static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, error = 0; union ccb *ccb; uint8_t *smp_request = NULL, *smp_response = NULL; int request_size = 0, response_size = 0; int fd_request = 0, fd_response = 0; char *datastr = NULL; struct get_hook hook; int retval; int flags = 0; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'R': arglist |= CAM_ARG_CMD_IN; response_size = strtol(optarg, NULL, 0); if (response_size <= 0) { warnx("invalid number of response bytes %d", response_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_response = 1; smp_response = (u_int8_t *)malloc(response_size); if (smp_response == NULL) { warn("can't malloc memory for SMP response"); error = 1; goto smpcmd_bailout; } break; case 'r': arglist |= CAM_ARG_CMD_OUT; request_size = strtol(optarg, NULL, 0); if (request_size <= 0) { warnx("invalid number of request bytes %d", request_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); smp_request = (u_int8_t *)malloc(request_size); if (smp_request == NULL) { warn("can't malloc memory for SMP request"); error = 1; goto smpcmd_bailout; } bzero(smp_request, request_size); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_request = 1; else buff_encode_visit(smp_request, request_size, datastr, iget, &hook); optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. 
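* An illustrative invocation (the request bytes and decode format are assumptions, patterned after the camcontrol(8) examples): camcontrol smpcmd ses0 -r 4 "40 0 0 0" -R 1020 "s9 i1" sends a 4-byte SMP REPORT GENERAL request and decodes a single byte at offset 9 of the response.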
*/ if ((fd_request == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = request_size; u_int8_t *buf_ptr = smp_request; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto smpcmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (((arglist & CAM_ARG_CMD_IN) == 0) || ((arglist & CAM_ARG_CMD_OUT) == 0)) { warnx("%s: need both the request (-r) and response (-R) " "arguments", __func__); error = 1; goto smpcmd_bailout; } flags |= CAM_DEV_QFRZDIS; cam_fill_smpio(&ccb->smpio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*smp_request*/ smp_request, /*smp_request_len*/ request_size, /*smp_response*/ smp_response, /*smp_response_len*/ response_size, /*timeout*/ timeout ? timeout : 5000); ccb->smpio.flags = SMP_FLAG_NONE; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (response_size > 0)) { if (fd_response == 0) { buff_decode_visit(smp_response, response_size, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = response_size; u_int8_t *buf_ptr = smp_response; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(STDOUT_FILENO, buf_ptr, amt_to_write)) > 0;) { amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto smpcmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", response_size - amt_to_write, response_size); } } } smpcmd_bailout: if (ccb != NULL) cam_freeccb(ccb); if (smp_request != NULL) free(smp_request); if (smp_response != NULL) free(smp_response); return (error); } static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_general_request *request = NULL; struct smp_report_general_response *response = NULL; struct sbuf *sb = NULL; int error = 0; int c, long_response = 0; int retval; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM.
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); error = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*response)); error = 1; goto bailout; } try_long: smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, /*request_len*/ sizeof(*request), (uint8_t *)response, /*response_len*/ sizeof(*response), /*long_response*/ long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } /* * If the device supports the long response bit, try again and see * if we can get all of the data. */ if ((response->long_response & SMP_RG_LONG_RESPONSE) && (long_response == 0)) { ccb->ccb_h.status = CAM_REQ_INPROG; bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); long_response = 1; goto try_long; } /* * XXX KDM detect and decode SMP errors here. */ sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_general_sbuf(response, sizeof(*response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); if (sb != NULL) sbuf_delete(sb); return (error); } static struct camcontrol_opts phy_ops[] = { {"nop", SMP_PC_PHY_OP_NOP, CAM_ARG_NONE, NULL}, {"linkreset", SMP_PC_PHY_OP_LINK_RESET, CAM_ARG_NONE, NULL}, {"hardreset", SMP_PC_PHY_OP_HARD_RESET, CAM_ARG_NONE, NULL}, {"disable", SMP_PC_PHY_OP_DISABLE, CAM_ARG_NONE, NULL}, {"clearerrlog", SMP_PC_PHY_OP_CLEAR_ERR_LOG, CAM_ARG_NONE, NULL}, {"clearaffiliation", SMP_PC_PHY_OP_CLEAR_AFFILIATON, CAM_ARG_NONE,NULL}, {"sataportsel", SMP_PC_PHY_OP_TRANS_SATA_PSS, CAM_ARG_NONE, NULL}, {"clearitnl", SMP_PC_PHY_OP_CLEAR_STP_ITN_LS, CAM_ARG_NONE, NULL}, {"setdevname", SMP_PC_PHY_OP_SET_ATT_DEV_NAME, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_phy_control_request *request = NULL; struct smp_phy_control_response *response = NULL; int long_response = 0; int retval = 0; int phy = -1; uint32_t phy_operation = SMP_PC_PHY_OP_NOP; int phy_op_set = 0; uint64_t attached_dev_name = 0; int dev_name_set = 0; uint32_t min_plr = 0, max_plr = 0; uint32_t pp_timeout_val = 0; int slumber_partial = 0; int set_pp_timeout_val = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': case 'A': case 's': case 'S': { int enable = -1; if (strcasecmp(optarg, "enable") == 0) enable = 1; else if (strcasecmp(optarg, "disable") == 0) enable = 2; else { warnx("%s: Invalid argument %s", __func__, optarg); retval = 1; goto bailout; } switch (c) { case 's': slumber_partial |= enable << SMP_PC_SAS_SLUMBER_SHIFT; break; case 'S': slumber_partial |= enable << SMP_PC_SAS_PARTIAL_SHIFT; break; case 'a': slumber_partial |= enable << SMP_PC_SATA_SLUMBER_SHIFT; break; case 'A': slumber_partial |= enable << SMP_PC_SATA_PARTIAL_SHIFT; break; default: warnx("%s: programmer error", __func__); retval = 1; goto bailout; break; /*NOTREACHED*/ } break; } case 'd': attached_dev_name = (uintmax_t)strtoumax(optarg, NULL,0); dev_name_set = 1; break; case 'l': long_response = 1; break; case 'm': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ min_plr = strtoul(optarg, NULL, 0); if ((min_plr == 0) || (min_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, min_plr); retval = 1; goto bailout; } break; case 'M': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ max_plr = strtoul(optarg, NULL, 0); if ((max_plr == 0) || (max_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, max_plr); retval = 1; goto bailout; } break; case 'o': { camcontrol_optret optreturn; cam_argmask argnums; const char *subopt; if (phy_op_set != 0) { warnx("%s: only one phy operation argument " "(-o) allowed", __func__); retval = 1; goto bailout; } phy_op_set = 1; /* * Allow the user to specify the phy operation * numerically, as well as with a name. This will * future-proof it a bit, so options that are added * in future specs can be used. 
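*/
/*
 * A worked example of the enable/disable encoding above: each phy power
 * condition is a two-bit field, with 01b meaning "enable" and 10b meaning
 * "disable" (hence enable = 1 / enable = 2 in the parser), shifted into
 * place in slumber_partial.  The shift values below are illustrative
 * stand-ins only, not the SMP_PC_* constants from scsi_smp.h.
 */
static unsigned int
slumber_example(void)
{
	const int sas_slumber_shift = 6;	/* illustrative value */
	const int sata_partial_shift = 0;	/* illustrative value */
	unsigned int slumber_partial = 0;

	slumber_partial |= 1 << sas_slumber_shift;	/* "-s enable"  -> 01b */
	slumber_partial |= 2 << sata_partial_shift;	/* "-A disable" -> 10b */

	return (slumber_partial);	/* 0x42 with these shifts */
}
/*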
*/
			if (isdigit(optarg[0])) {
				phy_operation = strtoul(optarg, NULL, 0);
				if ((phy_operation == 0)
				 || (phy_operation > 0xff)) {
					warnx("%s: invalid phy operation %#x",
					      __func__, phy_operation);
					retval = 1;
					goto bailout;
				}
				break;
			}
			optreturn = getoption(phy_ops, optarg, &phy_operation,
					      &argnums, &subopt);

			if (optreturn == CC_OR_AMBIGUOUS) {
				warnx("%s: ambiguous option %s", __func__,
				      optarg);
				usage(0);
				retval = 1;
				goto bailout;
			} else if (optreturn == CC_OR_NOT_FOUND) {
				warnx("%s: option %s not found", __func__,
				      optarg);
				usage(0);
				retval = 1;
				goto bailout;
			}
			break;
		}
		case 'p':
			phy = atoi(optarg);
			break;
		case 'T':
			pp_timeout_val = strtoul(optarg, NULL, 0);
			if (pp_timeout_val > 15) {
				warnx("%s: invalid partial pathway timeout "
				      "value %u, need a value less than 16",
				      __func__, pp_timeout_val);
				retval = 1;
				goto bailout;
			}
			set_pp_timeout_val = 1;
			break;
		default:
			break;
		}
	}

	if (phy == -1) {
		warnx("%s: a PHY (-p phy) argument is required", __func__);
		retval = 1;
		goto bailout;
	}

	if (((dev_name_set != 0)
	  && (phy_operation != SMP_PC_PHY_OP_SET_ATT_DEV_NAME))
	 || ((phy_operation == SMP_PC_PHY_OP_SET_ATT_DEV_NAME)
	  && (dev_name_set == 0))) {
		warnx("%s: -d name and -o setdevname arguments both "
		      "required to set device name", __func__);
		retval = 1;
		goto bailout;
	}

	request = malloc(sizeof(*request));
	if (request == NULL) {
		warn("%s: unable to allocate %zd bytes", __func__,
		     sizeof(*request));
		retval = 1;
		goto bailout;
	}

	response = malloc(sizeof(*response));
	if (response == NULL) {
		warn("%s: unable to allocate %zd bytes", __func__,
		     sizeof(*response));
		retval = 1;
		goto bailout;
	}

	smp_phy_control(&ccb->smpio,
			retry_count,
			/*cbfcnp*/ NULL,
			request,
			sizeof(*request),
			(uint8_t *)response,
			sizeof(*response),
			long_response,
			/*expected_exp_change_count*/ 0,
			phy,
			phy_operation,
			(set_pp_timeout_val != 0) ? 1 : 0,
			attached_dev_name,
			min_plr,
			max_plr,
			slumber_partial,
			pp_timeout_val,
			timeout);

	if (((retval = cam_send_ccb(device, ccb)) < 0)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) {
		const char warnstr[] = "error sending command";

		if (retval < 0)
			warn(warnstr);
		else
			warnx(warnstr);

		if (arglist & CAM_ARG_VERBOSE) {
			/*
			 * Use CAM_EPF_NORMAL so we only get one line of
			 * SMP command decoding.
			 */
			cam_error_print(device, ccb, CAM_ESF_ALL,
					CAM_EPF_NORMAL, stderr);
		}
		retval = 1;
		goto bailout;
	}

	/* XXX KDM print out something here for success? */
bailout:
	if (ccb != NULL)
		cam_freeccb(ccb);

	if (request != NULL)
		free(request);

	if (response != NULL)
		free(response);

	return (retval);
}

static int
smpmaninfo(struct cam_device *device, int argc, char **argv,
	   char *combinedopt, int retry_count, int timeout)
{
	union ccb *ccb;
	struct smp_report_manuf_info_request request;
	struct smp_report_manuf_info_response response;
	struct sbuf *sb = NULL;
	int long_response = 0;
	int retval = 0;
	int c;

	/*
	 * Note that at the moment we don't support sending SMP CCBs to
	 * devices that aren't probed by CAM.
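*/
/*
 * A sketch of the numeric-or-name lookup pattern used for -o above:
 * accept a bare number for forward compatibility with operations that do
 * not have a name yet, otherwise scan a name table.  This is a
 * hypothetical, simplified stand-in for getoption()/phy_ops; it does no
 * prefix matching or ambiguity detection.  Assumes <ctype.h>, <stdlib.h>
 * and <strings.h>.
 */
struct op_name {
	const char	*name;
	uint32_t	 value;
};

static int
parse_op(const struct op_name *tbl, const char *arg, uint32_t *value)
{
	if (isdigit((unsigned char)arg[0])) {
		*value = strtoul(arg, NULL, 0);
		return (0);
	}
	for (; tbl->name != NULL; tbl++) {
		if (strcasecmp(tbl->name, arg) == 0) {
			*value = tbl->value;
			return (0);
		}
	}
	return (-1);	/* not found */
}
/*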
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } bzero(&request, sizeof(request)); bzero(&response, sizeof(response)); smp_report_manuf_info(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, &request, sizeof(request), (uint8_t *)&response, sizeof(response), long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_manuf_info_sbuf(&response, sizeof(response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (sb != NULL) sbuf_delete(sb); return (retval); } static int getdevid(struct cam_devitem *item) { int retval = 0; union ccb *ccb = NULL; struct cam_device *dev; dev = cam_open_btl(item->dev_match.path_id, item->dev_match.target_id, item->dev_match.target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); retval = 1; goto bailout; } item->device_id_len = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); retval = 1; goto bailout; } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); /* * On the first try, we just probe for the size of the data, and * then allocate that much memory and try again. */ retry: ccb->ccb_h.func_code = XPT_DEV_ADVINFO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->cdai.flags = CDAI_FLAG_NONE; ccb->cdai.buftype = CDAI_TYPE_SCSI_DEVID; ccb->cdai.bufsiz = item->device_id_len; if (item->device_id_len != 0) ccb->cdai.buf = (uint8_t *)item->device_id; if (cam_send_ccb(dev, ccb) < 0) { warn("%s: error sending XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } if (ccb->ccb_h.status != CAM_REQ_CMP) { warnx("%s: CAM status %#x", __func__, ccb->ccb_h.status); retval = 1; goto bailout; } if (item->device_id_len == 0) { /* * This is our first time through. Allocate the buffer, * and then go back to get the data. */ if (ccb->cdai.provsiz == 0) { warnx("%s: invalid .provsiz field returned with " "XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } item->device_id_len = ccb->cdai.provsiz; item->device_id = malloc(item->device_id_len); if (item->device_id == NULL) { warn("%s: unable to allocate %d bytes", __func__, item->device_id_len); retval = 1; goto bailout; } ccb->ccb_h.status = CAM_REQ_INPROG; goto retry; } bailout: if (dev != NULL) cam_close_device(dev); if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * XXX KDM merge this code with getdevtree()? 
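*/
/*
 * getdevid() above uses a common two-pass pattern: issue the request with
 * a zero-length buffer, let the kernel report the provisioned size in
 * cdai.provsiz, then allocate that much and reissue.  The pattern in
 * general form, as a sketch with a hypothetical probe callback (assumes
 * <stdlib.h>):
 */
static int
fetch_sized(int (*probe)(void *buf, size_t len, size_t *needed),
	    void **bufp, size_t *lenp)
{
	size_t needed = 0;

	/* First pass: no buffer, just learn the required size. */
	if (probe(NULL, 0, &needed) != 0 || needed == 0)
		return (-1);
	if ((*bufp = malloc(needed)) == NULL)
		return (-1);
	/* Second pass: fetch the data for real. */
	if (probe(*bufp, needed, &needed) != 0) {
		free(*bufp);
		*bufp = NULL;
		return (-1);
	}
	*lenp = needed;
	return (0);
}
/*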
*/ static int buildbusdevlist(struct cam_devlist *devlist) { union ccb ccb; int bufsize, fd = -1; struct dev_match_pattern *patterns; struct cam_devitem *item = NULL; int skip_device = 0; int retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; ccb.cdm.num_patterns = 2; ccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern) * ccb.cdm.num_patterns; patterns = (struct dev_match_pattern *)malloc(ccb.cdm.pattern_buf_len); if (patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } ccb.cdm.patterns = patterns; bzero(patterns, ccb.cdm.pattern_buf_len); patterns[0].type = DEV_MATCH_DEVICE; patterns[0].pattern.device_pattern.flags = DEV_MATCH_PATH; patterns[0].pattern.device_pattern.path_id = devlist->path_id; patterns[1].type = DEV_MATCH_PERIPH; patterns[1].pattern.periph_pattern.flags = PERIPH_MATCH_PATH; patterns[1].pattern.periph_pattern.path_id = devlist->path_id; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); retval = 1; goto bailout; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; dev_result = &ccb.cdm.matches[i].result.device_result; if (dev_result->flags & DEV_RESULT_UNCONFIGURED) { skip_device = 1; break; } else skip_device = 0; item = malloc(sizeof(*item)); if (item == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*item)); retval = 1; goto bailout; } bzero(item, sizeof(*item)); bcopy(dev_result, &item->dev_match, sizeof(*dev_result)); STAILQ_INSERT_TAIL(&devlist->dev_queue, item, links); if (getdevid(item) != 0) { retval = 1; goto bailout; } break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (skip_device != 0) break; item->num_periphs++; item->periph_matches = realloc( item->periph_matches, item->num_periphs * sizeof(struct periph_match_result)); if (item->periph_matches == NULL) { warn("%s: error allocating periph " "list", __func__); retval = 1; goto bailout; } bcopy(periph_result, &item->periph_matches[ item->num_periphs - 1], sizeof(*periph_result)); break; } default: fprintf(stderr, "%s: unexpected match " "type %d\n", __func__, ccb.cdm.matches[i].type); retval = 1; goto bailout; break; /*NOTREACHED*/ } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); free(patterns); free(ccb.cdm.matches); if (retval != 0) freebusdevlist(devlist); return (retval); } static void freebusdevlist(struct cam_devlist *devlist) { struct cam_devitem *item, *item2; STAILQ_FOREACH_SAFE(item, 
&devlist->dev_queue, links, item2) { STAILQ_REMOVE(&devlist->dev_queue, item, cam_devitem, links); free(item->device_id); free(item->periph_matches); free(item); } } static struct cam_devitem * findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr) { struct cam_devitem *item; STAILQ_FOREACH(item, &devlist->dev_queue, links) { struct scsi_vpd_id_descriptor *idd; /* * XXX KDM look for LUN IDs as well? */ idd = scsi_get_devid(item->device_id, item->device_id_len, scsi_devid_is_sas_target); if (idd == NULL) continue; if (scsi_8btou64(idd->identifier) == sasaddr) return (item); } return (NULL); } static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { struct smp_report_general_request *rgrequest = NULL; struct smp_report_general_response *rgresponse = NULL; struct smp_discover_request *disrequest = NULL; struct smp_discover_response *disresponse = NULL; struct cam_devlist devlist; union ccb *ccb; int long_response = 0; int num_phys = 0; int quiet = 0; int retval; int i, c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); STAILQ_INIT(&devlist.dev_queue); rgrequest = malloc(sizeof(*rgrequest)); if (rgrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgrequest)); retval = 1; goto bailout; } rgresponse = malloc(sizeof(*rgresponse)); if (rgresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgresponse)); retval = 1; goto bailout; } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; case 'q': quiet = 1; break; default: break; } } smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, rgrequest, /*request_len*/ sizeof(*rgrequest), (uint8_t *)rgresponse, /*response_len*/ sizeof(*rgresponse), /*long_response*/ long_response, timeout); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } num_phys = rgresponse->num_phys; if (num_phys == 0) { if (quiet == 0) fprintf(stdout, "%s: No Phys reported\n", __func__); retval = 1; goto bailout; } devlist.path_id = device->path_id; retval = buildbusdevlist(&devlist); if (retval != 0) goto bailout; if (quiet == 0) { fprintf(stdout, "%d PHYs:\n", num_phys); fprintf(stdout, "PHY Attached SAS Address\n"); } disrequest = malloc(sizeof(*disrequest)); if (disrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disrequest)); retval = 1; goto bailout; } disresponse = malloc(sizeof(*disresponse)); if (disresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disresponse)); retval = 1; goto bailout; } for (i = 0; i < num_phys; i++) { struct cam_devitem *item; struct device_match_result *dev_match; char vendor[16], product[48], revision[16]; char tmpstr[256]; int j; bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; smp_discover(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, disrequest, sizeof(*disrequest), (uint8_t 
*)disresponse, sizeof(*disresponse), long_response, /*ignore_zone_group*/ 0, /*phy*/ i, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (disresponse->function_result != SMP_FR_PHY_VACANT))) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } if (disresponse->function_result == SMP_FR_PHY_VACANT) { if (quiet == 0) fprintf(stdout, "%3d \n", i); continue; } if (disresponse->attached_device == SMP_DIS_AD_TYPE_NONE) { item = NULL; } else { item = findsasdevice(&devlist, scsi_8btou64(disresponse->attached_sas_address)); } if ((quiet == 0) || (item != NULL)) { fprintf(stdout, "%3d 0x%016jx", i, (uintmax_t)scsi_8btou64( disresponse->attached_sas_address)); if (item == NULL) { fprintf(stdout, "\n"); continue; } } else if (quiet != 0) continue; dev_match = &item->dev_match; if (dev_match->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_match->inq_data.vendor, sizeof(dev_match->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_match->inq_data.product, sizeof(dev_match->inq_data.product), sizeof(product)); cam_strvis(revision, dev_match->inq_data.revision, sizeof(dev_match->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if ((dev_match->protocol == PROTO_ATA) || (dev_match->protocol == PROTO_SATAPM)) { cam_strvis(product, dev_match->ident_data.model, sizeof(dev_match->ident_data.model), sizeof(product)); cam_strvis(revision, dev_match->ident_data.revision, sizeof(dev_match->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else { sprintf(tmpstr, "<>"); } fprintf(stdout, " %-33s ", tmpstr); /* * If we have 0 periphs, that's a bug... */ if (item->num_periphs == 0) { fprintf(stdout, "\n"); continue; } fprintf(stdout, "("); for (j = 0; j < item->num_periphs; j++) { if (j > 0) fprintf(stdout, ","); fprintf(stdout, "%s%d", item->periph_matches[j].periph_name, item->periph_matches[j].unit_number); } fprintf(stdout, ")\n"); } bailout: if (ccb != NULL) cam_freeccb(ccb); free(rgrequest); free(rgresponse); free(disrequest); free(disresponse); freebusdevlist(&devlist); return (retval); } static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int t = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 't': t = atoi(optarg); break; default: break; } } if (strcmp(argv[1], "idle") == 0) { if (t == -1) cmd = ATA_IDLE_IMMEDIATE; else cmd = ATA_IDLE_CMD; } else if (strcmp(argv[1], "standby") == 0) { if (t == -1) cmd = ATA_STANDBY_IMMEDIATE; else cmd = ATA_STANDBY_CMD; } else { cmd = ATA_SLEEP; t = -1; } if (t < 0) sc = 0; else if (t <= (240 * 5)) sc = (t + 4) / 5; else if (t <= (252 * 5)) /* special encoding for 21 minutes */ sc = 252; else if (t <= (11 * 30 * 60)) sc = (t - 1) / (30 * 60) + 241; else sc = 253; retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? 
timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } static int ataaxm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int l = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': l = atoi(optarg); break; default: break; } } sc = 0; if (strcmp(argv[1], "apm") == 0) { if (l == -1) cmd = 0x85; else { cmd = 0x05; sc = l; } } else /* aam */ { if (l == -1) cmd = 0xC2; else { cmd = 0x42; sc = l; } } retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SETFEATURES, /*features*/cmd, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } int scsigetopcodes(struct cam_device *device, int opcode_set, int opcode, int show_sa_errors, int sa_set, int service_action, int timeout_desc, int retry_count, int timeout, int verbosemode, uint32_t *fill_len, uint8_t **data_ptr) { union ccb *ccb = NULL; uint8_t *buf = NULL; uint32_t alloc_len = 0, num_opcodes; uint32_t valid_len = 0; uint32_t avail_len = 0; struct scsi_report_supported_opcodes_all *all_hdr; struct scsi_report_supported_opcodes_one *one; int options = 0; int retval = 0; /* * Make it clear that we haven't yet allocated or filled anything. */ *fill_len = 0; *data_ptr = NULL; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); if (opcode_set != 0) { options |= RSO_OPTIONS_OC; num_opcodes = 1; alloc_len = sizeof(*one) + CAM_MAX_CDBLEN; } else { num_opcodes = 256; alloc_len = sizeof(*all_hdr) + (num_opcodes * sizeof(struct scsi_report_supported_opcodes_descr)); } if (timeout_desc != 0) { options |= RSO_RCTD; alloc_len += num_opcodes * sizeof(struct scsi_report_supported_opcodes_timeout); } if (sa_set != 0) { options |= RSO_OPTIONS_OC_SA; if (show_sa_errors != 0) options &= ~RSO_OPTIONS_OC; } retry_alloc: if (buf != NULL) { free(buf); buf = NULL; } buf = malloc(alloc_len); if (buf == NULL) { warn("Unable to allocate %u bytes", alloc_len); retval = 1; goto bailout; } bzero(buf, alloc_len); scsi_report_supported_opcodes(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*options*/ options, /*req_opcode*/ opcode, /*req_service_action*/ service_action, /*data_ptr*/ buf, /*dxfer_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout : 10000);

	ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;

	if (retry_count != 0)
		ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;

	if (cam_send_ccb(device, ccb) < 0) {
		perror("error sending REPORT SUPPORTED OPERATION CODES");
		retval = 1;
		goto bailout;
	}

	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (verbosemode != 0)
			cam_error_print(device, ccb, CAM_ESF_ALL,
					CAM_EPF_ALL, stderr);
		retval = 1;
		goto bailout;
	}

	valid_len = ccb->csio.dxfer_len - ccb->csio.resid;

	if (((options & RSO_OPTIONS_MASK) == RSO_OPTIONS_ALL)
	 && (valid_len >= sizeof(*all_hdr))) {
		all_hdr = (struct scsi_report_supported_opcodes_all *)buf;
		avail_len = scsi_4btoul(all_hdr->length) + sizeof(*all_hdr);
	} else if (((options & RSO_OPTIONS_MASK) != RSO_OPTIONS_ALL)
		&& (valid_len >= sizeof(*one))) {
		uint32_t cdb_length;

		one = (struct scsi_report_supported_opcodes_one *)buf;
		cdb_length = scsi_2btoul(one->cdb_length);
		avail_len = sizeof(*one) + cdb_length;
		if (one->support & RSO_ONE_CTDP) {
			struct scsi_report_supported_opcodes_timeout *td;

			td = (struct scsi_report_supported_opcodes_timeout *)
			    &buf[avail_len];
			if (valid_len >= (avail_len + sizeof(td->length))) {
				avail_len += scsi_2btoul(td->length) +
				    sizeof(td->length);
			} else {
				avail_len += sizeof(*td);
			}
		}
	}

	/*
	 * avail_len could be zero if we didn't get enough data back from
	 * the target to determine how much data is actually available.
	 */
	if ((avail_len != 0)
	 && (avail_len > valid_len)) {
		alloc_len = avail_len;
		goto retry_alloc;
	}

	*fill_len = valid_len;
	*data_ptr = buf;
bailout:
	if (retval != 0)
		free(buf);

	cam_freeccb(ccb);

	return (retval);
}

static int
scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set,
		   int req_sa, uint8_t *buf, uint32_t valid_len)
{
	struct scsi_report_supported_opcodes_one *one;
	struct scsi_report_supported_opcodes_timeout *td;
	uint32_t cdb_len = 0, td_len = 0;
	const char *op_desc = NULL;
	unsigned int i;
	int retval = 0;

	one = (struct scsi_report_supported_opcodes_one *)buf;

	/*
	 * If we don't have the full single opcode descriptor, no point in
	 * continuing.
	 */
	if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one,
	    cdb_length)) {
		warnx("Only %u bytes returned, not enough to verify support",
		      valid_len);
		retval = 1;
		goto bailout;
	}

	op_desc = scsi_op_desc(req_opcode, &device->inq_data);

	printf("%s (0x%02x)", op_desc != NULL ? op_desc : "UNKNOWN",
	       req_opcode);

	if (sa_set != 0)
		printf(", SA 0x%x", req_sa);
	printf(": ");

	switch (one->support & RSO_ONE_SUP_MASK) {
	case RSO_ONE_SUP_UNAVAIL:
		printf("No command support information currently available\n");
		break;
	case RSO_ONE_SUP_NOT_SUP:
		printf("Command not supported\n");
		retval = 1;
		goto bailout;
		break; /*NOTREACHED*/
	case RSO_ONE_SUP_AVAIL:
		printf("Command is supported, complies with a SCSI standard\n");
		break;
	case RSO_ONE_SUP_VENDOR:
		printf("Command is supported, vendor-specific "
		       "implementation\n");
		break;
	default:
		printf("Unknown command support flags %#x\n",
		       one->support & RSO_ONE_SUP_MASK);
		break;
	}

	/*
	 * If we don't have the CDB length, it isn't exactly an error, the
	 * command probably isn't supported.
	 */
	if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one,
	    cdb_usage))
		goto bailout;

	cdb_len = scsi_2btoul(one->cdb_length);

	/*
	 * If our valid data doesn't include the full reported length,
	 * return.  The caller should have detected this and adjusted the
	 * allocation length to get all of the available data.
	 */
	if (valid_len < sizeof(*one) + cdb_len) {
		retval = 1;
		goto bailout;
	}

	/*
	 * If all we have is the opcode, there is no point in printing out
	 * the usage bitmap.
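*/
/*
 * The usage bitmap printed below is defined by SPC: one byte per CDB
 * byte, where a set bit means the device server actually evaluates that
 * bit of the CDB.  A sketch of expanding one usage byte into the bit
 * positions the device checks (illustrative helper, not part of this
 * file; assumes <stdio.h>):
 */
static void
print_usage_bits(int cdb_byte, uint8_t usage)
{
	int bit;

	printf("CDB byte %d checks bits:", cdb_byte);
	for (bit = 7; bit >= 0; bit--) {
		if (usage & (1 << bit))
			printf(" %d", bit);
	}
	printf("\n");
}
/*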
*/ if (cdb_len <= 1) { retval = 1; goto bailout; } printf("CDB usage bitmap:"); for (i = 0; i < cdb_len; i++) { printf(" %02x", one->cdb_usage[i]); } printf("\n"); /* * If we don't have a timeout descriptor, we're done. */ if ((one->support & RSO_ONE_CTDP) == 0) goto bailout; /* * If we don't have enough valid length to include the timeout * descriptor length, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + sizeof(td->length))) goto bailout; td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); /* * If we don't have the full timeout descriptor, we're done. */ if (td_len < sizeof(*td)) goto bailout; /* * If we don't have enough valid length to contain the full timeout * descriptor, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + td_len)) goto bailout; printf("Timeout information:\n"); printf("Command-specific: 0x%02x\n", td->cmd_specific); printf("Nominal timeout: %u seconds\n", scsi_4btoul(td->nominal_time)); printf("Recommended timeout: %u seconds\n", scsi_4btoul(td->recommended_time)); bailout: return (retval); } static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_all *hdr; struct scsi_report_supported_opcodes_descr *desc; uint32_t avail_len = 0, used_len = 0; uint8_t *cur_ptr; int retval = 0; if (valid_len < sizeof(*hdr)) { warnx("%s: not enough returned data (%u bytes) opcode list", __func__, valid_len); retval = 1; goto bailout; } hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(hdr->length); avail_len += sizeof(hdr->length); /* * Take the lesser of the amount of data the drive claims is * available, and the amount of data the HBA says was returned. */ avail_len = MIN(avail_len, valid_len); used_len = sizeof(hdr->length); printf("%-6s %4s %8s ", "Opcode", "SA", "CDB len" ); if (td_req != 0) printf("%5s %6s %6s ", "CS", "Nom", "Rec"); printf(" Description\n"); while ((avail_len - used_len) > sizeof(*desc)) { struct scsi_report_supported_opcodes_timeout *td; uint32_t td_len; const char *op_desc = NULL; cur_ptr = &buf[used_len]; desc = (struct scsi_report_supported_opcodes_descr *)cur_ptr; op_desc = scsi_op_desc(desc->opcode, &device->inq_data); if (op_desc == NULL) op_desc = "UNKNOWN"; printf("0x%02x %#4x %8u ", desc->opcode, scsi_2btoul(desc->service_action), scsi_2btoul(desc->cdb_length)); used_len += sizeof(*desc); if ((desc->flags & RSO_CTDP) == 0) { printf(" %s\n", op_desc); continue; } /* * If we don't have enough space to fit a timeout * descriptor, then we're done. */ if (avail_len - used_len < sizeof(*td)) { used_len = avail_len; printf(" %s\n", op_desc); continue; } cur_ptr = &buf[used_len]; td = (struct scsi_report_supported_opcodes_timeout *)cur_ptr; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); used_len += td_len; /* * If the given timeout descriptor length is less than what * we understand, skip it. 
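*/
/*
 * The loop above is an instance of walking self-describing, variable
 * length records: advance by each record's own length field, but only
 * dereference data that fits within the remaining valid length.  A
 * condensed, generic sketch of that pattern (note that real SCSI length
 * fields would need scsi_2btoul() rather than a host-endian uint16_t):
 */
struct vrec {
	uint16_t	len;		/* payload bytes that follow */
	uint8_t		payload[];
};

static void
walk_records(const uint8_t *buf, size_t valid_len)
{
	size_t off = 0;

	while (valid_len - off >= sizeof(struct vrec)) {
		const struct vrec *r = (const void *)(buf + off);
		size_t rec_len = sizeof(struct vrec) + r->len;

		if (off + rec_len > valid_len)
			break;		/* truncated record, stop */
		/* ... interpret r->payload here ... */
		off += rec_len;		/* advance by the record's own size */
	}
}
/*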
*/
		if (td_len < sizeof(*td)) {
			printf(" %s\n", op_desc);
			continue;
		}

		printf(" 0x%02x %6u %6u %s\n", td->cmd_specific,
		       scsi_4btoul(td->nominal_time),
		       scsi_4btoul(td->recommended_time), op_desc);
	}
bailout:
	return (retval);
}

static int
scsiopcodes(struct cam_device *device, int argc, char **argv,
	    char *combinedopt, int retry_count, int timeout, int verbosemode)
{
	int c;
	uint32_t opcode = 0, service_action = 0;
	int td_set = 0, opcode_set = 0, sa_set = 0;
	int show_sa_errors = 1;
	uint32_t valid_len = 0;
	uint8_t *buf = NULL;
	char *endptr;
	int retval = 0;

	while ((c = getopt(argc, argv, combinedopt)) != -1) {
		switch (c) {
		case 'N':
			show_sa_errors = 0;
			break;
		case 'o':
			opcode = strtoul(optarg, &endptr, 0);
			if (*endptr != '\0') {
				warnx("Invalid opcode \"%s\", must be a number",
				      optarg);
				retval = 1;
				goto bailout;
			}
			if (opcode > 0xff) {
				warnx("Invalid opcode %#x, must be between "
				      "0 and 0xff inclusive", opcode);
				retval = 1;
				goto bailout;
			}
			opcode_set = 1;
			break;
		case 's':
			service_action = strtoul(optarg, &endptr, 0);
			if (*endptr != '\0') {
				warnx("Invalid service action \"%s\", must "
				      "be a number", optarg);
				retval = 1;
				goto bailout;
			}
			if (service_action > 0xffff) {
				warnx("Invalid service action %#x, must "
				      "be between 0 and 0xffff inclusive",
				      service_action);
				retval = 1;
				goto bailout;
			}
			sa_set = 1;
			break;
		case 'T':
			td_set = 1;
			break;
		default:
			break;
		}
	}

	if ((sa_set != 0)
	 && (opcode_set == 0)) {
		warnx("You must specify an opcode with -o if a service "
		      "action is given");
		retval = 1;
		goto bailout;
	}
	retval = scsigetopcodes(device, opcode_set, opcode, show_sa_errors,
				sa_set, service_action, td_set, retry_count,
				timeout, verbosemode, &valid_len, &buf);
	if (retval != 0)
		goto bailout;

	if ((opcode_set != 0)
	 || (sa_set != 0)) {
		retval = scsiprintoneopcode(device, opcode, sa_set,
					    service_action, buf, valid_len);
	} else {
		retval = scsiprintopcodes(device, td_set, buf, valid_len);
	}

bailout:
	free(buf);

	return (retval);
}
#endif /* MINIMALISTIC */

+static int
+scsireprobe(struct cam_device *device)
+{
+	union ccb *ccb;
+	int retval = 0;
+
+	ccb = cam_getccb(device);
+
+	if (ccb == NULL) {
+		warnx("%s: error allocating ccb", __func__);
+		return (1);
+	}
+
+	bzero(&(&ccb->ccb_h)[1],
+	    sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
+
+	ccb->ccb_h.func_code = XPT_REPROBE_LUN;
+
+	if (cam_send_ccb(device, ccb) < 0) {
+		warn("error sending XPT_REPROBE_LUN CCB");
+		retval = 1;
+		goto bailout;
+	}
+
+	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+		cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr);
+		retval = 1;
+		goto bailout;
+	}
+
+bailout:
+	cam_freeccb(ccb);
+
+	return (retval);
+}
+
void
usage(int printlong)
{
	fprintf(printlong ?
stdout : stderr,
"usage: camcontrol <command> [device id][generic args][command args]\n"
"        camcontrol devlist    [-b] [-v]\n"
#ifndef MINIMALISTIC
"        camcontrol periphlist [dev_id][-n dev_name] [-u unit]\n"
"        camcontrol tur        [dev_id][generic args]\n"
"        camcontrol inquiry    [dev_id][generic args] [-D] [-S] [-R]\n"
"        camcontrol identify   [dev_id][generic args] [-v]\n"
"        camcontrol reportluns [dev_id][generic args] [-c] [-l] [-r report]\n"
"        camcontrol readcap    [dev_id][generic args] [-b] [-h] [-H] [-N]\n"
"                              [-q] [-s]\n"
"        camcontrol start      [dev_id][generic args]\n"
"        camcontrol stop       [dev_id][generic args]\n"
"        camcontrol load       [dev_id][generic args]\n"
"        camcontrol eject      [dev_id][generic args]\n"
+"        camcontrol reprobe    [dev_id][generic args]\n"
#endif /* MINIMALISTIC */
"        camcontrol rescan     <all | bus[:target:lun]>\n"
"        camcontrol reset      <all | bus[:target:lun]>\n"
#ifndef MINIMALISTIC
"        camcontrol defects    [dev_id][generic args] <-f format> [-P][-G]\n"
"                              [-q][-s][-S offset][-X]\n"
"        camcontrol modepage   [dev_id][generic args] <-m page | -l>\n"
"                              [-P pagectl][-e | -b][-d]\n"
"        camcontrol cmd        [dev_id][generic args]\n"
"                              <-a cmd [args] | -c cmd [args]>\n"
"                              [-d] [-f] [-i len fmt|-o len fmt [args]] [-r fmt]\n"
"        camcontrol smpcmd     [dev_id][generic args]\n"
"                              <-r len fmt [args]> <-R len fmt [args]>\n"
"        camcontrol smprg      [dev_id][generic args][-l]\n"
"        camcontrol smppc      [dev_id][generic args] <-p phy> [-l]\n"
"                              [-o operation][-d name][-m rate][-M rate]\n"
"                              [-T pp_timeout][-a enable|disable]\n"
"                              [-A enable|disable][-s enable|disable]\n"
"                              [-S enable|disable]\n"
"        camcontrol smpphylist [dev_id][generic args][-l][-q]\n"
"        camcontrol smpmaninfo [dev_id][generic args][-l]\n"
"        camcontrol debug      [-I][-P][-T][-S][-X][-c]\n"
"                              <all|off|bus[:target[:lun]]>\n"
"        camcontrol tags       [dev_id][generic args] [-N tags] [-q] [-v]\n"
"        camcontrol negotiate  [dev_id][generic args] [-a][-c]\n"
"                              [-D <enable|disable>][-M mode][-O offset]\n"
"                              [-q][-R syncrate][-v][-T <enable|disable>]\n"
"                              [-U][-W bus_width]\n"
"        camcontrol format     [dev_id][generic args][-q][-r][-w][-y]\n"
"        camcontrol sanitize   [dev_id][generic args]\n"
"                              [-a overwrite|block|crypto|exitfailure]\n"
"                              [-c passes][-I][-P pattern][-q][-U][-r][-w]\n"
"                              [-y]\n"
"        camcontrol idle       [dev_id][generic args][-t time]\n"
"        camcontrol standby    [dev_id][generic args][-t time]\n"
"        camcontrol sleep      [dev_id][generic args]\n"
"        camcontrol apm        [dev_id][generic args][-l level]\n"
"        camcontrol aam        [dev_id][generic args][-l level]\n"
"        camcontrol fwdownload [dev_id][generic args] <-f fw_image> [-q]\n"
"                              [-s][-y]\n"
"        camcontrol security   [dev_id][generic args]\n"
"                              <-d pwd | -e pwd | -f | -h pwd | -k pwd>\n"
"                              [-l <high|maximum>] [-q] [-s pwd] [-T timeout]\n"
"                              [-U <user|master>] [-y]\n"
"        camcontrol hpa        [dev_id][generic args] [-f] [-l] [-P] [-p pwd]\n"
"                              [-q] [-s max_sectors] [-U pwd] [-y]\n"
"        camcontrol persist    [dev_id][generic args] <-i action|-o action>\n"
"                              [-a][-I tid][-k key][-K sa_key][-p][-R rtp]\n"
"                              [-s scope][-S][-T type][-U]\n"
"        camcontrol attrib     [dev_id][generic args] <-r action|-w attr>\n"
"                              [-a attr_num][-c][-e elem][-F form1,form2]\n"
"                              [-p part][-s start][-T type][-V vol]\n"
"        camcontrol opcodes    [dev_id][generic args][-o opcode][-s SA]\n"
"                              [-N][-T]\n"
#endif /* MINIMALISTIC */
"        camcontrol help\n");
	if (!printlong)
		return;
#ifndef MINIMALISTIC
	fprintf(stdout,
"Specify one of the following options:\n"
"devlist list all CAM devices\n"
"periphlist list all CAM peripheral drivers attached to a device\n"
"tur send a test unit ready to the named device\n"
"inquiry send a SCSI inquiry command to the named device\n"
"identify send an ATA identify command to the named device\n"
"reportluns send a SCSI report luns command to the 
device\n" "readcap send a SCSI read capacity command to the device\n" "start send a Start Unit command to the device\n" "stop send a Stop Unit command to the device\n" "load send a Start Unit command to the device with the load bit set\n" "eject send a Stop Unit command to the device with the eject bit set\n" +"reprobe update capacity information of the given device\n" "rescan rescan all busses, the given bus, or bus:target:lun\n" "reset reset all busses, the given bus, or bus:target:lun\n" "defects read the defect list of the specified device\n" "modepage display or edit (-e) the given mode page\n" "cmd send the given SCSI command, may need -i or -o as well\n" "smpcmd send the given SMP command, requires -o and -i\n" "smprg send the SMP Report General command\n" "smppc send the SMP PHY Control command, requires -p\n" "smpphylist display phys attached to a SAS expander\n" "smpmaninfo send the SMP Report Manufacturer Info command\n" "debug turn debugging on/off for a bus, target, or lun, or all devices\n" "tags report or set the number of transaction slots for a device\n" "negotiate report or set device negotiation parameters\n" "format send the SCSI FORMAT UNIT command to the named device\n" "sanitize send the SCSI SANITIZE command to the named device\n" "idle send the ATA IDLE command to the named device\n" "standby send the ATA STANDBY command to the named device\n" "sleep send the ATA SLEEP command to the named device\n" "fwdownload program firmware of the named device with the given image\n" "security report or send ATA security commands to the named device\n" "persist send the SCSI PERSISTENT RESERVE IN or OUT commands\n" "attrib send the SCSI READ or WRITE ATTRIBUTE commands\n" "opcodes send the SCSI REPORT SUPPORTED OPCODES command\n" "help this message\n" "Device Identifiers:\n" "bus:target specify the bus and target, lun defaults to 0\n" "bus:target:lun specify the bus, target and lun\n" "deviceUNIT specify the device name, like \"da4\" or \"cd2\"\n" "Generic arguments:\n" "-v be verbose, print out sense information\n" "-t timeout command timeout in seconds, overrides default timeout\n" "-n dev_name specify device name, e.g. \"da\", \"cd\"\n" "-u unit specify unit number, e.g. 
\"0\", \"5\"\n" "-E have the kernel attempt to perform SCSI error recovery\n" "-C count specify the SCSI command retry count (needs -E to work)\n" "modepage arguments:\n" "-l list all available mode pages\n" "-m page specify the mode page to view or edit\n" "-e edit the specified mode page\n" "-b force view to binary mode\n" "-d disable block descriptors for mode sense\n" "-P pgctl page control field 0-3\n" "defects arguments:\n" "-f format specify defect list format (block, bfi or phys)\n" "-G get the grown defect list\n" "-P get the permanent defect list\n" "inquiry arguments:\n" "-D get the standard inquiry data\n" "-S get the serial number\n" "-R get the transfer rate, etc.\n" "reportluns arguments:\n" "-c only report a count of available LUNs\n" "-l only print out luns, and not a count\n" "-r specify \"default\", \"wellknown\" or \"all\"\n" "readcap arguments\n" "-b only report the blocksize\n" "-h human readable device size, base 2\n" "-H human readable device size, base 10\n" "-N print the number of blocks instead of last block\n" "-q quiet, print numbers only\n" "-s only report the last block/device size\n" "cmd arguments:\n" "-c cdb [args] specify the SCSI CDB\n" "-i len fmt specify input data and input data format\n" "-o len fmt [args] specify output data and output data fmt\n" "smpcmd arguments:\n" "-r len fmt [args] specify the SMP command to be sent\n" "-R len fmt [args] specify SMP response format\n" "smprg arguments:\n" "-l specify the long response format\n" "smppc arguments:\n" "-p phy specify the PHY to operate on\n" "-l specify the long request/response format\n" "-o operation specify the phy control operation\n" "-d name set the attached device name\n" "-m rate set the minimum physical link rate\n" "-M rate set the maximum physical link rate\n" "-T pp_timeout set the partial pathway timeout value\n" "-a enable|disable enable or disable SATA slumber\n" "-A enable|disable enable or disable SATA partial phy power\n" "-s enable|disable enable or disable SAS slumber\n" "-S enable|disable enable or disable SAS partial phy power\n" "smpphylist arguments:\n" "-l specify the long response format\n" "-q only print phys with attached devices\n" "smpmaninfo arguments:\n" "-l specify the long response format\n" "debug arguments:\n" "-I CAM_DEBUG_INFO -- scsi commands, errors, data\n" "-T CAM_DEBUG_TRACE -- routine flow tracking\n" "-S CAM_DEBUG_SUBTRACE -- internal routine command flow\n" "-c CAM_DEBUG_CDB -- print out SCSI CDBs only\n" "tags arguments:\n" "-N tags specify the number of tags to use for this device\n" "-q be quiet, don't report the number of tags\n" "-v report a number of tag-related parameters\n" "negotiate arguments:\n" "-a send a test unit ready after negotiation\n" "-c report/set current negotiation settings\n" "-D \"enable\" or \"disable\" disconnection\n" "-M mode set ATA mode\n" "-O offset set command delay offset\n" "-q be quiet, don't report anything\n" "-R syncrate synchronization rate in MHz\n" "-T \"enable\" or \"disable\" tagged queueing\n" "-U report/set user negotiation settings\n" "-W bus_width set the bus width in bits (8, 16 or 32)\n" "-v also print a Path Inquiry CCB for the controller\n" "format arguments:\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-w don't send immediate format command\n" "-y don't ask any questions\n" "sanitize arguments:\n" "-a operation operation mode: overwrite, block, crypto or exitfailure\n" "-c passes overwrite passes to perform (1 to 31)\n" "-I invert overwrite pattern after each 
pass\n" "-P pattern path to overwrite pattern file\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-U run operation in unrestricted completion exit mode\n" "-w don't send immediate sanitize command\n" "-y don't ask any questions\n" "idle/standby arguments:\n" "-t number of seconds before respective state.\n" "fwdownload arguments:\n" "-f fw_image path to firmware image file\n" "-q don't print informational messages, only errors\n" "-s run in simulation mode\n" "-v print info for every firmware segment sent to device\n" "-y don't ask any questions\n" "security arguments:\n" "-d pwd disable security using the given password for the selected\n" " user\n" "-e pwd erase the device using the given pwd for the selected user\n" "-f freeze the security configuration of the specified device\n" "-h pwd enhanced erase the device using the given pwd for the\n" " selected user\n" "-k pwd unlock the device using the given pwd for the selected\n" " user\n" "-l specifies which security level to set: high or maximum\n" "-q be quiet, do not print any status messages\n" "-s pwd password the device (enable security) using the given\n" " pwd for the selected user\n" "-T timeout overrides the timeout (seconds) used for erase operation\n" "-U specifies which user to set: user or master\n" "-y don't ask any questions\n" "hpa arguments:\n" "-f freeze the HPA configuration of the device\n" "-l lock the HPA configuration of the device\n" "-P make the HPA max sectors persist\n" "-p pwd Set the HPA configuration password required for unlock\n" " calls\n" "-q be quiet, do not print any status messages\n" "-s sectors configures the maximum user accessible sectors of the\n" " device\n" "-U pwd unlock the HPA configuration of the device\n" "-y don't ask any questions\n" "persist arguments:\n" "-i action specify read_keys, read_reservation, report_cap, or\n" " read_full_status\n" "-o action specify register, register_ignore, reserve, release,\n" " clear, preempt, preempt_abort, register_move, replace_lost\n" "-a set the All Target Ports (ALL_TG_PT) bit\n" "-I tid specify a Transport ID, e.g.: sas,0x1234567812345678\n" "-k key specify the Reservation Key\n" "-K sa_key specify the Service Action Reservation Key\n" "-p set the Activate Persist Through Power Loss bit\n" "-R rtp specify the Relative Target Port\n" "-s scope specify the scope: lun, extent, element or a number\n" "-S specify Transport ID for register, requires -I\n" "-T res_type specify the reservation type: read_shared, wr_ex, rd_ex,\n" " ex_ac, wr_ex_ro, ex_ac_ro, wr_ex_ar, ex_ac_ar\n" "-U unregister the current initiator for register_move\n" "attrib arguments:\n" "-r action specify attr_values, attr_list, lv_list, part_list, or\n" " supp_attr\n" "-w attr specify an attribute to write, one -w argument per attr\n" "-a attr_num only display this attribute number\n" "-c get cached attributes\n" "-e elem_addr request attributes for the given element in a changer\n" "-F form1,form2 output format, comma separated list: text_esc, text_raw,\n" " nonascii_esc, nonascii_trim, nonascii_raw, field_all,\n" " field_none, field_desc, field_num, field_size, field_rw\n" "-p partition request attributes for the given partition\n" "-s start_attr request attributes starting at the given number\n" "-T elem_type specify the element type (used with -e)\n" "-V logical_vol specify the logical volume ID\n" "opcodes arguments:\n" "-o opcode specify the individual opcode to list\n" "-s service_action specify the service action for the opcode\n" "-N do not 
return SCSI error for unsupported SA\n" "-T request nominal and recommended timeout values\n" ); #endif /* MINIMALISTIC */ } int main(int argc, char **argv) { int c; char *device = NULL; int unit = 0; struct cam_device *cam_dev = NULL; int timeout = 0, retry_count = 1; camcontrol_optret optreturn; char *tstr; const char *mainopt = "C:En:t:u:v"; const char *subopt = NULL; char combinedopt[256]; int error = 0, optstart = 2; int devopen = 1; #ifndef MINIMALISTIC path_id_t bus; target_id_t target; lun_id_t lun; #endif /* MINIMALISTIC */ cmdlist = CAM_CMD_NONE; arglist = CAM_ARG_NONE; if (argc < 2) { usage(0); exit(1); } /* * Get the base option. */ optreturn = getoption(option_table,argv[1], &cmdlist, &arglist,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. * * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expect options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!) command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) sprintf(combinedopt, "%s%s", mainopt, subopt); else sprintf(combinedopt, "%s", mainopt); /* * For these options we do not parse optional device arguments and * we do not open a passthrough device. 
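*/
/*
 * A compressed sketch of the two-pass getopt(3) scheme described above:
 * both passes are given the combined option string, and each pass simply
 * ignores the letters that belong to the other.  optreset is the BSD way
 * to restart getopt(); it does not exist on glibc.  Hypothetical demo,
 * assumes <unistd.h> and <stdio.h>.
 */
static void
two_pass_demo(int argc, char **argv)
{
	const char *combined = "v" "t:";	/* generic + subcommand opts */
	int c;

	while ((c = getopt(argc, argv, combined)) != -1)
		if (c == 'v')			/* pass 1: generic only */
			printf("verbose\n");

	optreset = 1;				/* BSD-specific */
	optind = 1;
	while ((c = getopt(argc, argv, combined)) != -1)
		if (c == 't')			/* pass 2: subcommand only */
			printf("timeout %s\n", optarg);
}
/*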
*/ if ((cmdlist == CAM_CMD_RESCAN) || (cmdlist == CAM_CMD_RESET) || (cmdlist == CAM_CMD_DEVTREE) || (cmdlist == CAM_CMD_USAGE) || (cmdlist == CAM_CMD_DEBUG)) devopen = 0; #ifndef MINIMALISTIC if ((devopen == 1) && (argc > 2 && argv[2][0] != '-')) { char name[30]; int rv; if (isdigit(argv[2][0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(argv[2], &bus, &target, &lun, &arglist); if (rv < 2) errx(1, "numeric device specification must " "be either bus:target, or " "bus:target:lun"); /* default to 0 if lun was not specified */ if ((arglist & CAM_ARG_LUN) == 0) { lun = 0; arglist |= CAM_ARG_LUN; } optstart++; } else { if (cam_get_device(argv[2], name, sizeof name, &unit) == -1) errx(1, "%s", cam_errbuf); device = strdup(name); arglist |= CAM_ARG_DEVICE | CAM_ARG_UNIT; optstart++; } } #endif /* MINIMALISTIC */ /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt))!= -1){ switch(c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAM_ARG_RETRIES; break; case 'E': arglist |= CAM_ARG_ERR_RECOVER; break; case 'n': arglist |= CAM_ARG_DEVICE; tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; device = (char *)strdup(tstr); break; case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAM_ARG_TIMEOUT; break; case 'u': arglist |= CAM_ARG_UNIT; unit = strtol(optarg, NULL, 0); break; case 'v': arglist |= CAM_ARG_VERBOSE; break; default: break; } } #ifndef MINIMALISTIC /* * For most commands we'll want to open the passthrough device * associated with the specified device. In the case of the rescan * commands, we don't use a passthrough device at all, just the * transport layer device. */ if (devopen == 1) { if (((arglist & (CAM_ARG_BUS|CAM_ARG_TARGET)) == 0) && (((arglist & CAM_ARG_DEVICE) == 0) || ((arglist & CAM_ARG_UNIT) == 0))) { errx(1, "subcommand \"%s\" requires a valid device " "identifier", argv[1]); } if ((cam_dev = ((arglist & (CAM_ARG_BUS | CAM_ARG_TARGET))? cam_open_btl(bus, target, lun, O_RDWR, NULL) : cam_open_spec_device(device,unit,O_RDWR,NULL))) == NULL) errx(1,"%s", cam_errbuf); } #endif /* MINIMALISTIC */ /* * Reset optind to 2, and reset getopt, so these routines can parse * the arguments again. 
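*/
/*
 * The two open paths used above, side by side: a minimal sketch using
 * the libcam calls from <camlib.h>.  The bus:target:lun numbers and the
 * "da" / unit 0 name are examples only.
 */
static struct cam_device *
open_example(void)
{
	struct cam_device *dev;

	/* By bus:target:lun ... */
	dev = cam_open_btl(/*path_id*/ 0, /*target*/ 1, /*lun*/ 0,
	    O_RDWR, NULL);
	if (dev != NULL)
		return (dev);

	/* ... or by peripheral name and unit, e.g. "da" + 0 -> da0. */
	return (cam_open_spec_device("da", 0, O_RDWR, NULL));
}
/*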
*/ optind = optstart; optreset = 1; switch(cmdlist) { #ifndef MINIMALISTIC case CAM_CMD_DEVLIST: error = getdevlist(cam_dev); break; case CAM_CMD_HPA: error = atahpa(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; #endif /* MINIMALISTIC */ case CAM_CMD_DEVTREE: error = getdevtree(argc, argv, combinedopt); break; #ifndef MINIMALISTIC case CAM_CMD_TUR: error = testunitready(cam_dev, retry_count, timeout, 0); break; case CAM_CMD_INQUIRY: error = scsidoinquiry(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDENTIFY: error = ataidentify(cam_dev, retry_count, timeout); break; case CAM_CMD_STARTSTOP: error = scsistart(cam_dev, arglist & CAM_ARG_START_UNIT, arglist & CAM_ARG_EJECT, retry_count, timeout); break; #endif /* MINIMALISTIC */ case CAM_CMD_RESCAN: error = dorescan_or_reset(argc, argv, 1); break; case CAM_CMD_RESET: error = dorescan_or_reset(argc, argv, 0); break; #ifndef MINIMALISTIC case CAM_CMD_READ_DEFECTS: error = readdefects(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_MODE_PAGE: modepage(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SCSI_CMD: error = scsicmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_CMD: error = smpcmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_RG: error = smpreportgeneral(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PC: error = smpphycontrol(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PHYLIST: error = smpphylist(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_MANINFO: error = smpmaninfo(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_DEBUG: error = camdebug(argc, argv, combinedopt); break; case CAM_CMD_TAG: error = tagcontrol(cam_dev, argc, argv, combinedopt); break; case CAM_CMD_RATE: error = ratecontrol(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_FORMAT: error = scsiformat(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_REPORTLUNS: error = scsireportluns(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_READCAP: error = scsireadcapacity(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDLE: case CAM_CMD_STANDBY: case CAM_CMD_SLEEP: error = atapm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_APM: case CAM_CMD_AAM: error = ataaxm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SECURITY: error = atasecurity(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_DOWNLOAD_FW: error = fwdownload(cam_dev, argc, argv, combinedopt, arglist & CAM_ARG_VERBOSE, retry_count, timeout); break; case CAM_CMD_SANITIZE: error = scsisanitize(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_PERSIST: error = scsipersist(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_ATTRIB: error = scsiattrib(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_OPCODES: error = scsiopcodes(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; + case CAM_CMD_REPROBE: + error = scsireprobe(cam_dev); + break; + #endif /* MINIMALISTIC */ case CAM_CMD_USAGE: 
usage(1); break; default: usage(0); error = 1; break; } if (cam_dev != NULL) cam_close_device(cam_dev); exit(error); } Index: head/sys/cam/cam_ccb.h =================================================================== --- head/sys/cam/cam_ccb.h (revision 299370) +++ head/sys/cam/cam_ccb.h (revision 299371) @@ -1,1353 +1,1355 @@ /*- * Data structures and definitions for CAM Control Blocks (CCBs). * * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAM_CAM_CCB_H #define _CAM_CAM_CCB_H 1 #include #include #include #include #ifndef _KERNEL #include #endif #include #include #include /* General allocation length definitions for CCB structures */ #define IOCDBLEN CAM_MAX_CDBLEN /* Space for CDB bytes/pointer */ #define VUHBALEN 14 /* Vendor Unique HBA length */ #define SIM_IDLEN 16 /* ASCII string len for SIM ID */ #define HBA_IDLEN 16 /* ASCII string len for HBA ID */ #define DEV_IDLEN 16 /* ASCII string len for device names */ #define CCB_PERIPH_PRIV_SIZE 2 /* size of peripheral private area */ #define CCB_SIM_PRIV_SIZE 2 /* size of sim private area */ /* Struct definitions for CAM control blocks */ /* Common CCB header */ /* CAM CCB flags */ typedef enum { CAM_CDB_POINTER = 0x00000001,/* The CDB field is a pointer */ CAM_QUEUE_ENABLE = 0x00000002,/* SIM queue actions are enabled */ CAM_CDB_LINKED = 0x00000004,/* CCB contains a linked CDB */ CAM_NEGOTIATE = 0x00000008,/* * Perform transport negotiation * with this command. 
*/
	CAM_DATA_ISPHYS		= 0x00000010,/* Data type with physical addrs */
	CAM_DIS_AUTOSENSE	= 0x00000020,/* Disable autosense feature */
	CAM_DIR_BOTH		= 0x00000000,/* Data direction (00:IN/OUT) */
	CAM_DIR_IN		= 0x00000040,/* Data direction (01:DATA IN) */
	CAM_DIR_OUT		= 0x00000080,/* Data direction (10:DATA OUT) */
	CAM_DIR_NONE		= 0x000000C0,/* Data direction (11:no data) */
	CAM_DIR_MASK		= 0x000000C0,/* Data direction Mask */
	CAM_DATA_VADDR		= 0x00000000,/* Data type (000:Virtual) */
	CAM_DATA_PADDR		= 0x00000010,/* Data type (001:Physical) */
	CAM_DATA_SG		= 0x00040000,/* Data type (010:sglist) */
	CAM_DATA_SG_PADDR	= 0x00040010,/* Data type (011:sglist phys) */
	CAM_DATA_BIO		= 0x00200000,/* Data type (100:bio) */
	CAM_DATA_MASK		= 0x00240010,/* Data type mask */
	CAM_SOFT_RST_OP		= 0x00000100,/* Use Soft reset alternative */
	CAM_ENG_SYNC		= 0x00000200,/* Flush resid bytes on complete */
	CAM_DEV_QFRZDIS		= 0x00000400,/* Disable DEV Q freezing */
	CAM_DEV_QFREEZE		= 0x00000800,/* Freeze DEV Q on execution */
	CAM_HIGH_POWER		= 0x00001000,/* Command takes a lot of power */
	CAM_SENSE_PTR		= 0x00002000,/* Sense data is a pointer */
	CAM_SENSE_PHYS		= 0x00004000,/* Sense pointer is physical addr*/
	CAM_TAG_ACTION_VALID	= 0x00008000,/* Use the tag action in this ccb*/
	CAM_PASS_ERR_RECOVER	= 0x00010000,/* Pass driver does err. recovery*/
	CAM_DIS_DISCONNECT	= 0x00020000,/* Disable disconnect */
	CAM_MSG_BUF_PHYS	= 0x00080000,/* Message buffer ptr is physical*/
	CAM_SNS_BUF_PHYS	= 0x00100000,/* Autosense data ptr is physical*/
	CAM_CDB_PHYS		= 0x00400000,/* CDB pointer is physical */
	CAM_ENG_SGLIST		= 0x00800000,/* SG list is for the HBA engine */

/* Phase cognizant mode flags */
	CAM_DIS_AUTOSRP		= 0x01000000,/* Disable autosave/restore ptrs */
	CAM_DIS_AUTODISC	= 0x02000000,/* Disable auto disconnect */
	CAM_TGT_CCB_AVAIL	= 0x04000000,/* Target CCB available */
	CAM_TGT_PHASE_MODE	= 0x08000000,/* The SIM runs in phase mode */
	CAM_MSGB_VALID		= 0x10000000,/* Message buffer valid */
	CAM_STATUS_VALID	= 0x20000000,/* Status buffer valid */
	CAM_DATAB_VALID		= 0x40000000,/* Data buffer valid */

/* Host target Mode flags */
	CAM_SEND_SENSE		= 0x08000000,/* Send sense data with status */
	CAM_TERM_IO		= 0x10000000,/* Terminate I/O Message sup. */
	CAM_DISCONNECT		= 0x20000000,/* Disconnects are mandatory */
	CAM_SEND_STATUS		= 0x40000000,/* Send status after data phase */
	CAM_UNLOCKED		= 0x80000000 /* Call callback without lock.
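*/
	/*
	 * Note that CAM_DIR_* above is a contiguous two-bit field (mask
	 * 0x000000C0), while the CAM_DATA_* encodings grew over time and
	 * are scattered: CAM_DATA_MASK is 0x00240010, and, for example,
	 * CAM_DATA_SG_PADDR (0x00040010) is literally
	 * CAM_DATA_SG | CAM_DATA_PADDR.  Compare (flags & CAM_DATA_MASK)
	 * against a CAM_DATA_* value rather than testing individual bits.
	 */
	/*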
*/ } ccb_flags; typedef enum { CAM_USER_DATA_ADDR = 0x00000002,/* Userspace data pointers */ CAM_SG_FORMAT_IOVEC = 0x00000004,/* iovec instead of busdma S/G*/ CAM_UNMAPPED_BUF = 0x00000008 /* use unmapped I/O */ } ccb_xflags; /* XPT Opcodes for xpt_action */ typedef enum { /* Function code flags are bits greater than 0xff */ XPT_FC_QUEUED = 0x100, /* Non-immediate function code */ XPT_FC_USER_CCB = 0x200, XPT_FC_XPT_ONLY = 0x400, /* Only for the transport layer device */ XPT_FC_DEV_QUEUED = 0x800 | XPT_FC_QUEUED, /* Passes through the device queues */ /* Common function commands: 0x00->0x0F */ XPT_NOOP = 0x00, /* Execute Nothing */ XPT_SCSI_IO = 0x01 | XPT_FC_DEV_QUEUED, /* Execute the requested I/O operation */ XPT_GDEV_TYPE = 0x02, /* Get type information for specified device */ XPT_GDEVLIST = 0x03, /* Get a list of peripheral devices */ XPT_PATH_INQ = 0x04, /* Path routing inquiry */ XPT_REL_SIMQ = 0x05, /* Release a frozen device queue */ XPT_SASYNC_CB = 0x06, /* Set Asynchronous Callback Parameters */ XPT_SDEV_TYPE = 0x07, /* Set device type information */ XPT_SCAN_BUS = 0x08 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* (Re)Scan the SCSI Bus */ XPT_DEV_MATCH = 0x09 | XPT_FC_XPT_ONLY, /* Get EDT entries matching the given pattern */ XPT_DEBUG = 0x0a, /* Turn on debugging for a bus, target or lun */ XPT_PATH_STATS = 0x0b, /* Path statistics (error counts, etc.) */ XPT_GDEV_STATS = 0x0c, /* Device statistics (error counts, etc.) */ XPT_DEV_ADVINFO = 0x0e, /* Get/Set Device advanced information */ XPT_ASYNC = 0x0f | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Asynchronous event */ /* SCSI Control Functions: 0x10->0x1F */ XPT_ABORT = 0x10, /* Abort the specified CCB */ XPT_RESET_BUS = 0x11 | XPT_FC_XPT_ONLY, /* Reset the specified SCSI bus */ XPT_RESET_DEV = 0x12 | XPT_FC_DEV_QUEUED, /* Bus Device Reset the specified SCSI device */ XPT_TERM_IO = 0x13, /* Terminate the I/O process */ XPT_SCAN_LUN = 0x14 | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Logical Unit */ XPT_GET_TRAN_SETTINGS = 0x15, /* * Get default/user transfer settings * for the target */ XPT_SET_TRAN_SETTINGS = 0x16, /* * Set transfer rate/width * negotiation settings */ XPT_CALC_GEOMETRY = 0x17, /* * Calculate the geometry parameters for * a device given the sector size and * volume size. */ XPT_ATA_IO = 0x18 | XPT_FC_DEV_QUEUED, /* Execute the requested ATA I/O operation */ XPT_GET_SIM_KNOB_OLD = 0x18, /* Compat only */ XPT_SET_SIM_KNOB = 0x19, /* * Set SIM specific knob values. */ XPT_GET_SIM_KNOB = 0x1a, /* * Get SIM specific knob values.
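As a worked example of the encoding above: the low byte carries the command number and the bits above 0xff describe scheduling behavior, so XPT_SCAN_LUN is 0x14 | 0x100 | 0x200 | 0x400 == 0x714 (a queued, user-issuable CCB handled entirely by the transport layer), while XPT_SCSI_IO is 0x01 | XPT_FC_DEV_QUEUED == 0x901 (a queued CCB that passes through the device queues). The XPT_FC_IS_DEV_QUEUED() and XPT_FC_IS_QUEUED() macros shortly below test exactly these behavior bits.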
*/ XPT_SMP_IO = 0x1b | XPT_FC_DEV_QUEUED, /* Serial Management Protocol */ XPT_SCAN_TGT = 0x1E | XPT_FC_QUEUED | XPT_FC_USER_CCB | XPT_FC_XPT_ONLY, /* Scan Target */ /* HBA engine commands 0x20->0x2F */ XPT_ENG_INQ = 0x20 | XPT_FC_XPT_ONLY, /* HBA engine feature inquiry */ XPT_ENG_EXEC = 0x21 | XPT_FC_DEV_QUEUED, /* HBA execute engine request */ /* Target mode commands: 0x30->0x3F */ XPT_EN_LUN = 0x30, /* Enable LUN as a target */ XPT_TARGET_IO = 0x31 | XPT_FC_DEV_QUEUED, /* Execute target I/O request */ XPT_ACCEPT_TARGET_IO = 0x32 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Accept Host Target Mode CDB */ XPT_CONT_TARGET_IO = 0x33 | XPT_FC_DEV_QUEUED, /* Continue Host Target I/O Connection */ XPT_IMMED_NOTIFY = 0x34 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event (obsolete) */ XPT_NOTIFY_ACK = 0x35, /* Acknowledgement of event (obsolete) */ XPT_IMMEDIATE_NOTIFY = 0x36 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Notify Host Target driver of event */ XPT_NOTIFY_ACKNOWLEDGE = 0x37 | XPT_FC_QUEUED | XPT_FC_USER_CCB, /* Acknowledgement of event */ + XPT_REPROBE_LUN = 0x38 | XPT_FC_QUEUED | XPT_FC_USER_CCB, + /* Query device capacity and notify GEOM */ /* Vendor Unique codes: 0x80->0x8F */ XPT_VUNIQUE = 0x80 } xpt_opcode; #define XPT_FC_GROUP_MASK 0xF0 #define XPT_FC_GROUP(op) ((op) & XPT_FC_GROUP_MASK) #define XPT_FC_GROUP_COMMON 0x00 #define XPT_FC_GROUP_SCSI_CONTROL 0x10 #define XPT_FC_GROUP_HBA_ENGINE 0x20 #define XPT_FC_GROUP_TMODE 0x30 #define XPT_FC_GROUP_VENDOR_UNIQUE 0x80 #define XPT_FC_IS_DEV_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_DEV_QUEUED) == XPT_FC_DEV_QUEUED) #define XPT_FC_IS_QUEUED(ccb) \ (((ccb)->ccb_h.func_code & XPT_FC_QUEUED) != 0) typedef enum { PROTO_UNKNOWN, PROTO_UNSPECIFIED, PROTO_SCSI, /* Small Computer System Interface */ PROTO_ATA, /* AT Attachment */ PROTO_ATAPI, /* AT Attachment Packetized Interface */ PROTO_SATAPM, /* SATA Port Multiplier */ PROTO_SEMB, /* SATA Enclosure Management Bridge */ } cam_proto; typedef enum { XPORT_UNKNOWN, XPORT_UNSPECIFIED, XPORT_SPI, /* SCSI Parallel Interface */ XPORT_FC, /* Fiber Channel */ XPORT_SSA, /* Serial Storage Architecture */ XPORT_USB, /* Universal Serial Bus */ XPORT_PPB, /* Parallel Port Bus */ XPORT_ATA, /* AT Attachment */ XPORT_SAS, /* Serial Attached SCSI */ XPORT_SATA, /* Serial AT Attachment */ XPORT_ISCSI, /* iSCSI */ XPORT_SRP, /* SCSI RDMA Protocol */ } cam_xport; #define XPORT_IS_ATA(t) ((t) == XPORT_ATA || (t) == XPORT_SATA) #define XPORT_IS_SCSI(t) ((t) != XPORT_UNKNOWN && \ (t) != XPORT_UNSPECIFIED && \ !XPORT_IS_ATA(t)) #define XPORT_DEVSTAT_TYPE(t) (XPORT_IS_ATA(t) ? DEVSTAT_TYPE_IF_IDE : \ XPORT_IS_SCSI(t) ? 
DEVSTAT_TYPE_IF_SCSI : \ DEVSTAT_TYPE_IF_OTHER) #define PROTO_VERSION_UNKNOWN (UINT_MAX - 1) #define PROTO_VERSION_UNSPECIFIED UINT_MAX #define XPORT_VERSION_UNKNOWN (UINT_MAX - 1) #define XPORT_VERSION_UNSPECIFIED UINT_MAX typedef union { LIST_ENTRY(ccb_hdr) le; SLIST_ENTRY(ccb_hdr) sle; TAILQ_ENTRY(ccb_hdr) tqe; STAILQ_ENTRY(ccb_hdr) stqe; } camq_entry; typedef union { void *ptr; u_long field; u_int8_t bytes[sizeof(uintptr_t)]; } ccb_priv_entry; typedef union { ccb_priv_entry entries[CCB_PERIPH_PRIV_SIZE]; u_int8_t bytes[CCB_PERIPH_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_ppriv_area; typedef union { ccb_priv_entry entries[CCB_SIM_PRIV_SIZE]; u_int8_t bytes[CCB_SIM_PRIV_SIZE * sizeof(ccb_priv_entry)]; } ccb_spriv_area; typedef struct { struct timeval *etime; uintptr_t sim_data; uintptr_t periph_data; } ccb_qos_area; struct ccb_hdr { cam_pinfo pinfo; /* Info for priority scheduling */ camq_entry xpt_links; /* For chaining in the XPT layer */ camq_entry sim_links; /* For chaining in the SIM layer */ camq_entry periph_links; /* For chaining in the type driver */ u_int32_t retry_count; void (*cbfcnp)(struct cam_periph *, union ccb *); /* Callback on completion function */ xpt_opcode func_code; /* XPT function code */ u_int32_t status; /* Status returned by CAM subsystem */ struct cam_path *path; /* Compiled path for this ccb */ path_id_t path_id; /* Path ID for the request */ target_id_t target_id; /* Target device ID */ lun_id_t target_lun; /* Target LUN number */ u_int32_t flags; /* ccb_flags */ u_int32_t xflags; /* Extended flags */ ccb_ppriv_area periph_priv; ccb_spriv_area sim_priv; ccb_qos_area qos; u_int32_t timeout; /* Hard timeout value in mseconds */ struct timeval softtimeout; /* Soft timeout value in sec + usec */ }; /* Get Device Information CCB */ struct ccb_getdev { struct ccb_hdr ccb_h; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; u_int8_t serial_num[252]; u_int8_t inq_flags; u_int8_t serial_num_len; }; /* Device Statistics CCB */ struct ccb_getdevstats { struct ccb_hdr ccb_h; int dev_openings; /* Space left for more work on device*/ int dev_active; /* Transactions running on the device */ int allocated; /* CCBs allocated for the device */ int queued; /* CCBs queued to be sent to the device */ int held; /* * CCBs held by peripheral drivers * for this device */ int maxtags; /* * Boundary conditions for number of * tagged operations */ int mintags; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { CAM_GDEVLIST_LAST_DEVICE, CAM_GDEVLIST_LIST_CHANGED, CAM_GDEVLIST_MORE_DEVS, CAM_GDEVLIST_ERROR } ccb_getdevlist_status_e; struct ccb_getdevlist { struct ccb_hdr ccb_h; char periph_name[DEV_IDLEN]; u_int32_t unit_number; unsigned int generation; u_int32_t index; ccb_getdevlist_status_e status; }; typedef enum { PERIPH_MATCH_NONE = 0x000, PERIPH_MATCH_PATH = 0x001, PERIPH_MATCH_TARGET = 0x002, PERIPH_MATCH_LUN = 0x004, PERIPH_MATCH_NAME = 0x008, PERIPH_MATCH_UNIT = 0x010, PERIPH_MATCH_ANY = 0x01f } periph_pattern_flags; struct periph_match_pattern { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; periph_pattern_flags flags; }; typedef enum { DEV_MATCH_NONE = 0x000, DEV_MATCH_PATH = 0x001, DEV_MATCH_TARGET = 0x002, DEV_MATCH_LUN = 0x004, DEV_MATCH_INQUIRY = 0x008, DEV_MATCH_DEVID = 0x010, DEV_MATCH_ANY = 0x00f } dev_pattern_flags; struct device_id_match_pattern { uint8_t id_len; uint8_t id[256]; }; struct device_match_pattern { path_id_t path_id; 
target_id_t target_id; lun_id_t target_lun; dev_pattern_flags flags; union { struct scsi_static_inquiry_pattern inq_pat; struct device_id_match_pattern devid_pat; } data; }; typedef enum { BUS_MATCH_NONE = 0x000, BUS_MATCH_PATH = 0x001, BUS_MATCH_NAME = 0x002, BUS_MATCH_UNIT = 0x004, BUS_MATCH_BUS_ID = 0x008, BUS_MATCH_ANY = 0x00f } bus_pattern_flags; struct bus_match_pattern { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; bus_pattern_flags flags; }; union match_pattern { struct periph_match_pattern periph_pattern; struct device_match_pattern device_pattern; struct bus_match_pattern bus_pattern; }; typedef enum { DEV_MATCH_PERIPH, DEV_MATCH_DEVICE, DEV_MATCH_BUS } dev_match_type; struct dev_match_pattern { dev_match_type type; union match_pattern pattern; }; struct periph_match_result { char periph_name[DEV_IDLEN]; u_int32_t unit_number; path_id_t path_id; target_id_t target_id; lun_id_t target_lun; }; typedef enum { DEV_RESULT_NOFLAG = 0x00, DEV_RESULT_UNCONFIGURED = 0x01 } dev_result_flags; struct device_match_result { path_id_t path_id; target_id_t target_id; lun_id_t target_lun; cam_proto protocol; struct scsi_inquiry_data inq_data; struct ata_params ident_data; dev_result_flags flags; }; struct bus_match_result { path_id_t path_id; char dev_name[DEV_IDLEN]; u_int32_t unit_number; u_int32_t bus_id; }; union match_result { struct periph_match_result periph_result; struct device_match_result device_result; struct bus_match_result bus_result; }; struct dev_match_result { dev_match_type type; union match_result result; }; typedef enum { CAM_DEV_MATCH_LAST, CAM_DEV_MATCH_MORE, CAM_DEV_MATCH_LIST_CHANGED, CAM_DEV_MATCH_SIZE_ERROR, CAM_DEV_MATCH_ERROR } ccb_dev_match_status; typedef enum { CAM_DEV_POS_NONE = 0x000, CAM_DEV_POS_BUS = 0x001, CAM_DEV_POS_TARGET = 0x002, CAM_DEV_POS_DEVICE = 0x004, CAM_DEV_POS_PERIPH = 0x008, CAM_DEV_POS_PDPTR = 0x010, CAM_DEV_POS_TYPEMASK = 0xf00, CAM_DEV_POS_EDT = 0x100, CAM_DEV_POS_PDRV = 0x200 } dev_pos_type; struct ccb_dm_cookie { void *bus; void *target; void *device; void *periph; void *pdrv; }; struct ccb_dev_position { u_int generations[4]; #define CAM_BUS_GENERATION 0x00 #define CAM_TARGET_GENERATION 0x01 #define CAM_DEV_GENERATION 0x02 #define CAM_PERIPH_GENERATION 0x03 dev_pos_type position_type; struct ccb_dm_cookie cookie; }; struct ccb_dev_match { struct ccb_hdr ccb_h; ccb_dev_match_status status; u_int32_t num_patterns; u_int32_t pattern_buf_len; struct dev_match_pattern *patterns; u_int32_t num_matches; u_int32_t match_buf_len; struct dev_match_result *matches; struct ccb_dev_position pos; }; /* * Definitions for the path inquiry CCB fields. */ #define CAM_VERSION 0x19 /* Hex value for current version */ typedef enum { PI_MDP_ABLE = 0x80, /* Supports MDP message */ PI_WIDE_32 = 0x40, /* Supports 32 bit wide SCSI */ PI_WIDE_16 = 0x20, /* Supports 16 bit wide SCSI */ PI_SDTR_ABLE = 0x10, /* Supports SDTR message */ PI_LINKED_CDB = 0x08, /* Supports linked CDBs */ PI_SATAPM = 0x04, /* Supports SATA PM */ PI_TAG_ABLE = 0x02, /* Supports tag queue messages */ PI_SOFT_RST = 0x01 /* Supports soft reset alternative */ } pi_inqflag; typedef enum { PIT_PROCESSOR = 0x80, /* Target mode processor mode */ PIT_PHASE = 0x40, /* Target mode phase cog. 
mode */ PIT_DISCONNECT = 0x20, /* Disconnects supported in target mode */ PIT_TERM_IO = 0x10, /* Terminate I/O message supported in TM */ PIT_GRP_6 = 0x08, /* Group 6 commands supported */ PIT_GRP_7 = 0x04 /* Group 7 commands supported */ } pi_tmflag; typedef enum { PIM_ATA_EXT = 0x200,/* ATA requests can understand ata_ext requests */ PIM_EXTLUNS = 0x100,/* 64bit extended LUNs supported */ PIM_SCANHILO = 0x80, /* Bus scans from high ID to low ID */ PIM_NOREMOVE = 0x40, /* Removeable devices not included in scan */ PIM_NOINITIATOR = 0x20, /* Initiator role not supported. */ PIM_NOBUSRESET = 0x10, /* User has disabled initial BUS RESET */ PIM_NO_6_BYTE = 0x08, /* Do not send 6-byte commands */ PIM_SEQSCAN = 0x04, /* Do bus scans sequentially, not in parallel */ PIM_UNMAPPED = 0x02, PIM_NOSCAN = 0x01 /* SIM does its own scanning */ } pi_miscflag; /* Path Inquiry CCB */ struct ccb_pathinq_settings_spi { u_int8_t ppr_options; }; struct ccb_pathinq_settings_fc { u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_pathinq_settings_sas { u_int32_t bitrate; /* Mbps */ }; #define PATHINQ_SETTINGS_SIZE 128 struct ccb_pathinq { struct ccb_hdr ccb_h; u_int8_t version_num; /* Version number for the SIM/HBA */ u_int8_t hba_inquiry; /* Mimic of INQ byte 7 for the HBA */ u_int16_t target_sprt; /* Flags for target mode support */ u_int32_t hba_misc; /* Misc HBA features */ u_int16_t hba_eng_cnt; /* HBA engine count */ /* Vendor Unique capabilities */ u_int8_t vuhba_flags[VUHBALEN]; u_int32_t max_target; /* Maximum supported Target */ u_int32_t max_lun; /* Maximum supported Lun */ u_int32_t async_flags; /* Installed Async handlers */ path_id_t hpath_id; /* Highest Path ID in the subsystem */ target_id_t initiator_id; /* ID of the HBA on the SCSI bus */ char sim_vid[SIM_IDLEN]; /* Vendor ID of the SIM */ char hba_vid[HBA_IDLEN]; /* Vendor ID of the HBA */ char dev_name[DEV_IDLEN];/* Device name for SIM */ u_int32_t unit_number; /* Unit number for SIM */ u_int32_t bus_id; /* Bus ID for SIM */ u_int32_t base_transfer_speed;/* Base bus speed in KB/sec */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { struct ccb_pathinq_settings_spi spi; struct ccb_pathinq_settings_fc fc; struct ccb_pathinq_settings_sas sas; char ccb_pathinq_settings_opaque[PATHINQ_SETTINGS_SIZE]; } xport_specific; u_int maxio; /* Max supported I/O size, in bytes. */ u_int16_t hba_vendor; /* HBA vendor ID */ u_int16_t hba_device; /* HBA device ID */ u_int16_t hba_subvendor; /* HBA subvendor ID */ u_int16_t hba_subdevice; /* HBA subdevice ID */ }; /* Path Statistics CCB */ struct ccb_pathstats { struct ccb_hdr ccb_h; struct timeval last_reset; /* Time of last bus reset/loop init */ }; typedef enum { SMP_FLAG_NONE = 0x00, SMP_FLAG_REQ_SG = 0x01, SMP_FLAG_RSP_SG = 0x02 } ccb_smp_pass_flags; /* * Serial Management Protocol CCB * XXX Currently the semantics for this CCB are that it is executed either * by the addressed device, or that device's parent (i.e. an expander for * any device on an expander) if the addressed device doesn't support SMP. * Later, once we have the ability to probe SMP-only devices and put them * in CAM's topology, the CCB will only be executed by the addressed device * if possible. 
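A userland illustration of the path-inquiry CCB above, in the style camcontrol(8) uses internally (a sketch via libcam with error handling elided; the device path is arbitrary):

    #include <fcntl.h>
    #include <stdio.h>
    #include <camlib.h>

    int
    main(void)
    {
            struct cam_device *dev = cam_open_device("/dev/da0", O_RDWR);
            union ccb *ccb = cam_getccb(dev);

            ccb->ccb_h.func_code = XPT_PATH_INQ;
            if (cam_send_ccb(dev, ccb) == 0 &&
                (ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
                    printf("maxio %u bytes, extended LUNs: %s\n",
                        ccb->cpi.maxio,
                        (ccb->cpi.hba_misc & PIM_EXTLUNS) ? "yes" : "no");
            cam_freeccb(ccb);
            cam_close_device(dev);
            return (0);
    }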
*/ struct ccb_smpio { struct ccb_hdr ccb_h; uint8_t *smp_request; int smp_request_len; uint16_t smp_request_sglist_cnt; uint8_t *smp_response; int smp_response_len; uint16_t smp_response_sglist_cnt; ccb_smp_pass_flags flags; }; typedef union { u_int8_t *sense_ptr; /* * Pointer to storage * for sense information */ /* Storage Area for sense information */ struct scsi_sense_data sense_buf; } sense_t; typedef union { u_int8_t *cdb_ptr; /* Pointer to the CDB bytes to send */ /* Area for the CDB send */ u_int8_t cdb_bytes[IOCDBLEN]; } cdb_t; /* * SCSI I/O Request CCB used for the XPT_SCSI_IO and XPT_CONT_TARGET_IO * function codes. */ struct ccb_scsiio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ u_int8_t *req_map; /* Ptr to mapping info */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ /* Autosense storage */ struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes to autosense */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int16_t sglist_cnt; /* Number of SG list entries */ u_int8_t scsi_status; /* Returned SCSI status */ u_int8_t sense_resid; /* Autosense resid length: 2's comp */ u_int32_t resid; /* Transfer residual length: 2's comp */ cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t *msg_ptr; /* Pointer to the message buffer */ u_int16_t msg_len; /* Number of bytes for the Message */ u_int8_t tag_action; /* What to do for tag queueing */ /* * The tag action should be either the define below (to send a * non-tagged transaction) or one of the defined scsi tag messages * from scsi_message.h. */ #define CAM_TAG_ACTION_NONE 0x00 u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ }; static __inline uint8_t * scsiio_cdb_ptr(struct ccb_scsiio *ccb) { return ((ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->cdb_io.cdb_ptr : ccb->cdb_io.cdb_bytes); } /* * ATA I/O Request CCB used for the XPT_ATA_IO function code. */ struct ccb_ataio { struct ccb_hdr ccb_h; union ccb *next_ccb; /* Ptr for next CCB for action */ struct ata_cmd cmd; /* ATA command register set */ struct ata_res res; /* ATA result register set */ u_int8_t *data_ptr; /* Ptr to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int32_t resid; /* Transfer residual length: 2's comp */ u_int8_t ata_flags; /* Flags for the rest of the buffer */ #define ATA_FLAG_AUX 0x1 uint32_t aux; uint32_t unused; }; struct ccb_accept_tio { struct ccb_hdr ccb_h; cdb_t cdb_io; /* Union for CDB bytes/pointer */ u_int8_t cdb_len; /* Number of bytes for the CDB */ u_int8_t tag_action; /* What to do for tag queueing */ u_int8_t sense_len; /* Number of bytes of Sense Data */ u_int tag_id; /* tag id from initiator (target mode) */ u_int init_id; /* initiator id of who selected */ struct scsi_sense_data sense_data; }; /* Release SIM Queue */ struct ccb_relsim { struct ccb_hdr ccb_h; u_int32_t release_flags; #define RELSIM_ADJUST_OPENINGS 0x01 #define RELSIM_RELEASE_AFTER_TIMEOUT 0x02 #define RELSIM_RELEASE_AFTER_CMDCMPLT 0x04 #define RELSIM_RELEASE_AFTER_QEMPTY 0x08 u_int32_t openings; u_int32_t release_timeout; /* Abstract argument. */ u_int32_t qfrozen_cnt; }; /* * Definitions for the asynchronous callback CCB fields.
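A kernel-side sketch of filling the SCSI I/O CCB above by hand: cam_fill_csio() (declared near the end of this header) populates the header and csio fields, and scsiio_cdb_ptr() then honors CAM_CDB_POINTER when reading the CDB back. The zero-filled 6-byte CDB used here happens to be TEST UNIT READY (opcode 0x00); mydone is a hypothetical completion callback:

    static void
    build_tur(struct ccb_scsiio *csio,
        void (*mydone)(struct cam_periph *, union ccb *))
    {
            cam_fill_csio(csio,
                /*retries*/ 1,
                /*cbfcnp*/ mydone,
                /*flags*/ CAM_DIR_NONE,
                /*tag_action*/ CAM_TAG_ACTION_NONE,
                /*data_ptr*/ NULL,
                /*dxfer_len*/ 0,
                /*sense_len*/ sizeof(csio->sense_data),
                /*cdb_len*/ 6,
                /*timeout*/ 5000);
            memset(csio->cdb_io.cdb_bytes, 0, 6);
            /* CAM_CDB_POINTER is unset, so scsiio_cdb_ptr() returns cdb_bytes. */
    }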
*/ typedef enum { AC_UNIT_ATTENTION = 0x4000,/* Device reported UNIT ATTENTION */ AC_ADVINFO_CHANGED = 0x2000,/* Advanced info might have changed */ AC_CONTRACT = 0x1000,/* A contractual callback */ AC_GETDEV_CHANGED = 0x800,/* Getdev info might have changed */ AC_INQ_CHANGED = 0x400,/* Inquiry info might have changed */ AC_TRANSFER_NEG = 0x200,/* New transfer settings in effect */ AC_LOST_DEVICE = 0x100,/* A device went away */ AC_FOUND_DEVICE = 0x080,/* A new device was found */ AC_PATH_DEREGISTERED = 0x040,/* A path has de-registered */ AC_PATH_REGISTERED = 0x020,/* A new path has been registered */ AC_SENT_BDR = 0x010,/* A BDR message was sent to target */ AC_SCSI_AEN = 0x008,/* A SCSI AEN has been received */ AC_UNSOL_RESEL = 0x002,/* Unsolicited reselection occurred */ AC_BUS_RESET = 0x001 /* A SCSI bus reset occurred */ } ac_code; typedef void ac_callback_t (void *softc, u_int32_t code, struct cam_path *path, void *args); /* * Generic Asynchronous callbacks. * * Generic arguments passed back, which are then interpreted according to a * per-system contract number. */ #define AC_CONTRACT_DATA_MAX (128 - sizeof (u_int64_t)) struct ac_contract { u_int64_t contract_number; u_int8_t contract_data[AC_CONTRACT_DATA_MAX]; }; #define AC_CONTRACT_DEV_CHG 1 struct ac_device_changed { u_int64_t wwpn; u_int32_t port; target_id_t target; u_int8_t arrived; }; /* Set Asynchronous Callback CCB */ struct ccb_setasync { struct ccb_hdr ccb_h; u_int32_t event_enable; /* Async Event enables */ ac_callback_t *callback; void *callback_arg; }; /* Set Device Type CCB */ struct ccb_setdev { struct ccb_hdr ccb_h; u_int8_t dev_type; /* Value for dev type field in EDT */ }; /* SCSI Control Functions */ /* Abort XPT request CCB */ struct ccb_abort { struct ccb_hdr ccb_h; union ccb *abort_ccb; /* Pointer to CCB to abort */ }; /* Reset SCSI Bus CCB */ struct ccb_resetbus { struct ccb_hdr ccb_h; }; /* Reset SCSI Device CCB */ struct ccb_resetdev { struct ccb_hdr ccb_h; }; /* Terminate I/O Process Request CCB */ struct ccb_termio { struct ccb_hdr ccb_h; union ccb *termio_ccb; /* Pointer to CCB to terminate */ }; typedef enum { CTS_TYPE_CURRENT_SETTINGS, CTS_TYPE_USER_SETTINGS } cts_type; struct ccb_trans_settings_scsi { u_int valid; /* Which fields to honor */ #define CTS_SCSI_VALID_TQ 0x01 u_int flags; #define CTS_SCSI_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_ata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_TQ 0x01 u_int flags; #define CTS_ATA_FLAGS_TAG_ENB 0x01 }; struct ccb_trans_settings_spi { u_int valid; /* Which fields to honor */ #define CTS_SPI_VALID_SYNC_RATE 0x01 #define CTS_SPI_VALID_SYNC_OFFSET 0x02 #define CTS_SPI_VALID_BUS_WIDTH 0x04 #define CTS_SPI_VALID_DISC 0x08 #define CTS_SPI_VALID_PPR_OPTIONS 0x10 u_int flags; #define CTS_SPI_FLAGS_DISC_ENB 0x01 u_int sync_period; u_int sync_offset; u_int bus_width; u_int ppr_options; }; struct ccb_trans_settings_fc { u_int valid; /* Which fields to honor */ #define CTS_FC_VALID_WWNN 0x8000 #define CTS_FC_VALID_WWPN 0x4000 #define CTS_FC_VALID_PORT 0x2000 #define CTS_FC_VALID_SPEED 0x1000 u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int32_t port; /* 24 bit port id, if known */ u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_sas { u_int valid; /* Which fields to honor */ #define CTS_SAS_VALID_SPEED 0x1000 u_int32_t bitrate; /* Mbps */ }; struct ccb_trans_settings_pata { u_int valid; /* Which fields to honor */ #define CTS_ATA_VALID_MODE 0x01 #define CTS_ATA_VALID_BYTECOUNT 0x02 #define
CTS_ATA_VALID_ATAPI 0x20 #define CTS_ATA_VALID_CAPS 0x40 int mode; /* Mode */ u_int bytecount; /* Length of PIO transaction */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_ATA_CAPS_H 0x0000ffff #define CTS_ATA_CAPS_H_DMA48 0x00000001 /* 48-bit DMA */ #define CTS_ATA_CAPS_D 0xffff0000 }; struct ccb_trans_settings_sata { u_int valid; /* Which fields to honor */ #define CTS_SATA_VALID_MODE 0x01 #define CTS_SATA_VALID_BYTECOUNT 0x02 #define CTS_SATA_VALID_REVISION 0x04 #define CTS_SATA_VALID_PM 0x08 #define CTS_SATA_VALID_TAGS 0x10 #define CTS_SATA_VALID_ATAPI 0x20 #define CTS_SATA_VALID_CAPS 0x40 int mode; /* Legacy PATA mode */ u_int bytecount; /* Length of PIO transaction */ int revision; /* SATA revision */ u_int pm_present; /* PM is present (XPT->SIM) */ u_int tags; /* Number of allowed tags */ u_int atapi; /* Length of ATAPI CDB */ u_int caps; /* Device and host SATA caps. */ #define CTS_SATA_CAPS_H 0x0000ffff #define CTS_SATA_CAPS_H_PMREQ 0x00000001 #define CTS_SATA_CAPS_H_APST 0x00000002 #define CTS_SATA_CAPS_H_DMAAA 0x00000010 /* Auto-activation */ #define CTS_SATA_CAPS_H_AN 0x00000020 /* Async. notification */ #define CTS_SATA_CAPS_D 0xffff0000 #define CTS_SATA_CAPS_D_PMREQ 0x00010000 #define CTS_SATA_CAPS_D_APST 0x00020000 }; /* Get/Set transfer rate/width/disconnection/tag queueing settings */ struct ccb_trans_settings { struct ccb_hdr ccb_h; cts_type type; /* Current or User settings */ cam_proto protocol; u_int protocol_version; cam_xport transport; u_int transport_version; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_ata ata; struct ccb_trans_settings_scsi scsi; } proto_specific; union { u_int valid; /* Which fields to honor */ struct ccb_trans_settings_spi spi; struct ccb_trans_settings_fc fc; struct ccb_trans_settings_sas sas; struct ccb_trans_settings_pata ata; struct ccb_trans_settings_sata sata; } xport_specific; }; /* * Calculate the geometry parameters for a device * given the block size and volume size in blocks. */ struct ccb_calc_geometry { struct ccb_hdr ccb_h; u_int32_t block_size; u_int64_t volume_size; u_int32_t cylinders; u_int8_t heads; u_int8_t secs_per_track; }; /* * Set or get SIM (and transport) specific knobs */ #define KNOB_VALID_ADDRESS 0x1 #define KNOB_VALID_ROLE 0x2 #define KNOB_ROLE_NONE 0x0 #define KNOB_ROLE_INITIATOR 0x1 #define KNOB_ROLE_TARGET 0x2 #define KNOB_ROLE_BOTH 0x3 struct ccb_sim_knob_settings_spi { u_int valid; u_int initiator_id; u_int role; }; struct ccb_sim_knob_settings_fc { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int64_t wwpn; /* world wide port name */ u_int role; }; struct ccb_sim_knob_settings_sas { u_int valid; u_int64_t wwnn; /* world wide node name */ u_int role; }; #define KNOB_SETTINGS_SIZE 128 struct ccb_sim_knob { struct ccb_hdr ccb_h; union { u_int valid; /* Which fields to honor */ struct ccb_sim_knob_settings_spi spi; struct ccb_sim_knob_settings_fc fc; struct ccb_sim_knob_settings_sas sas; char pad[KNOB_SETTINGS_SIZE]; } xport_specific; }; /* * Rescan the given bus, or bus/target/lun */ struct ccb_rescan { struct ccb_hdr ccb_h; cam_flags flags; }; /* * Turn on debugging for the given bus, bus/target, or bus/target/lun. */ struct ccb_debug { struct ccb_hdr ccb_h; cam_debug_flags flags; }; /* Target mode structures.
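The geometry CCB above pairs with cam_calc_geometry(), declared at the end of this header. A sketch for a 64 GB disk with 512-byte blocks (assuming the conventional extended translation of 255 heads and 63 sectors per track, under which cylinders come out near volume_size / (255 * 63)):

    struct ccb_calc_geometry ccg;

    bzero(&ccg, sizeof(ccg));
    ccg.block_size = 512;
    ccg.volume_size = (u_int64_t)64 * 1024 * 1024 * 1024 / 512;
    cam_calc_geometry(&ccg, /*extended*/ 1);
    /* 134217728 blocks / (255 * 63) ~= 8354 cylinders */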
*/ struct ccb_en_lun { struct ccb_hdr ccb_h; u_int16_t grp6_len; /* Group 6 VU CDB length */ u_int16_t grp7_len; /* Group 7 VU CDB length */ u_int8_t enable; }; /* old, barely used immediate notify, binary compatibility */ struct ccb_immed_notify { struct ccb_hdr ccb_h; struct scsi_sense_data sense_data; u_int8_t sense_len; /* Number of bytes in sense buffer */ u_int8_t initiator_id; /* Id of initiator that selected */ u_int8_t message_args[7]; /* Message Arguments */ }; struct ccb_notify_ack { struct ccb_hdr ccb_h; u_int16_t seq_id; /* Sequence identifier */ u_int8_t event; /* Event flags */ }; struct ccb_immediate_notify { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; struct ccb_notify_acknowledge { struct ccb_hdr ccb_h; u_int tag_id; /* Tag for immediate notify */ u_int seq_id; /* Tag for target of notify */ u_int initiator_id; /* Initiator Identifier */ u_int arg; /* Function specific */ }; /* HBA engine structures. */ typedef enum { EIT_BUFFER, /* Engine type: buffer memory */ EIT_LOSSLESS, /* Engine type: lossless compression */ EIT_LOSSY, /* Engine type: lossy compression */ EIT_ENCRYPT /* Engine type: encryption */ } ei_type; typedef enum { EAD_VUNIQUE, /* Engine algorithm ID: vendor unique */ EAD_LZ1V1, /* Engine algorithm ID: LZ1 var.1 */ EAD_LZ2V1, /* Engine algorithm ID: LZ2 var.1 */ EAD_LZ2V2 /* Engine algorithm ID: LZ2 var.2 */ } ei_algo; struct ccb_eng_inq { struct ccb_hdr ccb_h; u_int16_t eng_num; /* The engine number for this inquiry */ ei_type eng_type; /* Returned engine type */ ei_algo eng_algo; /* Returned engine algorithm type */ u_int32_t eng_memeory; /* Returned engine memory size */ }; struct ccb_eng_exec { /* This structure must match SCSIIO size */ struct ccb_hdr ccb_h; u_int8_t *pdrv_ptr; /* Ptr used by the peripheral driver */ u_int8_t *req_map; /* Ptr for mapping info on the req. */ u_int8_t *data_ptr; /* Pointer to the data buf/SG list */ u_int32_t dxfer_len; /* Data transfer length */ u_int8_t *engdata_ptr; /* Pointer to the engine buffer data */ u_int16_t sglist_cnt; /* Num of scatter gather list entries */ u_int32_t dmax_len; /* Destination data maximum length */ u_int32_t dest_len; /* Destination data length */ int32_t src_resid; /* Source residual length: 2's comp */ u_int32_t timeout; /* Timeout value */ u_int16_t eng_num; /* Engine number for this request */ u_int16_t vu_flags; /* Vendor Unique flags */ }; /* * Definitions for the timeout field in the SCSI I/O CCB. */ #define CAM_TIME_DEFAULT 0x00000000 /* Use SIM default value */ #define CAM_TIME_INFINITY 0xFFFFFFFF /* Infinite timeout */ #define CAM_SUCCESS 0 /* For signaling general success */ #define CAM_FAILURE 1 /* For signaling general failure */ #define CAM_FALSE 0 #define CAM_TRUE 1 #define XPT_CCB_INVALID -1 /* for signaling a bad CCB to free */ /* * CCB for working with advanced device information. This operates in a fashion * similar to XPT_GDEV_TYPE. Specify the target in ccb_h, the buffer * type requested, and provide a buffer size/buffer to write to. If the * buffer is too small, provsiz will be larger than bufsiz.
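The sizing contract just described suggests a two-pass pattern for the advanced-information CCB defined below: issue it once to learn provsiz, then again with a buffer of that size. A kernel-side sketch (path setup via xpt_setup_ccb() and all error handling elided; it assumes the handler copies nothing when bufsiz is zero):

    struct ccb_dev_advinfo *cdai = &ccb->cdai;

    cdai->ccb_h.func_code = XPT_DEV_ADVINFO;
    cdai->flags = CDAI_FLAG_NONE;
    cdai->buftype = CDAI_TYPE_SERIAL_NUM;
    cdai->bufsiz = 0;
    cdai->buf = NULL;
    xpt_action((union ccb *)cdai);     /* first pass: provsiz is filled in */

    cdai->bufsiz = cdai->provsiz;
    cdai->buf = malloc(cdai->bufsiz, M_TEMP, M_WAITOK | M_ZERO);
    xpt_action((union ccb *)cdai);     /* second pass: buf holds the data */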
*/ struct ccb_dev_advinfo { struct ccb_hdr ccb_h; uint32_t flags; #define CDAI_FLAG_NONE 0x0 /* No flags set */ #define CDAI_FLAG_STORE 0x1 /* If set, action becomes store */ uint32_t buftype; /* IN: Type of data being requested */ /* NB: buftype is interpreted on a per-transport basis */ #define CDAI_TYPE_SCSI_DEVID 1 #define CDAI_TYPE_SERIAL_NUM 2 #define CDAI_TYPE_PHYS_PATH 3 #define CDAI_TYPE_RCAPLONG 4 #define CDAI_TYPE_EXT_INQ 5 off_t bufsiz; /* IN: Size of external buffer */ #define CAM_SCSI_DEVID_MAXLEN 65536 /* length in buffer is an uint16_t */ off_t provsiz; /* OUT: Size required/used */ uint8_t *buf; /* IN/OUT: Buffer for requested data */ }; /* * CCB for sending async events */ struct ccb_async { struct ccb_hdr ccb_h; uint32_t async_code; off_t async_arg_size; void *async_arg_ptr; }; /* * Union of all CCB types for kernel space allocation. This union should * never be used for manipulating CCBs - its only use is for the allocation * and deallocation of raw CCB space and is the return type of xpt_ccb_alloc * and the argument to xpt_ccb_free. */ union ccb { struct ccb_hdr ccb_h; /* For convenience */ struct ccb_scsiio csio; struct ccb_getdev cgd; struct ccb_getdevlist cgdl; struct ccb_pathinq cpi; struct ccb_relsim crs; struct ccb_setasync csa; struct ccb_setdev csd; struct ccb_pathstats cpis; struct ccb_getdevstats cgds; struct ccb_dev_match cdm; struct ccb_trans_settings cts; struct ccb_calc_geometry ccg; struct ccb_sim_knob knob; struct ccb_abort cab; struct ccb_resetbus crb; struct ccb_resetdev crd; struct ccb_termio tio; struct ccb_accept_tio atio; struct ccb_scsiio ctio; struct ccb_en_lun cel; struct ccb_immed_notify cin; struct ccb_notify_ack cna; struct ccb_immediate_notify cin1; struct ccb_notify_acknowledge cna2; struct ccb_eng_inq cei; struct ccb_eng_exec cee; struct ccb_smpio smpio; struct ccb_rescan crcn; struct ccb_debug cdbg; struct ccb_ataio ataio; struct ccb_dev_advinfo cdai; struct ccb_async casync; }; __BEGIN_DECLS static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout); static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout); static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout); static __inline void cam_fill_csio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int8_t cdb_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_SCSI_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->sense_len = sense_len; csio->cdb_len = 
cdb_len; csio->tag_action = tag_action; } static __inline void cam_fill_ctio(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action, u_int tag_id, u_int init_id, u_int scsi_status, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { csio->ccb_h.func_code = XPT_CONT_TARGET_IO; csio->ccb_h.flags = flags; csio->ccb_h.xflags = 0; csio->ccb_h.retry_count = retries; csio->ccb_h.cbfcnp = cbfcnp; csio->ccb_h.timeout = timeout; csio->data_ptr = data_ptr; csio->dxfer_len = dxfer_len; csio->scsi_status = scsi_status; csio->tag_action = tag_action; csio->tag_id = tag_id; csio->init_id = init_id; } static __inline void cam_fill_ataio(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int tag_action __unused, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int32_t timeout) { ataio->ccb_h.func_code = XPT_ATA_IO; ataio->ccb_h.flags = flags; ataio->ccb_h.retry_count = retries; ataio->ccb_h.cbfcnp = cbfcnp; ataio->ccb_h.timeout = timeout; ataio->data_ptr = data_ptr; ataio->dxfer_len = dxfer_len; ataio->ata_flags = 0; } static __inline void cam_fill_smpio(struct ccb_smpio *smpio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint32_t flags, uint8_t *smp_request, int smp_request_len, uint8_t *smp_response, int smp_response_len, uint32_t timeout) { #ifdef _KERNEL KASSERT((flags & CAM_DIR_MASK) == CAM_DIR_BOTH, ("direction != CAM_DIR_BOTH")); KASSERT((smp_request != NULL) && (smp_response != NULL), ("need valid request and response buffers")); KASSERT((smp_request_len != 0) && (smp_response_len != 0), ("need non-zero request and response lengths")); #endif /*_KERNEL*/ smpio->ccb_h.func_code = XPT_SMP_IO; smpio->ccb_h.flags = flags; smpio->ccb_h.retry_count = retries; smpio->ccb_h.cbfcnp = cbfcnp; smpio->ccb_h.timeout = timeout; smpio->smp_request = smp_request; smpio->smp_request_len = smp_request_len; smpio->smp_response = smp_response; smpio->smp_response_len = smp_response_len; } static __inline void cam_set_ccbstatus(union ccb *ccb, cam_status status) { ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= status; } static __inline cam_status cam_ccb_status(union ccb *ccb) { return ((cam_status)(ccb->ccb_h.status & CAM_STATUS_MASK)); } void cam_calc_geometry(struct ccb_calc_geometry *ccg, int extended); __END_DECLS #endif /* _CAM_CAM_CCB_H */ Index: head/sys/cam/cam_xpt.c =================================================================== --- head/sys/cam/cam_xpt.c (revision 299370) +++ head/sys/cam/cam_xpt.c (revision 299371) @@ -1,5298 +1,5303 @@ /*- * Implementation of the Common Access Method Transport (XPT) layer. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* geometry translation */ #include /* for xpt_print below */ #include "opt_cam.h" /* * This is the maximum number of high powered commands (e.g. start unit) * that can be outstanding at a particular time. */ #ifndef CAM_MAX_HIGHPOWER #define CAM_MAX_HIGHPOWER 4 #endif /* Datastructures internal to the xpt layer */ MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); /* Object for defering XPT actions to a taskqueue */ struct xpt_task { struct task task; void *data1; uintptr_t data2; }; struct xpt_softc { uint32_t xpt_generation; /* number of high powered commands that can go through right now */ struct mtx xpt_highpower_lock; STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; int num_highpower; /* queue for handling async rescan requests. 
*/ TAILQ_HEAD(, ccb_hdr) ccb_scanq; int buses_to_config; int buses_config_done; /* Registered busses */ TAILQ_HEAD(,cam_eb) xpt_busses; u_int bus_generation; struct intr_config_hook *xpt_config_hook; int boot_delay; struct callout boot_callout; struct mtx xpt_topo_lock; struct mtx xpt_lock; struct taskqueue *xpt_taskq; }; typedef enum { DM_RET_COPY = 0x01, DM_RET_FLAG_MASK = 0x0f, DM_RET_NONE = 0x00, DM_RET_STOP = 0x10, DM_RET_DESCEND = 0x20, DM_RET_ERROR = 0x30, DM_RET_ACTION_MASK = 0xf0 } dev_match_ret; typedef enum { XPT_DEPTH_BUS, XPT_DEPTH_TARGET, XPT_DEPTH_DEVICE, XPT_DEPTH_PERIPH } xpt_traverse_depth; struct xpt_traverse_config { xpt_traverse_depth depth; void *tr_func; void *tr_arg; }; typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); /* Transport layer configuration information */ static struct xpt_softc xsoftc; MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, &xsoftc.boot_delay, 0, "Bus registration wait time"); SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); struct cam_doneq { struct mtx_padalign cam_doneq_mtx; STAILQ_HEAD(, ccb_hdr) cam_doneq; int cam_doneq_sleep; }; static struct cam_doneq cam_doneqs[MAXCPU]; static int cam_num_doneqs; static struct proc *cam_proc; SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, &cam_num_doneqs, 0, "Number of completion queues/threads"); struct cam_periph *xpt_periph; static periph_init_t xpt_periph_init; static struct periph_driver xpt_driver = { xpt_periph_init, "xpt", TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, CAM_PERIPH_DRV_EARLY }; PERIPHDRIVER_DECLARE(xpt, xpt_driver); static d_open_t xptopen; static d_close_t xptclose; static d_ioctl_t xptioctl; static d_ioctl_t xptdoioctl; static struct cdevsw xpt_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = xptopen, .d_close = xptclose, .d_ioctl = xptioctl, .d_name = "xpt", }; /* Storage for debugging datastructures */ struct cam_path *cam_dpath; u_int32_t cam_dflags = CAM_DEBUG_FLAGS; SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, &cam_dflags, 0, "Enabled debug flags"); u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, &cam_debug_delay, 0, "Delay in us after each debug message"); /* Our boot-time initialization hook */ static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); static moduledata_t cam_moduledata = { "cam", cam_module_event_handler, NULL }; static int xpt_init(void *); DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); MODULE_VERSION(cam, 1); static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg); static path_id_t xptnextfreepathid(void); static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); static union ccb *xpt_get_ccb(struct cam_periph *periph); static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); static void xpt_run_allocq(struct cam_periph *periph, int sleep); static void xpt_run_allocq_task(void *context, int pending); static void xpt_run_devq(struct cam_devq *devq); static timeout_t 
xpt_release_devq_timeout; static void xpt_release_simq_timeout(void *arg) __unused; static void xpt_acquire_bus(struct cam_eb *bus); static void xpt_release_bus(struct cam_eb *bus); static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue); static struct cam_et* xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); static void xpt_acquire_target(struct cam_et *target); static void xpt_release_target(struct cam_et *target); static struct cam_eb* xpt_find_bus(path_id_t path_id); static struct cam_et* xpt_find_target(struct cam_eb *bus, target_id_t target_id); static struct cam_ed* xpt_find_device(struct cam_et *target, lun_id_t lun_id); static void xpt_config(void *arg); static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, u_int32_t new_priority); static xpt_devicefunc_t xptpassannouncefunc; static void xptaction(struct cam_sim *sim, union ccb *work_ccb); static void xptpoll(struct cam_sim *sim); static void camisr_runqueue(void); static void xpt_done_process(struct ccb_hdr *ccb_h); static void xpt_done_td(void *); static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_eb *bus); static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device); static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_periph *periph); static xpt_busfunc_t xptedtbusfunc; static xpt_targetfunc_t xptedttargetfunc; static xpt_devicefunc_t xptedtdevicefunc; static xpt_periphfunc_t xptedtperiphfunc; static xpt_pdrvfunc_t xptplistpdrvfunc; static xpt_periphfunc_t xptplistperiphfunc; static int xptedtmatch(struct ccb_dev_match *cdm); static int xptperiphlistmatch(struct ccb_dev_match *cdm); static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg); static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg); static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg); static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg); static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg); static xpt_busfunc_t xptdefbusfunc; static xpt_targetfunc_t xptdeftargetfunc; static xpt_devicefunc_t xptdefdevicefunc; static xpt_periphfunc_t xptdefperiphfunc; static void xpt_finishconfig_task(void *context, int pending); static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg); static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id); static xpt_devicefunc_t xptsetasyncfunc; static xpt_busfunc_t xptsetasyncbusfunc; static cam_status xptregister(struct cam_periph *periph, void *arg); static __inline int device_is_queued(struct cam_ed *device); static __inline int xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) { int retval; mtx_assert(&devq->send_mtx, MA_OWNED); if ((dev->ccbq.queue.entries > 0) && (dev->ccbq.dev_openings > 0) && (dev->ccbq.queue.qfrozen_cnt == 0)) { /* * The priority of a device waiting for controller * resources is that of the highest 
priority CCB * enqueued. */ retval = xpt_schedule_dev(&devq->send_queue, &dev->devq_entry, CAMQ_GET_PRIO(&dev->ccbq.queue)); } else { retval = 0; } return (retval); } static __inline int device_is_queued(struct cam_ed *device) { return (device->devq_entry.index != CAM_UNQUEUED_INDEX); } static void xpt_periph_init() { make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); } static int xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) { /* * Only allow read-write access. */ if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) return(EPERM); /* * We don't allow nonblocking access. */ if ((flags & O_NONBLOCK) != 0) { printf("%s: can't do nonblocking access\n", devtoname(dev)); return(ENODEV); } return(0); } static int xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) { return(0); } /* * Don't automatically grab the xpt softc lock here even though this is going * through the xpt device. The xpt device is really just a back door for * accessing other devices and SIMs, so the right thing to do is to grab * the appropriate SIM lock once the bus/SIM is located. */ static int xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); } return (error); } static int xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) { int error; error = 0; switch(cmd) { /* * For the transport layer CAMIOCOMMAND ioctl, we really only want * to accept CCB types that don't quite make sense to send through a * passthrough driver. XPT_PATH_INQ is an exception to this, as stated * in the CAM spec. */ case CAMIOCOMMAND: { union ccb *ccb; union ccb *inccb; struct cam_eb *bus; inccb = (union ccb *)addr; bus = xpt_find_bus(inccb->ccb_h.path_id); if (bus == NULL) return (EINVAL); switch (inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; case XPT_SCAN_TGT: if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { xpt_release_bus(bus); return (EINVAL); } break; default: break; } switch(inccb->ccb_h.func_code) { case XPT_SCAN_BUS: case XPT_RESET_BUS: case XPT_PATH_INQ: case XPT_ENG_INQ: case XPT_SCAN_LUN: case XPT_SCAN_TGT: ccb = xpt_alloc_ccb(); /* * Create a path using the bus, target, and lun the * user passed in. */ if (xpt_create_path(&ccb->ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; xpt_free_ccb(ccb); break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(ccb, inccb); xpt_path_lock(ccb->ccb_h.path); cam_periph_runccb(ccb, NULL, 0, 0, NULL); xpt_path_unlock(ccb->ccb_h.path); bcopy(ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); break; case XPT_DEBUG: { union ccb ccb; /* * This is an immediate CCB, so it's okay to * allocate it on the stack. */ /* * Create a path using the bus, target, and lun the * user passed in. 
*/ if (xpt_create_path(&ccb.ccb_h.path, NULL, inccb->ccb_h.path_id, inccb->ccb_h.target_id, inccb->ccb_h.target_lun) != CAM_REQ_CMP){ error = EINVAL; break; } /* Ensure all of our fields are correct */ xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, inccb->ccb_h.pinfo.priority); xpt_merge_ccb(&ccb, inccb); xpt_action(&ccb); bcopy(&ccb, inccb, sizeof(union ccb)); xpt_free_path(ccb.ccb_h.path); break; } case XPT_DEV_MATCH: { struct cam_periph_map_info mapinfo; struct cam_path *old_path; /* * We can't deal with physical addresses for this * type of transaction. */ if ((inccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) { error = EINVAL; break; } /* * Save this in case the caller had it set to * something in particular. */ old_path = inccb->ccb_h.path; /* * We really don't need a path for the matching * code. The path is needed because of the * debugging statements in xpt_action(). They * assume that the CCB has a valid path. */ inccb->ccb_h.path = xpt_periph->path; bzero(&mapinfo, sizeof(mapinfo)); /* * Map the pattern and match buffers into kernel * virtual address space. */ error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); if (error) { inccb->ccb_h.path = old_path; break; } /* * This is an immediate CCB, we can send it on directly. */ xpt_action(inccb); /* * Map the buffers back into user space. */ cam_periph_unmapmem(inccb, &mapinfo); inccb->ccb_h.path = old_path; error = 0; break; } default: error = ENOTSUP; break; } xpt_release_bus(bus); break; } /* * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input, * with the peripheral driver name and unit name filled in. The other * fields don't really matter as input. The passthrough driver name * ("pass"), and unit number are passed back in the ccb. The current * device generation number, and the index into the device peripheral * driver list, and the status are also passed back. Note that * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is * (or rather should be) impossible for the device peripheral driver * list to change since we look at the whole thing in one pass, and * we do it with lock protection. * */ case CAMGETPASSTHRU: { union ccb *ccb; struct cam_periph *periph; struct periph_driver **p_drv; char *name; u_int unit; int base_periph_found; ccb = (union ccb *)addr; unit = ccb->cgdl.unit_number; name = ccb->cgdl.periph_name; base_periph_found = 0; /* * Sanity check -- make sure we don't get a null peripheral * driver name. */ if (*ccb->cgdl.periph_name == '\0') { error = EINVAL; break; } /* Keep the list from changing while we traverse it */ xpt_lock_buses(); /* first find our driver in the list of drivers */ for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) if (strcmp((*p_drv)->driver_name, name) == 0) break; if (*p_drv == NULL) { xpt_unlock_buses(); ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; break; } /* * Run through every peripheral instance of this driver * and check to see whether it matches the unit passed * in by the user. If it does, get out of the loops and * find the passthrough driver associated with that * peripheral driver.
*/ for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; periph = TAILQ_NEXT(periph, unit_links)) { if (periph->unit_number == unit) break; } /* * If we found the peripheral driver that the user passed * in, go through all of the peripheral drivers for that * particular device and look for a passthrough driver. */ if (periph != NULL) { struct cam_ed *device; int i; base_periph_found = 1; device = periph->path->device; for (i = 0, periph = SLIST_FIRST(&device->periphs); periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++) { /* * Check to see whether we have a * passthrough device or not. */ if (strcmp(periph->periph_name, "pass") == 0) { /* * Fill in the getdevlist fields. */ strcpy(ccb->cgdl.periph_name, periph->periph_name); ccb->cgdl.unit_number = periph->unit_number; if (SLIST_NEXT(periph, periph_links)) ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; else ccb->cgdl.status = CAM_GDEVLIST_LAST_DEVICE; ccb->cgdl.generation = device->generation; ccb->cgdl.index = i; /* * Fill in some CCB header fields * that the user may want. */ ccb->ccb_h.path_id = periph->path->bus->path_id; ccb->ccb_h.target_id = periph->path->target->target_id; ccb->ccb_h.target_lun = periph->path->device->lun_id; ccb->ccb_h.status = CAM_REQ_CMP; break; } } } /* * If the periph is null here, one of two things has * happened. The first possibility is that we couldn't * find the unit number of the particular peripheral driver * that the user is asking about. e.g. the user asks for * the passthrough driver for "da11". We find the list of * "da" peripherals all right, but there is no unit 11. * The other possibility is that we went through the list * of peripheral drivers attached to the device structure, * but didn't find one with the name "pass". Either way, * we return ENOENT, since we couldn't find something. */ if (periph == NULL) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; ccb->cgdl.status = CAM_GDEVLIST_ERROR; *ccb->cgdl.periph_name = '\0'; ccb->cgdl.unit_number = 0; error = ENOENT; /* * It is unfortunate that this is even necessary, * but there are many, many clueless users out there. * If this is true, the user is looking for the * passthrough driver, but doesn't have one in his * kernel. 
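From userland, the CAMGETPASSTHRU contract implemented above looks like this (a sketch; it is essentially what libcam's cam_open_device() does internally, and the header providing the ioctl definition is an assumption here):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <cam/cam.h>
    #include <cam/cam_ccb.h>
    #include <cam/cam_xpt.h>        /* assumed home of CAMGETPASSTHRU */

    int
    main(void)
    {
            union ccb ccb;
            int fd = open("/dev/xpt0", O_RDWR);

            memset(&ccb, 0, sizeof(ccb));
            strlcpy(ccb.cgdl.periph_name, "da",
                sizeof(ccb.cgdl.periph_name));
            ccb.cgdl.unit_number = 0;
            if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0 &&
                (ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
                    printf("da0 -> %s%u\n", ccb.cgdl.periph_name,
                        ccb.cgdl.unit_number);
            return (0);
    }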
*/ if (base_periph_found == 1) { printf("xptioctl: pass driver is not in the " "kernel\n"); printf("xptioctl: put \"device pass\" in " "your kernel config file\n"); } } xpt_unlock_buses(); break; } default: error = ENOTTY; break; } return(error); } static int cam_module_event_handler(module_t mod, int what, void *arg) { int error; switch (what) { case MOD_LOAD: if ((error = xpt_init(NULL)) != 0) return (error); break; case MOD_UNLOAD: return EBUSY; default: return EOPNOTSUPP; } return 0; } static void xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) { if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { xpt_free_path(done_ccb->ccb_h.path); xpt_free_ccb(done_ccb); } else { done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); } xpt_release_boot(); } /* thread to handle bus rescans */ static void xpt_scanner_thread(void *dummy) { union ccb *ccb; struct cam_path path; xpt_lock_buses(); for (;;) { if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, "-", 0); if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xpt_unlock_buses(); /* * Since lock can be dropped inside and path freed * by completion callback even before return here, * take our own path copy for reference. */ xpt_copy_path(&path, ccb->ccb_h.path); xpt_path_lock(&path); xpt_action(ccb); xpt_path_unlock(&path); xpt_release_path(&path); xpt_lock_buses(); } } } void xpt_rescan(union ccb *ccb) { struct ccb_hdr *hdr; /* Prepare request */ if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_BUS; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_TGT; else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) ccb->ccb_h.func_code = XPT_SCAN_LUN; else { xpt_print(ccb->ccb_h.path, "illegal scan path\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; ccb->ccb_h.cbfcnp = xpt_rescan_done; xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); /* Don't make duplicate entries for the same paths. */ xpt_lock_buses(); if (ccb->ccb_h.ppriv_ptr1 == NULL) { TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); xpt_print(ccb->ccb_h.path, "rescan already queued\n"); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } } } TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); xsoftc.buses_to_config++; wakeup(&xsoftc.ccb_scanq); xpt_unlock_buses(); } /* Functions accessed by the peripheral drivers */ static int xpt_init(void *dummy) { struct cam_sim *xpt_sim; struct cam_path *path; struct cam_devq *devq; cam_status status; int error, i; TAILQ_INIT(&xsoftc.xpt_busses); TAILQ_INIT(&xsoftc.ccb_scanq); STAILQ_INIT(&xsoftc.highpowerq); xsoftc.num_highpower = CAM_MAX_HIGHPOWER; mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); #ifdef CAM_BOOT_DELAY /* * Override this value at compile time to assist our users * who don't use loader to boot a kernel. 
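For completeness, this is how a SIM or peripheral driver hands work to the scanner thread above: xpt_rescan() takes ownership of both the CCB and the path, and the wildcard content of the path selects XPT_SCAN_BUS, XPT_SCAN_TGT or XPT_SCAN_LUN per the dispatch at the top of the function. A sketch with a fully specified path (hence XPT_SCAN_LUN):

    static void
    rescan_one_lun(path_id_t bus, target_id_t target, lun_id_t lun)
    {
            union ccb *ccb = xpt_alloc_ccb_nowait();

            if (ccb == NULL)
                    return;
            if (xpt_create_path(&ccb->ccb_h.path, NULL, bus, target,
                lun) != CAM_REQ_CMP) {
                    xpt_free_ccb(ccb);
                    return;
            }
            xpt_rescan(ccb);        /* queued to xpt_scanner_thread() */
    }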
*/ xsoftc.boot_delay = CAM_BOOT_DELAY; #endif /* * The xpt layer is, itself, the equivalent of a SIM. * Allow 16 ccbs in the ccb pool for it. This should * give decent parallelism when we probe busses and * perform other XPT functions. */ devq = cam_simq_alloc(16); xpt_sim = cam_sim_alloc(xptaction, xptpoll, "xpt", /*softc*/NULL, /*unit*/0, /*mtx*/&xsoftc.xpt_lock, /*max_dev_transactions*/0, /*max_tagged_dev_transactions*/0, devq); if (xpt_sim == NULL) return (ENOMEM); mtx_lock(&xsoftc.xpt_lock); if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { mtx_unlock(&xsoftc.xpt_lock); printf("xpt_init: xpt_bus_register failed with status %#x," " failing attach\n", status); return (EINVAL); } mtx_unlock(&xsoftc.xpt_lock); /* * Looking at the XPT from the SIM layer, the XPT is * the equivalent of a peripheral driver. Allocate * a peripheral driver entry for us. */ if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { printf("xpt_init: xpt_create_path failed with status %#x," " failing attach\n", status); return (EINVAL); } xpt_path_lock(path); cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, path, NULL, 0, xpt_sim); xpt_path_unlock(path); xpt_free_path(path); if (cam_num_doneqs < 1) cam_num_doneqs = 1 + mp_ncpus / 6; else if (cam_num_doneqs > MAXCPU) cam_num_doneqs = MAXCPU; for (i = 0; i < cam_num_doneqs; i++) { mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, MTX_DEF); STAILQ_INIT(&cam_doneqs[i].cam_doneq); error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); if (error != 0) { cam_num_doneqs = i; break; } } if (cam_num_doneqs < 1) { printf("xpt_init: Cannot init completion queues " "- failing attach\n"); return (ENOMEM); } /* * Register a callback for when interrupts are enabled. 
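 * The hook is what defers the initial bus scans until interrupt
 * delivery works: xpt_config() is installed as ich_func just below,
 * and the intrhook machinery invokes it once interrupts are enabled,
 * at which point bus configuration actually starts.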
 */
    xsoftc.xpt_config_hook =
        (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
                                          M_CAMXPT, M_NOWAIT | M_ZERO);
    if (xsoftc.xpt_config_hook == NULL) {
        printf("xpt_init: Cannot malloc config hook "
               "- failing attach\n");
        return (ENOMEM);
    }
    xsoftc.xpt_config_hook->ich_func = xpt_config;
    if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
        free (xsoftc.xpt_config_hook, M_CAMXPT);
        printf("xpt_init: config_intrhook_establish failed "
               "- failing attach\n");
    }

    return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
    struct cam_sim *xpt_sim;

    if (periph == NULL) {
        printf("xptregister: periph was NULL!!\n");
        return(CAM_REQ_CMP_ERR);
    }

    xpt_sim = (struct cam_sim *)arg;
    xpt_sim->softc = periph;
    xpt_periph = periph;
    periph->softc = NULL;

    return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
    struct cam_ed *device;
    int32_t status;

    TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
    device = periph->path->device;
    status = CAM_REQ_CMP;
    if (device != NULL) {
        mtx_lock(&device->target->bus->eb_mtx);
        device->generation++;
        SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
        mtx_unlock(&device->target->bus->eb_mtx);
        atomic_add_32(&xsoftc.xpt_generation, 1);
    }

    return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
    struct cam_ed *device;

    device = periph->path->device;
    if (device != NULL) {
        mtx_lock(&device->target->bus->eb_mtx);
        device->generation++;
        SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
        mtx_unlock(&device->target->bus->eb_mtx);
        atomic_add_32(&xsoftc.xpt_generation, 1);
    }
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
    struct cam_path *path = periph->path;

    cam_periph_assert(periph, MA_OWNED);
    periph->flags |= CAM_PERIPH_ANNOUNCED;

    printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
           periph->periph_name, periph->unit_number,
           path->bus->sim->sim_name,
           path->bus->sim->unit_number,
           path->bus->sim->bus_id,
           path->bus->path_id,
           path->target->target_id,
           (uintmax_t)path->device->lun_id);
    printf("%s%d: ", periph->periph_name, periph->unit_number);
    if (path->device->protocol == PROTO_SCSI)
        scsi_print_inquiry(&path->device->inq_data);
    else if (path->device->protocol == PROTO_ATA ||
        path->device->protocol == PROTO_SATAPM)
        ata_print_ident(&path->device->ident_data);
    else if (path->device->protocol == PROTO_SEMB)
        semb_print_ident(
            (struct sep_identify_data *)&path->device->ident_data);
    else
        printf("Unknown protocol device\n");
    if (path->device->serial_num_len > 0) {
        /* Don't wrap the screen - print only the first 60 chars */
        printf("%s%d: Serial Number %.60s\n", periph->periph_name,
               periph->unit_number, path->device->serial_num);
    }
    /* Announce transport details. */
    (*(path->bus->xport->announce))(periph);
    /* Announce command queueing. */
    if (path->device->inq_flags & SID_CmdQue
     || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
        printf("%s%d: Command Queueing enabled\n",
               periph->periph_name, periph->unit_number);
    }
    /*
     * Announce the caller's details if any were passed in.
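 * (By the time we get here the printf()s above have already
 * produced console output of the form, for example,
 *	da0 at mpt0 bus 0 scbus0 target 1 lun 0
 * where the names and numbers depend on the attached hardware.)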
*/ if (announce_string != NULL) printf("%s%d: %s\n", periph->periph_name, periph->unit_number, announce_string); } void xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) { if (quirks != 0) { printf("%s%d: quirks=0x%b\n", periph->periph_name, periph->unit_number, quirks, bit_string); } } void xpt_denounce_periph(struct cam_periph *periph) { struct cam_path *path = periph->path; cam_periph_assert(periph, MA_OWNED); printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", periph->periph_name, periph->unit_number, path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id, path->bus->path_id, path->target->target_id, (uintmax_t)path->device->lun_id); printf("%s%d: ", periph->periph_name, periph->unit_number); if (path->device->protocol == PROTO_SCSI) scsi_print_inquiry_short(&path->device->inq_data); else if (path->device->protocol == PROTO_ATA || path->device->protocol == PROTO_SATAPM) ata_print_ident_short(&path->device->ident_data); else if (path->device->protocol == PROTO_SEMB) semb_print_ident_short( (struct sep_identify_data *)&path->device->ident_data); else printf("Unknown protocol device"); if (path->device->serial_num_len > 0) printf(" s/n %.60s", path->device->serial_num); printf(" detached\n"); } int xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) { int ret = -1, l; struct ccb_dev_advinfo cdai; struct scsi_vpd_id_descriptor *idd; xpt_path_assert(path, MA_OWNED); memset(&cdai, 0, sizeof(cdai)); xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.bufsiz = len; if (!strcmp(attr, "GEOM::ident")) cdai.buftype = CDAI_TYPE_SERIAL_NUM; else if (!strcmp(attr, "GEOM::physpath")) cdai.buftype = CDAI_TYPE_PHYS_PATH; else if (strcmp(attr, "GEOM::lunid") == 0 || strcmp(attr, "GEOM::lunname") == 0) { cdai.buftype = CDAI_TYPE_SCSI_DEVID; cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; } else goto out; cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); if (cdai.buf == NULL) { ret = ENOMEM; goto out; } xpt_action((union ccb *)&cdai); /* can only be synchronous */ if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.provsiz == 0) goto out; if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) { if (strcmp(attr, "GEOM::lunid") == 0) { idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_naa); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_eui64); } else idd = NULL; if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_t10); if (idd == NULL) idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, cdai.provsiz, scsi_devid_is_lun_name); if (idd == NULL) goto out; ret = 0; if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { if (idd->length < len) { for (l = 0; l < idd->length; l++) buf[l] = idd->identifier[l] ? 
                        idd->identifier[l] : ' ';
                buf[l] = 0;
            } else
                ret = EFAULT;
        } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
            SVPD_ID_CODESET_UTF8) {
            l = strnlen(idd->identifier, idd->length);
            if (l < len) {
                bcopy(idd->identifier, buf, l);
                buf[l] = 0;
            } else
                ret = EFAULT;
        } else {
            if (idd->length * 2 < len) {
                for (l = 0; l < idd->length; l++)
                    sprintf(buf + l * 2, "%02x",
                        idd->identifier[l]);
            } else
                ret = EFAULT;
        }
    } else {
        ret = 0;
        if (strlcpy(buf, cdai.buf, len) >= len)
            ret = EFAULT;
    }

out:
    if (cdai.buf != NULL)
        free(cdai.buf, M_CAMXPT);
    return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
    struct cam_eb *bus)
{
    dev_match_ret retval;
    u_int i;

    retval = DM_RET_NONE;

    /*
     * If we aren't given something to match against, that's an error.
     */
    if (bus == NULL)
        return(DM_RET_ERROR);

    /*
     * If there are no match entries, then this bus matches no
     * matter what.
     */
    if ((patterns == NULL) || (num_patterns == 0))
        return(DM_RET_DESCEND | DM_RET_COPY);

    for (i = 0; i < num_patterns; i++) {
        struct bus_match_pattern *cur_pattern;

        /*
         * If the pattern in question isn't for a bus node, we
         * aren't interested.  However, we do indicate to the
         * calling routine that we should continue descending the
         * tree, since the user wants to match against lower-level
         * EDT elements.
         */
        if (patterns[i].type != DEV_MATCH_BUS) {
            if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_DESCEND;
            continue;
        }

        cur_pattern = &patterns[i].pattern.bus_pattern;

        /*
         * If they want to match any bus node, we give them this
         * bus node.
         */
        if (cur_pattern->flags == BUS_MATCH_ANY) {
            /* set the copy flag */
            retval |= DM_RET_COPY;

            /*
             * If we've already decided on an action, go ahead
             * and return.
             */
            if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                return(retval);
        }

        /*
         * Not sure why someone would do this...
         */
        if (cur_pattern->flags == BUS_MATCH_NONE)
            continue;

        if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
         && (cur_pattern->path_id != bus->path_id))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
         && (cur_pattern->bus_id != bus->sim->bus_id))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
         && (cur_pattern->unit_number != bus->sim->unit_number))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
         && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
             DEV_IDLEN) != 0))
            continue;

        /*
         * If we get to this point, the user definitely wants
         * information on this bus.  So tell the caller to copy the
         * data out.
         */
        retval |= DM_RET_COPY;

        /*
         * If the return action has been set to descend, then we
         * know that we've already seen a non-bus matching
         * expression, therefore we need to further descend the tree.
         * This won't change by continuing around the loop, so we
         * go ahead and return.  If we haven't seen a non-bus
         * matching expression, we keep going around the loop until
         * we exhaust the matching expressions.  We'll set the stop
         * flag once we fall out of the loop.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
            return(retval);
    }

    /*
     * If the return action hasn't been set to descend yet, that means
     * we haven't seen anything other than bus matching patterns.  So
     * tell the caller to stop descending the tree -- the user doesn't
     * want to match against lower level tree elements.
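 *
 * To summarize the contract shared by all of the match functions:
 * the DM_RET_ACTION_MASK bits carry exactly one of DM_RET_NONE,
 * DM_RET_STOP, DM_RET_DESCEND or DM_RET_ERROR for the traversal
 * code, while DM_RET_COPY is a separate flag or'ed in to request
 * that this node be copied out to the user.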
*/ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, struct cam_ed *device) { dev_match_ret retval; u_int i; retval = DM_RET_NONE; /* * If we aren't given something to match against, that's an error. */ if (device == NULL) return(DM_RET_ERROR); /* * If there are no match entries, then this device matches no * matter what. */ if ((patterns == NULL) || (num_patterns == 0)) return(DM_RET_DESCEND | DM_RET_COPY); for (i = 0; i < num_patterns; i++) { struct device_match_pattern *cur_pattern; struct scsi_vpd_device_id *device_id_page; /* * If the pattern in question isn't for a device node, we * aren't interested. */ if (patterns[i].type != DEV_MATCH_DEVICE) { if ((patterns[i].type == DEV_MATCH_PERIPH) && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) retval |= DM_RET_DESCEND; continue; } cur_pattern = &patterns[i].pattern.device_pattern; /* Error out if mutually exclusive options are specified. */ if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) return(DM_RET_ERROR); /* * If they want to match any device node, we give them any * device node. */ if (cur_pattern->flags == DEV_MATCH_ANY) goto copy_dev_node; /* * Not sure why someone would do this... */ if (cur_pattern->flags == DEV_MATCH_NONE) continue; if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) && (cur_pattern->path_id != device->target->bus->path_id)) continue; if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) && (cur_pattern->target_id != device->target->target_id)) continue; if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) && (cur_pattern->target_lun != device->lun_id)) continue; if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) && (cam_quirkmatch((caddr_t)&device->inq_data, (caddr_t)&cur_pattern->data.inq_pat, 1, sizeof(cur_pattern->data.inq_pat), scsi_static_inquiry_match) == NULL)) continue; device_id_page = (struct scsi_vpd_device_id *)device->device_id; if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN || scsi_devid_match((uint8_t *)device_id_page->desc_list, device->device_id_len - SVPD_DEVICE_ID_HDR_LEN, cur_pattern->data.devid_pat.id, cur_pattern->data.devid_pat.id_len) != 0)) continue; copy_dev_node: /* * If we get to this point, the user definitely wants * information on this device. So tell the caller to copy * the data out. */ retval |= DM_RET_COPY; /* * If the return action has been set to descend, then we * know that we've already seen a peripheral matching * expression, therefore we need to further descend the tree. * This won't change by continuing around the loop, so we * go ahead and return. If we haven't seen a peripheral * matching expression, we keep going around the loop until * we exhaust the matching expressions. We'll set the stop * flag once we fall out of the loop. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) return(retval); } /* * If the return action hasn't been set to descend yet, that means * we haven't seen any peripheral matching patterns. So tell the * caller to stop descending the tree -- the user doesn't want to * match against lower level tree elements. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) retval |= DM_RET_STOP; return(retval); } /* * Match a single peripheral against any number of match patterns. 
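 *
 * For example, a hypothetical pattern using the flags tested below:
 * PERIPH_MATCH_NAME "da" combined with PERIPH_MATCH_UNIT 1 matches
 * only the da1 peripheral, while a pattern whose flags are
 * PERIPH_MATCH_ANY matches every peripheral it is compared against.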
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
    struct cam_periph *periph)
{
    dev_match_ret retval;
    u_int i;

    /*
     * If we aren't given something to match against, that's an error.
     */
    if (periph == NULL)
        return(DM_RET_ERROR);

    /*
     * If there are no match entries, then this peripheral matches no
     * matter what.
     */
    if ((patterns == NULL) || (num_patterns == 0))
        return(DM_RET_STOP | DM_RET_COPY);

    /*
     * There aren't any nodes below a peripheral node, so there's no
     * reason to descend the tree any further.
     */
    retval = DM_RET_STOP;

    for (i = 0; i < num_patterns; i++) {
        struct periph_match_pattern *cur_pattern;

        /*
         * If the pattern in question isn't for a peripheral, we
         * aren't interested.
         */
        if (patterns[i].type != DEV_MATCH_PERIPH)
            continue;

        cur_pattern = &patterns[i].pattern.periph_pattern;

        /*
         * If they want to match on anything, then we will do so.
         */
        if (cur_pattern->flags == PERIPH_MATCH_ANY) {
            /* set the copy flag */
            retval |= DM_RET_COPY;

            /*
             * We've already set the return action to stop,
             * since there are no nodes below peripherals in
             * the tree.
             */
            return(retval);
        }

        /*
         * Not sure why someone would do this...
         */
        if (cur_pattern->flags == PERIPH_MATCH_NONE)
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
         && (cur_pattern->path_id != periph->path->bus->path_id))
            continue;

        /*
         * For the target and lun IDs, we have to make sure the
         * target and lun pointers aren't NULL.  The xpt peripheral
         * has a wildcard target and device.
         */
        if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
         && ((periph->path->target == NULL)
         ||(cur_pattern->target_id != periph->path->target->target_id)))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
         && ((periph->path->device == NULL)
         || (cur_pattern->target_lun != periph->path->device->lun_id)))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
         && (cur_pattern->unit_number != periph->unit_number))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
         && (strncmp(cur_pattern->periph_name, periph->periph_name,
             DEV_IDLEN) != 0))
            continue;

        /*
         * If we get to this point, the user definitely wants
         * information on this peripheral.  So tell the caller to
         * copy the data out.
         */
        retval |= DM_RET_COPY;

        /*
         * The return action has already been set to stop, since
         * peripherals don't have any nodes below them in the EDT.
         */
        return(retval);
    }

    /*
     * If we get to this point, the peripheral that was passed in
     * doesn't match any of the patterns.
     */
    return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
    struct ccb_dev_match *cdm;
    struct cam_et *target;
    dev_match_ret retval;

    cdm = (struct ccb_dev_match *)arg;

    /*
     * If our position is for something deeper in the tree, that means
     * that we've already seen this node.  So, we keep going down.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target != NULL))
        retval = DM_RET_DESCEND;
    else
        retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

    /*
     * If we got an error, bail out of the search.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
        cdm->status = CAM_DEV_MATCH_ERROR;
        return(0);
    }

    /*
     * If the copy flag is set, copy this bus out.
     */
    if (retval & DM_RET_COPY) {
        int spaceleft, j;

        spaceleft = cdm->match_buf_len - (cdm->num_matches *
            sizeof(struct dev_match_result));

        /*
         * If we don't have enough space to put in another
         * match result, save our position and tell the
         * user there are more devices to check.
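 *
 * The saved position and the generation numbers recorded below are
 * what make this iteration restartable: a follow-up XPT_DEV_MATCH
 * call resumes at this bus, and a generation mismatch lets us
 * report CAM_DEV_MATCH_LIST_CHANGED instead of walking a stale
 * list.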
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; cdm->pos.cookie.bus = bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_BUS; cdm->matches[j].result.bus_result.path_id = bus->path_id; cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; cdm->matches[j].result.bus_result.unit_number = bus->sim->unit_number; strncpy(cdm->matches[j].result.bus_result.dev_name, bus->sim->sim_name, DEV_IDLEN); } /* * If the user is only interested in busses, there's no * reason to descend to the next level in the tree. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a target generation recorded, check it to * make sure the target list hasn't changed. */ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target != NULL)) { if ((cdm->pos.generations[CAM_TARGET_GENERATION] != bus->generation)) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return (0); } target = (struct cam_et *)cdm->pos.cookie.target; target->refcount++; } else target = NULL; mtx_unlock(&bus->eb_mtx); return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); } static int xptedttargetfunc(struct cam_et *target, void *arg) { struct ccb_dev_match *cdm; struct cam_eb *bus; struct cam_ed *device; cdm = (struct ccb_dev_match *)arg; bus = target->bus; /* * If there is a device list generation recorded, check it to * make sure the device list hasn't changed. */ mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device != NULL)) { if (cdm->pos.generations[CAM_DEV_GENERATION] != target->generation) { mtx_unlock(&bus->eb_mtx); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } device = (struct cam_ed *)cdm->pos.cookie.device; device->refcount++; } else device = NULL; mtx_unlock(&bus->eb_mtx); return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); } static int xptedtdevicefunc(struct cam_ed *device, void *arg) { struct cam_eb *bus; struct cam_periph *periph; struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; bus = device->target->bus; /* * If our position is for something deeper in the tree, that means * that we've already seen this node. So, we keep going down. */ if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) retval = DM_RET_DESCEND; else retval = xptdevicematch(cdm->patterns, cdm->num_patterns, device); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this device out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; cdm->pos.cookie.bus = device->target->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = device->target; cdm->pos.generations[CAM_TARGET_GENERATION] = device->target->bus->generation; cdm->pos.cookie.device = device; cdm->pos.generations[CAM_DEV_GENERATION] = device->target->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_DEVICE; cdm->matches[j].result.device_result.path_id = device->target->bus->path_id; cdm->matches[j].result.device_result.target_id = device->target->target_id; cdm->matches[j].result.device_result.target_lun = device->lun_id; cdm->matches[j].result.device_result.protocol = device->protocol; bcopy(&device->inq_data, &cdm->matches[j].result.device_result.inq_data, sizeof(struct scsi_inquiry_data)); bcopy(&device->ident_data, &cdm->matches[j].result.device_result.ident_data, sizeof(struct ata_params)); /* Let the user know whether this device is unconfigured */ if (device->flags & CAM_DEV_UNCONFIGURED) cdm->matches[j].result.device_result.flags = DEV_RESULT_UNCONFIGURED; else cdm->matches[j].result.device_result.flags = DEV_RESULT_NOFLAG; } /* * If the user isn't interested in peripherals, don't descend * the tree any further. */ if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) return(1); /* * If there is a peripheral list generation recorded, make sure * it hasn't changed. */ xpt_lock_buses(); mtx_lock(&bus->eb_mtx); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus == bus) && (cdm->pos.position_type & CAM_DEV_POS_TARGET) && (cdm->pos.cookie.target == device->target) && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) && (cdm->pos.cookie.device == device) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != device->generation) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); } static int xptedtperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | CAM_DEV_POS_PERIPH; cdm->pos.cookie.bus = periph->path->bus; cdm->pos.generations[CAM_BUS_GENERATION]= xsoftc.bus_generation; cdm->pos.cookie.target = periph->path->target; cdm->pos.generations[CAM_TARGET_GENERATION] = periph->path->bus->generation; cdm->pos.cookie.device = periph->path->device; cdm->pos.generations[CAM_DEV_GENERATION] = periph->path->target->generation; cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = periph->path->device->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; strncpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, DEV_IDLEN); } return(1); } static int xptedtmatch(struct ccb_dev_match *cdm) { struct cam_eb *bus; int ret; cdm->num_matches = 0; /* * Check the bus list generation. If it has changed, the user * needs to reset everything and start over. */ xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_BUS) && (cdm->pos.cookie.bus != NULL)) { if (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } bus = (struct cam_eb *)cdm->pos.cookie.bus; bus->refcount++; } else bus = NULL; xpt_unlock_buses(); ret = xptbustraverse(bus, xptedtbusfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the EDT. It also means that one of the subroutines * has set the status field to the proper value. If we get back 1, * we've fully traversed the EDT and copied out any matching entries. */ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) { struct cam_periph *periph; struct ccb_dev_match *cdm; cdm = (struct ccb_dev_match *)arg; xpt_lock_buses(); if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv == pdrv) && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) && (cdm->pos.cookie.periph != NULL)) { if (cdm->pos.generations[CAM_PERIPH_GENERATION] != (*pdrv)->generation) { xpt_unlock_buses(); cdm->status = CAM_DEV_MATCH_LIST_CHANGED; return(0); } periph = (struct cam_periph *)cdm->pos.cookie.periph; periph->refcount++; } else periph = NULL; xpt_unlock_buses(); return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); } static int xptplistperiphfunc(struct cam_periph *periph, void *arg) { struct ccb_dev_match *cdm; dev_match_ret retval; cdm = (struct ccb_dev_match *)arg; retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } /* * If the copy flag is set, copy this peripheral out. */ if (retval & DM_RET_COPY) { int spaceleft, j; spaceleft = cdm->match_buf_len - (cdm->num_matches * sizeof(struct dev_match_result)); /* * If we don't have enough space to put in another * match result, save our position and tell the * user there are more devices to check. 
*/ if (spaceleft < sizeof(struct dev_match_result)) { struct periph_driver **pdrv; pdrv = NULL; bzero(&cdm->pos, sizeof(cdm->pos)); cdm->pos.position_type = CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | CAM_DEV_POS_PERIPH; /* * This may look a bit non-sensical, but it is * actually quite logical. There are very few * peripheral drivers, and bloating every peripheral * structure with a pointer back to its parent * peripheral driver linker set entry would cost * more in the long run than doing this quick lookup. */ for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { if (strcmp((*pdrv)->driver_name, periph->periph_name) == 0) break; } if (*pdrv == NULL) { cdm->status = CAM_DEV_MATCH_ERROR; return(0); } cdm->pos.cookie.pdrv = pdrv; /* * The periph generation slot does double duty, as * does the periph pointer slot. They are used for * both edt and pdrv lookups and positioning. */ cdm->pos.cookie.periph = periph; cdm->pos.generations[CAM_PERIPH_GENERATION] = (*pdrv)->generation; cdm->status = CAM_DEV_MATCH_MORE; return(0); } j = cdm->num_matches; cdm->num_matches++; cdm->matches[j].type = DEV_MATCH_PERIPH; cdm->matches[j].result.periph_result.path_id = periph->path->bus->path_id; /* * The transport layer peripheral doesn't have a target or * lun. */ if (periph->path->target) cdm->matches[j].result.periph_result.target_id = periph->path->target->target_id; else cdm->matches[j].result.periph_result.target_id = CAM_TARGET_WILDCARD; if (periph->path->device) cdm->matches[j].result.periph_result.target_lun = periph->path->device->lun_id; else cdm->matches[j].result.periph_result.target_lun = CAM_LUN_WILDCARD; cdm->matches[j].result.periph_result.unit_number = periph->unit_number; strncpy(cdm->matches[j].result.periph_result.periph_name, periph->periph_name, DEV_IDLEN); } return(1); } static int xptperiphlistmatch(struct ccb_dev_match *cdm) { int ret; cdm->num_matches = 0; /* * At this point in the edt traversal function, we check the bus * list generation to make sure that no busses have been added or * removed since the user last sent a XPT_DEV_MATCH ccb through. * For the peripheral driver list traversal function, however, we * don't have to worry about new peripheral driver types coming or * going; they're in a linker set, and therefore can't change * without a recompile. */ if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) && (cdm->pos.cookie.pdrv != NULL)) ret = xptpdrvtraverse( (struct periph_driver **)cdm->pos.cookie.pdrv, xptplistpdrvfunc, cdm); else ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); /* * If we get back 0, that means that we had to stop before fully * traversing the peripheral driver tree. It also means that one of * the subroutines has set the status field to the proper value. If * we get back 1, we've fully traversed the EDT and copied out any * matching entries. 
*/ if (ret == 1) cdm->status = CAM_DEV_MATCH_LAST; return(ret); } static int xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) { struct cam_eb *bus, *next_bus; int retval; retval = 1; if (start_bus) bus = start_bus; else { xpt_lock_buses(); bus = TAILQ_FIRST(&xsoftc.xpt_busses); if (bus == NULL) { xpt_unlock_buses(); return (retval); } bus->refcount++; xpt_unlock_buses(); } for (; bus != NULL; bus = next_bus) { retval = tr_func(bus, arg); if (retval == 0) { xpt_release_bus(bus); break; } xpt_lock_buses(); next_bus = TAILQ_NEXT(bus, links); if (next_bus) next_bus->refcount++; xpt_unlock_buses(); xpt_release_bus(bus); } return(retval); } static int xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, xpt_targetfunc_t *tr_func, void *arg) { struct cam_et *target, *next_target; int retval; retval = 1; if (start_target) target = start_target; else { mtx_lock(&bus->eb_mtx); target = TAILQ_FIRST(&bus->et_entries); if (target == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } target->refcount++; mtx_unlock(&bus->eb_mtx); } for (; target != NULL; target = next_target) { retval = tr_func(target, arg); if (retval == 0) { xpt_release_target(target); break; } mtx_lock(&bus->eb_mtx); next_target = TAILQ_NEXT(target, links); if (next_target) next_target->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_target(target); } return(retval); } static int xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, xpt_devicefunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_ed *device, *next_device; int retval; retval = 1; bus = target->bus; if (start_device) device = start_device; else { mtx_lock(&bus->eb_mtx); device = TAILQ_FIRST(&target->ed_entries); if (device == NULL) { mtx_unlock(&bus->eb_mtx); return (retval); } device->refcount++; mtx_unlock(&bus->eb_mtx); } for (; device != NULL; device = next_device) { mtx_lock(&device->device_mtx); retval = tr_func(device, arg); mtx_unlock(&device->device_mtx); if (retval == 0) { xpt_release_device(device); break; } mtx_lock(&bus->eb_mtx); next_device = TAILQ_NEXT(device, links); if (next_device) next_device->refcount++; mtx_unlock(&bus->eb_mtx); xpt_release_device(device); } return(retval); } static int xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_eb *bus; struct cam_periph *periph, *next_periph; int retval; retval = 1; bus = device->target->bus; if (start_periph) periph = start_periph; else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); periph = SLIST_FIRST(&device->periphs); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = SLIST_NEXT(periph, periph_links); if (periph == NULL) { mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); return (retval); } periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { retval = tr_func(periph, arg); if (retval == 0) { cam_periph_release_locked(periph); break; } xpt_lock_buses(); mtx_lock(&bus->eb_mtx); next_periph = SLIST_NEXT(periph, periph_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = SLIST_NEXT(next_periph, periph_links); if (next_periph) next_periph->refcount++; mtx_unlock(&bus->eb_mtx); xpt_unlock_buses(); cam_periph_release_locked(periph); } return(retval); } static int xptpdrvtraverse(struct periph_driver **start_pdrv, xpt_pdrvfunc_t *tr_func, void *arg) { struct periph_driver **pdrv; int retval; retval = 1; /* * We don't traverse the peripheral driver 
list like we do the * other lists, because it is a linker set, and therefore cannot be * changed during runtime. If the peripheral driver list is ever * re-done to be something other than a linker set (i.e. it can * change while the system is running), the list traversal should * be modified to work like the other traversal functions. */ for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); *pdrv != NULL; pdrv++) { retval = tr_func(pdrv, arg); if (retval == 0) return(retval); } return(retval); } static int xptpdperiphtraverse(struct periph_driver **pdrv, struct cam_periph *start_periph, xpt_periphfunc_t *tr_func, void *arg) { struct cam_periph *periph, *next_periph; int retval; retval = 1; if (start_periph) periph = start_periph; else { xpt_lock_buses(); periph = TAILQ_FIRST(&(*pdrv)->units); while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) periph = TAILQ_NEXT(periph, unit_links); if (periph == NULL) { xpt_unlock_buses(); return (retval); } periph->refcount++; xpt_unlock_buses(); } for (; periph != NULL; periph = next_periph) { cam_periph_lock(periph); retval = tr_func(periph, arg); cam_periph_unlock(periph); if (retval == 0) { cam_periph_release(periph); break; } xpt_lock_buses(); next_periph = TAILQ_NEXT(periph, unit_links); while (next_periph != NULL && (next_periph->flags & CAM_PERIPH_FREE) != 0) next_periph = TAILQ_NEXT(next_periph, unit_links); if (next_periph) next_periph->refcount++; xpt_unlock_buses(); cam_periph_release(periph); } return(retval); } static int xptdefbusfunc(struct cam_eb *bus, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_BUS) { xpt_busfunc_t *tr_func; tr_func = (xpt_busfunc_t *)tr_config->tr_func; return(tr_func(bus, tr_config->tr_arg)); } else return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); } static int xptdeftargetfunc(struct cam_et *target, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_TARGET) { xpt_targetfunc_t *tr_func; tr_func = (xpt_targetfunc_t *)tr_config->tr_func; return(tr_func(target, tr_config->tr_arg)); } else return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); } static int xptdefdevicefunc(struct cam_ed *device, void *arg) { struct xpt_traverse_config *tr_config; tr_config = (struct xpt_traverse_config *)arg; if (tr_config->depth == XPT_DEPTH_DEVICE) { xpt_devicefunc_t *tr_func; tr_func = (xpt_devicefunc_t *)tr_config->tr_func; return(tr_func(device, tr_config->tr_arg)); } else return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); } static int xptdefperiphfunc(struct cam_periph *periph, void *arg) { struct xpt_traverse_config *tr_config; xpt_periphfunc_t *tr_func; tr_config = (struct xpt_traverse_config *)arg; tr_func = (xpt_periphfunc_t *)tr_config->tr_func; /* * Unlike the other default functions, we don't check for depth * here. The peripheral driver level is the last level in the EDT, * so if we're here, we should execute the function in question. */ return(tr_func(periph, tr_config->tr_arg)); } /* * Execute the given function for every bus in the EDT. */ static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_BUS; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } /* * Execute the given function for every device in the EDT. 
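 *
 * (Usage sketch: the async-callback support below is a typical
 * caller; registering for AC_FOUND_DEVICE walks every existing
 * device with xptsetasyncfunc via this helper.  The registration
 * call site itself lies outside this excerpt.)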
*/ static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) { struct xpt_traverse_config tr_config; tr_config.depth = XPT_DEPTH_DEVICE; tr_config.tr_func = tr_func; tr_config.tr_arg = arg; return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); } static int xptsetasyncfunc(struct cam_ed *device, void *arg) { struct cam_path path; struct ccb_getdev cgd; struct ccb_setasync *csa = (struct ccb_setasync *)arg; /* * Don't report unconfigured devices (Wildcard devs, * devices only for target mode, device instances * that have been invalidated but are waiting for * their last reference count to be released). */ if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) return (1); xpt_compile_path(&path, NULL, device->target->bus->path_id, device->target->target_id, device->lun_id); xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); csa->callback(csa->callback_arg, AC_FOUND_DEVICE, &path, &cgd); xpt_release_path(&path); return(1); } static int xptsetasyncbusfunc(struct cam_eb *bus, void *arg) { struct cam_path path; struct ccb_pathinq cpi; struct ccb_setasync *csa = (struct ccb_setasync *)arg; xpt_compile_path(&path, /*periph*/NULL, bus->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); xpt_path_lock(&path); xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); csa->callback(csa->callback_arg, AC_PATH_REGISTERED, &path, &cpi); xpt_path_unlock(&path); xpt_release_path(&path); return(1); } void xpt_action(union ccb *start_ccb) { CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action: func=%#x\n", start_ccb->ccb_h.func_code)); start_ccb->ccb_h.status = CAM_REQ_INPROG; (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb); } void xpt_action_default(union ccb *start_ccb) { struct cam_path *path; struct cam_sim *sim; int lock; path = start_ccb->ccb_h.path; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default: func=%#x\n", start_ccb->ccb_h.func_code)); switch (start_ccb->ccb_h.func_code) { case XPT_SCSI_IO: { struct cam_ed *device; /* * For the sake of compatibility with SCSI-1 * devices that may not understand the identify * message, we include lun information in the * second byte of all commands. SCSI-1 specifies * that luns are a 3 bit value and reserves only 3 * bits for lun information in the CDB. Later * revisions of the SCSI spec allow for more than 8 * luns, but have deprecated lun information in the * CDB. So, if the lun won't fit, we must omit. * * Also be aware that during initial probing for devices, * the inquiry information is unknown but initialized to 0. * This means that this code will be exercised while probing * devices with an ANSI revision greater than 2. 
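 *
 * A worked example of the masking below: lun 2 on a SCSI-1 target
 * becomes cdb_bytes[1] |= 2 << 5, placing the lun in bits 7-5 of
 * CDB byte 1.  Lun 8 or above no longer fits in those 3 bits, so
 * the "target_lun < 8" test below omits it entirely.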
         */
        device = path->device;
        if (device->protocol_version <= SCSI_REV_2
         && start_ccb->ccb_h.target_lun < 8
         && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

            start_ccb->csio.cdb_io.cdb_bytes[1] |=
                start_ccb->ccb_h.target_lun << 5;
        }
        start_ccb->csio.scsi_status = SCSI_STATUS_OK;
    }
    /* FALLTHROUGH */
    case XPT_TARGET_IO:
    case XPT_CONT_TARGET_IO:
        start_ccb->csio.sense_resid = 0;
        start_ccb->csio.resid = 0;
        /* FALLTHROUGH */
    case XPT_ATA_IO:
        if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
            start_ccb->ataio.resid = 0;
        /* FALLTHROUGH */
    case XPT_RESET_DEV:
    case XPT_ENG_EXEC:
    case XPT_SMP_IO:
    {
        struct cam_devq *devq;

        devq = path->bus->sim->devq;
        mtx_lock(&devq->send_mtx);
        cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
        if (xpt_schedule_devq(devq, path->device) != 0)
            xpt_run_devq(devq);
        mtx_unlock(&devq->send_mtx);
        break;
    }
    case XPT_CALC_GEOMETRY:
        /* Filter out garbage */
        if (start_ccb->ccg.block_size == 0
         || start_ccb->ccg.volume_size == 0) {
            start_ccb->ccg.cylinders = 0;
            start_ccb->ccg.heads = 0;
            start_ccb->ccg.secs_per_track = 0;
            start_ccb->ccb_h.status = CAM_REQ_CMP;
            break;
        }
#if defined(PC98) || defined(__sparc64__)
        /*
         * In a PC-98 system, geometry translation depends on
         * the "real" device geometry obtained from mode page 4.
         * SCSI geometry translation is performed in the
         * initialization routine of the SCSI BIOS and the result
         * stored in host memory.  If the translation is available
         * in host memory, use it.  If not, rely on the default
         * translation the device driver performs.
         * For sparc64, we may need to adjust the geometry of large
         * disks in order to fit the limitations of the 16-bit
         * fields of the VTOC8 disk label.
         */
        if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
            start_ccb->ccb_h.status = CAM_REQ_CMP;
            break;
        }
#endif
        goto call_sim;
    case XPT_ABORT:
    {
        union ccb* abort_ccb;

        abort_ccb = start_ccb->cab.abort_ccb;
        if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
            if (abort_ccb->ccb_h.pinfo.index >= 0) {
                struct cam_ccbq *ccbq;
                struct cam_ed *device;

                device = abort_ccb->ccb_h.path->device;
                ccbq = &device->ccbq;
                cam_ccbq_remove_ccb(ccbq, abort_ccb);
                abort_ccb->ccb_h.status =
                    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
                xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
                xpt_done(abort_ccb);
                start_ccb->ccb_h.status = CAM_REQ_CMP;
                break;
            }
            if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
             && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
                /*
                 * We've caught this ccb en route to
                 * the SIM.  Flag it for abort and the
                 * SIM will do so just before starting
                 * real work on the CCB.
                 */
                abort_ccb->ccb_h.status =
                    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
                xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
                start_ccb->ccb_h.status = CAM_REQ_CMP;
                break;
            }
        }
        if (XPT_FC_IS_QUEUED(abort_ccb)
         && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
            /*
             * It's already completed but waiting
             * for our SWI to get to it.
             */
            start_ccb->ccb_h.status = CAM_UA_ABORT;
            break;
        }
        /*
         * If we weren't able to take care of the abort request
         * in the XPT, pass the request down to the SIM for processing.
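 * (The fall-through below drops into the case list that ends at
 * the call_sim label, so the SIM's sim_action entry point gets a
 * chance to abort work the XPT could not find on its own queues.)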
*/ } /* FALLTHROUGH */ case XPT_ACCEPT_TARGET_IO: case XPT_EN_LUN: case XPT_IMMED_NOTIFY: case XPT_NOTIFY_ACK: case XPT_RESET_BUS: case XPT_IMMEDIATE_NOTIFY: case XPT_NOTIFY_ACKNOWLEDGE: case XPT_GET_SIM_KNOB_OLD: case XPT_GET_SIM_KNOB: case XPT_SET_SIM_KNOB: case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: case XPT_PATH_INQ: call_sim: sim = path->bus->sim; lock = (mtx_owned(sim->mtx) == 0); if (lock) CAM_SIM_LOCK(sim); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); (*(sim->sim_action))(sim, start_ccb); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); if (lock) CAM_SIM_UNLOCK(sim); break; case XPT_PATH_STATS: start_ccb->cpis.last_reset = path->bus->last_reset; start_ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GDEV_TYPE: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdev *cgd; cgd = &start_ccb->cgd; cgd->protocol = dev->protocol; cgd->inq_data = dev->inq_data; cgd->ident_data = dev->ident_data; cgd->inq_flags = dev->inq_flags; cgd->ccb_h.status = CAM_REQ_CMP; cgd->serial_num_len = dev->serial_num_len; if ((dev->serial_num_len > 0) && (dev->serial_num != NULL)) bcopy(dev->serial_num, cgd->serial_num, dev->serial_num_len); } break; } case XPT_GDEV_STATS: { struct cam_ed *dev; dev = path->device; if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else { struct ccb_getdevstats *cgds; struct cam_eb *bus; struct cam_et *tar; struct cam_devq *devq; cgds = &start_ccb->cgds; bus = path->bus; tar = path->target; devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cgds->dev_openings = dev->ccbq.dev_openings; cgds->dev_active = dev->ccbq.dev_active; cgds->allocated = dev->ccbq.allocated; cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; cgds->last_reset = tar->last_reset; cgds->maxtags = dev->maxtags; cgds->mintags = dev->mintags; if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) cgds->last_reset = bus->last_reset; mtx_unlock(&devq->send_mtx); cgds->ccb_h.status = CAM_REQ_CMP; } break; } case XPT_GDEVLIST: { struct cam_periph *nperiph; struct periph_list *periph_head; struct ccb_getdevlist *cgdl; u_int i; struct cam_ed *device; int found; found = 0; /* * Don't want anyone mucking with our data. */ device = path->device; periph_head = &device->periphs; cgdl = &start_ccb->cgdl; /* * Check and see if the list has changed since the user * last requested a list member. If so, tell them that the * list has changed, and therefore they need to start over * from the beginning. */ if ((cgdl->index != 0) && (cgdl->generation != device->generation)) { cgdl->status = CAM_GDEVLIST_LIST_CHANGED; break; } /* * Traverse the list of peripherals and attempt to find * the requested peripheral. 
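 *
 * cgdl->index names the next entry to hand back, so a caller can
 * enumerate the whole list by reissuing XPT_GDEVLIST until the
 * returned status is CAM_GDEVLIST_LAST_DEVICE, re-checking the
 * generation on each pass as done above.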
*/ for (nperiph = SLIST_FIRST(periph_head), i = 0; (nperiph != NULL) && (i <= cgdl->index); nperiph = SLIST_NEXT(nperiph, periph_links), i++) { if (i == cgdl->index) { strncpy(cgdl->periph_name, nperiph->periph_name, DEV_IDLEN); cgdl->unit_number = nperiph->unit_number; found = 1; } } if (found == 0) { cgdl->status = CAM_GDEVLIST_ERROR; break; } if (nperiph == NULL) cgdl->status = CAM_GDEVLIST_LAST_DEVICE; else cgdl->status = CAM_GDEVLIST_MORE_DEVS; cgdl->index++; cgdl->generation = device->generation; cgdl->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEV_MATCH: { dev_pos_type position_type; struct ccb_dev_match *cdm; cdm = &start_ccb->cdm; /* * There are two ways of getting at information in the EDT. * The first way is via the primary EDT tree. It starts * with a list of busses, then a list of targets on a bus, * then devices/luns on a target, and then peripherals on a * device/lun. The "other" way is by the peripheral driver * lists. The peripheral driver lists are organized by * peripheral driver. (obviously) So it makes sense to * use the peripheral driver list if the user is looking * for something like "da1", or all "da" devices. If the * user is looking for something on a particular bus/target * or lun, it's generally better to go through the EDT tree. */ if (cdm->pos.position_type != CAM_DEV_POS_NONE) position_type = cdm->pos.position_type; else { u_int i; position_type = CAM_DEV_POS_NONE; for (i = 0; i < cdm->num_patterns; i++) { if ((cdm->patterns[i].type == DEV_MATCH_BUS) ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ position_type = CAM_DEV_POS_EDT; break; } } if (cdm->num_patterns == 0) position_type = CAM_DEV_POS_EDT; else if (position_type == CAM_DEV_POS_NONE) position_type = CAM_DEV_POS_PDRV; } switch(position_type & CAM_DEV_POS_TYPEMASK) { case CAM_DEV_POS_EDT: xptedtmatch(cdm); break; case CAM_DEV_POS_PDRV: xptperiphlistmatch(cdm); break; default: cdm->status = CAM_DEV_MATCH_ERROR; break; } if (cdm->status == CAM_DEV_MATCH_ERROR) start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_SASYNC_CB: { struct ccb_setasync *csa; struct async_node *cur_entry; struct async_list *async_head; u_int32_t added; csa = &start_ccb->csa; added = csa->event_enable; async_head = &path->device->asyncs; /* * If there is already an entry for us, simply * update it. */ cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { if ((cur_entry->callback_arg == csa->callback_arg) && (cur_entry->callback == csa->callback)) break; cur_entry = SLIST_NEXT(cur_entry, links); } if (cur_entry != NULL) { /* * If the request has no flags set, * remove the entry. */ added &= ~cur_entry->event_enable; if (csa->event_enable == 0) { SLIST_REMOVE(async_head, cur_entry, async_node, links); xpt_release_device(path->device); free(cur_entry, M_CAMXPT); } else { cur_entry->event_enable = csa->event_enable; } csa->event_enable = added; } else { cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, M_NOWAIT); if (cur_entry == NULL) { csa->ccb_h.status = CAM_RESRC_UNAVAIL; break; } cur_entry->event_enable = csa->event_enable; cur_entry->event_lock = mtx_owned(path->bus->sim->mtx) ? 
1 : 0; cur_entry->callback_arg = csa->callback_arg; cur_entry->callback = csa->callback; SLIST_INSERT_HEAD(async_head, cur_entry, links); xpt_acquire_device(path->device); } start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_REL_SIMQ: { struct ccb_relsim *crs; struct cam_ed *dev; crs = &start_ccb->crs; dev = path->device; if (dev == NULL) { crs->ccb_h.status = CAM_DEV_NOT_THERE; break; } if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { /* Don't ever go below one opening */ if (crs->openings > 0) { xpt_dev_ccbq_resize(path, crs->openings); if (bootverbose) { xpt_print(path, "number of openings is now %d\n", crs->openings); } } } mtx_lock(&dev->sim->devq->send_mtx); if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { /* * Just extend the old timeout and decrement * the freeze count so that a single timeout * is sufficient for releasing the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; callout_stop(&dev->callout); } else { start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } callout_reset_sbt(&dev->callout, SBT_1MS * crs->release_timeout, 0, xpt_release_devq_timeout, dev, 0); dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; } if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { /* * Decrement the freeze count so that a single * completion is still sufficient to unfreeze * the queue. */ start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_COMPLETE; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 || (dev->ccbq.dev_active == 0)) { start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; } else { dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; } } mtx_unlock(&dev->sim->devq->send_mtx); if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_DEBUG: { struct cam_path *oldpath; /* Check that all request bits are supported. 
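 * (CAM_DEBUG_COMPILE is the set of debug flags compiled into this
 * kernel, e.g. enlarged by building with "options CAMDEBUG"; that
 * option name is an assumption here since it is not visible in this
 * file.  Requesting a flag outside the set fails below with
 * CAM_FUNC_NOTAVAIL.)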
*/ if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } cam_dflags = CAM_DEBUG_NONE; if (cam_dpath != NULL) { oldpath = cam_dpath; cam_dpath = NULL; xpt_free_path(oldpath); } if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, start_ccb->ccb_h.path_id, start_ccb->ccb_h.target_id, start_ccb->ccb_h.target_lun) != CAM_REQ_CMP) { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } else { cam_dflags = start_ccb->cdbg.flags; start_ccb->ccb_h.status = CAM_REQ_CMP; xpt_print(cam_dpath, "debugging flags now %x\n", cam_dflags); } } else start_ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_NOOP: if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) xpt_freeze_devq(path, 1); start_ccb->ccb_h.status = CAM_REQ_CMP; break; + case XPT_REPROBE_LUN: + xpt_async(AC_INQ_CHANGED, path, NULL); + start_ccb->ccb_h.status = CAM_REQ_CMP; + xpt_done(start_ccb); + break; default: case XPT_SDEV_TYPE: case XPT_TERM_IO: case XPT_ENG_INQ: /* XXX Implement */ printf("%s: CCB type %#x not supported\n", __func__, start_ccb->ccb_h.func_code); start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { xpt_done(start_ccb); } break; } } void xpt_polled_action(union ccb *start_ccb) { u_int32_t timeout; struct cam_sim *sim; struct cam_devq *devq; struct cam_ed *dev; timeout = start_ccb->ccb_h.timeout * 10; sim = start_ccb->ccb_h.path->bus->sim; devq = sim->devq; dev = start_ccb->ccb_h.path->device; mtx_unlock(&dev->device_mtx); /* * Steal an opening so that no other queued requests * can get it before us while we simulate interrupts. */ mtx_lock(&devq->send_mtx); dev->ccbq.dev_openings--; while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && (--timeout > 0)) { mtx_unlock(&devq->send_mtx); DELAY(100); CAM_SIM_LOCK(sim); (*(sim->sim_poll))(sim); CAM_SIM_UNLOCK(sim); camisr_runqueue(); mtx_lock(&devq->send_mtx); } dev->ccbq.dev_openings++; mtx_unlock(&devq->send_mtx); if (timeout != 0) { xpt_action(start_ccb); while(--timeout > 0) { CAM_SIM_LOCK(sim); (*(sim->sim_poll))(sim); CAM_SIM_UNLOCK(sim); camisr_runqueue(); if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; DELAY(100); } if (timeout == 0) { /* * XXX Is it worth adding a sim_timeout entry * point so we can attempt recovery? If * this is only used for dumps, I don't think * it is. */ start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; } } else { start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; } mtx_lock(&dev->device_mtx); } /* * Schedule a peripheral driver to receive a ccb when its * target device has space for more transactions. */ void xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) { CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); cam_periph_assert(periph, MA_OWNED); if (new_priority < periph->scheduled_priority) { periph->scheduled_priority = new_priority; xpt_run_allocq(periph, 0); } } /* * Schedule a device to run on a given queue. * If the device was inserted as a new entry on the queue, * return 1 meaning the device queue should be run. If we * were already queued, implying someone else has already * started the queue, return 0 so the caller doesn't attempt * to run the queue. */ static int xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, u_int32_t new_priority) { int retval; u_int32_t old_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); old_priority = pinfo->priority; /* * Are we already queued? 
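 * (pinfo->index holds the camq slot number while queued and
 * CAM_UNQUEUED_INDEX otherwise, which is exactly the test below.)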
*/ if (pinfo->index != CAM_UNQUEUED_INDEX) { /* Simply reorder based on new priority */ if (new_priority < old_priority) { camq_change_priority(queue, pinfo->index, new_priority); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("changed priority to %d\n", new_priority)); retval = 1; } else retval = 0; } else { /* New entry on the queue */ if (new_priority < old_priority) pinfo->priority = new_priority; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("Inserting onto queue\n")); pinfo->generation = ++queue->generation; camq_insert(queue, pinfo); retval = 1; } return (retval); } static void xpt_run_allocq_task(void *context, int pending) { struct cam_periph *periph = context; cam_periph_lock(periph); periph->flags &= ~CAM_PERIPH_RUN_TASK; xpt_run_allocq(periph, 1); cam_periph_unlock(periph); cam_periph_release(periph); } static void xpt_run_allocq(struct cam_periph *periph, int sleep) { struct cam_ed *device; union ccb *ccb; uint32_t prio; cam_periph_assert(periph, MA_OWNED); if (periph->periph_allocating) return; periph->periph_allocating = 1; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); device = periph->path->device; ccb = NULL; restart: while ((prio = min(periph->scheduled_priority, periph->immediate_priority)) != CAM_PRIORITY_NONE && (periph->periph_allocated - (ccb != NULL ? 1 : 0) < device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { if (ccb == NULL && (ccb = xpt_get_ccb_nowait(periph)) == NULL) { if (sleep) { ccb = xpt_get_ccb(periph); goto restart; } if (periph->flags & CAM_PERIPH_RUN_TASK) break; cam_periph_doacquire(periph); periph->flags |= CAM_PERIPH_RUN_TASK; taskqueue_enqueue(xsoftc.xpt_taskq, &periph->periph_run_task); break; } xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); if (prio == periph->immediate_priority) { periph->immediate_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("waking cam_periph_getccb()\n")); SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, periph_links.sle); wakeup(&periph->ccb_list); } else { periph->scheduled_priority = CAM_PRIORITY_NONE; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("calling periph_start()\n")); periph->periph_start(periph, ccb); } ccb = NULL; } if (ccb != NULL) xpt_release_ccb(ccb); periph->periph_allocating = 0; } static void xpt_run_devq(struct cam_devq *devq) { char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; int lock; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); devq->send_queue.qfrozen_cnt++; while ((devq->send_queue.entries > 0) && (devq->send_openings > 0) && (devq->send_queue.qfrozen_cnt <= 1)) { struct cam_ed *device; union ccb *work_ccb; struct cam_sim *sim; device = (struct cam_ed *)camq_remove(&devq->send_queue, CAMQ_HEAD); CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("running device %p\n", device)); work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); if (work_ccb == NULL) { printf("device on run queue with no ccbs???\n"); continue; } if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { mtx_lock(&xsoftc.xpt_highpower_lock); if (xsoftc.num_highpower <= 0) { /* * We got a high power command, but we * don't have any available slots. Freeze * the device queue until we have a slot * available. */ xpt_freeze_devq_device(device, 1); STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); continue; } else { /* * Consume a high power slot while * this ccb runs. 
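 *
 * (Devices parked on xsoftc.highpowerq above are presumably
 * released again when a high power CCB completes and its slot is
 * returned; that release path is outside this excerpt.)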
*/ xsoftc.num_highpower--; } mtx_unlock(&xsoftc.xpt_highpower_lock); } cam_ccbq_remove_ccb(&device->ccbq, work_ccb); cam_ccbq_send_ccb(&device->ccbq, work_ccb); devq->send_openings--; devq->send_active++; xpt_schedule_devq(devq, device); mtx_unlock(&devq->send_mtx); if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { /* * The client wants to freeze the queue * after this CCB is sent. */ xpt_freeze_devq(work_ccb->ccb_h.path, 1); } /* In Target mode, the peripheral driver knows best... */ if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { if ((device->inq_flags & SID_CmdQue) != 0 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; else /* * Clear this in case of a retried CCB that * failed due to a rejected tag. */ work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; } switch (work_ccb->ccb_h.func_code) { case XPT_SCSI_IO: CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. CDB: %s\n", scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0], &device->inq_data), scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes, cdb_str, sizeof(cdb_str)))); break; case XPT_ATA_IO: CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_CDB,("%s. ACB: %s\n", ata_op_string(&work_ccb->ataio.cmd), ata_cmd_string(&work_ccb->ataio.cmd, cdb_str, sizeof(cdb_str)))); break; default: break; } /* * Device queues can be shared among multiple SIM instances * that reside on different busses. Use the SIM from the * queued device, rather than the one from the calling bus. */ sim = device->sim; lock = (mtx_owned(sim->mtx) == 0); if (lock) CAM_SIM_LOCK(sim); work_ccb->ccb_h.qos.sim_data = sbinuptime(); // XXX uintptr_t too small on 32-bit platforms (*(sim->sim_action))(sim, work_ccb); if (lock) CAM_SIM_UNLOCK(sim); mtx_lock(&devq->send_mtx); } devq->send_queue.qfrozen_cnt--; } /* * This function merges fields from the slave ccb into the master ccb, while * keeping important fields in the master ccb constant. */ void xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) { /* * Pull fields that are valid for peripheral drivers to set * into the master CCB along with the CCB "payload". 
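 *
 * Layout note: everything in a union ccb past the common struct
 * ccb_hdr is function-specific payload, so the bcopy() below of
 * sizeof(union ccb) - sizeof(struct ccb_hdr) bytes starting at
 * &ccb_h[1] moves the whole body at once, while the header fields a
 * peripheral may legitimately set are copied field-by-field first
 * and the rest of the header (path, addressing) is left untouched.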
*/ master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); } void xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority, u_int32_t flags) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); ccb_h->pinfo.priority = priority; ccb_h->path = path; ccb_h->path_id = path->bus->path_id; if (path->target) ccb_h->target_id = path->target->target_id; else ccb_h->target_id = CAM_TARGET_WILDCARD; if (path->device) { ccb_h->target_lun = path->device->lun_id; ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; } else { ccb_h->target_lun = CAM_TARGET_WILDCARD; } ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; ccb_h->flags = flags; ccb_h->xflags = 0; } void xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) { xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); } /* Path manipulation functions */ cam_status xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_path *path; cam_status status; path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (path == NULL) { status = CAM_RESRC_UNAVAIL; return(status); } status = xpt_compile_path(path, perph, path_id, target_id, lun_id); if (status != CAM_REQ_CMP) { free(path, M_CAMPATH); path = NULL; } *new_path_ptr = path; return (status); } cam_status xpt_create_path_unlocked(struct cam_path **new_path_ptr, struct cam_periph *periph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { return (xpt_create_path(new_path_ptr, periph, path_id, target_id, lun_id)); } cam_status xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, path_id_t path_id, target_id_t target_id, lun_id_t lun_id) { struct cam_eb *bus; struct cam_et *target; struct cam_ed *device; cam_status status; status = CAM_REQ_CMP; /* Completed without error */ target = NULL; /* Wildcarded */ device = NULL; /* Wildcarded */ /* * We will potentially modify the EDT, so block interrupts * that may attempt to create cam paths. */ bus = xpt_find_bus(path_id); if (bus == NULL) { status = CAM_PATH_INVALID; } else { xpt_lock_buses(); mtx_lock(&bus->eb_mtx); target = xpt_find_target(bus, target_id); if (target == NULL) { /* Create one */ struct cam_et *new_target; new_target = xpt_alloc_target(bus, target_id); if (new_target == NULL) { status = CAM_RESRC_UNAVAIL; } else { target = new_target; } } xpt_unlock_buses(); if (target != NULL) { device = xpt_find_device(target, lun_id); if (device == NULL) { /* Create one */ struct cam_ed *new_device; new_device = (*(bus->xport->alloc_device))(bus, target, lun_id); if (new_device == NULL) { status = CAM_RESRC_UNAVAIL; } else { device = new_device; } } } mtx_unlock(&bus->eb_mtx); } /* * Only touch the user's data if we are successful. 
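 *
 * The usual consumer-side pattern (as seen elsewhere in this file;
 * error handling varies by caller):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id,
 *	    lun_id) != CAM_REQ_CMP)
 *		return (error);
 *	...use path...
 *	xpt_free_path(path);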
*/ if (status == CAM_REQ_CMP) { new_path->periph = perph; new_path->bus = bus; new_path->target = target; new_path->device = device; CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); } else { if (device != NULL) xpt_release_device(device); if (target != NULL) xpt_release_target(target); if (bus != NULL) xpt_release_bus(bus); } return (status); } cam_status xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) { struct cam_path *new_path; new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); if (new_path == NULL) return(CAM_RESRC_UNAVAIL); xpt_copy_path(new_path, path); *new_path_ptr = new_path; return (CAM_REQ_CMP); } void xpt_copy_path(struct cam_path *new_path, struct cam_path *path) { *new_path = *path; if (path->bus != NULL) xpt_acquire_bus(path->bus); if (path->target != NULL) xpt_acquire_target(path->target); if (path->device != NULL) xpt_acquire_device(path->device); } void xpt_release_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); if (path->device != NULL) { xpt_release_device(path->device); path->device = NULL; } if (path->target != NULL) { xpt_release_target(path->target); path->target = NULL; } if (path->bus != NULL) { xpt_release_bus(path->bus); path->bus = NULL; } } void xpt_free_path(struct cam_path *path) { CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); xpt_release_path(path); free(path, M_CAMPATH); } void xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) { xpt_lock_buses(); if (bus_ref) { if (path->bus) *bus_ref = path->bus->refcount; else *bus_ref = 0; } if (periph_ref) { if (path->periph) *periph_ref = path->periph->refcount; else *periph_ref = 0; } xpt_unlock_buses(); if (target_ref) { if (path->target) *target_ref = path->target->refcount; else *target_ref = 0; } if (device_ref) { if (path->device) *device_ref = path->device->refcount; else *device_ref = 0; } } /* * Return -1 for failure, 0 for exact match, 1 for match with wildcards * in path1, 2 for match with wildcards in path2. 
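 *
 * For example, writing paths as bus:target:lun with * for wildcards:
 *
 *	0:1:0 vs 0:1:0  ->  0	(exact match)
 *	0:*:* vs 0:1:0  ->  1	(wildcards in path1)
 *	0:1:0 vs 0:*:*  ->  2	(wildcards in path2)
 *	0:1:0 vs 1:1:0  -> -1	(no match possible)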
*/ int xpt_path_comp(struct cam_path *path1, struct cam_path *path2) { int retval = 0; if (path1->bus != path2->bus) { if (path1->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (path2->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path1->target != path2->target) { if (path1->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path1->device != path2->device) { if (path1->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (path2->device->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } int xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) { int retval = 0; if (path->bus != dev->target->bus) { if (path->bus->path_id == CAM_BUS_WILDCARD) retval = 1; else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) retval = 2; else return (-1); } if (path->target != dev->target) { if (path->target->target_id == CAM_TARGET_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->target->target_id == CAM_TARGET_WILDCARD) retval = 2; else return (-1); } if (path->device != dev) { if (path->device->lun_id == CAM_LUN_WILDCARD) { if (retval == 0) retval = 1; } else if (dev->lun_id == CAM_LUN_WILDCARD) retval = 2; else return (-1); } return (retval); } void xpt_print_path(struct cam_path *path) { if (path == NULL) printf("(nopath): "); else { if (path->periph != NULL) printf("(%s%d:", path->periph->periph_name, path->periph->unit_number); else printf("(noperiph:"); if (path->bus != NULL) printf("%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else printf("nobus:"); if (path->target != NULL) printf("%d:", path->target->target_id); else printf("X:"); if (path->device != NULL) printf("%jx): ", (uintmax_t)path->device->lun_id); else printf("X): "); } } void xpt_print_device(struct cam_ed *device) { if (device == NULL) printf("(nopath): "); else { printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, device->sim->unit_number, device->sim->bus_id, device->target->target_id, (uintmax_t)device->lun_id); } } void xpt_print(struct cam_path *path, const char *fmt, ...) 
{ va_list ap; xpt_print_path(path); va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); } int xpt_path_string(struct cam_path *path, char *str, size_t str_len) { struct sbuf sb; sbuf_new(&sb, str, str_len, 0); if (path == NULL) sbuf_printf(&sb, "(nopath): "); else { if (path->periph != NULL) sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, path->periph->unit_number); else sbuf_printf(&sb, "(noperiph:"); if (path->bus != NULL) sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, path->bus->sim->unit_number, path->bus->sim->bus_id); else sbuf_printf(&sb, "nobus:"); if (path->target != NULL) sbuf_printf(&sb, "%d:", path->target->target_id); else sbuf_printf(&sb, "X:"); if (path->device != NULL) sbuf_printf(&sb, "%jx): ", (uintmax_t)path->device->lun_id); else sbuf_printf(&sb, "X): "); } sbuf_finish(&sb); return(sbuf_len(&sb)); } path_id_t xpt_path_path_id(struct cam_path *path) { return(path->bus->path_id); } target_id_t xpt_path_target_id(struct cam_path *path) { if (path->target != NULL) return (path->target->target_id); else return (CAM_TARGET_WILDCARD); } lun_id_t xpt_path_lun_id(struct cam_path *path) { if (path->device != NULL) return (path->device->lun_id); else return (CAM_LUN_WILDCARD); } struct cam_sim * xpt_path_sim(struct cam_path *path) { return (path->bus->sim); } struct cam_periph* xpt_path_periph(struct cam_path *path) { return (path->periph); } /* * Release a CAM control block for the caller. Remit the cost of the structure * to the device referenced by the path. If this device had no 'credits' * and peripheral drivers have registered async callbacks for this notification, * call them now. */ void xpt_release_ccb(union ccb *free_ccb) { struct cam_ed *device; struct cam_periph *periph; CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); device = free_ccb->ccb_h.path->device; periph = free_ccb->ccb_h.path->periph; xpt_free_ccb(free_ccb); periph->periph_allocated--; cam_ccbq_release_opening(&device->ccbq); xpt_run_allocq(periph, 0); } /* Functions accessed by SIM drivers */ static struct xpt_xport xport_default = { .alloc_device = xpt_alloc_device_default, .action = xpt_action_default, .async = xpt_dev_async_default, }; /* * A sim structure, listing the SIM entry points and instance * identification info, is passed to xpt_bus_register to hook the SIM * into the CAM framework. xpt_bus_register creates a cam_eb entry * for this new bus and places it in the array of busses and assigns * it a path_id. The path_id may be influenced by "hard wiring" * information specified by the user. Once interrupt services are * available, the bus will be probed. 
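 *
 * A minimal sketch of the SIM-driver side (a hypothetical "xx"
 * driver; names, queue depth, and error handling are illustrative):
 *
 *	devq = cam_simq_alloc(MAXQ);
 *	sim = cam_sim_alloc(xxaction, xxpoll, "xx", softc,
 *	    device_get_unit(dev), &softc->lock, 1, MAXQ, devq);
 *	mtx_lock(&softc->lock);
 *	if (xpt_bus_register(sim, dev, /*bus*/0) != CAM_SUCCESS)
 *		...tear down...
 *	mtx_unlock(&softc->lock);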
*/ int32_t xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) { struct cam_eb *new_bus; struct cam_eb *old_bus; struct ccb_pathinq cpi; struct cam_path *path; cam_status status; mtx_assert(sim->mtx, MA_OWNED); sim->bus_id = bus; new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), M_CAMXPT, M_NOWAIT|M_ZERO); if (new_bus == NULL) { /* Couldn't satisfy request */ return (CAM_RESRC_UNAVAIL); } mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); TAILQ_INIT(&new_bus->et_entries); cam_sim_hold(sim); new_bus->sim = sim; timevalclear(&new_bus->last_reset); new_bus->flags = 0; new_bus->refcount = 1; /* Held until a bus_deregister event */ new_bus->generation = 0; xpt_lock_buses(); sim->path_id = new_bus->path_id = xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); while (old_bus != NULL && old_bus->path_id < new_bus->path_id) old_bus = TAILQ_NEXT(old_bus, links); if (old_bus != NULL) TAILQ_INSERT_BEFORE(old_bus, new_bus, links); else TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); /* * Set a default transport so that a PATH_INQ can be issued to * the SIM. This will then allow for probing and attaching of * a more appropriate transport. */ new_bus->xport = &xport_default; status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) { xpt_release_bus(new_bus); free(path, M_CAMXPT); return (CAM_RESRC_UNAVAIL); } xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP) { switch (cpi.transport) { case XPORT_SPI: case XPORT_SAS: case XPORT_FC: case XPORT_USB: case XPORT_ISCSI: case XPORT_SRP: case XPORT_PPB: new_bus->xport = scsi_get_xport(); break; case XPORT_ATA: case XPORT_SATA: new_bus->xport = ata_get_xport(); break; default: new_bus->xport = &xport_default; break; } } /* Notify interested parties */ if (sim->path_id != CAM_XPT_PATH_ID) { xpt_async(AC_PATH_REGISTERED, path, &cpi); if ((cpi.hba_misc & PIM_NOSCAN) == 0) { union ccb *scan_ccb; /* Initiate bus rescan. */ scan_ccb = xpt_alloc_ccb_nowait(); if (scan_ccb != NULL) { scan_ccb->ccb_h.path = path; scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; scan_ccb->crcn.flags = 0; xpt_rescan(scan_ccb); } else { xpt_print(path, "Can't allocate CCB to scan bus\n"); xpt_free_path(path); } } else xpt_free_path(path); } else xpt_free_path(path); return (CAM_SUCCESS); } int32_t xpt_bus_deregister(path_id_t pathid) { struct cam_path bus_path; cam_status status; status = xpt_compile_path(&bus_path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_async(AC_LOST_DEVICE, &bus_path, NULL); xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); /* Release the reference count held while registered. */ xpt_release_bus(bus_path.bus); xpt_release_path(&bus_path); return (CAM_REQ_CMP); } static path_id_t xptnextfreepathid(void) { struct cam_eb *bus; path_id_t pathid; const char *strval; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); pathid = 0; bus = TAILQ_FIRST(&xsoftc.xpt_busses); retry: /* Find an unoccupied pathid */ while (bus != NULL && bus->path_id <= pathid) { if (bus->path_id == pathid) pathid++; bus = TAILQ_NEXT(bus, links); } /* * Ensure that this pathid is not reserved for * a bus that may be registered in the future. 
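 *
 * "Reserved" means wired down via device.hints, e.g. (controller
 * name illustrative):
 *
 *	hint.scbus.0.at="ahc0"
 *	hint.scbus.0.bus="0"
 *
 * for which resource_string_value() returns success, pushing the
 * dynamic search past that pathid.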
*/ if (resource_string_value("scbus", pathid, "at", &strval) == 0) { ++pathid; /* Start the search over */ goto retry; } return (pathid); } static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus) { path_id_t pathid; int i, dunit, val; char buf[32]; const char *dname; pathid = CAM_XPT_PATH_ID; snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) return (pathid); i = 0; while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { if (strcmp(dname, "scbus")) { /* Avoid a bit of foot shooting. */ continue; } if (dunit < 0) /* unwired?! */ continue; if (resource_int_value("scbus", dunit, "bus", &val) == 0) { if (sim_bus == val) { pathid = dunit; break; } } else if (sim_bus == 0) { /* Unspecified matches bus 0 */ pathid = dunit; break; } else { printf("Ambiguous scbus configuration for %s%d " "bus %d, cannot wire down. The kernel " "config entry for scbus%d should " "specify a controller bus.\n" "Scbus will be assigned dynamically.\n", sim_name, sim_unit, sim_bus, dunit); break; } } if (pathid == CAM_XPT_PATH_ID) pathid = xptnextfreepathid(); return (pathid); } static const char * xpt_async_string(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return ("AC_BUS_RESET"); case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); case AC_SCSI_AEN: return ("AC_SCSI_AEN"); case AC_SENT_BDR: return ("AC_SENT_BDR"); case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); case AC_CONTRACT: return ("AC_CONTRACT"); case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); } return ("AC_UNKNOWN"); } static int xpt_async_size(u_int32_t async_code) { switch (async_code) { case AC_BUS_RESET: return (0); case AC_UNSOL_RESEL: return (0); case AC_SCSI_AEN: return (0); case AC_SENT_BDR: return (0); case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); case AC_PATH_DEREGISTERED: return (0); case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); case AC_LOST_DEVICE: return (0); case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); case AC_INQ_CHANGED: return (0); case AC_GETDEV_CHANGED: return (0); case AC_CONTRACT: return (sizeof(struct ac_contract)); case AC_ADVINFO_CHANGED: return (-1); case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); } return (0); } static int xpt_async_process_dev(struct cam_ed *device, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; void *async_arg = ccb->casync.async_arg_ptr; u_int32_t async_code = ccb->casync.async_code; int relock; if (path->device != device && path->device->lun_id != CAM_LUN_WILDCARD && device->lun_id != CAM_LUN_WILDCARD) return (1); /* * The async callback could free the device. * If it is a broadcast async, it doesn't hold * device reference, so take our own reference. */ xpt_acquire_device(device); /* * If async for specific device is to be delivered to * the wildcard client, take the specific device lock. * XXX: We may need a way for client to specify it. 
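 *
 * That is: drop the wildcard client's own device_mtx, deliver the
 * callbacks under the specific device's path lock, then restore the
 * original lock before the reference taken above is released.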
*/ if ((device->lun_id == CAM_LUN_WILDCARD && path->device->lun_id != CAM_LUN_WILDCARD) || (device->target->target_id == CAM_TARGET_WILDCARD && path->target->target_id != CAM_TARGET_WILDCARD) || (device->target->bus->path_id == CAM_BUS_WILDCARD && path->target->bus->path_id != CAM_BUS_WILDCARD)) { mtx_unlock(&device->device_mtx); xpt_path_lock(path); relock = 1; } else relock = 0; (*(device->target->bus->xport->async))(async_code, device->target->bus, device->target, device, async_arg); xpt_async_bcast(&device->asyncs, async_code, path, async_arg); if (relock) { xpt_path_unlock(path); mtx_lock(&device->device_mtx); } xpt_release_device(device); return (1); } static int xpt_async_process_tgt(struct cam_et *target, void *arg) { union ccb *ccb = arg; struct cam_path *path = ccb->ccb_h.path; if (path->target != target && path->target->target_id != CAM_TARGET_WILDCARD && target->target_id != CAM_TARGET_WILDCARD) return (1); if (ccb->casync.async_code == AC_SENT_BDR) { /* Update our notion of when the last reset occurred */ microtime(&target->last_reset); } return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); } static void xpt_async_process(struct cam_periph *periph, union ccb *ccb) { struct cam_eb *bus; struct cam_path *path; void *async_arg; u_int32_t async_code; path = ccb->ccb_h.path; async_code = ccb->casync.async_code; async_arg = ccb->casync.async_arg_ptr; CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, ("xpt_async(%s)\n", xpt_async_string(async_code))); bus = path->bus; if (async_code == AC_BUS_RESET) { /* Update our notion of when the last reset occurred */ microtime(&bus->last_reset); } xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); /* * If this wasn't a fully wildcarded async, tell all * clients that want all async events. */ if (bus != xpt_periph->path->bus) { xpt_path_lock(xpt_periph->path); xpt_async_process_dev(xpt_periph->path->device, ccb); xpt_path_unlock(xpt_periph->path); } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_release_devq(path, 1, TRUE); else xpt_release_simq(path->bus->sim, TRUE); if (ccb->casync.async_arg_size > 0) free(async_arg, M_CAMXPT); xpt_free_path(path); xpt_free_ccb(ccb); } static void xpt_async_bcast(struct async_list *async_head, u_int32_t async_code, struct cam_path *path, void *async_arg) { struct async_node *cur_entry; int lock; cur_entry = SLIST_FIRST(async_head); while (cur_entry != NULL) { struct async_node *next_entry; /* * Grab the next list entry before we call the current * entry's callback. This is because the callback function * can delete its async callback entry. 
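 *
 * (This is the open-coded equivalent of SLIST_FOREACH_SAFE(): a
 * callback that deregisters itself, e.g. via an XPT_SASYNC_CB ccb
 * with event_enable set to 0, frees cur_entry out from under us.)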
*/ next_entry = SLIST_NEXT(cur_entry, links); if ((cur_entry->event_enable & async_code) != 0) { lock = cur_entry->event_lock; if (lock) CAM_SIM_LOCK(path->device->sim); cur_entry->callback(cur_entry->callback_arg, async_code, path, async_arg); if (lock) CAM_SIM_UNLOCK(path->device->sim); } cur_entry = next_entry; } } void xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) { union ccb *ccb; int size; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { xpt_print(path, "Can't allocate CCB to send %s\n", xpt_async_string(async_code)); return; } if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { xpt_print(path, "Can't allocate path to send %s\n", xpt_async_string(async_code)); xpt_free_ccb(ccb); return; } ccb->ccb_h.path->periph = NULL; ccb->ccb_h.func_code = XPT_ASYNC; ccb->ccb_h.cbfcnp = xpt_async_process; ccb->ccb_h.flags |= CAM_UNLOCKED; ccb->casync.async_code = async_code; ccb->casync.async_arg_size = 0; size = xpt_async_size(async_code); if (size > 0 && async_arg != NULL) { ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); if (ccb->casync.async_arg_ptr == NULL) { xpt_print(path, "Can't allocate argument to send %s\n", xpt_async_string(async_code)); xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); return; } memcpy(ccb->casync.async_arg_ptr, async_arg, size); ccb->casync.async_arg_size = size; } else if (size < 0) { ccb->casync.async_arg_ptr = async_arg; ccb->casync.async_arg_size = size; } if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) xpt_freeze_devq(path, 1); else xpt_freeze_simq(path->bus->sim, 1); xpt_done(ccb); } static void xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, struct cam_ed *device, void *async_arg) { /* * We only need to handle events for real devices. */ if (target->target_id == CAM_TARGET_WILDCARD || device->lun_id == CAM_LUN_WILDCARD) return; printf("%s called\n", __func__); } static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_freeze_devq_device(%d) %u->%u\n", count, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); freeze = (dev->ccbq.queue.qfrozen_cnt += count); /* Remove frozen device from sendq. 
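 *
 * Freeze counts nest; each freeze must eventually be balanced, or
 * the device stops issuing I/O for good:
 *
 *	xpt_freeze_devq(path, 1);
 *	...recovery or reconfiguration...
 *	xpt_release_devq(path, 1, /*run_queue*/TRUE);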
*/ if (device_is_queued(dev)) camq_remove(&devq->send_queue, dev->devq_entry.index); return (freeze); } u_int32_t xpt_freeze_devq(struct cam_path *path, u_int count) { struct cam_ed *dev = path->device; struct cam_devq *devq; uint32_t freeze; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); freeze = xpt_freeze_devq_device(dev, count); mtx_unlock(&devq->send_mtx); return (freeze); } u_int32_t xpt_freeze_simq(struct cam_sim *sim, u_int count) { struct cam_devq *devq; uint32_t freeze; devq = sim->devq; mtx_lock(&devq->send_mtx); freeze = (devq->send_queue.qfrozen_cnt += count); mtx_unlock(&devq->send_mtx); return (freeze); } static void xpt_release_devq_timeout(void *arg) { struct cam_ed *dev; struct cam_devq *devq; dev = (struct cam_ed *)arg; CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); devq = dev->sim->devq; mtx_assert(&devq->send_mtx, MA_OWNED); if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) xpt_run_devq(devq); } void xpt_release_devq(struct cam_path *path, u_int count, int run_queue) { struct cam_ed *dev; struct cam_devq *devq; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", count, run_queue)); dev = path->device; devq = dev->sim->devq; mtx_lock(&devq->send_mtx); if (xpt_release_devq_device(dev, count, run_queue)) xpt_run_devq(dev->sim->devq); mtx_unlock(&devq->send_mtx); } static int xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) { mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); if (count > dev->ccbq.queue.qfrozen_cnt) { #ifdef INVARIANTS printf("xpt_release_devq(): requested %u > present %u\n", count, dev->ccbq.queue.qfrozen_cnt); #endif count = dev->ccbq.queue.qfrozen_cnt; } dev->ccbq.queue.qfrozen_cnt -= count; if (dev->ccbq.queue.qfrozen_cnt == 0) { /* * No longer need to wait for a successful * command completion. */ dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; /* * Remove any timeouts that might be scheduled * to release this queue. */ if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { callout_stop(&dev->callout); dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; } /* * Now that we are unfrozen schedule the * device so any pending transactions are * run. */ xpt_schedule_devq(dev->sim->devq, dev); } else run_queue = 0; return (run_queue); } void xpt_release_simq(struct cam_sim *sim, int run_queue) { struct cam_devq *devq; devq = sim->devq; mtx_lock(&devq->send_mtx); if (devq->send_queue.qfrozen_cnt <= 0) { #ifdef INVARIANTS printf("xpt_release_simq: requested 1 > present %u\n", devq->send_queue.qfrozen_cnt); #endif } else devq->send_queue.qfrozen_cnt--; if (devq->send_queue.qfrozen_cnt == 0) { /* * If there is a timeout scheduled to release this * sim queue, remove it. The queue frozen count is * already at 0. */ if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ callout_stop(&sim->callout); sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; } if (run_queue) { /* * Now that we are unfrozen run the send queue. */ xpt_run_devq(sim->devq); } } mtx_unlock(&devq->send_mtx); } /* * XXX Appears to be unused. 
*/ static void xpt_release_simq_timeout(void *arg) { struct cam_sim *sim; sim = (struct cam_sim *)arg; xpt_release_simq(sim, /* run_queue */ TRUE); } void xpt_done(union ccb *done_ccb) { struct cam_doneq *queue; int run, hash; CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + done_ccb->ccb_h.target_lun) % cam_num_doneqs; queue = &cam_doneqs[hash]; mtx_lock(&queue->cam_doneq_mtx); run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; mtx_unlock(&queue->cam_doneq_mtx); if (run) wakeup(&queue->cam_doneq); } void xpt_done_direct(union ccb *done_ccb) { CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done_direct\n")); if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) return; /* Store the time the ccb was in the sim */ done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; xpt_done_process(&done_ccb->ccb_h); } union ccb * xpt_alloc_ccb() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); return (new_ccb); } union ccb * xpt_alloc_ccb_nowait() { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); return (new_ccb); } void xpt_free_ccb(union ccb *free_ccb) { free(free_ccb, M_CAMCCB); } /* Private XPT functions */ /* * Get a CAM control block for the caller. Charge the structure to the device * referenced by the path. If we don't have sufficient resources to allocate * more ccbs, we return NULL. 
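 *
 * Peripheral drivers normally do not call the allocators below
 * directly: they are either handed a CCB in their periph_start()
 * routine after calling xpt_schedule(), or they block for one with
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);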
*/ static union ccb * xpt_get_ccb_nowait(struct cam_periph *periph) { union ccb *new_ccb; new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); if (new_ccb == NULL) return (NULL); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } static union ccb * xpt_get_ccb(struct cam_periph *periph) { union ccb *new_ccb; cam_periph_unlock(periph); new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); cam_periph_lock(periph); periph->periph_allocated++; cam_ccbq_take_opening(&periph->path->device->ccbq); return (new_ccb); } union ccb * cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) { struct ccb_hdr *ccb_h; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); cam_periph_assert(periph, MA_OWNED); while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || ccb_h->pinfo.priority != priority) { if (priority < periph->immediate_priority) { periph->immediate_priority = priority; xpt_run_allocq(periph, 0); } else cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, "cgticb", 0); } SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); return ((union ccb *)ccb_h); } static void xpt_acquire_bus(struct cam_eb *bus) { xpt_lock_buses(); bus->refcount++; xpt_unlock_buses(); } static void xpt_release_bus(struct cam_eb *bus) { xpt_lock_buses(); KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); if (--bus->refcount > 0) { xpt_unlock_buses(); return; } TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); xsoftc.bus_generation++; xpt_unlock_buses(); KASSERT(TAILQ_EMPTY(&bus->et_entries), ("destroying bus, but target list is not empty")); cam_sim_release(bus->sim); mtx_destroy(&bus->eb_mtx); free(bus, M_CAMXPT); } static struct cam_et * xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *cur_target, *target; mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); mtx_assert(&bus->eb_mtx, MA_OWNED); target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, M_NOWAIT|M_ZERO); if (target == NULL) return (NULL); TAILQ_INIT(&target->ed_entries); target->bus = bus; target->target_id = target_id; target->refcount = 1; target->generation = 0; target->luns = NULL; mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); timevalclear(&target->last_reset); /* * Hold a reference to our parent bus so it * will not go away before we do. 
bus->refcount++; /* Insertion sort into our bus's target list */ cur_target = TAILQ_FIRST(&bus->et_entries); while (cur_target != NULL && cur_target->target_id < target_id) cur_target = TAILQ_NEXT(cur_target, links); if (cur_target != NULL) { TAILQ_INSERT_BEFORE(cur_target, target, links); } else { TAILQ_INSERT_TAIL(&bus->et_entries, target, links); } bus->generation++; return (target); } static void xpt_acquire_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); target->refcount++; mtx_unlock(&bus->eb_mtx); } static void xpt_release_target(struct cam_et *target) { struct cam_eb *bus = target->bus; mtx_lock(&bus->eb_mtx); if (--target->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&bus->et_entries, target, links); bus->generation++; mtx_unlock(&bus->eb_mtx); KASSERT(TAILQ_EMPTY(&target->ed_entries), ("destroying target, but device list is not empty")); xpt_release_bus(bus); mtx_destroy(&target->luns_mtx); if (target->luns) free(target->luns, M_CAMXPT); free(target, M_CAMXPT); } static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; device = xpt_alloc_device(bus, target, lun_id); if (device == NULL) return (NULL); device->mintags = 1; device->maxtags = 1; return (device); } static void xpt_destroy_device(void *context, int pending) { struct cam_ed *device = context; mtx_lock(&device->device_mtx); mtx_destroy(&device->device_mtx); free(device, M_CAMDEV); } struct cam_ed * xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) { struct cam_ed *cur_device, *device; struct cam_devq *devq; cam_status status; mtx_assert(&bus->eb_mtx, MA_OWNED); /* Make space for us in the device queue on our bus */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); status = cam_devq_resize(devq, devq->send_queue.array_size + 1); mtx_unlock(&devq->send_mtx); if (status != CAM_REQ_CMP) return (NULL); device = (struct cam_ed *)malloc(sizeof(*device), M_CAMDEV, M_NOWAIT|M_ZERO); if (device == NULL) return (NULL); cam_init_pinfo(&device->devq_entry); device->target = target; device->lun_id = lun_id; device->sim = bus->sim; if (cam_ccbq_init(&device->ccbq, bus->sim->max_dev_openings) != 0) { free(device, M_CAMDEV); return (NULL); } SLIST_INIT(&device->asyncs); SLIST_INIT(&device->periphs); device->generation = 0; device->flags = CAM_DEV_UNCONFIGURED; device->tag_delay_count = 0; device->tag_saved_openings = 0; device->refcount = 1; mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); callout_init_mtx(&device->callout, &devq->send_mtx, 0); TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); /* * Hold a reference to our parent target so it * will not go away before we do. 
*/ target->refcount++; cur_device = TAILQ_FIRST(&target->ed_entries); while (cur_device != NULL && cur_device->lun_id < lun_id) cur_device = TAILQ_NEXT(cur_device, links); if (cur_device != NULL) TAILQ_INSERT_BEFORE(cur_device, device, links); else TAILQ_INSERT_TAIL(&target->ed_entries, device, links); target->generation++; return (device); } void xpt_acquire_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; mtx_lock(&bus->eb_mtx); device->refcount++; mtx_unlock(&bus->eb_mtx); } void xpt_release_device(struct cam_ed *device) { struct cam_eb *bus = device->target->bus; struct cam_devq *devq; mtx_lock(&bus->eb_mtx); if (--device->refcount > 0) { mtx_unlock(&bus->eb_mtx); return; } TAILQ_REMOVE(&device->target->ed_entries, device,links); device->target->generation++; mtx_unlock(&bus->eb_mtx); /* Release our slot in the devq */ devq = bus->sim->devq; mtx_lock(&devq->send_mtx); cam_devq_resize(devq, devq->send_queue.array_size - 1); mtx_unlock(&devq->send_mtx); KASSERT(SLIST_EMPTY(&device->periphs), ("destroying device, but periphs list is not empty")); KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, ("destroying device while still queued for ccbs")); if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) callout_stop(&device->callout); xpt_release_target(device->target); cam_ccbq_fini(&device->ccbq); /* * Free allocated memory. free(9) does nothing if the * supplied pointer is NULL, so it is safe to call without * checking. */ free(device->supported_vpds, M_CAMXPT); free(device->device_id, M_CAMXPT); free(device->ext_inq, M_CAMXPT); free(device->physpath, M_CAMXPT); free(device->rcap_buf, M_CAMXPT); free(device->serial_num, M_CAMXPT); taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); } u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) { int result; struct cam_ed *dev; dev = path->device; mtx_lock(&dev->sim->devq->send_mtx); result = cam_ccbq_resize(&dev->ccbq, newopenings); mtx_unlock(&dev->sim->devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 || (dev->inq_flags & SID_CmdQue) != 0) dev->tag_saved_openings = newopenings; return (result); } static struct cam_eb * xpt_find_bus(path_id_t path_id) { struct cam_eb *bus; xpt_lock_buses(); for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); bus != NULL; bus = TAILQ_NEXT(bus, links)) { if (bus->path_id == path_id) { bus->refcount++; break; } } xpt_unlock_buses(); return (bus); } static struct cam_et * xpt_find_target(struct cam_eb *bus, target_id_t target_id) { struct cam_et *target; mtx_assert(&bus->eb_mtx, MA_OWNED); for (target = TAILQ_FIRST(&bus->et_entries); target != NULL; target = TAILQ_NEXT(target, links)) { if (target->target_id == target_id) { target->refcount++; break; } } return (target); } static struct cam_ed * xpt_find_device(struct cam_et *target, lun_id_t lun_id) { struct cam_ed *device; mtx_assert(&target->bus->eb_mtx, MA_OWNED); for (device = TAILQ_FIRST(&target->ed_entries); device != NULL; device = TAILQ_NEXT(device, links)) { if (device->lun_id == lun_id) { device->refcount++; break; } } return (device); } void xpt_start_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; int newopenings; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; xpt_freeze_devq(path, /*count*/1); device->inq_flags |= SID_CmdQue; if (device->tag_saved_openings != 0) newopenings = device->tag_saved_openings; else newopenings = min(device->maxtags, sim->max_tagged_dev_openings); 
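	/*
	 * Grow the device's opening count to the tagged depth chosen
	 * above and tell interested parties that the effective queue
	 * depth changed.
	 */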
xpt_dev_ccbq_resize(path, newopenings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } void xpt_stop_tags(struct cam_path *path) { struct ccb_relsim crs; struct cam_ed *device; struct cam_sim *sim; device = path->device; sim = path->bus->sim; device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; device->tag_delay_count = 0; xpt_freeze_devq(path, /*count*/1); device->inq_flags &= ~SID_CmdQue; xpt_dev_ccbq_resize(path, sim->max_dev_openings); xpt_async(AC_GETDEV_CHANGED, path, NULL); xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; crs.openings = crs.release_timeout = crs.qfrozen_cnt = 0; xpt_action((union ccb *)&crs); } static void xpt_boot_delay(void *arg) { xpt_release_boot(); } static void xpt_config(void *arg) { /* * Now that interrupts are enabled, go find our devices */ if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) printf("xpt_config: failed to create taskqueue thread.\n"); /* Setup debugging path */ if (cam_dflags != CAM_DEBUG_NONE) { if (xpt_create_path(&cam_dpath, NULL, CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN) != CAM_REQ_CMP) { printf("xpt_config: xpt_create_path() failed for debug" " target %d:%d:%d, debugging disabled\n", CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); cam_dflags = CAM_DEBUG_NONE; } } else cam_dpath = NULL; periphdriver_init(1); xpt_hold_boot(); callout_init(&xsoftc.boot_callout, 1); callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, xpt_boot_delay, NULL, 0); /* Fire up rescan thread. */ if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, "cam", "scanner")) { printf("xpt_config: failed to create rescan thread.\n"); } } void xpt_hold_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config++; xpt_unlock_buses(); } void xpt_release_boot(void) { xpt_lock_buses(); xsoftc.buses_to_config--; if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { struct xpt_task *task; xsoftc.buses_config_done = 1; xpt_unlock_buses(); /* Call manually because we don't have any busses */ task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); if (task != NULL) { TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); taskqueue_enqueue(taskqueue_thread, &task->task); } } else xpt_unlock_buses(); } /* * If the given device only has one peripheral attached to it, and if that * peripheral is the passthrough driver, announce it. This ensures that the * user sees some sort of announcement for every peripheral in their system. */ static int xptpassannouncefunc(struct cam_ed *device, void *arg) { struct cam_periph *periph; int i; for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; periph = SLIST_NEXT(periph, periph_links), i++); periph = SLIST_FIRST(&device->periphs); if ((i == 1) && (strncmp(periph->periph_name, "pass", 4) == 0)) xpt_announce_periph(periph, NULL); return(1); } static void xpt_finishconfig_task(void *context, int pending) { periphdriver_init(2); /* * Check for devices with no "standard" peripheral driver * attached. For any devices like that, announce the * passthrough driver so the user will see something. */ if (!bootverbose) xpt_for_all_devices(xptpassannouncefunc, NULL); /* Release our hook so that the boot can continue. 
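 *
 * This balances the config_intrhook_establish() done early in boot;
 * until we disestablish it, the boot process waits for CAM's initial
 * bus scans to settle.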
*/ config_intrhook_disestablish(xsoftc.xpt_config_hook); free(xsoftc.xpt_config_hook, M_CAMXPT); xsoftc.xpt_config_hook = NULL; free(context, M_CAMXPT); } cam_status xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, struct cam_path *path) { struct ccb_setasync csa; cam_status status; int xptpath = 0; if (path == NULL) { status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (status != CAM_REQ_CMP) return (status); xpt_path_lock(path); xptpath = 1; } xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = event; csa.callback = cbfunc; csa.callback_arg = cbarg; xpt_action((union ccb *)&csa); status = csa.ccb_h.status; if (xptpath) { xpt_path_unlock(path); xpt_free_path(path); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_FOUND_DEVICE)) { /* * Get this peripheral up to date with all * the currently existing devices. */ xpt_for_all_devices(xptsetasyncfunc, &csa); } if ((status == CAM_REQ_CMP) && (csa.event_enable & AC_PATH_REGISTERED)) { /* * Get this peripheral up to date with all * the currently existing busses. */ xpt_for_all_busses(xptsetasyncbusfunc, &csa); } return (status); } static void xptaction(struct cam_sim *sim, union ccb *work_ccb) { CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); switch (work_ccb->ccb_h.func_code) { /* Common cases first */ case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi; cpi = &work_ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "", HBA_IDLEN); strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); cpi->unit_number = sim->unit_number; cpi->bus_id = sim->bus_id; cpi->base_transfer_speed = 0; cpi->protocol = PROTO_UNSPECIFIED; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->transport = XPORT_UNSPECIFIED; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->ccb_h.status = CAM_REQ_CMP; xpt_done(work_ccb); break; } default: work_ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(work_ccb); break; } } /* * The xpt as a "controller" has no interrupt sources, so polling * is a no-op. */ static void xptpoll(struct cam_sim *sim) { } void xpt_lock_buses(void) { mtx_lock(&xsoftc.xpt_topo_lock); } void xpt_unlock_buses(void) { mtx_unlock(&xsoftc.xpt_topo_lock); } struct mtx * xpt_path_mtx(struct cam_path *path) { return (&path->device->device_mtx); } static void xpt_done_process(struct ccb_hdr *ccb_h) { struct cam_sim *sim; struct cam_devq *devq; struct mtx *mtx = NULL; if (ccb_h->flags & CAM_HIGH_POWER) { struct highpowerlist *hphead; struct cam_ed *device; mtx_lock(&xsoftc.xpt_highpower_lock); hphead = &xsoftc.highpowerq; device = STAILQ_FIRST(hphead); /* * Increment the count since this command is done. */ xsoftc.num_highpower++; /* * Any high powered commands queued up? 
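 *
 * If so, thaw the first waiter: it re-enters xpt_run_devq() and
 * retries the CAM_HIGH_POWER admission check there, mirroring the
 * slot consumed when the command was dispatched.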
*/ if (device != NULL) { STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); mtx_unlock(&xsoftc.xpt_highpower_lock); mtx_lock(&device->sim->devq->send_mtx); xpt_release_devq_device(device, /*count*/1, /*runqueue*/TRUE); mtx_unlock(&device->sim->devq->send_mtx); } else mtx_unlock(&xsoftc.xpt_highpower_lock); } sim = ccb_h->path->bus->sim; if (ccb_h->status & CAM_RELEASE_SIMQ) { xpt_release_simq(sim, /*run_queue*/FALSE); ccb_h->status &= ~CAM_RELEASE_SIMQ; } if ((ccb_h->flags & CAM_DEV_QFRZDIS) && (ccb_h->status & CAM_DEV_QFRZN)) { xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); ccb_h->status &= ~CAM_DEV_QFRZN; } devq = sim->devq; if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { struct cam_ed *dev = ccb_h->path->device; mtx_lock(&devq->send_mtx); devq->send_active--; devq->send_openings++; cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 && (dev->ccbq.dev_active == 0))) { dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; xpt_release_devq_device(dev, /*count*/1, /*run_queue*/FALSE); } if (!device_is_queued(dev)) (void)xpt_schedule_devq(devq, dev); xpt_run_devq(devq); mtx_unlock(&devq->send_mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 && (--dev->tag_delay_count == 0)) xpt_start_tags(ccb_h->path); } } if ((ccb_h->flags & CAM_UNLOCKED) == 0) { if (mtx == NULL) { mtx = xpt_path_mtx(ccb_h->path); mtx_lock(mtx); } } else { if (mtx != NULL) { mtx_unlock(mtx); mtx = NULL; } } /* Call the peripheral driver's callback */ ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); if (mtx != NULL) mtx_unlock(mtx); } void xpt_done_td(void *arg) { struct cam_doneq *queue = arg; struct ccb_hdr *ccb_h; STAILQ_HEAD(, ccb_hdr) doneq; STAILQ_INIT(&doneq); mtx_lock(&queue->cam_doneq_mtx); while (1) { while (STAILQ_EMPTY(&queue->cam_doneq)) { queue->cam_doneq_sleep = 1; msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, PRIBIO, "-", 0); queue->cam_doneq_sleep = 0; } STAILQ_CONCAT(&doneq, &queue->cam_doneq); mtx_unlock(&queue->cam_doneq_mtx); THREAD_NO_SLEEPING(); while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); xpt_done_process(ccb_h); } THREAD_SLEEPING_OK(); mtx_lock(&queue->cam_doneq_mtx); } } static void camisr_runqueue(void) { struct ccb_hdr *ccb_h; struct cam_doneq *queue; int i; /* Process global queues. */ for (i = 0; i < cam_num_doneqs; i++) { queue = &cam_doneqs[i]; mtx_lock(&queue->cam_doneq_mtx); while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); mtx_unlock(&queue->cam_doneq_mtx); xpt_done_process(ccb_h); mtx_lock(&queue->cam_doneq_mtx); } mtx_unlock(&queue->cam_doneq_mtx); } } Index: head/sys/cam/scsi/scsi_da.c =================================================================== --- head/sys/cam/scsi/scsi_da.c (revision 299370) +++ head/sys/cam/scsi/scsi_da.c (revision 299371) @@ -1,4164 +1,4169 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include #ifndef _KERNEL #include #endif /* !_KERNEL */ #ifdef _KERNEL typedef enum { DA_STATE_PROBE_RC, DA_STATE_PROBE_RC16, DA_STATE_PROBE_LBP, DA_STATE_PROBE_BLK_LIMITS, DA_STATE_PROBE_BDC, DA_STATE_PROBE_ATA, DA_STATE_NORMAL } da_state; typedef enum { DA_FLAG_PACK_INVALID = 0x001, DA_FLAG_NEW_PACK = 0x002, DA_FLAG_PACK_LOCKED = 0x004, DA_FLAG_PACK_REMOVABLE = 0x008, DA_FLAG_NEED_OTAG = 0x020, DA_FLAG_WAS_OTAG = 0x040, DA_FLAG_RETRY_UA = 0x080, DA_FLAG_OPEN = 0x100, DA_FLAG_SCTX_INIT = 0x200, DA_FLAG_CAN_RC16 = 0x400, DA_FLAG_PROBED = 0x800, DA_FLAG_DIRTY = 0x1000, DA_FLAG_ANNOUNCED = 0x2000 } da_flags; typedef enum { DA_Q_NONE = 0x00, DA_Q_NO_SYNC_CACHE = 0x01, DA_Q_NO_6_BYTE = 0x02, DA_Q_NO_PREVENT = 0x04, DA_Q_4K = 0x08, DA_Q_NO_RC16 = 0x10, DA_Q_NO_UNMAP = 0x20, DA_Q_RETRY_BUSY = 0x40 } da_quirks; #define DA_Q_BIT_STRING \ "\020" \ "\001NO_SYNC_CACHE" \ "\002NO_6_BYTE" \ "\003NO_PREVENT" \ "\0044K" \ "\005NO_RC16" \ "\006NO_UNMAP" \ "\007RETRY_BUSY" typedef enum { DA_CCB_PROBE_RC = 0x01, DA_CCB_PROBE_RC16 = 0x02, DA_CCB_PROBE_LBP = 0x03, DA_CCB_PROBE_BLK_LIMITS = 0x04, DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, DA_CCB_DUMP = 0x0A, DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, DA_CCB_TYPE_MASK = 0x0F, DA_CCB_RETRY_UA = 0x10 } da_ccb_state; /* * Order here is important for method choice * * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker deletes * using ATA_TRIM than the corresponding UNMAP results for a real-world mysql * import taking 5mins. 
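 *
 * The chosen method is exposed and can be overridden per device at
 * runtime, e.g. (unit number illustrative):
 *
 *	sysctl kern.cam.da.0.delete_method=UNMAP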
* */ typedef enum { DA_DELETE_NONE, DA_DELETE_DISABLE, DA_DELETE_ATA_TRIM, DA_DELETE_UNMAP, DA_DELETE_WS16, DA_DELETE_WS10, DA_DELETE_ZERO, DA_DELETE_MIN = DA_DELETE_ATA_TRIM, DA_DELETE_MAX = DA_DELETE_ZERO } da_delete_methods; typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, struct bio *bp); static da_delete_func_t da_delete_trim; static da_delete_func_t da_delete_unmap; static da_delete_func_t da_delete_ws; static const void * da_delete_functions[] = { NULL, NULL, da_delete_trim, da_delete_unmap, da_delete_ws, da_delete_ws, da_delete_ws }; static const char *da_delete_method_names[] = { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; static const char *da_delete_method_desc[] = { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", "WRITE SAME(10) with UNMAP", "ZERO" }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct disk_params { u_int8_t heads; u_int32_t cylinders; u_int8_t secs_per_track; u_int32_t secsize; /* Number of bytes/sector */ u_int64_t sectors; /* total number sectors */ u_int stripesize; u_int stripeoffset; }; #define UNMAP_RANGE_MAX 0xffffffff #define UNMAP_HEAD_SIZE 8 #define UNMAP_RANGE_SIZE 16 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ u_int maxio; uint32_t unmap_max_ranges; uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ uint64_t ws_max_blks; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int unmappedio; int rotating; struct disk_params params; struct disk *disk; union ccb saved_ccb; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif }; #define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ } struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; }; static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP"; static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. 
*/ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so better wait. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY }, /* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. 
audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory. 
* PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, 
SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcend USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
/*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive designed by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair
Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series 
SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; static void dadone(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static timeout_t dasendorderedtag; static void dashutdown(void *arg, int howto); static timeout_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); 
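/* * A usage sketch (values here are illustrative only): most of the knobs above are CTLFLAG_RWTUN, so they can be changed at runtime, e.g. `sysctl kern.cam.da.default_timeout=90`, or preloaded from loader.conf; default_softtimeout takes milliseconds and gets its tunable behaviour from the explicit TUNABLE_INT64 below instead. */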
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case, where a starved transaction starts during an interval that * passes the "don't send an ordered tag" test, so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. */ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } cam_periph_unhold(periph); cam_periph_unlock(periph); if (error != 0) cam_periph_release(periph); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (cam_periph_hold(periph, PRIBIO) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); if (error == 0) softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); cam_periph_unhold(periph); } /* * If we've got removable media, mark the blocksize as * unavailable, since it could change when new media is * inserted.
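* devstat consumers will then see DEVSTAT_BS_UNAVAILABLE until the size is probed again on the next open.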
*/ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. */ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, dadone, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } /* * Sync the disk cache contents to the physical media. 
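* Note that this runs in the polled dump path: the SYNCHRONIZE CACHE(10) below is issued via xpt_polled_action() rather than the normal completion machinery, with begin_lba and lb_count of 0 so it addresses the whole device.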
*/ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... 
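* (a failure from sysctl_ctx_free() is only reported via xpt_print(); the periph is being torn down regardless).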
*/ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC && SID_TYPE(&cgd->inq_data) != T_OPTICAL) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, * as they will be handled by daerror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } else if (asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. 
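* Marking the softc and every pending CCB with the RETRY_UA flags lets the error recovery path retry those commands when the unit attention arrives, instead of failing them up the stack.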
*/ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } + case AC_INQ_CHANGED: + softc = (struct da_softc *)periph->softc; + softc->flags &= ~DA_FLAG_PROBED; + dareprobe(periph); + break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[80], tmpstr2[80]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release(periph); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= DA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); /* * Add some addressing info. */ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { cam_periph_release(periph); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. 
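* (transport errors, device timeouts and pack invalidations, matching the counters in the softc; see the sysctl descriptions below).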
* XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values no larger than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. */ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available &
(1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else cam_periph_release_locked(periph); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. */ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. */ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, methods, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = softc->delete_available | (1 << DA_DELETE_DISABLE); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_RC; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
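* Quirk matching is done by cam_quirkmatch() via scsi_inquiry_match(), comparing the device's INQUIRY vendor/product/revision strings against the shell-style patterns ('*', '?') used in da_quirk_table above.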
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive refcount on the periph while dastart is called * to finish the probe. The reference will be dropped in dadone at * the end of probe. */ (void)cam_periph_hold(periph, PRIBIO); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size < 6) softc->minimum_cmd_size = 6; else if ((softc->minimum_cmd_size > 6) && (softc->minimum_cmd_size <= 10)) softc->minimum_cmd_size = 10; else if ((softc->minimum_cmd_size > 10) && (softc->minimum_cmd_size <= 12)) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; softc->state = DA_STATE_PROBE_RC16; } /* * Register this media as a disk. 
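* disk_create() below publishes the provider to GEOM; the d_open/d_close/d_strategy/d_dump/d_getattr/d_gone methods hooked up here route GEOM requests back into this periph.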
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; xpt_print(periph->path, "UNMAPPED\n"); } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them, and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | - AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION, - daasync, periph, periph->path); + AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | + AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule periodic media polling events.
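* Polling is armed below only for removable media on devices that do not advertise async event notification (SID_AEN), and only while kern.cam.da.poll_period is non-zero.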
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* Not sure this is possible, but failsafe by lying and saying "sure, done." */ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); cam_periph_release_locked(periph); /* XXX is this still valid? I think so but unverified */ } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); break; } case BIO_FLUSH: /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. We also force * ordered tag semantics so the flush applies * to all previously queued I/O.
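* (As in the dump path above, a begin_lba and lb_count of 0 tell SYNCHRONIZE CACHE(10) to cover the entire medium.)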
scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_ORDERED_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); break; } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here, we don't support any SBC-3 delete * methods with UNMAP, as Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31. * However, older revisions of the spec don't mandate * this, so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported, skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph???
*/ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { daprobedone(periph, start_ccb); break; } ata_params = (struct ata_params*) malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO); if (ata_params == NULL) { printf("dastart: Couldn't malloc ata_params data\n"); /* da_free_periph??? */ break; } scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } } } /* * In each of the methods below, while it's the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits. * * To prevent this causing further issues, we validate the * request against the method's limits and warn when it must * be truncated, both of which would otherwise be unnecessary. */ static void da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lba, lastlba = (uint64_t)-1; uint64_t totalcount = 0; uint64_t count; uint32_t lastcount = 0, c; uint32_t off, ranges = 0; /* * Currently this doesn't take the UNMAP * Granularity and Granularity Alignment * fields into account. * * This could result in suboptimal unmap * requests as well as UNMAP calls unmapping * fewer LBAs than requested. */ bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { /* * Note: ada and da are different in how they store the * pending bp's in a trim. ada stores all of them in the * trim_req.bps. da stores all but the first one in the * delete_run_queue. ada then completes all the bps in * its adadone() loop. da completes all the bps in the * delete_run_queue in dadone, and relies on the biodone * after to complete. This should be reconciled since there's * no real reason to do it differently. XXX */ if (bp1 != bp) bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; /* Try to extend the previous range.
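* For reference, the parameter data being built in unmap_buf is laid out as sketched here: an 8-byte header holding the UNMAP data length (ranges * 16 + 6) in bytes 0-1 and the block descriptor data length (ranges * 16) in bytes 2-3, followed by one 16-byte descriptor per range with a big-endian 8-byte LBA at descriptor offset 0 and a 4-byte block count at offset 8; those are the fields the scsi_ulto2b()/scsi_u64to8b()/scsi_ulto4b() calls below fill in.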
*/ if (lba == lastlba) { c = omin(count, UNMAP_RANGE_MAX - lastcount); lastcount += c; off = ((ranges - 1) * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE; scsi_ulto4b(lastcount, &buf[off + 8]); count -= c; lba += c; totalcount += c; } while (count > 0) { c = omin(count, UNMAP_RANGE_MAX); if (totalcount + c > softc->unmap_max_lba || ranges >= softc->unmap_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld " "|| %d >= %d\n", da_delete_method_desc[softc->delete_method], totalcount + c, softc->unmap_max_lba, ranges, softc->unmap_max_ranges); break; } off = (ranges * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE; scsi_u64to8b(lba, &buf[off + 0]); scsi_ulto4b(c, &buf[off + 8]); lba += c; totalcount += c; ranges++; count -= c; lastcount = c; } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (ranges >= softc->unmap_max_ranges || totalcount + bp1->bio_bcount / softc->params.secsize > softc->unmap_max_lba) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range.
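* Each ATA DSM range entry built below is 8 bytes and little-endian by the byte stores: a 48-bit LBA in bytes 0-5 and a 16-bit block count in bytes 6-7, so a single entry can cover at most ATA_DSM_RANGE_MAX blocks.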
static void
da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
{
	struct da_softc *softc = (struct da_softc *)periph->softc;
	struct bio *bp1;
	uint8_t *buf = softc->unmap_buf;
	uint64_t lastlba = (uint64_t)-1;
	uint64_t count;
	uint64_t lba;
	uint32_t lastcount = 0, c, requestcount;
	int ranges = 0, off, block_count;

	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
	bp1 = bp;
	do {
		if (bp1 != bp)//XXX imp XXX
			bioq_insert_tail(&softc->delete_run_queue, bp1);
		lba = bp1->bio_pblkno;
		count = bp1->bio_bcount / softc->params.secsize;
		requestcount = count;

		/* Try to extend the previous range. */
		if (lba == lastlba) {
			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
			lastcount += c;
			off = (ranges - 1) * 8;
			buf[off + 6] = lastcount & 0xff;
			buf[off + 7] = (lastcount >> 8) & 0xff;
			count -= c;
			lba += c;
		}

		while (count > 0) {
			c = omin(count, ATA_DSM_RANGE_MAX);
			off = ranges * 8;

			buf[off + 0] = lba & 0xff;
			buf[off + 1] = (lba >> 8) & 0xff;
			buf[off + 2] = (lba >> 16) & 0xff;
			buf[off + 3] = (lba >> 24) & 0xff;
			buf[off + 4] = (lba >> 32) & 0xff;
			buf[off + 5] = (lba >> 40) & 0xff;
			buf[off + 6] = c & 0xff;
			buf[off + 7] = (c >> 8) & 0xff;
			lba += c;
			ranges++;
			count -= c;
			lastcount = c;
			if (count != 0 && ranges == softc->trim_max_ranges) {
				xpt_print(periph->path,
				    "%s issuing short delete %ld > %ld\n",
				    da_delete_method_desc[softc->delete_method],
				    requestcount,
				    (softc->trim_max_ranges - ranges) *
				    ATA_DSM_RANGE_MAX);
				break;
			}
		}
		lastlba = lba;
		bp1 = cam_iosched_next_trim(softc->cam_iosched);
		if (bp1 == NULL)
			break;
		if (bp1->bio_bcount / softc->params.secsize >
		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
			break;
		}
	} while (1);

	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
	scsi_ata_trim(&ccb->csio,
		      /*retries*/da_retry_count,
		      /*cbfcnp*/dadone,
		      /*tag_action*/MSG_SIMPLE_Q_TAG,
		      block_count,
		      /*data_ptr*/buf,
		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
		      /*sense_len*/SSD_FULL_SIZE,
		      da_default_timeout * 1000);
	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
	ccb->ccb_h.flags |= CAM_UNLOCKED;
	cam_iosched_submit_trim(softc->cam_iosched);
}
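/*
 * The byte stores above implement the ATA DATA SET MANAGEMENT (TRIM)
 * range format: each entry is 8 bytes, little-endian, with a 48-bit
 * starting LBA in bytes 0-5 and a 16-bit sector count in bytes 6-7
 * (hence ATA_DSM_RANGE_MAX of 0xffff).  Entries are then padded out to
 * whole 512-byte payload blocks of ATA_DSM_BLK_RANGES (64) entries each,
 * which is the howmany() computation for block_count.  Compiled-out
 * sketch of packing one entry; the "example_" name is hypothetical.
 */
#if 0
#include <stdint.h>

/* Pack one TRIM range entry: 48-bit LBA + 16-bit count, little-endian. */
static void
example_pack_dsm_range(uint8_t entry[8], uint64_t lba, uint16_t nsectors)
{
	int i;

	for (i = 0; i < 6; i++)			/* bytes 0-5: starting LBA */
		entry[i] = (lba >> (8 * i)) & 0xff;
	entry[6] = nsectors & 0xff;		/* bytes 6-7: sector count */
	entry[7] = (nsectors >> 8) & 0xff;
}
#endif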
/*
 * We calculate ws_max_blks here based on d_delmaxsize instead of using
 * softc->ws_max_blks, as the latter is the absolute maximum for the
 * device, not the protocol maximum, which may well be lower.
 */
static void
da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
{
	struct da_softc *softc;
	struct bio *bp1;
	uint64_t ws_max_blks;
	uint64_t lba;
	uint64_t count; /* forward compat with WS32 */

	softc = (struct da_softc *)periph->softc;
	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
	lba = bp->bio_pblkno;
	count = 0;
	bp1 = bp;
	do {
		if (bp1 != bp)//XXX imp XXX
			bioq_insert_tail(&softc->delete_run_queue, bp1);
		count += bp1->bio_bcount / softc->params.secsize;
		if (count > ws_max_blks) {
			xpt_print(periph->path,
			    "%s issuing short delete %ld > %ld\n",
			    da_delete_method_desc[softc->delete_method],
			    count, ws_max_blks);
			count = omin(count, ws_max_blks);
			break;
		}
		bp1 = cam_iosched_next_trim(softc->cam_iosched);
		if (bp1 == NULL)
			break;
		if (lba + count != bp1->bio_pblkno ||
		    count + bp1->bio_bcount /
		    softc->params.secsize > ws_max_blks) {
			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
			break;
		}
	} while (1);

	scsi_write_same(&ccb->csio,
			/*retries*/da_retry_count,
			/*cbfcnp*/dadone,
			/*tag_action*/MSG_SIMPLE_Q_TAG,
			/*byte2*/softc->delete_method ==
			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
			/*lba*/lba,
			/*block_count*/count,
			/*data_ptr*/ __DECONST(void *, zero_region),
			/*dxfer_len*/ softc->params.secsize,
			/*sense_len*/SSD_FULL_SIZE,
			da_default_timeout * 1000);
	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
	ccb->ccb_h.flags |= CAM_UNLOCKED;
	cam_iosched_submit_trim(softc->cam_iosched);
}
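/*
 * da_delete_ws() deletes by writing a single all-zero sector
 * (zero_region) across the whole LBA span: with SWS_UNMAP set the device
 * may deallocate the blocks, while DA_DELETE_ZERO asks for a plain zero
 * fill.  Only WRITE SAME(16) carries a 64-bit LBA and a 32-bit count;
 * WRITE SAME(10) is limited to a 32-bit LBA and a 16-bit count, which is
 * one reason requests are clamped to d_delmaxsize.  Compiled-out
 * decision sketch under those SBC limits; the "example_" name is
 * hypothetical.
 */
#if 0
#include <stdint.h>

/* Pick the smallest WRITE SAME CDB size that can address the request. */
static int
example_ws_cdb_size(uint64_t lba, uint64_t nblocks)
{
	if (lba + nblocks - 1 > 0xffffffffULL || nblocks > 0xffff)
		return (16);	/* needs WRITE SAME(16) */
	return (10);		/* WRITE SAME(10) suffices */
}
#endif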
static int
cmd6workaround(union ccb *ccb)
{
	struct scsi_rw_6 cmd6;
	struct scsi_rw_10 *cmd10;
	struct da_softc *softc;
	u_int8_t *cdb;
	struct bio *bp;
	int frozen;

	cdb = ccb->csio.cdb_io.cdb_bytes;
	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;

	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
		da_delete_methods old_method = softc->delete_method;

		/*
		 * Typically there are two reasons for failure here:
		 * 1. Delete method was detected as supported but isn't.
		 * 2. Delete failed due to invalid params e.g. too big.
		 *
		 * While we will attempt to choose an alternative delete method
		 * this may result in short deletes if the existing delete
		 * requests from geom are too big for the new method chosen.
		 *
		 * This method assumes that the error which triggered this
		 * will not retry the io, otherwise a panic will occur.
		 */
		dadeleteflag(softc, old_method, 0);
		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
		if (softc->delete_method == DA_DELETE_DISABLE)
			xpt_print(ccb->ccb_h.path,
				  "%s failed, disabling BIO_DELETE\n",
				  da_delete_method_desc[old_method]);
		else
			xpt_print(ccb->ccb_h.path,
				  "%s failed, switching to %s BIO_DELETE\n",
				  da_delete_method_desc[old_method],
				  da_delete_method_desc[softc->delete_method]);

		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
			cam_iosched_queue_work(softc->cam_iosched, bp);
		cam_iosched_queue_work(softc->cam_iosched,
		    (struct bio *)ccb->ccb_h.ccb_bp);
		ccb->ccb_h.ccb_bp = NULL;
		return (0);
	}

	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
	    (*cdb == PREVENT_ALLOW) &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
		if (bootverbose)
			xpt_print(ccb->ccb_h.path,
			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
		softc->quirks |= DA_Q_NO_PREVENT;
		return (0);
	}

	/* Detect unsupported SYNCHRONIZE CACHE(10). */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
	    (*cdb == SYNCHRONIZE_CACHE) &&
	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		if (bootverbose)
			xpt_print(ccb->ccb_h.path,
			    "SYNCHRONIZE CACHE(10) not supported.\n");
		softc->quirks |= DA_Q_NO_SYNC_CACHE;
		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
		return (0);
	}

	/* Translation only possible if CDB is an array and cmd is R/W6. */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
	    (*cdb != READ_6 && *cdb != WRITE_6))
		return (0);

	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
	    "increasing minimum_cmd_size to 10.\n");
	softc->minimum_cmd_size = 10;

	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
	cmd10 = (struct scsi_rw_10 *)cdb;
	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
	cmd10->byte2 = 0;
	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
	cmd10->reserved = 0;
	scsi_ulto2b(cmd6.length, cmd10->length);
	cmd10->control = cmd6.control;
	ccb->csio.cdb_len = sizeof(*cmd10);

	/* Requeue request, unfreezing queue if necessary. */
	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_action(ccb);
	if (frozen) {
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
	}
	return (ERESTART);
}
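/*
 * The READ(6)/WRITE(6) rewrite above relies on the 10-byte CDB being a
 * strict superset of the 6-byte one: a 21-bit LBA and an 8-bit transfer
 * length (where 0 means 256 blocks, per SBC) widen to a 32-bit LBA and a
 * 16-bit length.  The compiled-out sketch performs the same widening on
 * raw CDB bytes; it is illustrative only, and the "example_" name is
 * hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

/* Widen a 6-byte R/W CDB into the equivalent 10-byte form. */
static void
example_rw6_to_rw10(const uint8_t cdb6[6], uint8_t cdb10[10])
{
	/* 21-bit LBA: low 5 bits of byte 1, then bytes 2 and 3. */
	uint32_t lba = ((uint32_t)(cdb6[1] & 0x1f) << 16) |
	    ((uint32_t)cdb6[2] << 8) | cdb6[3];
	/* 8-bit length; in the 6-byte CDB a value of 0 means 256 blocks. */
	uint16_t len = (cdb6[4] == 0) ? 256 : cdb6[4];

	memset(cdb10, 0, 10);
	cdb10[0] = (cdb6[0] == 0x08) ? 0x28 : 0x2a; /* READ/WRITE(10) */
	cdb10[2] = lba >> 24;
	cdb10[3] = lba >> 16;
	cdb10[4] = lba >> 8;
	cdb10[5] = lba;
	cdb10[7] = len >> 8;
	cdb10[8] = len;
	cdb10[9] = cdb6[5];			    /* CONTROL byte */
}
#endif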
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	u_int32_t priority;
	da_ccb_state state;

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));

	csio = &done_ccb->csio;
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
	switch (state) {
	case DA_CCB_BUFFER_IO:
	case DA_CCB_DELETE:
	{
		struct bio *bp, *bp1;

		cam_periph_lock(periph);
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				cam_periph_unlock(periph);
				return;
			}
			bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
			if (error != 0) {
				int queued_error;

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				queued_error = EIO;

				if (error == ENXIO &&
				    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
					/*
					 * Catastrophic error. Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
					softc->invalidations++;
#endif
					queued_error = ENXIO;
				}
				cam_iosched_flush(softc->cam_iosched, NULL,
				    queued_error);
				if (bp != NULL) {
					bp->bio_error = error;
					bp->bio_resid = bp->bio_bcount;
					bp->bio_flags |= BIO_ERROR;
				}
			} else if (bp != NULL) {
				if (state == DA_CCB_DELETE)
					bp->bio_resid = 0;
				else
					bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else if (bp != NULL) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			if (state == DA_CCB_DELETE)
				bp->bio_resid = 0;
			else
				bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
			if (softc->error_inject != 0) {
				bp->bio_error = softc->error_inject;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
				softc->error_inject = 0;
			}
		}

		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		if (LIST_EMPTY(&softc->pending_ccbs))
			softc->flags |= DA_FLAG_WAS_OTAG;

		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
		xpt_release_ccb(done_ccb);
		if (state == DA_CCB_DELETE) {
			TAILQ_HEAD(, bio) queue;

			TAILQ_INIT(&queue);
			TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue,
			    bio_queue);
			softc->delete_run_queue.insert_point = NULL;
			/*
			 * Normally, the xpt_release_ccb() above would make
			 * sure that when we have more work to do, that work
			 * would get kicked off. However, we specifically keep
			 * delete_running set to 0 before the call above to
			 * allow other I/O to progress when many BIO_DELETE
			 * requests are pushed down, and call daschedule again
			 * so that we don't stall if there are no other I/Os
			 * pending apart from BIO_DELETEs.
			 */
			cam_iosched_trim_done(softc->cam_iosched);
			daschedule(periph);
			cam_periph_unlock(periph);
			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
				TAILQ_REMOVE(&queue, bp1, bio_queue);
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR) {
					bp1->bio_flags |= BIO_ERROR;
					bp1->bio_resid = bp1->bio_bcount;
				} else
					bp1->bio_resid = 0;
				biodone(bp1);
			}
		} else {
			daschedule(periph);
			cam_periph_unlock(periph);
		}
		if (bp != NULL)
			biodone(bp);
		return;
	}
	case DA_CCB_PROBE_RC:
	case DA_CCB_PROBE_RC16:
	{
		struct scsi_read_capacity_data *rdcap;
		struct scsi_read_capacity_data_long *rcaplong;
		char announce_buf[80];
		int lbp;

		lbp = 0;
		rdcap = NULL;
		rcaplong = NULL;
		if (state == DA_CCB_PROBE_RC)
			rdcap = (struct scsi_read_capacity_data *)
			    csio->data_ptr;
		else
			rcaplong = (struct scsi_read_capacity_data_long *)
			    csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;
			uint32_t block_size;
			uint64_t maxsector;
			u_int lalba;	/* Lowest aligned LBA. */

			if (state == DA_CCB_PROBE_RC) {
				block_size = scsi_4btoul(rdcap->length);
				maxsector = scsi_4btoul(rdcap->addr);
				lalba = 0;

				/*
				 * According to SBC-2, if the standard 10
				 * byte READ CAPACITY command returns 2^32,
				 * we should issue the 16 byte version of
				 * the command, since the device in question
				 * has more sectors than can be represented
				 * with the short version of the command.
				 */
				if (maxsector == 0xffffffff) {
					free(rdcap, M_SCSIDA);
					xpt_release_ccb(done_ccb);
					softc->state = DA_STATE_PROBE_RC16;
					xpt_schedule(periph, priority);
					return;
				}
			} else {
				block_size = scsi_4btoul(rcaplong->length);
				maxsector = scsi_8btou64(rcaplong->addr);
				lalba = scsi_2btoul(rcaplong->lalba_lbp);
			}

			/*
			 * Because GEOM code just will panic us if we
			 * give them an 'illegal' value we'll avoid that
			 * here.
			 */
			if (block_size == 0) {
				block_size = 512;
				if (maxsector == 0)
					maxsector = -1;
			}
			if (block_size >= MAXPHYS) {
				xpt_print(periph->path,
				    "unsupportable block size %ju\n",
				    (uintmax_t) block_size);
				announce_buf[0] = '\0';
				cam_periph_invalidate(periph);
			} else {
				/*
				 * We pass rcaplong into dasetgeom(),
				 * because it will only use it if it is
				 * non-NULL.
				 */
				dasetgeom(periph, block_size, maxsector,
					  rcaplong, sizeof(*rcaplong));
				lbp = (lalba & SRC16_LBPME_A);
				dp = &softc->params;
				snprintf(announce_buf, sizeof(announce_buf),
				    "%juMB (%ju %u byte sectors)",
				    ((uintmax_t)dp->secsize * dp->sectors) /
				     (1024 * 1024),
				    (uintmax_t)dp->sectors, dp->secsize);
			}
		} else {
			int error;

			announce_buf[0] = '\0';

			/*
			 * Retry any UNIT ATTENTION type errors. They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue. */
				status = done_ccb->ccb_h.status;
				if ((status & CAM_DEV_QFRZN) != 0)
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);

				xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
					      CAM_PRIORITY_NORMAL);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (scsi_extract_sense_ccb(done_ccb,
				    &error_code, &sense_key, &asc, &ascq))
					have_sense = TRUE;
				else
					have_sense = FALSE;

				/*
				 * If we tried READ CAPACITY(16) and failed,
				 * fall back to READ CAPACITY(10).
				 */
				if ((state == DA_CCB_PROBE_RC16) &&
				    (softc->flags & DA_FLAG_CAN_RC16) &&
				    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
					CAM_REQ_INVALID) ||
				     ((have_sense) &&
				      (error_code == SSD_CURRENT_ERROR) &&
				      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
					softc->flags &= ~DA_FLAG_CAN_RC16;
					free(csio->data_ptr, M_SCSIDA);
					xpt_release_ccb(done_ccb);
					softc->state = DA_STATE_PROBE_RC;
					xpt_schedule(periph, priority);
					return;
				}

				/*
				 * Attach to anything that claims to be a
				 * direct access or optical disk device,
				 * as long as it doesn't return a "Logical
				 * unit not supported" (0x25) error.
				 */
				if ((have_sense) && (asc != 0x25) &&
				    (error_code == SSD_CURRENT_ERROR)) {
					const char *sense_key_desc;
					const char *asc_desc;

					dasetgeom(periph, 512, -1, NULL, 0);
					scsi_sense_desc(sense_key, asc, ascq,
							&cgd.inq_data,
							&sense_key_desc,
							&asc_desc);
					snprintf(announce_buf,
					    sizeof(announce_buf),
					    "Attempt to query device "
					    "size failed: %s, %s",
					    sense_key_desc, asc_desc);
				} else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print(periph->path,
						    "got CAM status %#x\n",
						    done_ccb->ccb_h.status);
					}

					xpt_print(periph->path, "fatal error, "
					    "failed to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
				}
			}
		}
		free(csio->data_ptr, M_SCSIDA);
		if (announce_buf[0] != '\0' &&
		    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
			/*
			 * Create our sysctl variables, now that we know
			 * we have successfully attached.
			 */
			/* increase the refcount */
			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
				taskqueue_enqueue(taskqueue_thread,
						  &softc->sysctl_task);
				xpt_announce_periph(periph, announce_buf);
				xpt_announce_quirks(periph, softc->quirks,
				    DA_Q_BIT_STRING);
			} else {
				xpt_print(periph->path, "fatal error, "
				    "could not acquire reference count\n");
			}
		}

		/* We already probed the device. */
		if (softc->flags & DA_FLAG_PROBED) {
			daprobedone(periph, done_ccb);
			return;
		}

		/* Ensure re-probe doesn't see old delete. */
		softc->delete_available = 0;
		dadeleteflag(softc, DA_DELETE_ZERO, 1);
		if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
			/*
			 * Based on older SBC-3 spec revisions
			 * any of the UNMAP methods "may" be
			 * available via LBP given this flag so
			 * we flag all of them as available and
			 * then remove those which further
			 * probes confirm aren't available
			 * later.
			 *
			 * We could also check readcap(16) p_type
			 * flag to exclude one or more invalid
			 * write same (X) types here.
			 */
			dadeleteflag(softc, DA_DELETE_WS16, 1);
			dadeleteflag(softc, DA_DELETE_WS10, 1);
			dadeleteflag(softc, DA_DELETE_UNMAP, 1);

			xpt_release_ccb(done_ccb);
			softc->state = DA_STATE_PROBE_LBP;
			xpt_schedule(periph, priority);
			return;
		}

		xpt_release_ccb(done_ccb);
		softc->state = DA_STATE_PROBE_BDC;
		xpt_schedule(periph, priority);
		return;
	}
	case DA_CCB_PROBE_LBP:
	{
		struct scsi_vpd_logical_block_prov *lbp;

		lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			/*
			 * T10/1799-D Revision 31 states at least one of these
			 * must be supported but we don't currently enforce
			 * this.
			 */
			dadeleteflag(softc, DA_DELETE_WS16,
				     (lbp->flags & SVPD_LBP_WS16));
			dadeleteflag(softc, DA_DELETE_WS10,
				     (lbp->flags & SVPD_LBP_WS10));
			dadeleteflag(softc, DA_DELETE_UNMAP,
				     (lbp->flags & SVPD_LBP_UNMAP));
		} else {
			int error;

			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART)
				return;
			else if (error != 0) {
				if ((done_ccb->ccb_h.status &
				     CAM_DEV_QFRZN) != 0) {
					/* Don't wedge this device's queue */
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);
				}

				/*
				 * Failure indicates we don't support any SBC-3
				 * delete methods with UNMAP
				 */
			}
		}

		free(lbp, M_SCSIDA);
		xpt_release_ccb(done_ccb);
		softc->state = DA_STATE_PROBE_BLK_LIMITS;
		xpt_schedule(periph, priority);
		return;
	}
	case DA_CCB_PROBE_BLK_LIMITS:
	{
		struct scsi_vpd_block_limits *block_limits;

		block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			uint32_t max_txfer_len = scsi_4btoul(
				block_limits->max_txfer_len);
			uint32_t max_unmap_lba_cnt = scsi_4btoul(
				block_limits->max_unmap_lba_cnt);
			uint32_t max_unmap_blk_cnt = scsi_4btoul(
				block_limits->max_unmap_blk_cnt);
			uint64_t ws_max_blks = scsi_8btou64(
				block_limits->max_write_same_length);

			if (max_txfer_len != 0) {
				softc->disk->d_maxsize = MIN(softc->maxio,
				    (off_t)max_txfer_len *
				    softc->params.secsize);
			}

			/*
			 * We should already support UNMAP but we check lba
			 * and block count to be sure
			 */
			if (max_unmap_lba_cnt != 0x00L &&
			    max_unmap_blk_cnt != 0x00L) {
				softc->unmap_max_lba = max_unmap_lba_cnt;
				softc->unmap_max_ranges =
				    min(max_unmap_blk_cnt, UNMAP_MAX_RANGES);
			} else {
				/*
				 * Unexpected UNMAP limits which means the
				 * device doesn't actually support UNMAP
				 */
				dadeleteflag(softc, DA_DELETE_UNMAP, 0);
			}

			if (ws_max_blks != 0x00L)
				softc->ws_max_blks = ws_max_blks;
		} else {
			int error;

			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART)
				return;
			else if (error != 0) {
				if ((done_ccb->ccb_h.status &
				     CAM_DEV_QFRZN) != 0) {
					/* Don't wedge this device's queue */
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);
				}

				/*
				 * Failure here doesn't mean UNMAP is not
				 * supported as this is an optional page.
				 */
				softc->unmap_max_lba = 1;
				softc->unmap_max_ranges = 1;
			}
		}

		free(block_limits, M_SCSIDA);
		xpt_release_ccb(done_ccb);
		softc->state = DA_STATE_PROBE_BDC;
		xpt_schedule(periph, priority);
		return;
	}
	case DA_CCB_PROBE_BDC:
	{
		struct scsi_vpd_block_characteristics *bdc;

		bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			/*
			 * Disable queue sorting for non-rotational media
			 * by default.
			 */
			u_int16_t old_rate = softc->disk->d_rotation_rate;

			softc->disk->d_rotation_rate =
				scsi_2btoul(bdc->medium_rotation_rate);
			if (softc->disk->d_rotation_rate ==
			    SVPD_BDC_RATE_NON_ROTATING) {
				cam_iosched_set_sort_queue(softc->cam_iosched,
				    0);
				softc->rotating = 0;
			}
			if (softc->disk->d_rotation_rate != old_rate) {
				disk_attr_changed(softc->disk,
				    "GEOM::rotation_rate", M_NOWAIT);
			}
		} else {
			int error;

			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART)
				return;
			else if (error != 0) {
				if ((done_ccb->ccb_h.status &
				     CAM_DEV_QFRZN) != 0) {
					/* Don't wedge this device's queue */
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);
				}
			}
		}

		free(bdc, M_SCSIDA);
		xpt_release_ccb(done_ccb);
		softc->state = DA_STATE_PROBE_ATA;
		xpt_schedule(periph, priority);
		return;
	}
	case DA_CCB_PROBE_ATA:
	{
		int i;
		struct ata_params *ata_params;
		uint16_t *ptr;

		ata_params = (struct ata_params *)csio->data_ptr;
		ptr = (uint16_t *)ata_params;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			uint16_t old_rate;

			for (i = 0; i < sizeof(*ata_params) / 2; i++)
				ptr[i] = le16toh(ptr[i]);
			if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
			    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
				dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
				if (ata_params->max_dsm_blocks != 0)
					softc->trim_max_ranges = min(
					  softc->trim_max_ranges,
					  ata_params->max_dsm_blocks *
					  ATA_DSM_BLK_RANGES);
			}
			/*
			 * Disable queue sorting for non-rotational media
			 * by default.
			 */
			old_rate = softc->disk->d_rotation_rate;
			softc->disk->d_rotation_rate =
				ata_params->media_rotation_rate;
			if (softc->disk->d_rotation_rate ==
			    ATA_RATE_NON_ROTATING) {
				cam_iosched_set_sort_queue(softc->cam_iosched,
				    0);
				softc->rotating = 0;
			}
			if (softc->disk->d_rotation_rate != old_rate) {
				disk_attr_changed(softc->disk,
				    "GEOM::rotation_rate", M_NOWAIT);
			}
		} else {
			int error;

			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART)
				return;
			else if (error != 0) {
				if ((done_ccb->ccb_h.status &
				     CAM_DEV_QFRZN) != 0) {
					/* Don't wedge this device's queue */
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);
				}
			}
		}

		free(ata_params, M_SCSIDA);
		daprobedone(periph, done_ccb);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling. */
		return;
	case DA_CCB_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (daerror(done_ccb, CAM_RETRY_SELTO,
			    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
			    ERESTART)
				return;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		}
		xpt_release_ccb(done_ccb);
		cam_periph_release_locked(periph);
		return;
	}
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}
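/*
 * The DA_CCB_PROBE_RC/RC16 handling above implements both directions of
 * the READ CAPACITY dance: a READ CAPACITY(10) answer of 0xffffffff
 * escalates to READ CAPACITY(16), and an ILLEGAL REQUEST response to the
 * 16-byte form drops DA_FLAG_CAN_RC16 and falls back to the 10-byte one.
 * Compiled-out sketch of the capacity decision in isolation; "example_"
 * names are hypothetical.
 */
#if 0
#include <stdint.h>

/*
 * Derive the sector count: a last-LBA of ~0 from READ CAPACITY(10)
 * means "bigger than 32 bits", so prefer the RC(16) answer if we got one.
 */
static uint64_t
example_probe_capacity(uint32_t rc10_last_lba, int have_rc16,
    uint64_t rc16_last_lba)
{
	if (rc10_last_lba == 0xffffffffU && have_rc16)
		return (rc16_last_lba + 1);
	return ((uint64_t)rc10_last_lba + 1);
}
#endif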
static void
dareprobe(struct cam_periph *periph)
{
	struct da_softc *softc;
	cam_status status;

	softc = (struct da_softc *)periph->softc;

	/* Probe in progress; don't interfere. */
	if (softc->state != DA_STATE_NORMAL)
		return;

	status = cam_periph_acquire(periph);
	KASSERT(status == CAM_REQ_CMP,
	    ("dareprobe: cam_periph_acquire failed"));

	if (softc->flags & DA_FLAG_CAN_RC16)
		softc->state = DA_STATE_PROBE_RC16;
	else
		softc->state = DA_STATE_PROBE_RC;

	xpt_schedule(periph, CAM_PRIORITY_DEV);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}
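/*
 * daerror() above keys its special cases off the (sense key, ASC, ASCQ)
 * triple: UNIT ATTENTION 2A/09 "capacity data has changed" and 3F/03
 * "INQUIRY data has changed" trigger a reprobe, 28/00 reports possibly
 * new media, and NOT READY 3A/xx "medium not present" invalidates the
 * pack.  Compiled-out sketch of the same triage as a pure function;
 * "example_" names are hypothetical.
 */
#if 0
enum example_action {
	EX_NONE,
	EX_REPROBE,
	EX_MEDIA_CHANGED,
	EX_MEDIA_GONE
};

/* Map a unit-attention/not-ready sense triple onto a driver action. */
static enum example_action
example_sense_triage(int sense_key, int asc, int ascq)
{
	if (sense_key == 0x06) {			/* UNIT ATTENTION */
		if (asc == 0x2a && ascq == 0x09)	/* capacity changed */
			return (EX_REPROBE);
		if (asc == 0x3f && ascq == 0x03)	/* INQUIRY changed */
			return (EX_REPROBE);
		if (asc == 0x28 && ascq == 0x00)	/* medium changed */
			return (EX_MEDIA_CHANGED);
	}
	if (sense_key == 0x02 && asc == 0x3a)		/* medium not present */
		return (EX_MEDIA_GONE);
	return (EX_NONE);
}
#endif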
static void
damediapoll(void *arg)
{
	struct cam_periph *periph = arg;
	struct da_softc *softc = periph->softc;

	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
	    LIST_EMPTY(&softc->pending_ccbs)) {
		if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
			cam_iosched_set_work_flags(softc->cam_iosched,
			    DA_WORK_TUR);
			daschedule(periph);
		}
	}
	/* Queue us up again */
	if (da_poll_period != 0)
		callout_schedule(&softc->mediapoll_c,
		    da_poll_period * hz);
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW) &&
	     (softc->flags & DA_FLAG_PACK_LOCKED) == 0) ||
	    ((action == PR_PREVENT) &&
	     (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbfcnp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
	} else {
		lbppbe = 0;
		lalba = 0;
	}

	if (lbppbe > 0) {
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here, but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL) &&
	    (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
				  "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize *
	    (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n",
		    error);
}
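/*
 * The stripe math in dasetgeom() comes straight from READ CAPACITY(16):
 * LBPPBE is the logical-blocks-per-physical-block exponent, so the
 * physical stripe is block_len << lbppbe, and LALBA (lowest aligned LBA)
 * says which logical block starts a physical one, giving
 * (stripesize - block_len * lalba) % stripesize as the offset.  For
 * example, 512-byte logical blocks with lbppbe = 3 and lalba = 1 yield a
 * 4096-byte stripe at offset 3584.  Compiled-out recomputation of the
 * same arithmetic; the "example_" name is hypothetical.
 */
#if 0
#include <stdint.h>

static void
example_stripe(uint32_t block_len, unsigned lbppbe, unsigned lalba,
    uint64_t *stripesize, uint64_t *stripeoffset)
{
	*stripesize = (uint64_t)block_len << lbppbe;
	*stripeoffset = (*stripesize - (uint64_t)block_len * lalba) %
	    *stripesize;
}
#endif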
static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc = arg;

	if (da_send_ordered) {
		if (!LIST_EMPTY(&softc->pending_ccbs)) {
			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
				softc->flags |= DA_FLAG_NEED_OTAG;
			softc->flags &= ~DA_FLAG_WAS_OTAG;
		}
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	CAM_PERIPH_FOREACH(periph, &dadriver) {
		softc = (struct da_softc *)periph->softc;
		if (SCHEDULER_STOPPED()) {
			/* If we panicked with the lock held, do not recurse. */
			if (!cam_periph_owned(periph) &&
			    (softc->flags & DA_FLAG_OPEN)) {
				dadump(softc->disk, NULL, 0, 0, 0);
			}
			continue;
		}
		cam_periph_lock(periph);

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0) ||
		    (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
			cam_periph_unlock(periph);
			continue;
		}

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY |
		    SF_QUIET_IR, softc->disk->d_devstat);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
		xpt_release_ccb(ccb);
		cam_periph_unlock(periph);
	}
}
#else /* !_KERNEL */

/*
 * XXX These are only left out of the kernel build to silence warnings.  If,
 * for some reason these functions are used in the kernel, the ifdefs should
 * be moved so they are included both in the kernel and userland.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

void
scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  uint8_t tag_action, uint8_t list_format,
		  uint32_t addr_desc_index, uint8_t *data_ptr,
		  uint32_t dxfer_len, int minimum_cmd_size,
		  uint8_t sense_len, uint32_t timeout)
{
	uint8_t cdb_len;

	/*
	 * These conditions allow using the 10 byte command.  Otherwise we
	 * need to use the 12 byte command.
	 */
	if ((minimum_cmd_size <= 10) &&
	    (addr_desc_index == 0) &&
	    (dxfer_len <= SRDD10_MAX_LENGTH)) {
		struct scsi_read_defect_data_10 *cdb10;

		cdb10 = (struct scsi_read_defect_data_10 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb10);
		bzero(cdb10, cdb_len);
		cdb10->opcode = READ_DEFECT_DATA_10;
		cdb10->format = list_format;
		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
	} else {
		struct scsi_read_defect_data_12 *cdb12;

		cdb12 = (struct scsi_read_defect_data_12 *)
			&csio->cdb_io.cdb_bytes;

		cdb_len = sizeof(*cdb12);
		bzero(cdb12, cdb_len);
		cdb12->opcode = READ_DEFECT_DATA_12;
		cdb12->format = list_format;
		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
	}

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ CAM_DIR_IN,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      cdb_len,
		      timeout);
}

void
scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
	      void (*cbfcnp)(struct cam_periph *, union ccb *),
	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
	      u_int32_t timeout)
{
	struct scsi_sanitize *scsi_cmd;

	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = SANITIZE;
	scsi_cmd->byte2 = byte2;
	scsi_cmd->control = control;
	scsi_ulto2b(dxfer_len, scsi_cmd->length);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */
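/*
 * A userland consumer such as camcontrol(8) reaches the builders above
 * through libcam: open the pass(4) device, let scsi_read_defects() fill
 * the CCB, and push it with cam_send_ccb().  Compiled-out sketch under
 * those assumptions; buffer sizing, sense decoding, and the device path
 * are placeholders, and the "example_" name is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <camlib.h>
#include <cam/scsi/scsi_da.h>
#include <cam/scsi/scsi_message.h>

static int
example_read_defects(const char *path)	/* e.g. a /dev/pass* node */
{
	struct cam_device *dev;
	union ccb *ccb;
	uint8_t buf[4096];
	int error = -1;

	if ((dev = cam_open_device(path, O_RDWR)) == NULL)
		return (-1);
	if ((ccb = cam_getccb(dev)) != NULL) {
		scsi_read_defects(&ccb->csio,
				  /*retries*/1,
				  /*cbfcnp*/NULL,
				  /*tag_action*/MSG_SIMPLE_Q_TAG,
				  /*list_format*/SRDD10_PHYSICAL_SECTOR_FORMAT,
				  /*addr_desc_index*/0,
				  /*data_ptr*/buf,
				  /*dxfer_len*/sizeof(buf),
				  /*minimum_cmd_size*/10,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/30 * 1000);
		error = cam_send_ccb(dev, ccb);
		cam_freeccb(ccb);
	}
	cam_close_device(dev);
	return (error);
}
#endif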