Index: head/sbin/camcontrol/Makefile =================================================================== --- head/sbin/camcontrol/Makefile (revision 300206) +++ head/sbin/camcontrol/Makefile (revision 300207) @@ -1,18 +1,18 @@ # $FreeBSD$ PACKAGE=runtime PROG= camcontrol SRCS= camcontrol.c util.c .if !defined(RELEASE_CRUNCH) -SRCS+= attrib.c fwdownload.c modeedit.c persist.c progress.c +SRCS+= attrib.c epc.c fwdownload.c modeedit.c persist.c progress.c zone.c .else CFLAGS+= -DMINIMALISTIC .endif # This is verboten .if ${MACHINE_CPUARCH} == "arm" WARNS?= 3 .endif LIBADD= cam sbuf util MAN= camcontrol.8 .include <bsd.prog.mk> Index: head/sbin/camcontrol/camcontrol.8 =================================================================== --- head/sbin/camcontrol/camcontrol.8 (revision 300206) +++ head/sbin/camcontrol/camcontrol.8 (revision 300207) @@ -1,2391 +1,2785 @@ .\" .\" Copyright (c) 1998, 1999, 2000, 2002, 2005, 2006, 2007 Kenneth D. Merry. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. The name of the author may not be used to endorse or promote products .\" derived from this software without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE.
.\" .\" $FreeBSD$ .\" -.Dd April 26, 2016 +.Dd May 16, 2016 .Dt CAMCONTROL 8 .Os .Sh NAME .Nm camcontrol .Nd CAM control program .Sh SYNOPSIS .Nm .Aq Ar command .Op device id .Op generic args .Op command args .Nm .Ic devlist .Op Fl b .Op Fl v .Nm .Ic periphlist .Op device id .Op Fl n Ar dev_name .Op Fl u Ar unit_number .Nm .Ic tur .Op device id .Op generic args .Nm .Ic inquiry .Op device id .Op generic args .Op Fl D .Op Fl S .Op Fl R .Nm .Ic identify .Op device id .Op generic args .Op Fl v .Nm .Ic reportluns .Op device id .Op generic args .Op Fl c .Op Fl l .Op Fl r Ar reporttype .Nm .Ic readcap .Op device id .Op generic args .Op Fl b .Op Fl h .Op Fl H .Op Fl N .Op Fl q .Op Fl s .Nm .Ic start .Op device id .Op generic args .Nm .Ic stop .Op device id .Op generic args .Nm .Ic load .Op device id .Op generic args .Nm .Ic eject .Op device id .Op generic args .Nm .Ic reprobe .Op device id .Nm .Ic rescan .Aq all | bus Ns Op :target:lun .Nm .Ic reset .Aq all | bus Ns Op :target:lun .Nm .Ic defects .Op device id .Op generic args .Aq Fl f Ar format .Op Fl P .Op Fl G .Op Fl q .Op Fl s .Op Fl S Ar offset .Op Fl X .Nm .Ic modepage .Op device id .Op generic args .Aq Fl m Ar page | Fl l .Op Fl P Ar pgctl .Op Fl b | Fl e .Op Fl d .Nm .Ic cmd .Op device id .Op generic args .Aq Fl a Ar cmd Op args .Aq Fl c Ar cmd Op args .Op Fl d .Op Fl f .Op Fl i Ar len Ar fmt .Bk -words .Op Fl o Ar len Ar fmt Op args .Op Fl r Ar fmt .Ek .Nm .Ic smpcmd .Op device id .Op generic args .Aq Fl r Ar len Ar fmt Op args .Aq Fl R Ar len Ar fmt Op args .Nm .Ic smprg .Op device id .Op generic args .Op Fl l .Nm .Ic smppc .Op device id .Op generic args .Aq Fl p Ar phy .Op Fl l .Op Fl o Ar operation .Op Fl d Ar name .Op Fl m Ar rate .Op Fl M Ar rate .Op Fl T Ar pp_timeout .Op Fl a Ar enable|disable .Op Fl A Ar enable|disable .Op Fl s Ar enable|disable .Op Fl S Ar enable|disable .Nm .Ic smpphylist .Op device id .Op generic args .Op Fl l .Op Fl q .Nm .Ic smpmaninfo .Op device id .Op generic args .Op Fl l .Nm .Ic debug .Op Fl I .Op Fl P .Op Fl T .Op Fl S .Op Fl X .Op Fl c .Op Fl p .Aq all|off|bus Ns Op :target Ns Op :lun .Nm .Ic tags .Op device id .Op generic args .Op Fl N Ar tags .Op Fl q .Op Fl v .Nm .Ic negotiate .Op device id .Op generic args .Op Fl c .Op Fl D Ar enable|disable .Op Fl M Ar mode .Op Fl O Ar offset .Op Fl q .Op Fl R Ar syncrate .Op Fl T Ar enable|disable .Op Fl U .Op Fl W Ar bus_width .Op Fl v .Nm .Ic format .Op device id .Op generic args .Op Fl q .Op Fl r .Op Fl w .Op Fl y .Nm .Ic sanitize .Op device id .Op generic args .Aq Fl a Ar overwrite | block | crypto | exitfailure .Op Fl c Ar passes .Op Fl I .Op Fl P Ar pattern .Op Fl q .Op Fl U .Op Fl r .Op Fl w .Op Fl y .Nm .Ic idle .Op device id .Op generic args .Op Fl t Ar time .Nm .Ic standby .Op device id .Op generic args .Op Fl t Ar time .Nm .Ic sleep .Op device id .Op generic args .Nm .Ic apm .Op device id .Op generic args .Op Fl l Ar level .Nm .Ic aam .Op device id .Op generic args .Op Fl l Ar level .Nm .Ic fwdownload .Op device id .Op generic args .Aq Fl f Ar fw_image .Op Fl q .Op Fl s .Op Fl y .Nm .Ic security .Op device id .Op generic args .Op Fl d Ar pwd .Op Fl e Ar pwd .Op Fl f .Op Fl h Ar pwd .Op Fl k Ar pwd .Op Fl l Ar high|maximum .Op Fl q .Op Fl s Ar pwd .Op Fl T Ar timeout .Op Fl U Ar user|master .Op Fl y .Nm .Ic hpa .Op device id .Op generic args .Op Fl f .Op Fl l .Op Fl P .Op Fl p Ar pwd .Op Fl q .Op Fl s Ar max_sectors .Op Fl U Ar pwd .Op Fl y .Nm .Ic persist .Op device id .Op generic args .Aq Fl i Ar action | Fl o Ar action .Op Fl a .Op Fl I Ar trans_id 
.Op Fl k Ar key .Op Fl K Ar sa_key .Op Fl p .Op Fl R Ar rel_tgt_port .Op Fl s Ar scope .Op Fl S .Op Fl T Ar res_type .Op Fl U .Nm .Ic attrib .Op device id .Op generic args .Aq Fl r Ar action | Fl w Ar attrib .Op Fl a Ar attr_num .Op Fl c .Op Fl e Ar elem_addr .Op Fl F Ar form1,form2 .Op Fl p Ar part .Op Fl s Ar start_addr .Op Fl T Ar elem_type .Op Fl V Ar lv_num .Nm .Ic opcodes .Op device id .Op generic args .Op Fl o Ar opcode .Op Fl s Ar service_action .Op Fl N .Op Fl T .Nm +.Ic zone +.Aq Fl c Ar cmd +.Op Fl a +.Op Fl l Ar lba +.Op Fl o Ar rep_opts +.Op Fl P Ar print_opts +.Nm +.Ic epc +.Aq Fl c Ar cmd +.Op Fl d +.Op Fl D +.Op Fl e +.Op Fl H +.Op Fl p Ar power_cond +.Op Fl P +.Op Fl r Ar restore_src +.Op Fl s +.Op Fl S Ar power_src +.Op Fl T Ar timer +.Nm .Ic help .Sh DESCRIPTION The .Nm utility is designed to provide a way for users to access and control the .Fx CAM subsystem. .Pp The .Nm utility can cause a loss of data and/or system crashes if used improperly. Even expert users are encouraged to exercise caution when using this command. Novice users should stay away from this utility. .Pp The .Nm utility has a number of primary functions, many of which support an optional device identifier. A device identifier can take one of three forms: .Bl -tag -width 14n .It deviceUNIT Specify a device name and unit number combination, like "da5" or "cd3". .It bus:target Specify a bus number and target id. The bus number can be determined from the output of .Dq camcontrol devlist . The lun defaults to 0. .It bus:target:lun Specify the bus, target and lun for a device. (e.g.\& 1:2:0) .El .Pp The device identifier, if it is specified, .Em must come immediately after the function name, and before any generic or function-specific arguments. Note that the .Fl n and .Fl u arguments described below will override any device name or unit number specified beforehand. The .Fl n and .Fl u arguments will .Em not override a specified bus:target or bus:target:lun, however. .Pp Most of the .Nm primary functions support these generic arguments: .Bl -tag -width 14n .It Fl C Ar count SCSI command retry count. In order for this to work, error recovery .Pq Fl E must be turned on. .It Fl E Instruct the kernel to perform generic SCSI error recovery for the given command. This is needed in order for the retry count .Pq Fl C to be honored. Other than retrying commands, the generic error recovery in the code will generally attempt to spin up drives that are not spinning. It may take some other actions, depending upon the sense code returned from the command. .It Fl n Ar dev_name Specify the device type to operate on, e.g.\& "da", "cd". .It Fl t Ar timeout SCSI command timeout in seconds. This overrides the default timeout for any given command. .It Fl u Ar unit_number Specify the device unit number, e.g.\& "1", "5". .It Fl v Be verbose, print out sense information for failed SCSI commands. .El .Pp Primary command functions: .Bl -tag -width periphlist .It Ic devlist List all physical devices (logical units) attached to the CAM subsystem. This also includes a list of peripheral drivers attached to each device. With the .Fl v argument, SCSI bus number, adapter name and unit numbers are printed as well. On the other hand, with the .Fl b argument, only the bus adapter, and unit information will be printed, and device information will be omitted. .It Ic periphlist List all peripheral drivers attached to a given physical device (logical unit). .It Ic tur Send the SCSI test unit ready (0x00) command to the given device. 
The .Nm utility will report whether the device is ready or not. .It Ic inquiry Send a SCSI inquiry command (0x12) to a device. By default, .Nm will print out the standard inquiry data, device serial number, and transfer rate information. The user can specify that only certain types of inquiry data be printed: .Bl -tag -width 4n .It Fl D Get the standard inquiry data. .It Fl S Print out the serial number. If this flag is the only one specified, .Nm will not print out "Serial Number" before the value returned by the drive. This is to aid in script writing. .It Fl R Print out transfer rate information. .El .It Ic identify Send an ATA identify command (0xec) to a device. .It Ic reportluns Send the SCSI REPORT LUNS (0xA0) command to the given device. By default, .Nm will print out the list of logical units (LUNs) supported by the target device. There are a couple of options to modify the output: .Bl -tag -width 14n .It Fl c Just print out a count of LUNs, not the actual LUN numbers. .It Fl l Just print out the LUNs, and do not print out the count. .It Fl r Ar reporttype Specify the type of report to request from the target: .Bl -tag -width 012345678 .It default Return the default report. This is the .Nm default. Most targets will support this report if they support the REPORT LUNS command. .It wellknown Return only well known LUNs. .It all Return all available LUNs. .El .El .Pp .Nm will try to print out LUN numbers in a reasonable format. It can understand the peripheral, flat, LUN and extended LUN formats. .It Ic readcap Send the SCSI READ CAPACITY command to the given device and display the results. If the device is larger than 2TB, the SCSI READ CAPACITY (16) service action will be sent to obtain the full size of the device. By default, .Nm will print out the last logical block of the device, and the blocksize of the device in bytes. To modify the output format, use the following options: .Bl -tag -width 5n .It Fl b Just print out the blocksize, not the last block or device size. This cannot be used with .Fl N or .Fl s . .It Fl h Print out the device size in human readable (base 2, 1K == 1024) format. This implies .Fl N and cannot be used with .Fl q or .Fl b . .It Fl H Print out the device size in human readable (base 10, 1K == 1000) format. .It Fl N Print out the number of blocks in the device instead of the last logical block. .It Fl q Quiet, print out the numbers only (separated by a comma if .Fl b or .Fl s are not specified). .It Fl s Print out the last logical block or the size of the device only, and omit the blocksize. .El .Pp Note that this command only displays the information; it does not update the kernel data structures. Use the .Nm reprobe subcommand to do that. .It Ic start Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit set. .It Ic stop Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit cleared. .It Ic load Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit set and the load/eject bit set. .It Ic eject Send the SCSI Start/Stop Unit (0x1B) command to the given device with the start bit cleared and the load/eject bit set. .It Ic rescan Tell the kernel to scan all busses in the system (with the .Ar all argument), the given bus (XPT_SCAN_BUS), or bus:target:lun (XPT_SCAN_LUN) for new devices or devices that have gone away. The user may specify a scan of all busses, a single bus, or a lun. Scanning all luns on a target is not supported.
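.Pp For example, to probe a device that was just attached at bus 0, target 1, lun 0 (example coordinates only), one could issue: .Dl camcontrol rescan 0:1:0 .Pp To rescan every bus in the system instead: .Dl camcontrol rescan all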
.It Ic reprobe Tell the kernel to refresh the information about the device and notify the upper layer, .Xr GEOM 4 . This includes sending the SCSI READ CAPACITY command and updating the disk size visible to the rest of the system. .It Ic reset Tell the kernel to reset all busses in the system (with the .Ar all argument) or the given bus (XPT_RESET_BUS) by issuing a SCSI bus reset for that bus, or to reset the given bus:target:lun (XPT_RESET_DEV), typically by issuing a BUS DEVICE RESET message after connecting to that device. Note that this can have a destructive impact on the system. .It Ic defects Send the .Tn SCSI READ DEFECT DATA (10) command (0x37) or the .Tn SCSI READ DEFECT DATA (12) command (0xB7) to the given device, and print out any combination of: the total number of defects, the primary defect list (PLIST), and the grown defect list (GLIST). .Bl -tag -width 11n .It Fl f Ar format Specify the requested format of the defect list. The format argument is required. Most drives support the physical sector format. Some drives support the logical block format. Many drives, if they do not support the requested format, return the data in an alternate format, along with sense information indicating that the requested data format is not supported. The .Nm utility attempts to detect this, and print out whatever format the drive returns. If the drive uses a non-standard sense code to report that it does not support the requested format, .Nm will probably see the error as a failure to complete the request. .Pp The format options are: .Bl -tag -width 9n .It block Print out the list as logical blocks. This is limited to 32-bit block sizes, and isn't supported by many modern drives. .It longblock Print out the list as logical blocks. This option uses a 64-bit block size. .It bfi Print out the list in bytes from index format. .It extbfi Print out the list in extended bytes from index format. The extended format allows for ranges of blocks to be printed. .It phys Print out the list in physical sector format. Most drives support this format. .It extphys Print out the list in extended physical sector format. The extended format allows for ranges of blocks to be printed. .El .It Fl G Print out the grown defect list. This is a list of bad blocks that have been remapped since the disk left the factory. .It Fl P Print out the primary defect list. This is the list of defects that were present in the factory. .It Fl q When printing status information with .Fl s , only print the number of defects. .It Fl s Just print the number of defects, not the list of defects. .It Fl S Ar offset Specify the starting offset into the defect list. This implies using the .Tn SCSI READ DEFECT DATA (12) command, as the 10 byte version of the command doesn't support the address descriptor index field. Not all drives support the 12 byte command, and some drives that support the 12 byte command don't support the address descriptor index field. .It Fl X Print out defects in hexadecimal (base 16) form instead of base 10 form. .El .Pp If neither .Fl P nor .Fl G is specified, .Nm will print out the number of defects given in the READ DEFECT DATA header returned from the drive. Some drives will report 0 defects if neither the primary nor the grown defect list is requested. .It Ic modepage Allows the user to display and optionally edit a SCSI mode page. The mode page formats are located in .Pa /usr/share/misc/scsi_modes . This can be overridden by specifying a different file in the .Ev SCSI_MODES environment variable.
The .Ic modepage command takes several arguments: .Bl -tag -width 12n .It Fl d Disable block descriptors for mode sense. .It Fl b Displays mode page data in binary format. .It Fl e This flag allows the user to edit values in the mode page. The user may either edit mode page values with the text editor pointed to by his .Ev EDITOR environment variable, or supply mode page values via standard input, using the same format that .Nm uses to display mode page values. The editor will be invoked if .Nm detects that standard input is a terminal. .It Fl l Lists all available mode pages. .It Fl m Ar mode_page This specifies the number of the mode page the user would like to view and/or edit. This argument is mandatory unless .Fl l is specified. .It Fl P Ar pgctl This allows the user to specify the page control field. Possible values are: .Bl -tag -width xxx -compact .It 0 Current values .It 1 Changeable values .It 2 Default values .It 3 Saved values .El .El .It Ic cmd Allows the user to send an arbitrary ATA or SCSI CDB to any device. The .Ic cmd function requires the .Fl c argument to specify a SCSI CDB or the .Fl a argument to specify ATA Command Block register values. Other arguments are optional, depending on the command type. The command and data specification syntax is documented in .Xr cam_cdbparse 3 . NOTE: If the CDB specified causes data to be transferred to or from the SCSI device in question, you MUST specify either .Fl i or .Fl o . .Bl -tag -width 17n .It Fl a Ar cmd Op args This specifies the content of 12 ATA Command Block registers (command, features, lba_low, lba_mid, lba_high, device, lba_low_exp, lba_mid_exp, lba_high_exp, features_exp, sector_count, sector_count_exp). .It Fl c Ar cmd Op args This specifies the SCSI CDB. SCSI CDBs may be 6, 10, 12 or 16 bytes. .It Fl d Specifies DMA protocol to be used for ATA command. .It Fl f Specifies FPDMA (NCQ) protocol to be used for ATA command. .It Fl i Ar len Ar fmt This specifies the amount of data to read, and how it should be displayed. If the format is .Sq - , .Ar len bytes of data will be read from the device and written to standard output. .It Fl o Ar len Ar fmt Op args This specifies the amount of data to be written to a device, and the data that is to be written. If the format is .Sq - , .Ar len bytes of data will be read from standard input and written to the device. .It Fl r Ar fmt This specifies that 11 result ATA Command Block registers should be displayed (status, error, lba_low, lba_mid, lba_high, device, lba_low_exp, lba_mid_exp, lba_high_exp, sector_count, sector_count_exp), and how. If the format is .Sq - , 11 result registers will be written to standard output in hex. .El .It Ic smpcmd Allows the user to send an arbitrary Serial Management Protocol (SMP) command to a device. The .Ic smpcmd function requires the .Fl r argument to specify the SMP request to be sent, and the .Fl R argument to specify the format of the SMP response. The syntax for the SMP request and response arguments is documented in .Xr cam_cdbparse 3 . .Pp Note that SAS adapters that support SMP passthrough (at least the currently known adapters) do not accept CRC bytes from the user in the request and do not pass CRC bytes back to the user in the response. Therefore users should not include the CRC bytes in the length of the request and not expect CRC bytes to be returned in the response. .Bl -tag -width 17n .It Fl r Ar len Ar fmt Op args This specifies the size of the SMP request, without the CRC bytes, and the SMP request format.
If the format is .Sq - , .Ar len bytes of data will be read from standard input and written as the SMP request. .It Fl R Ar len Ar fmt Op args This specifies the size of the buffer allocated for the SMP response, and the SMP response format. If the format is .Sq - , .Ar len bytes of data will be allocated for the response and the response will be written to standard output. .El .It Ic smprg Allows the user to send the Serial Management Protocol (SMP) Report General command to a device. .Nm will display the data returned by the Report General command. If the SMP target supports the long response format, the additional data will be requested and displayed automatically. .Bl -tag -width 8n .It Fl l Request the long response format only. Not all SMP targets support the long response format. This option causes .Nm to skip sending the initial report general request without the long bit set and only issue a report general request with the long bit set. .El .It Ic smppc Allows the user to issue the Serial Management Protocol (SMP) PHY Control command to a device. This function should be used with some caution, as it can render devices inaccessible, and could potentially cause data corruption as well. The .Fl p argument is required to specify the PHY to operate on. .Bl -tag -width 17n .It Fl p Ar phy Specify the PHY to operate on. This argument is required. .It Fl l Request the long request/response format. Not all SMP targets support the long response format. For the PHY Control command, this currently only affects whether the request length is set to a value other than 0. .It Fl o Ar operation Specify a PHY control operation. Only one .Fl o operation may be specified. The operation may be specified numerically (in decimal, hexadecimal, or octal) or one of the following operation names may be specified: .Bl -tag -width 16n .It nop No operation. It is not necessary to specify this argument. .It linkreset Send the LINK RESET command to the phy. .It hardreset Send the HARD RESET command to the phy. .It disable Send the DISABLE command to the phy. Note that the LINK RESET or HARD RESET commands should re-enable the phy. .It clearerrlog Send the CLEAR ERROR LOG command. This clears the error log counters for the specified phy. .It clearaffiliation Send the CLEAR AFFILIATION command. This clears the affiliation from the STP initiator port with the same SAS address as the SMP initiator that requests the clear operation. .It sataportsel Send the TRANSMIT SATA PORT SELECTION SIGNAL command to the phy. This will cause a SATA port selector to use the given phy as its active phy and make the other phy inactive. .It clearitnl Send the CLEAR STP I_T NEXUS LOSS command to the PHY. .It setdevname Send the SET ATTACHED DEVICE NAME command to the PHY. This requires the .Fl d argument to specify the device name. .El .It Fl d Ar name Specify the attached device name. This option is needed with the .Fl o Ar setdevname phy operation. The name is a 64-bit number, and can be specified in decimal, hexadecimal or octal format. .It Fl m Ar rate Set the minimum physical link rate for the phy. This is a numeric argument. Currently known link rates are: .Bl -tag -width 5n .It 0x0 Do not change current value. .It 0x8 1.5 Gbps .It 0x9 3 Gbps .It 0xa 6 Gbps .El .Pp Other values may be specified for newer physical link rates. .It Fl M Ar rate Set the maximum physical link rate for the phy. This is a numeric argument. See the .Fl m argument description for known link rate arguments. 
.It Fl T Ar pp_timeout Set the partial pathway timeout value, in microseconds. See the .Tn ANSI .Tn SAS Protocol Layer (SPL) specification for more information on this field. .It Fl a Ar enable|disable Enable or disable SATA slumber phy power conditions. .It Fl A Ar enable|disable Enable or disable SATA partial power conditions. .It Fl s Ar enable|disable Enable or disable SAS slumber phy power conditions. .It Fl S Ar enable|disable Enable or disable SAS partial phy power conditions. .El .It Ic smpphylist List phys attached to a SAS expander, the address of the end device attached to the phy, and the inquiry data for that device and peripheral devices attached to that device. The inquiry data and peripheral devices are displayed if available. .Bl -tag -width 5n .It Fl l Turn on the long response format for the underlying SMP commands used for this command. .It Fl q Only print out phys that are attached to a device in the CAM EDT (Existing Device Table). .El .It Ic smpmaninfo Send the SMP Report Manufacturer Information command to the device and display the response. .Bl -tag -width 5n .It Fl l Turn on the long response format for the underlying SMP commands used for this command. .El .It Ic debug Turn on CAM debugging printfs in the kernel. This requires options CAMDEBUG in your kernel config file. WARNING: enabling debugging printfs currently causes an EXTREME number of kernel printfs. You may have difficulty turning off the debugging printfs once they start, since the kernel will be busy printing messages and unable to service other requests quickly. The .Ic debug function takes a number of arguments: .Bl -tag -width 18n .It Fl I Enable CAM_DEBUG_INFO printfs. .It Fl P Enable CAM_DEBUG_PERIPH printfs. .It Fl T Enable CAM_DEBUG_TRACE printfs. .It Fl S Enable CAM_DEBUG_SUBTRACE printfs. .It Fl X Enable CAM_DEBUG_XPT printfs. .It Fl c Enable CAM_DEBUG_CDB printfs. This will cause the kernel to print out the SCSI CDBs sent to the specified device(s). .It Fl p Enable CAM_DEBUG_PROBE printfs. .It all Enable debugging for all devices. .It off Turn off debugging for all devices. .It bus Ns Op :target Ns Op :lun Turn on debugging for the given bus, target or lun. If the lun or target and lun are not specified, they are wildcarded. (i.e., just specifying a bus turns on debugging printfs for all devices on that bus.) .El .It Ic tags Show or set the number of "tagged openings" or simultaneous transactions we attempt to queue to a particular device. By default, the .Ic tags command, with no command-specific arguments (i.e., only generic arguments) prints out the "soft" maximum number of transactions that can be queued to the device in question. For more detailed information, use the .Fl v argument described below. .Bl -tag -width 7n .It Fl N Ar tags Set the number of tags for the given device. This must be between the minimum and maximum number set in the kernel quirk table. The default for most devices that support tagged queueing is a minimum of 2 and a maximum of 255. The minimum and maximum values for a given device may be determined by using the .Fl v switch. The meaning of the .Fl v switch for this .Nm subcommand is described below. .It Fl q Be quiet, and do not report the number of tags. This is generally used when setting the number of tags. .It Fl v The verbose flag has special functionality for the .Em tags argument.
It causes .Nm to print out the tagged queueing related fields of the XPT_GDEV_TYPE CCB: .Bl -tag -width 13n .It dev_openings This is the amount of capacity for transactions queued to a given device. .It dev_active This is the number of transactions currently queued to a device. .It devq_openings This is the kernel queue space for transactions. This count usually mirrors dev_openings except during error recovery operations when the device queue is frozen (device is not allowed to receive commands), the number of dev_openings is reduced, or transaction replay is occurring. .It devq_queued This is the number of transactions waiting in the kernel queue for capacity on the device. This number is usually zero unless error recovery is in progress. .It held The held count is the number of CCBs held by peripheral drivers that have either just been completed or are about to be released to the transport layer for service by a device. Held CCBs reserve capacity on a given device. .It mintags This is the current "hard" minimum number of transactions that can be queued to a device at once. The .Ar dev_openings value above cannot go below this number. The default value for .Ar mintags is 2, although it may be set higher or lower for various devices. .It maxtags This is the "hard" maximum number of transactions that can be queued to a device at one time. The .Ar dev_openings value cannot go above this number. The default value for .Ar maxtags is 255, although it may be set higher or lower for various devices. .El .El .It Ic negotiate Show or negotiate various communication parameters. Some controllers may not support setting or changing some of these values. For instance, the Adaptec 174x controllers do not support changing a device's sync rate or offset. The .Nm utility will not attempt to set the parameter if the controller indicates that it does not support setting the parameter. To find out what the controller supports, use the .Fl v flag. The meaning of the .Fl v flag for the .Ic negotiate command is described below. Also, some controller drivers do not support setting negotiation parameters, even if the underlying controller supports negotiation changes. Some controllers, such as the Advansys wide controllers, support enabling and disabling synchronous negotiation for a device, but do not support setting the synchronous negotiation rate. .Bl -tag -width 17n .It Fl a Attempt to make the negotiation settings take effect immediately by sending a Test Unit Ready command to the device. .It Fl c Show or set current negotiation settings. This is the default. .It Fl D Ar enable|disable Enable or disable disconnection. .It Fl M Ar mode Set ATA mode. .It Fl O Ar offset Set the command delay offset. .It Fl q Be quiet, do not print anything. This is generally useful when you want to set a parameter, but do not want any status information. .It Fl R Ar syncrate Change the synchronization rate for a device. The sync rate is a floating point value specified in MHz. So, for instance, .Sq 20.000 is a legal value, as is .Sq 20 . .It Fl T Ar enable|disable Enable or disable tagged queueing for a device. .It Fl U Show or set user negotiation settings. The default is to show or set current negotiation settings. .It Fl v The verbose switch has special meaning for the .Ic negotiate subcommand. It causes .Nm to print out the contents of a Path Inquiry (XPT_PATH_INQ) CCB sent to the controller driver. .It Fl W Ar bus_width Specify the bus width to negotiate with a device. The bus width is specified in bits. 
The only useful values to specify are 8, 16, and 32 bits. The controller must support the bus width in question in order for the setting to take effect. .El .Pp In general, sync rate and offset settings will not take effect for a device until a command has been sent to the device. The .Fl a switch above will automatically send a Test Unit Ready to the device so negotiation parameters will take effect. .It Ic format Issue the .Tn SCSI FORMAT UNIT command to the named device. .Pp .Em WARNING! WARNING! WARNING! .Pp Low level formatting a disk will destroy ALL data on the disk. Use extreme caution when issuing this command. Many users low-level format disks that do not really need to be low-level formatted. There are relatively few scenarios that call for low-level formatting a disk. One reason for low-level formatting a disk is to initialize the disk after changing its physical sector size. Another reason for low-level formatting a disk is to revive the disk if you are getting "medium format corrupted" errors from the disk in response to read and write requests. .Pp Some disks take longer than others to format. Users should specify a timeout long enough to allow the format to complete. The default format timeout is 3 hours, which should be long enough for most disks. Some hard disks will complete a format operation in a very short period of time (on the order of 5 minutes or less). This is often because the drive does not really support the FORMAT UNIT command -- it just accepts the command, waits a few minutes and then returns it. .Pp The .Sq format subcommand takes several arguments that modify its default behavior. The .Fl q and .Fl y arguments can be useful for scripts. .Bl -tag -width 6n .It Fl q Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl r Run in .Dq report only mode. This will report status on a format that is already running on the drive. .It Fl w Issue a non-immediate format command. By default, .Nm issues the FORMAT UNIT command with the immediate bit set. This tells the device to immediately return the format command, before the format has actually completed. Then, .Nm gathers .Tn SCSI sense information from the device every second to determine how far along in the format process it is. If the .Fl w argument is specified, .Nm will issue a non-immediate format command, and will be unable to print any information to let the user know what percentage of the disk has been formatted. .It Fl y Do not ask any questions. By default, .Nm will ask the user if he/she really wants to format the disk in question, and also if the default format command timeout is acceptable. The user will not be asked about the timeout if a timeout is specified on the command line. .El .It Ic sanitize Issue the .Tn SCSI SANITIZE command to the named device. .Pp .Em WARNING! WARNING! WARNING! .Pp ALL data in the cache and on the disk will be destroyed or made inaccessible. Recovery of the data is not possible. Use extreme caution when issuing this command. .Pp The .Sq sanitize subcommand takes several arguments that modify its default behavior. The .Fl q and .Fl y arguments can be useful for scripts. .Bl -tag -width 6n .It Fl a Ar operation Specify the sanitize operation to perform. .Bl -tag -width 16n .It overwrite Perform an overwrite operation by writing a user supplied data pattern to the device one or more times. The pattern is given by the .Fl P argument. 
The number of times is given by the .Fl c argument. .It block Perform a block erase operation. All the device's blocks are set to a vendor defined value, typically zero. .It crypto Perform a cryptographic erase operation. The encryption keys are changed to prevent the decryption of the data. .It exitfailure Exits a previously failed sanitize operation. A failed sanitize operation can only be exited if it was run in the unrestricted completion mode, as provided by the .Fl U argument. .El .It Fl c Ar passes The number of passes when performing an .Sq overwrite operation. Valid values are between 1 and 31. The default is 1. .It Fl I When performing an .Sq overwrite operation, the pattern is inverted between consecutive passes. .It Fl P Ar pattern Path to the file containing the pattern to use when performing an .Sq overwrite operation. The pattern is repeated as needed to fill each block. .It Fl q Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl U Perform the sanitize in the unrestricted completion mode. If the operation fails, it can later be exited with the .Sq exitfailure operation. .It Fl r Run in .Dq report only mode. This will report status on a sanitize that is already running on the drive. .It Fl w Issue a non-immediate sanitize command. By default, .Nm issues the SANITIZE command with the immediate bit set. This tells the device to immediately return the sanitize command, before the sanitize has actually completed. Then, .Nm gathers .Tn SCSI sense information from the device every second to determine how far along in the sanitize process it is. If the .Fl w argument is specified, .Nm will issue a non-immediate sanitize command, and will be unable to print any information to let the user know what percentage of the disk has been sanitized. .It Fl y Do not ask any questions. By default, .Nm will ask the user if he/she really wants to sanitize the disk in question, and also if the default sanitize command timeout is acceptable. The user will not be asked about the timeout if a timeout is specified on the command line. .El .It Ic idle Put the ATA device into the IDLE state. The optional parameter .Pq Fl t specifies the automatic standby timer value in seconds. A value of 0 disables the timer. .It Ic standby Put the ATA device into the STANDBY state. The optional parameter .Pq Fl t specifies the automatic standby timer value in seconds. A value of 0 disables the timer. .It Ic sleep Put the ATA device into the SLEEP state. Note that the only way to get the device out of this state may be a reset. .It Ic apm If the optional parameter .Pq Fl l is specified, enable and set the advanced power management level, where 1 -- minimum power, 127 -- maximum performance with standby, 128 -- minimum power without standby, 254 -- maximum performance. If it is not specified, APM is disabled. .It Ic aam If the optional parameter .Pq Fl l is specified, enable and set the automatic acoustic management level, where 1 -- minimum noise, 254 -- maximum performance. If it is not specified, AAM is disabled. .It Ic security Update or report security settings, using an ATA identify command (0xec). By default, .Nm will print out the security support and associated settings of the device. The .Ic security command takes several arguments: .Bl -tag -width 0n .It Fl d Ar pwd .Pp Disable device security using the given password for the selected user according to the device's configured security level. .It Fl e Ar pwd .Pp Erase the device using the given password for the selected user. .Pp .Em WARNING!
WARNING! WARNING! .Pp Issuing a secure erase will .Em ERASE ALL user data on the device and may take several hours to complete. .Pp When this command is used against an SSD drive, all its cells will be marked as empty, restoring it to factory default write performance. For SSDs, this action usually takes just a few seconds. .It Fl f .Pp Freeze the security configuration of the specified device. .Pp After command completion any other commands that update the device lock mode shall be command aborted. Frozen mode is disabled by power-off or hardware reset. .It Fl h Ar pwd .Pp Enhanced erase the device using the given password for the selected user. .Pp .Em WARNING! WARNING! WARNING! .Pp Issuing an enhanced secure erase will .Em ERASE ALL user data on the device and may take several hours to complete. .Pp An enhanced erase writes predetermined data patterns to all user data areas; all previously written user data shall be overwritten, including sectors that are no longer in use due to reallocation. .It Fl k Ar pwd .Pp Unlock the device using the given password for the selected user according to the device's configured security level. .It Fl l Ar high|maximum .Pp Specifies which security level to set when issuing a .Fl s Ar pwd command. The security level determines device behavior when the master password is used to unlock the device. When the security level is set to high, the device requires the unlock command and the master password to unlock. When the security level is set to maximum, the device requires a secure erase with the master password to unlock. .Pp This option must be used in conjunction with one of the security action commands. .Pp Defaults to .Em high .It Fl q .Pp Be quiet, do not print any status messages. This option will not disable the questions, however. To disable questions, use the .Fl y argument, below. .It Fl s Ar pwd .Pp Password the device (enable security) using the given password for the selected user. This option can be combined with other options such as .Fl e Em pwd .Pp A master password may be set in addition to the user password. The purpose of the master password is to allow an administrator to establish a password that is kept secret from the user, and which may be used to unlock the device if the user password is lost. .Pp .Em Note: Setting the master password does not enable device security. .Pp If the master password is set and the drive supports a Master Revision Code feature, the Master Password Revision Code will be decremented. .It Fl T Ar timeout .Pp Overrides the default timeout, specified in seconds, used for both .Fl e and .Fl h . This is useful if your system has problems processing long timeouts correctly. .Pp Usually the timeout is calculated from the information stored on the drive if present, otherwise it defaults to 2 hours. .It Fl U Ar user|master .Pp Specifies which user to set or use for the running action command. Valid values are user and master. .Pp This option must be used in conjunction with one of the security action commands. .Pp Defaults to .Em master .It Fl y .Pp Confirm yes to dangerous options such as .Fl e without prompting for confirmation. .El .Pp If the password specified for any action command does not match the configured password for the specified user, the command will fail. .Pp The password in all cases is limited to 32 characters; longer passwords will fail. .It Ic hpa Update or report Host Protected Area details.
By default, .Nm will print out the HPA support and associated settings of the device. The .Ic hpa command takes several optional arguments: .Bl -tag -width 0n .It Fl f .Pp Freeze the HPA configuration of the specified device. .Pp After command completion any other commands that update the HPA configuration shall be command aborted. Frozen mode is disabled by power-off or hardware reset. .It Fl l .Pp Lock the HPA configuration of the device until a successful call to unlock or the next power-on reset occurs. .It Fl P .Pp Make the HPA max sectors persist across power-on reset or a hardware reset. This must be used in combination with .Fl s Ar max_sectors . .It Fl p Ar pwd .Pp Set the HPA configuration password required for unlock calls. .It Fl q .Pp Be quiet, do not print any status messages. This option will not disable the questions. To disable questions, use the .Fl y argument, below. .It Fl s Ar max_sectors .Pp Configures the maximum user accessible sectors of the device. This will change the number of sectors the device reports. .Pp .Em WARNING! WARNING! WARNING! .Pp Changing the max sectors of a device using this option will make the data on the device beyond the specified value inaccessible. .Pp Only one successful .Fl s Ar max_sectors call can be made without a power-on reset or a hardware reset of the device. .It Fl U Ar pwd .Pp Unlock the HPA configuration of the specified device using the given password. If the password specified does not match the password configured via .Fl p Ar pwd the command will fail. .Pp After 5 failed unlock calls, due to a password mismatch, the device will refuse additional unlock calls until after a power-on reset. .It Fl y .Pp Confirm yes to dangerous options such as .Fl s Ar max_sectors without prompting for confirmation. .El .Pp The password for all HPA commands is limited to 32 characters; longer passwords will fail. .It Ic fwdownload Program firmware of the named .Tn SCSI or ATA device using the image file provided. .Pp If the device is a .Tn SCSI device and it provides a recommended timeout for the WRITE BUFFER command (see the .Nm opcodes subcommand), that timeout will be used for the firmware download. The drive-recommended timeout value may be overridden on the command line with the .Fl t option. .Pp Current list of supported vendors for SCSI/SAS drives: .Bl -tag -width 10n .It HGST Tested with 4TB SAS drives, model number HUS724040ALS640. .It HITACHI .It HP .It IBM Tested with LTO-5 (ULTRIUM-HH5) and LTO-6 (ULTRIUM-HH6) tape drives. There is a separate table entry for hard drives, because the update method for hard drives is different from the method for tape drives. .It PLEXTOR .It QUALSTAR .It QUANTUM .It SAMSUNG Tested with SM1625 SSDs. .It SEAGATE Tested with Constellation ES (ST32000444SS), ES.2 (ST33000651SS) and ES.3 (ST1000NM0023) drives. .It SmrtStor Tested with 400GB Optimus SSDs (TXA2D20400GA6001). .El .Pp .Em WARNING! WARNING! WARNING! .Pp Little testing has been done to make sure that different device models from each vendor work correctly with the fwdownload command. A vendor name appearing in the supported list means only that firmware of at least one device type from that vendor has successfully been programmed with the fwdownload command. Extra caution should be taken when using this command since there is no guarantee it will not break a device from the listed vendors. Ensure that you have a recent backup of the data on the device before performing a firmware update.
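.Pp For example, an update can be rehearsed in simulation mode before being performed for real (the device name and image path here are only placeholders; the options used are described below): .Dl camcontrol fwdownload da3 -f /path/to/firmware.img -s -v .Dl camcontrol fwdownload da3 -f /path/to/firmware.img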
.Pp Note that unknown .Tn SCSI protocol devices will not be programmed, since there is little chance of the firmware download succeeding. .Pp .Nm will currently attempt a firmware download to any .Tn ATA or .Tn SATA device, since the standard .Tn ATA DOWNLOAD MICROCODE command may work. Firmware downloads to .Tn ATA and .Tn SATA devices are supported for devices connected to standard .Tn ATA and .Tn SATA controllers, and devices connected to SAS controllers with .Tn SCSI to .Tn ATA translation capability. In the latter case, .Nm uses the .Tn SCSI .Tn ATA PASS-THROUGH command to send the .Tn ATA DOWNLOAD MICROCODE command to the drive. Some .Tn SCSI to .Tn ATA translation implementations don't work fully when translating .Tn SCSI WRITE BUFFER commands to .Tn ATA DOWNLOAD MICROCODE commands, but do support .Tn ATA passthrough well enough to do a firmware download. .Bl -tag -width 11n .It Fl f Ar fw_image Path to the firmware image file to be downloaded to the specified device. .It Fl q Do not print informational messages, only print errors. This option should be used with the .Fl y option to suppress all output. .It Fl s Run in simulation mode. Device checks are run and the confirmation dialog is shown, but no firmware download will occur. .It Fl v Show .Tn SCSI or .Tn ATA errors in the event of a failure. .Pp In simulation mode, print out the .Tn SCSI CDB or .Tn ATA register values that would be used for the firmware download command. .It Fl y Do not ask for confirmation. .El .It Ic persist Persistent reservation support. Persistent reservations are a way to reserve a particular .Tn SCSI LUN for use by one or more .Tn SCSI initiators. If the .Fl i option is specified, .Nm will issue the .Tn SCSI PERSISTENT RESERVE IN command using the requested service action. If the .Fl o option is specified, .Nm will issue the .Tn SCSI PERSISTENT RESERVE OUT command using the requested service action. One of those two options is required. .Pp Persistent reservations are complex, and fully explaining them is outside the scope of this manual. Please visit http://www.t10.org and download the latest SPC spec for a full explanation of persistent reservations. .Bl -tag -width 8n .It Fl i Ar mode Specify the service action for the PERSISTENT RESERVE IN command. Supported service actions: .Bl -tag -width 19n .It read_keys Report the current persistent reservation generation (PRgeneration) and any registered keys. .It read_reservation Report the persistent reservation, if any. .It report_capabilities Report the persistent reservation capabilities of the LUN. .It read_full_status Report the full status of persistent reservations on the LUN. .El .It Fl o Ar mode Specify the service action for the PERSISTENT RESERVE OUT command. For service actions like register that are components of other service action names, the entire name must be specified. Otherwise, enough of the service action name must be specified to distinguish it from other possible service actions. Supported service actions: .Bl -tag -width 15n .It register Register a reservation key with the LUN or unregister a reservation key. To register a key, specify the requested key as the Service Action Reservation Key. To unregister a key, specify the previously registered key as the Reservation Key. To change a key, specify the old key as the Reservation Key and the new key as the Service Action Reservation Key. .It register_ignore This is similar to the register subcommand, except that the Reservation Key is ignored. 
The Service Action Reservation Key will overwrite any previous key registered for the initiator. .It reserve Create a reservation. A key must be registered with the LUN before the LUN can be reserved, and it must be specified as the Reservation Key. The type of reservation must also be specified. The scope defaults to LUN scope (LU_SCOPE), but may be changed. .It release Release a reservation. The Reservation Key must be specified. .It clear Release a reservation and remove all keys from the device. The Reservation Key must be specified. .It preempt Remove a reservation belonging to another initiator. The Reservation Key must be specified. The Service Action Reservation Key may be specified, depending on the operation being performed. .It preempt_abort Remove a reservation belonging to another initiator and abort all outstanding commands from that initiator. The Reservation Key must be specified. The Service Action Reservation Key may be specified, depending on the operation being performed. .It register_move Register another initiator with the LUN, and establish a reservation on the LUN for that initiator. The Reservation Key and Service Action Reservation Key must be specified. .It replace_lost Replace Lost Reservation information. .El .It Fl a Set the All Target Ports (ALL_TG_PT) bit. This requests that the key registration be applied to all target ports and not just the particular target port that receives the command. This only applies to the register and register_ignore actions. .It Fl I Ar tid Specify a Transport ID. This only applies to the Register and Register and Move service actions for Persistent Reserve Out. Multiple Transport IDs may be specified with multiple .Fl I arguments. With the Register service action, specifying one or more Transport IDs implicitly enables the .Fl S option which turns on the SPEC_I_PT bit. Transport IDs generally have the format protocol,id. .Bl -tag -width 5n .It SAS A SAS Transport ID consists of .Dq sas, followed by a 64-bit SAS address. For example: .Pp .Dl sas,0x1234567812345678 .It FC A Fibre Channel Transport ID consists of .Dq fcp, followed by a 64-bit Fibre Channel World Wide Name. For example: .Pp .Dl fcp,0x1234567812345678 .It SPI A Parallel SCSI address consists of .Dq spi, followed by a SCSI target ID and a relative target port identifier. For example: .Pp .Dl spi,4,1 .It 1394 An IEEE 1394 (Firewire) Transport ID consists of .Dq sbp, followed by a 64-bit EUI-64 IEEE 1394 node unique identifier. For example: .Pp .Dl sbp,0x1234567812345678 .It RDMA A SCSI over RDMA Transport ID consists of .Dq srp, followed by a 128-bit RDMA initiator port identifier. The port identifier must be exactly 32 or 34 (if the leading 0x is included) hexadecimal digits. Only hexadecimal (base 16) numbers are supported. For example: .Pp .Dl srp,0x12345678123456781234567812345678 .It iSCSI An iSCSI Transport ID consists of an iSCSI name and optionally a separator and iSCSI session ID. For example, if only the iSCSI name is specified: .Pp .Dl iqn.2012-06.com.example:target0 .Pp If the iSCSI separator and initiator session ID are specified: .Pp .Dl iqn.2012-06.com.example:target0,i,0x123 .It PCIe A SCSI over PCIe Transport ID consists of .Dq sop, followed by a PCIe Routing ID. The Routing ID consists of a bus, device and function or in the alternate form, a bus and function. The bus must be in the range of 0 to 255 inclusive and the device must be in the range of 0 to 31 inclusive.
The function must be in the range of 0 to 7 inclusive if the standard form is used, and in the range of 0 to 255 inclusive if the alternate form is used. For example, if a bus, device and function are specified for the standard Routing ID form: .Pp .Dl sop,4,5,1 .Pp If the alternate Routing ID form is used: .Pp .Dl sop,4,1 .El .It Fl k Ar key Specify the Reservation Key. This may be in decimal, octal or hexadecimal format. The value is zero by default if not otherwise specified. The value must be between 0 and 2^64 - 1, inclusive. .It Fl K Ar key Specify the Service Action Reservation Key. This may be in decimal, octal or hexadecimal format. The value is zero by default if not otherwise specified. The value must be between 0 and 2^64 - 1, inclusive. .It Fl p Enable the Activate Persist Through Power Loss bit. This is only used for the register and register_ignore actions. This requests that the reservation persist across power loss events. .It Fl s Ar scope Specify the scope of the reservation. The scope may be specified by name or by number. The scope is ignored for register, register_ignore and clear. If the desired scope isn't available by name, you may specify the number. .Bl -tag -width 7n .It lun LUN scope (0x00). This encompasses the entire LUN. .It extent Extent scope (0x01). .It element Element scope (0x02). .El .It Fl R Ar rtp Specify the Relative Target Port. This only applies to the Register and Move service action of the Persistent Reserve Out command. .It Fl S Enable the SPEC_I_PT bit. This only applies to the Register service action of Persistent Reserve Out. You must also specify at least one Transport ID with .Fl I if this option is set. If you specify a Transport ID, this option is automatically set. It is an error to specify this option for any service action other than Register. .It Fl T Ar type Specify the reservation type. The reservation type may be specified by name or by number. If the desired reservation type isn't available by name, you may specify the number. Supported reservation type names: .Bl -tag -width 11n .It read_shared Read Shared mode. .It wr_ex Write Exclusive mode. May also be specified as .Dq write_exclusive . .It rd_ex Read Exclusive mode. May also be specified as .Dq read_exclusive . .It ex_ac Exclusive access mode. May also be specified as .Dq exclusive_access . .It wr_ex_ro Write Exclusive Registrants Only mode. May also be specified as .Dq write_exclusive_reg_only . .It ex_ac_ro Exclusive Access Registrants Only mode. May also be specified as .Dq exclusive_access_reg_only . .It wr_ex_ar Write Exclusive All Registrants mode. May also be specified as .Dq write_exclusive_all_regs . .It ex_ac_ar Exclusive Access All Registrants mode. May also be specified as .Dq exclusive_access_all_regs . .El .It Fl U Specify that the target should unregister the initiator that sent the Register and Move request. By default, the target will not unregister the initiator that sends the Register and Move request. This option only applies to the Register and Move service action of the Persistent Reserve Out command. .El .It Ic attrib Issue the .Tn SCSI READ or WRITE ATTRIBUTE commands. These commands are used to read and write attributes in Medium Auxiliary Memory (MAM). The most common place Medium Auxiliary Memory is found is small flash chips included in tape cartridges. For instance, .Tn LTO tapes have MAM. Either the .Fl r option or the .Fl w option must be specified. .Bl -tag -width 14n .It Fl r Ar action Specify the READ ATTRIBUTE service action.
.Bl -tag -width 11n .It attr_values Issue the ATTRIBUTE VALUES service action. Read and decode the available attributes and their values. .It attr_list Issue the ATTRIBUTE LIST service action. List the attributes that are available to read and write. .It lv_list Issue the LOGICAL VOLUME LIST service action. List the available logical volumes in the MAM. .It part_list Issue the PARTITION LIST service action. List the available partitions in the MAM. .It supp_attr Issue the SUPPORTED ATTRIBUTES service action. List attributes that are supported for reading or writing. These attributes may or may not be currently present in the MAM. .El .It Fl w Ar attr Specify an attribute to write to the MAM. This option is not yet implemented. .It Fl a Ar num Specify the attribute number to display. This option only works with the attr_values, attr_list and supp_attr arguments to .Fl r . .It Fl c Display cached attributes. If the device supports this flag, it allows displaying attributes for the last piece of media loaded in the drive. .It Fl e Ar num Specify the element address. This is used for specifying which element number in a medium changer to access when reading attributes. The element number could be for a picker, portal, slot or drive. .It Fl F Ar form1,form2 Specify the output format for the attribute values (attr_values) display as a comma separated list of options. The default output is currently set to field_all,nonascii_trim,text_raw. Once this code is ported to FreeBSD 10, any text fields will be converted from their codeset to the user's native codeset with .Xr iconv 3 . .Pp The text options are mutually exclusive; if you specify more than one, you will get unpredictable results. The nonascii options are also mutually exclusive. Most of the field options may be logically ORed together. .Bl -tag -width 12n .It text_esc Print text fields with non-ASCII characters escaped. .It text_raw Print text fields natively, with no codeset conversion. .It nonascii_esc If any non-ASCII characters occur in fields that are supposed to be ASCII, escape the non-ASCII characters. .It nonascii_trim If any non-ASCII characters occur in fields that are supposed to be ASCII, omit the non-ASCII characters. .It nonascii_raw If any non-ASCII characters occur in fields that are supposed to be ASCII, print them as they are. .It field_all Print all of the prefix fields: description, attribute number, attribute size, and the attribute's readonly status. If field_all is specified, specifying any other field options will not have an effect. .It field_none Print none of the prefix fields, and only print out the attribute value. If field_none is specified, specifying any other field options will result in those fields being printed. .It field_desc Print out the attribute description. .It field_num Print out the attribute number. .It field_size Print out the attribute size. .It field_rw Print out the attribute's readonly status. .El .It Fl p Ar part Specify the partition. When the media has multiple partitions, specifying different partition numbers allows seeing the values for each individual partition. .It Fl s Ar start_num Specify the starting attribute number. This requests that the target device return attribute information starting at the given number. .It Fl T Ar elem_type Specify the element type. For medium changer devices, this allows specifying the type of the element referenced in the element address ( .Fl e ) . Valid types are: .Dq all , .Dq picker , .Dq slot , .Dq portal , and .Dq drive .
-.El .It Fl V Ar vol_num Specify the number of the logical volume to operate on. If the media has multiple logical volumes, this will allow displaying or writing attributes on the given logical volume. +.El .It Ic opcodes Issue the REPORT SUPPORTED OPCODES service action of the .Tn SCSI MAINTENANCE IN command. Without arguments, this command will return a list of all .Tn SCSI commands supported by the device, including service actions of commands that support service actions. It will also include the .Tn SCSI CDB (Command Descriptor Block) length for each command, and the description of each command if it is known. .Bl -tag -width 18n .It Fl o Ar opcode Request information on a specific opcode instead of the list of supported commands. If supported, the target will return a CDB-like structure that indicates the opcode, service action (if any), and a mask of bits that are supported in that CDB. .It Fl s Ar service_action For commands that support a service action, specify the service action to query. .It Fl N If a service action is specified for a given opcode, and the device does not support the given service action, the device should not return a .Tn SCSI error, but rather indicate in the returned parameter data that the command is not supported. By default, if a service action is specified for an opcode, and service actions are not supported for the opcode in question, the device will return an error. .It Fl T Include timeout values. This option works with the default display, which includes all commands supported by the device, and with the .Fl o and .Fl s options, which request information on a specific command and service action. This requests that the device report Nominal and Recommended timeout values for the given command or commands. The timeout values are in seconds. The timeout descriptor also includes a command-specific field, the meaning of which is defined by the standard for each individual command. .El +.It Ic zone +Manage +.Tn SCSI +and +.Tn ATA +Zoned Block devices. +This allows managing devices that conform to the +.Tn SCSI +Zoned Block Commands (ZBC) and +.Tn ATA +Zoned ATA Command Set (ZAC) +specifications. +Devices using these command sets are usually hard drives using Shingled +Magnetic Recording (SMR). +There are three types of SMR drives: +.Bl -tag -width 13n +.It Drive Managed +Drive Managed drives look and act just like a standard random access block +device, but underneath, the drive reads and writes the bulk of its capacity +using SMR zones. +Sequential writes will yield better performance, but writing sequentially +is not required. +.It Host Aware +Host Aware drives expose the underlying zone layout via +.Tn SCSI +or +.Tn ATA +commands and allow the host to manage the zone conditions. +The host is not required to manage the zones on the drive, though. +Sequential writes will yield better performance in Sequential Write +Preferred zones, but the host can write randomly in those zones. +.It Host Managed +Host Managed drives expose the underlying zone layout via +.Tn SCSI +or +.Tn ATA +commands. +The host is required to access the zones according to the rules described +by the zone layout. +Any commands that violate the rules will be returned with an error. +.El +.Pp +SMR drives are divided into zones (typically in the range of 256MB each) +that fall into three general categories: +.Bl -tag -width 20n +.It Conventional +These are also known as Non Write Pointer zones. +These zones can be randomly written without an unexpected performance penalty.
+.It Sequential Preferred +These zones should be written sequentially starting at the write pointer +for the zone. +They may be written randomly. +Writes that do not conform to the zone layout may be significantly slower +than expected. +.It Sequential Required +These zones must be written sequentially. +If they are not written sequentially, starting at the write pointer, the +command will fail. +.El +.Pp +.Bl -tag -width 12n +.It Fl c Ar cmd +Specify the zone subcommand: +.Bl -tag -width 6n +.It rz +Issue the Report Zones command. +All zones are returned by default. +Specify report options with +.Fl o +and printing options with +.Fl P . +Specify the starting LBA with +.Fl l . +Note that +.Dq reportzones +is also accepted as a command argument. +.It open +Explicitly open the zone specified by the starting LBA. +.It close +Close the zone specified by the starting LBA. +.It finish +Finish the zone specified by the starting LBA. +.It rwp +Reset the write pointer for the zone specified by the starting LBA. +.El +.It Fl a +For the Open, Close, Finish, and Reset Write Pointer operations, apply the +operation to all zones on the drive. +.It Fl l Ar lba +Specify the starting LBA. +For the Report Zones command, this tells the drive to report starting with +the zone that starts at the given LBA. +For the other commands, this allows the user to identify the zone requested +by its starting LBA. +The LBA may be specified in decimal, hexadecimal or octal notation. +.It Fl o Ar rep_opt +For the Report Zones command, specify a subset of zones to report. +.Bl -tag -width 8n +.It all +Report all zones. +This is the default. +.It empty +Report only empty zones. +.It imp_open +Report zones that are implicitly open. +This means that the host has sent a write to the zone without explicitly +opening the zone. +.It exp_open +Report zones that are explicitly open. +.It closed +Report zones that have been closed by the host. +.It full +Report zones that are full. +.It ro +Report zones that are in the read only state. +Note that +.Dq readonly +is also accepted as an argument. +.It offline +Report zones that are in the offline state. +.It reset +Report zones where the device recommends resetting write pointers. +.It nonseq +Report zones that have the Non Sequential Resources Active flag set. +These are zones that are Sequential Write Preferred, but have been written +non-sequentially. +.It nonwp +Report Non Write Pointer zones, also known as Conventional zones. +.El +.It Fl P Ar print_opt +Specify a printing option for Report Zones: +.Bl -tag -width 7n +.It normal +Normal Report Zones output. +This is the default. +The summary and column headings are printed, fields are separated by spaces +and the fields themselves may contain spaces. +.It summary +Just print the summary: the number of zones, the maximum LBA (LBA of the +last logical block on the drive), and the value of the +.Dq same +field. +The +.Dq same +field describes whether the zones on the drive are all identical, all +different, or whether they are the same except for the last zone, etc. +.It script +Print the zones in a script friendly format. +The summary and column headings are omitted, the fields are separated by +commas, and the fields do not contain spaces. +The fields contain underscores where spaces would normally be used. +.El +.El +.It Ic epc +Issue +.Tn ATA +Extended Power Conditions (EPC) feature set commands. +This only works on +.Tn ATA +protocol drives, and will not work on +.Tn SCSI +protocol drives.
+It will work on +.Tn SATA +drives behind a +.Tn SCSI +to +.Tn ATA +translation layer (SAT). +It may be helpful to read the ATA Command Set - 4 (ACS-4) description of +the Extended Power Conditions feature set, available at t13.org, to +understand the details of this particular +.Nm +subcommand. +.Bl -tag -width 6n +.It Fl c Ar cmd +Specify the epc subcommand: +.Bl -tag -width 7n +.It restore +Restore drive power condition settings. +.Bl -tag -width 6n +.It Fl r Ar src +Specify the source for the restored power settings, either +.Dq default +or +.Dq saved . +This argument is required. +.It Fl s +Save the settings. +This only makes sense to specify when restoring from defaults. +.El +.It goto +Go to the specified power condition. +.Bl -tag -width 7n +.It Fl p Ar cond +Specify the power condition: Idle_a, Idle_b, Idle_c, Standby_y, Standby_z. +This argument is required. +.It Fl D +Specify delayed entry to the power condition. +The drive, if it supports this, can enter the power condition after the +command completes. +.It Fl H +Hold the power condition. +If the drive supports this option, it will hold the power condition and +reject all commands that would normally cause it to exit that power +condition. +.El +.It timer +Set the timer value for a power condition and enable or disable the +condition. +See the +.Dq list +display described below to see what the current timer settings are for each +Idle and Standby mode supported by the drive. +.Bl -tag -width 8n +.It Fl e +Enable the power condition. +One of +.Fl e +or +.Fl d +is required. +.It Fl d +Disable the power condition. +One of +.Fl d +or +.Fl e +is required. +.It Fl T Ar timer +Specify the timer in seconds. +The user may specify a timer as a floating point number with a maximum +supported resolution of tenths of a second. +Drives may or may not support sub-second timer values. +.It Fl p Ar cond +Specify the power condition: Idle_a, Idle_b, Idle_c, Standby_y, Standby_z. +This argument is required. +.It Fl s +Save the timer and power condition enable/disable state. +By default, if this option is not specified, only the current values for +this power condition will be affected. +.El +.It state +Enable or disable a particular power condition. +.Bl -tag -width 7n +.It Fl p Ar cond +Specify the power condition: Idle_a, Idle_b, Idle_c, Standby_y, Standby_z. +This argument is required. +.It Fl s +Save the power condition enable/disable state. +By default, if this option is not specified, only the current values for +this power condition will be affected. +.El +.It enable +Enable the Extended Power Condition (EPC) feature set. +.It disable +Disable the Extended Power Condition (EPC) feature set. +.It source +Specify the EPC power source. +.Bl -tag -width 6n +.It Fl S Ar src +Specify the power source, either +.Dq battery +or +.Dq nonbattery . +.El +.It status +Get the current status of several parameters related to the Extended Power +Condition (EPC) feature set, including whether APM and EPC are supported +and enabled, whether Low Power Standby is supported, whether setting the +EPC power source is supported, and the current power condition. +.Bl -tag -width 3n +.It Fl P +Only report the current power condition. +Some drives will exit their current power condition if a command other than +the +.Tn ATA +CHECK POWER MODE command is received. +If this flag is specified, +.Nm +will only issue the +.Tn ATA +CHECK POWER MODE command to the drive.
+.El +.It list +Display the +.Tn ATA +Power Conditions log (Log Address 0x08). +This shows the list of Idle and Standby power conditions the drive +supports, and a number of parameters about each condition, including +whether it is enabled and what the timer value is. +.El +.El .It Ic help Print out verbose usage information. .El .Sh ENVIRONMENT The .Ev SCSI_MODES variable allows the user to specify an alternate mode page format file. .Pp The .Ev EDITOR variable determines which text editor .Nm starts when editing mode pages. .Sh FILES .Bl -tag -width /usr/share/misc/scsi_modes -compact .It Pa /usr/share/misc/scsi_modes is the SCSI mode format database. .It Pa /dev/xpt0 is the transport layer device. .It Pa /dev/pass* are the CAM application passthrough devices. .El .Sh EXAMPLES .Dl camcontrol eject -n cd -u 1 -v .Pp Eject the CD from cd1, and print SCSI sense information if the command fails. .Pp .Dl camcontrol tur da0 .Pp Send the SCSI test unit ready command to da0. The .Nm utility will report whether the disk is ready, but will not display sense information if the command fails since the .Fl v switch was not specified. .Bd -literal -offset indent camcontrol tur da1 -E -C 4 -t 50 -v .Ed .Pp Send a test unit ready command to da1. Enable kernel error recovery. Specify a retry count of 4, and a timeout of 50 seconds. Enable sense printing (with the .Fl v flag) if the command fails. Since error recovery is turned on, the disk will be spun up if it is not currently spinning. The .Nm utility will report whether the disk is ready. .Bd -literal -offset indent camcontrol cmd -n cd -u 1 -v -c "3C 00 00 00 00 00 00 00 0e 00" \e -i 0xe "s1 i3 i1 i1 i1 i1 i1 i1 i1 i1 i1 i1" .Ed .Pp Issue a READ BUFFER command (0x3C) to cd1. Display the buffer size of cd1, and display the first 10 bytes from the cache on cd1. Display SCSI sense information if the command fails. .Bd -literal -offset indent camcontrol cmd -n cd -u 1 -v -c "3B 00 00 00 00 00 00 00 0e 00" \e -o 14 "00 00 00 00 1 2 3 4 5 6 v v v v" 7 8 9 8 .Ed .Pp Issue a WRITE BUFFER (0x3B) command to cd1. Write out 10 bytes of data, not including the (reserved) 4 byte header. Print out sense information if the command fails. Be very careful with this command; improper use may cause data corruption. .Bd -literal -offset indent camcontrol modepage da3 -m 1 -e -P 3 .Ed .Pp Edit mode page 1 (the Read-Write Error Recovery page) for da3, and save the settings on the drive. Mode page 1 contains a disk drive's auto read and write reallocation settings, among other things. .Pp .Dl camcontrol rescan all .Pp Rescan all SCSI busses in the system for devices that have been added, removed or changed. .Pp .Dl camcontrol rescan 0 .Pp Rescan SCSI bus 0 for devices that have been added, removed or changed. .Pp .Dl camcontrol rescan 0:1:0 .Pp Rescan SCSI bus 0, target 1, lun 0 to see if it has been added, removed, or changed. .Pp .Dl camcontrol tags da5 -N 24 .Pp Set the number of concurrent transactions for da5 to 24. .Bd -literal -offset indent camcontrol negotiate -n da -u 4 -T disable .Ed .Pp Disable tagged queueing for da4. .Bd -literal -offset indent camcontrol negotiate -n da -u 3 -R 20.000 -O 15 -a .Ed .Pp Negotiate a sync rate of 20MHz and an offset of 15 with da3. Then send a Test Unit Ready command to make the settings take effect. .Bd -literal -offset indent camcontrol smpcmd ses0 -v -r 4 "40 0 00 0" -R 1020 "s9 i1" .Ed .Pp Send the SMP REPORT GENERAL command to ses0, and display the number of PHYs it contains. Display SMP errors if the command fails.
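.Pp The following companion example is illustrative; it assumes the same ses0 expander as the previous example: .Bd -literal -offset indent camcontrol smpphylist ses0 -v .Ed .Pp List the PHYs on ses0 and the devices attached to them, and display SMP errors if the command fails.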
.Bd -literal -offset indent camcontrol security ada0 .Ed .Pp Report security support and settings for ada0. .Bd -literal -offset indent camcontrol security ada0 -U user -s MyPass .Ed .Pp Enable security on device ada0 with the password MyPass. .Bd -literal -offset indent camcontrol security ada0 -U user -e MyPass .Ed .Pp Secure erase ada0, which has had security enabled with user password MyPass. .Pp .Em WARNING! WARNING! WARNING! .Pp This will .Em ERASE ALL data from the device, so back up your data before using! .Pp This command can be used against an SSD drive to restore it to factory default write performance. .Bd -literal -offset indent camcontrol hpa ada0 .Ed .Pp Report HPA support and settings for ada0 (also reported via identify). .Bd -literal -offset indent camcontrol hpa ada0 -s 10240 .Ed .Pp Enable HPA on ada0, setting the maximum reported sectors to 10240. .Pp .Em WARNING! WARNING! WARNING! .Pp This will .Em PREVENT ACCESS to all data on the device beyond this limit until HPA is disabled by setting HPA to the native max sectors of the device, which can only be done after a power-on or hardware reset! .Pp .Em DO NOT use this on a device which has an active filesystem! .Bd -literal -offset indent camcontrol persist da0 -v -i read_keys .Ed .Pp This will read any persistent reservation keys registered with da0, and display any errors encountered when sending the PERSISTENT RESERVE IN .Tn SCSI command. .Bd -literal -offset indent camcontrol persist da0 -v -o register -a -K 0x12345678 .Ed .Pp This will register the persistent reservation key 0x12345678 with da0, apply that registration to all ports on da0, and display any errors that occur when sending the PERSISTENT RESERVE OUT command. .Bd -literal -offset indent camcontrol persist da0 -v -o reserve -s lun -k 0x12345678 -T ex_ac .Ed .Pp This will reserve da0 for the exclusive use of the initiator issuing the command. The scope of the reservation is the entire LUN. Any errors sending the PERSISTENT RESERVE OUT command will be displayed. .Bd -literal -offset indent camcontrol persist da0 -v -i read_full .Ed .Pp This will display the full status of all reservations on da0 and print out any errors that occur. .Bd -literal -offset indent camcontrol persist da0 -v -o release -k 0x12345678 -T ex_ac .Ed .Pp This will release a reservation on da0 of the type ex_ac (Exclusive Access). The Reservation Key for this registration is 0x12345678. Any errors that occur will be displayed. .Bd -literal -offset indent camcontrol persist da0 -v -o register -K 0x12345678 -S \e -I sas,0x1234567812345678 -I sas,0x8765432187654321 .Ed .Pp This will register the key 0x12345678 with da0, specifying that it applies to the SAS initiators with SAS addresses 0x1234567812345678 and 0x8765432187654321. .Bd -literal -offset indent camcontrol persist da0 -v -o register_move -k 0x87654321 \e -K 0x12345678 -U -p -R 2 -I fcp,0x1234567812345678 .Ed .Pp This will move the registration from the current initiator, whose Registration Key is 0x87654321, to the Fibre Channel initiator with the Fibre Channel World Wide Node Name 0x1234567812345678. A new registration key, 0x12345678, will be registered for the initiator with the Fibre Channel World Wide Node Name 0x1234567812345678, and the current initiator will be unregistered from the target. The reservation will be moved to relative target port 2 on the target device. The registration will persist across power losses.
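.Pp The next example is illustrative of the opcodes subcommand described above; the opcode 0x28 is the standard READ(10) command, and any device that implements the REPORT SUPPORTED OPCODES service action may be substituted for da0: .Bd -literal -offset indent camcontrol opcodes da0 -o 0x28 -T .Ed .Pp This will ask da0 which bits of the READ(10) CDB it supports, and will also report the Nominal and Recommended timeout values (in seconds) for that command.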
.Bd -literal -offset indent camcontrol attrib sa0 -v -i attr_values -p 1 .Ed .Pp This will read and decode the attribute values from partition 1 on the tape in tape drive sa0, and will display any .Tn SCSI errors that result. +.Pp +.Bd -literal -offset indent +camcontrol zone da0 -v -c rz -P summary +.Ed +.Pp +This will request the SMR zone list from disk da0, print out a +summary of the zone parameters, and display any +.Tn SCSI +or +.Tn ATA +errors that result. +.Pp +.Bd -literal -offset indent +camcontrol zone da0 -v -c rz -o reset +.Ed +.Pp +This will request from disk da0 the list of SMR zones whose write +pointers the device recommends resetting, and display any +.Tn SCSI +or +.Tn ATA +errors that result. +.Pp +.Bd -literal -offset indent +camcontrol zone da0 -v -c rwp -l 0x2c80000 +.Ed +.Pp +This will issue the Reset Write Pointer command to disk da0 for the zone +that starts at LBA 0x2c80000 and display any +.Tn SCSI +or +.Tn ATA +errors that result. +.Pp +.Bd -literal -offset indent +camcontrol epc ada0 -c timer -T 60.1 -p Idle_a -e -s +.Ed +.Pp +Set the timer for the Idle_a power condition on drive +.Pa ada0 +to 60.1 seconds, enable that particular power condition, and save the timer +value and the enabled state of the power condition. +.Pp +.Bd -literal -offset indent +camcontrol epc da4 -c goto -p Standby_z -H +.Ed +.Pp +Tell drive +.Pa da4 +to go to the Standby_z power state (which is +the drive's lowest power state) and hold in that state until it is +explicitly released by another +.Cm goto +command. +.Pp +.Bd -literal -offset indent +camcontrol epc da2 -c status -P +.Ed +.Pp +Report only the power state of +drive +.Pa da2 . +Some drives will power up in response to the commands sent by the +.Cm status +subcommand, and the +.Fl P +option causes +.Nm +to only send the +.Tn ATA +CHECK POWER MODE command, which should not trigger a change in the drive's +power state. +.Pp +.Bd -literal -offset indent +camcontrol epc ada0 -c list +.Ed +.Pp +Display the ATA Power Conditions log (Log Address 0x08) for +drive +.Pa ada0 . .Sh SEE ALSO .Xr cam 3 , .Xr cam_cdbparse 3 , .Xr cam 4 , .Xr pass 4 , .Xr xpt 4 .Sh HISTORY The .Nm utility first appeared in .Fx 3.0 . .Pp The mode page editing code and arbitrary SCSI command code are based upon code in the old .Xr scsi 8 utility and .Xr scsi 3 library, written by Julian Elischer and Peter Dufault. The .Xr scsi 8 program first appeared in .Bx 386 0.1.2.4 , and first appeared in .Fx in .Fx 2.0.5 . .Sh AUTHORS .An Kenneth Merry Aq Mt ken@FreeBSD.org .Sh BUGS The code that parses the generic command line arguments does not know that some of the subcommands take multiple arguments. So if, for instance, you tried something like this: .Bd -literal -offset indent camcontrol cmd -n da -u 1 -c "00 00 00 00 00 v" 0x00 -v .Ed .Pp The sense information from the test unit ready command would not get printed out, since the first .Xr getopt 3 call in .Nm bails out when it sees the second argument to .Fl c (0x00), above. Fixing this behavior would take some gross code, or changes to the .Xr getopt 3 interface. The best way to circumvent this problem is to always make sure to specify generic .Nm arguments before any command-specific arguments. Index: head/sbin/camcontrol/camcontrol.c =================================================================== --- head/sbin/camcontrol/camcontrol.c (revision 300206) +++ head/sbin/camcontrol/camcontrol.c (revision 300207) @@ -1,9359 +1,9558 @@ /* * Copyright (c) 1997-2007 Kenneth D. Merry * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef MINIMALISTIC #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "camcontrol.h" typedef enum { CAM_CMD_NONE = 0x00000000, CAM_CMD_DEVLIST = 0x00000001, CAM_CMD_TUR = 0x00000002, CAM_CMD_INQUIRY = 0x00000003, CAM_CMD_STARTSTOP = 0x00000004, CAM_CMD_RESCAN = 0x00000005, CAM_CMD_READ_DEFECTS = 0x00000006, CAM_CMD_MODE_PAGE = 0x00000007, CAM_CMD_SCSI_CMD = 0x00000008, CAM_CMD_DEVTREE = 0x00000009, CAM_CMD_USAGE = 0x0000000a, CAM_CMD_DEBUG = 0x0000000b, CAM_CMD_RESET = 0x0000000c, CAM_CMD_FORMAT = 0x0000000d, CAM_CMD_TAG = 0x0000000e, CAM_CMD_RATE = 0x0000000f, CAM_CMD_DETACH = 0x00000010, CAM_CMD_REPORTLUNS = 0x00000011, CAM_CMD_READCAP = 0x00000012, CAM_CMD_IDENTIFY = 0x00000013, CAM_CMD_IDLE = 0x00000014, CAM_CMD_STANDBY = 0x00000015, CAM_CMD_SLEEP = 0x00000016, CAM_CMD_SMP_CMD = 0x00000017, CAM_CMD_SMP_RG = 0x00000018, CAM_CMD_SMP_PC = 0x00000019, CAM_CMD_SMP_PHYLIST = 0x0000001a, CAM_CMD_SMP_MANINFO = 0x0000001b, CAM_CMD_DOWNLOAD_FW = 0x0000001c, CAM_CMD_SECURITY = 0x0000001d, CAM_CMD_HPA = 0x0000001e, CAM_CMD_SANITIZE = 0x0000001f, CAM_CMD_PERSIST = 0x00000020, CAM_CMD_APM = 0x00000021, CAM_CMD_AAM = 0x00000022, CAM_CMD_ATTRIB = 0x00000023, CAM_CMD_OPCODES = 0x00000024, - CAM_CMD_REPROBE = 0x00000025 + CAM_CMD_REPROBE = 0x00000025, + CAM_CMD_ZONE = 0x00000026, + CAM_CMD_EPC = 0x00000027 } cam_cmdmask; typedef enum { CAM_ARG_NONE = 0x00000000, CAM_ARG_VERBOSE = 0x00000001, CAM_ARG_DEVICE = 0x00000002, CAM_ARG_BUS = 0x00000004, CAM_ARG_TARGET = 0x00000008, CAM_ARG_LUN = 0x00000010, CAM_ARG_EJECT = 0x00000020, CAM_ARG_UNIT = 0x00000040, CAM_ARG_FORMAT_BLOCK = 0x00000080, CAM_ARG_FORMAT_BFI = 0x00000100, CAM_ARG_FORMAT_PHYS = 0x00000200, CAM_ARG_PLIST = 0x00000400, CAM_ARG_GLIST = 0x00000800, CAM_ARG_GET_SERIAL = 0x00001000, CAM_ARG_GET_STDINQ = 0x00002000, CAM_ARG_GET_XFERRATE = 0x00004000, CAM_ARG_INQ_MASK = 0x00007000, CAM_ARG_MODE_EDIT = 0x00008000, 
CAM_ARG_PAGE_CNTL = 0x00010000, CAM_ARG_TIMEOUT = 0x00020000, CAM_ARG_CMD_IN = 0x00040000, CAM_ARG_CMD_OUT = 0x00080000, CAM_ARG_DBD = 0x00100000, CAM_ARG_ERR_RECOVER = 0x00200000, CAM_ARG_RETRIES = 0x00400000, CAM_ARG_START_UNIT = 0x00800000, CAM_ARG_DEBUG_INFO = 0x01000000, CAM_ARG_DEBUG_TRACE = 0x02000000, CAM_ARG_DEBUG_SUBTRACE = 0x04000000, CAM_ARG_DEBUG_CDB = 0x08000000, CAM_ARG_DEBUG_XPT = 0x10000000, CAM_ARG_DEBUG_PERIPH = 0x20000000, CAM_ARG_DEBUG_PROBE = 0x40000000, } cam_argmask; struct camcontrol_opts { const char *optname; uint32_t cmdnum; cam_argmask argnum; const char *subopt; }; #ifndef MINIMALISTIC struct ata_res_pass16 { u_int16_t reserved[5]; u_int8_t flags; u_int8_t error; u_int8_t sector_count_exp; u_int8_t sector_count; u_int8_t lba_low_exp; u_int8_t lba_low; u_int8_t lba_mid_exp; u_int8_t lba_mid; u_int8_t lba_high_exp; u_int8_t lba_high; u_int8_t device; u_int8_t status; }; struct ata_set_max_pwd { u_int16_t reserved1; u_int8_t password[32]; u_int16_t reserved2[239]; }; static const char scsicmd_opts[] = "a:c:dfi:o:r"; static const char readdefect_opts[] = "f:GPqsS:X"; static const char negotiate_opts[] = "acD:M:O:qR:T:UW:"; static const char smprg_opts[] = "l"; static const char smppc_opts[] = "a:A:d:lm:M:o:p:s:S:T:"; static const char smpphylist_opts[] = "lq"; static char pwd_opt; #endif static struct camcontrol_opts option_table[] = { #ifndef MINIMALISTIC {"tur", CAM_CMD_TUR, CAM_ARG_NONE, NULL}, {"inquiry", CAM_CMD_INQUIRY, CAM_ARG_NONE, "DSR"}, {"identify", CAM_CMD_IDENTIFY, CAM_ARG_NONE, NULL}, {"start", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT, NULL}, {"stop", CAM_CMD_STARTSTOP, CAM_ARG_NONE, NULL}, {"load", CAM_CMD_STARTSTOP, CAM_ARG_START_UNIT | CAM_ARG_EJECT, NULL}, {"eject", CAM_CMD_STARTSTOP, CAM_ARG_EJECT, NULL}, {"reportluns", CAM_CMD_REPORTLUNS, CAM_ARG_NONE, "clr:"}, {"readcapacity", CAM_CMD_READCAP, CAM_ARG_NONE, "bhHNqs"}, {"reprobe", CAM_CMD_REPROBE, CAM_ARG_NONE, NULL}, #endif /* MINIMALISTIC */ {"rescan", CAM_CMD_RESCAN, CAM_ARG_NONE, NULL}, {"reset", CAM_CMD_RESET, CAM_ARG_NONE, NULL}, #ifndef MINIMALISTIC {"cmd", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"command", CAM_CMD_SCSI_CMD, CAM_ARG_NONE, scsicmd_opts}, {"smpcmd", CAM_CMD_SMP_CMD, CAM_ARG_NONE, "r:R:"}, {"smprg", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smpreportgeneral", CAM_CMD_SMP_RG, CAM_ARG_NONE, smprg_opts}, {"smppc", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpphycontrol", CAM_CMD_SMP_PC, CAM_ARG_NONE, smppc_opts}, {"smpplist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpphylist", CAM_CMD_SMP_PHYLIST, CAM_ARG_NONE, smpphylist_opts}, {"smpmaninfo", CAM_CMD_SMP_MANINFO, CAM_ARG_NONE, "l"}, {"defects", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, {"defectlist", CAM_CMD_READ_DEFECTS, CAM_ARG_NONE, readdefect_opts}, #endif /* MINIMALISTIC */ {"devlist", CAM_CMD_DEVTREE, CAM_ARG_NONE, "-b"}, #ifndef MINIMALISTIC {"periphlist", CAM_CMD_DEVLIST, CAM_ARG_NONE, NULL}, {"modepage", CAM_CMD_MODE_PAGE, CAM_ARG_NONE, "bdelm:P:"}, {"tags", CAM_CMD_TAG, CAM_ARG_NONE, "N:q"}, {"negotiate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"rate", CAM_CMD_RATE, CAM_ARG_NONE, negotiate_opts}, {"debug", CAM_CMD_DEBUG, CAM_ARG_NONE, "IPTSXcp"}, {"format", CAM_CMD_FORMAT, CAM_ARG_NONE, "qrwy"}, {"sanitize", CAM_CMD_SANITIZE, CAM_ARG_NONE, "a:c:IP:qrUwy"}, {"idle", CAM_CMD_IDLE, CAM_ARG_NONE, "t:"}, {"standby", CAM_CMD_STANDBY, CAM_ARG_NONE, "t:"}, {"sleep", CAM_CMD_SLEEP, CAM_ARG_NONE, ""}, {"apm", CAM_CMD_APM, CAM_ARG_NONE, "l:"}, {"aam", CAM_CMD_AAM, CAM_ARG_NONE, 
"l:"}, {"fwdownload", CAM_CMD_DOWNLOAD_FW, CAM_ARG_NONE, "f:qsy"}, {"security", CAM_CMD_SECURITY, CAM_ARG_NONE, "d:e:fh:k:l:qs:T:U:y"}, {"hpa", CAM_CMD_HPA, CAM_ARG_NONE, "Pflp:qs:U:y"}, {"persist", CAM_CMD_PERSIST, CAM_ARG_NONE, "ai:I:k:K:o:ps:ST:U"}, {"attrib", CAM_CMD_ATTRIB, CAM_ARG_NONE, "a:ce:F:p:r:s:T:w:V:"}, {"opcodes", CAM_CMD_OPCODES, CAM_ARG_NONE, "No:s:T"}, + {"zone", CAM_CMD_ZONE, CAM_ARG_NONE, "ac:l:No:P:"}, + {"epc", CAM_CMD_EPC, CAM_ARG_NONE, "c:dDeHp:Pr:sS:T:"}, #endif /* MINIMALISTIC */ {"help", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-?", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {"-h", CAM_CMD_USAGE, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; struct cam_devitem { struct device_match_result dev_match; int num_periphs; struct periph_match_result *periph_matches; struct scsi_vpd_device_id *device_id; int device_id_len; STAILQ_ENTRY(cam_devitem) links; }; struct cam_devlist { STAILQ_HEAD(, cam_devitem) dev_queue; path_id_t path_id; }; static cam_cmdmask cmdlist; static cam_argmask arglist; camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt); #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device); #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt); #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet); static int scsistart(struct cam_device *device, int startstop, int loadeject, int retry_count, int timeout); static int scsiinquiry(struct cam_device *device, int retry_count, int timeout); static int scsiserial(struct cam_device *device, int retry_count, int timeout); #endif /* MINIMALISTIC */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst); static int dorescan_or_reset(int argc, char **argv, int rescan); static int rescan_or_reset_bus(path_id_t bus, int rescan); static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan); #ifndef MINIMALISTIC static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int getdevid(struct cam_devitem *item); static int buildbusdevlist(struct cam_devlist *devlist); static void freebusdevlist(struct cam_devlist *devlist); static struct cam_devitem *findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr); static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt); static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts); static void cpi_print(struct ccb_pathinq *cpi); static int get_cpi(struct cam_device 
*device, struct ccb_pathinq *cpi); static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd); static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts); static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt); static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len); static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len); static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbose); static int scsireprobe(struct cam_device *device); #endif /* MINIMALISTIC */ #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #ifndef max #define max(a,b) (((a)>(b))?(a):(b)) #endif camcontrol_optret getoption(struct camcontrol_opts *table, char *arg, uint32_t *cmdnum, cam_argmask *argnum, const char **subopt) { struct camcontrol_opts *opts; int num_matches = 0; for (opts = table; (opts != NULL) && (opts->optname != NULL); opts++) { if (strncmp(opts->optname, arg, strlen(arg)) == 0) { *cmdnum = opts->cmdnum; *argnum = opts->argnum; *subopt = opts->subopt; if (++num_matches > 1) return(CC_OR_AMBIGUOUS); } } if (num_matches > 0) return(CC_OR_FOUND); else return(CC_OR_NOT_FOUND); } #ifndef MINIMALISTIC static int getdevlist(struct cam_device *device) { union ccb *ccb; char status[32]; int error = 0; ccb = cam_getccb(device); ccb->ccb_h.func_code = XPT_GDEVLIST; ccb->ccb_h.flags = CAM_DIR_NONE; ccb->ccb_h.retry_count = 1; ccb->cgdl.index = 0; ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { if (cam_send_ccb(device, ccb) < 0) { perror("error getting device list"); cam_freeccb(ccb); return(1); } status[0] = '\0'; switch (ccb->cgdl.status) { case CAM_GDEVLIST_MORE_DEVS: strcpy(status, "MORE"); break; case CAM_GDEVLIST_LAST_DEVICE: strcpy(status, "LAST"); break; case CAM_GDEVLIST_LIST_CHANGED: strcpy(status, "CHANGED"); break; case CAM_GDEVLIST_ERROR: strcpy(status, "ERROR"); error = 1; break; } fprintf(stdout, "%s%d: generation: %d index: %d status: %s\n", ccb->cgdl.periph_name, ccb->cgdl.unit_number, ccb->cgdl.generation, ccb->cgdl.index, status); /* * If the list has changed, we need to start over from the * beginning. 
*/ if (ccb->cgdl.status == CAM_GDEVLIST_LIST_CHANGED) ccb->cgdl.index = 0; } cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ static int getdevtree(int argc, char **argv, char *combinedopt) { union ccb ccb; int bufsize, fd; unsigned int i; int need_close = 0; int error = 0; int skip_device = 0; int busonly = 0; int c; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': if ((arglist & CAM_ARG_VERBOSE) == 0) busonly = 1; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; /* * We fetch all nodes, since we display most of them in the default * case, and all in the verbose case. */ ccb.cdm.num_patterns = 0; ccb.cdm.pattern_buf_len = 0; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); error = 1; break; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); error = 1; break; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_BUS: { struct bus_match_result *bus_result; /* * Only print the bus information if the * user turns on the verbose flag. */ if ((busonly == 0) && (arglist & CAM_ARG_VERBOSE) == 0) break; bus_result = &ccb.cdm.matches[i].result.bus_result; if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "scbus%d on %s%d bus %d%s\n", bus_result->path_id, bus_result->dev_name, bus_result->unit_number, bus_result->bus_id, (busonly ? 
"" : ":")); break; } case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; char vendor[16], product[48], revision[16]; char fw[5], tmpstr[256]; if (busonly == 1) break; dev_result = &ccb.cdm.matches[i].result.device_result; if ((dev_result->flags & DEV_RESULT_UNCONFIGURED) && ((arglist & CAM_ARG_VERBOSE) == 0)) { skip_device = 1; break; } else skip_device = 0; if (dev_result->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_result->inq_data.vendor, sizeof(dev_result->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_result->inq_data.product, sizeof(dev_result->inq_data.product), sizeof(product)); cam_strvis(revision, dev_result->inq_data.revision, sizeof(dev_result->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if (dev_result->protocol == PROTO_ATA || dev_result->protocol == PROTO_SATAPM) { cam_strvis(product, dev_result->ident_data.model, sizeof(dev_result->ident_data.model), sizeof(product)); cam_strvis(revision, dev_result->ident_data.revision, sizeof(dev_result->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else if (dev_result->protocol == PROTO_SEMB) { struct sep_identify_data *sid; sid = (struct sep_identify_data *) &dev_result->ident_data; cam_strvis(vendor, sid->vendor_id, sizeof(sid->vendor_id), sizeof(vendor)); cam_strvis(product, sid->product_id, sizeof(sid->product_id), sizeof(product)); cam_strvis(revision, sid->product_rev, sizeof(sid->product_rev), sizeof(revision)); cam_strvis(fw, sid->firmware_rev, sizeof(sid->firmware_rev), sizeof(fw)); sprintf(tmpstr, "<%s %s %s %s>", vendor, product, revision, fw); } else { sprintf(tmpstr, "<>"); } if (need_close) { fprintf(stdout, ")\n"); need_close = 0; } fprintf(stdout, "%-33s at scbus%d " "target %d lun %jx (", tmpstr, dev_result->path_id, dev_result->target_id, (uintmax_t)dev_result->target_lun); need_close = 1; break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (busonly || skip_device != 0) break; if (need_close > 1) fprintf(stdout, ","); fprintf(stdout, "%s%d", periph_result->periph_name, periph_result->unit_number); need_close++; break; } default: fprintf(stdout, "unknown match type\n"); break; } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); if (need_close) fprintf(stdout, ")\n"); close(fd); return(error); } #ifndef MINIMALISTIC static int testunitready(struct cam_device *device, int retry_count, int timeout, int quiet) { int error = 0; union ccb *ccb; ccb = cam_getccb(device); scsi_test_unit_ready(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet == 0) perror("error sending test unit ready"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { if (quiet == 0) fprintf(stdout, "Unit is ready\n"); } else { if (quiet == 0) fprintf(stdout, "Unit is not ready\n"); error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } static int scsistart(struct cam_device *device, int startstop, int loadeject, int retry_count, int timeout) { union ccb *ccb; int error = 0; ccb = cam_getccb(device); /* * If we're stopping, send an ordered tag so the drive in question * will finish any previously queued writes before stopping. If * the device isn't capable of tagged queueing, or if tagged * queueing is turned off, the tag action is a no-op. */ scsi_start_stop(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ startstop ? MSG_SIMPLE_Q_TAG : MSG_ORDERED_Q_TAG, /* start/stop */ startstop, /* load_eject */ loadeject, /* immediate */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 120000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending start unit"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) if (startstop) { fprintf(stdout, "Unit started successfully"); if (loadeject) fprintf(stdout,", Media loaded\n"); else fprintf(stdout,"\n"); } else { fprintf(stdout, "Unit stopped successfully"); if (loadeject) fprintf(stdout, ", Media ejected\n"); else fprintf(stdout, "\n"); } else { error = 1; if (startstop) fprintf(stdout, "Error received from start unit command\n"); else fprintf(stdout, "Error received from stop unit command\n"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); return(error); } int scsidoinquiry(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c; int error = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'D': arglist |= CAM_ARG_GET_STDINQ; break; case 'R': arglist |= CAM_ARG_GET_XFERRATE; break; case 'S': arglist |= CAM_ARG_GET_SERIAL; break; default: break; } } /* * If the user didn't specify any inquiry options, he wants all of * them. 
*/ if ((arglist & CAM_ARG_INQ_MASK) == 0) arglist |= CAM_ARG_INQ_MASK; if (arglist & CAM_ARG_GET_STDINQ) error = scsiinquiry(device, retry_count, timeout); if (error != 0) return(error); if (arglist & CAM_ARG_GET_SERIAL) scsiserial(device, retry_count, timeout); if (arglist & CAM_ARG_GET_XFERRATE) error = camxferrate(device); return(error); } static int scsiinquiry(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_inquiry_data *inq_buf; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); inq_buf = (struct scsi_inquiry_data *)malloc( sizeof(struct scsi_inquiry_data)); if (inq_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for inquiry\n"); return(1); } bzero(inq_buf, sizeof(*inq_buf)); /* * Note that although the size of the inquiry buffer is the full * 256 bytes specified in the SCSI spec, we only tell the device * that we have allocated SHORT_INQUIRY_LENGTH bytes. There are * two reasons for this: * * - The SCSI spec says that when a length field is only 1 byte, * a value of 0 will be interpreted as 256. Therefore * scsi_inquiry() will convert an inq_len (which is passed in as * a u_int32_t, but the field in the CDB is only 1 byte) of 256 * to 0. Evidently, very few devices meet the spec in that * regard. Some devices, like many Seagate disks, take the 0 as * 0, and don't return any data. One Pioneer DVD-R drive * returns more data than the command asked for. * * So, since there are numerous devices that just don't work * right with the full inquiry size, we don't send the full size. * * - The second reason not to use the full inquiry data length is * that we don't need it here. The only reason we issue a * standard inquiry is to get the vendor name, device name, * and revision so scsi_print_inquiry() can print them. * * If, at some point in the future, more inquiry data is needed for * some reason, this code should use a procedure similar to the * probe code. i.e., issue a short inquiry, and determine from * the additional length passed back from the device how much * inquiry data the device supports. Once the amount the device * supports is determined, issue an inquiry for that amount and no * more. * * KDM, 2/18/2000 */ scsi_inquiry(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)inq_buf, /* inq_len */ SHORT_INQUIRY_LENGTH, /* evpd */ 0, /* page_code */ 0, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending SCSI inquiry"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(inq_buf); return(error); } fprintf(stdout, "%s%d: ", device->device_name, device->dev_unit_num); scsi_print_inquiry(inq_buf); free(inq_buf); return(0); } static int scsiserial(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct scsi_vpd_unit_serial_number *serial_buf; char serial_num[SVPD_SERIAL_NUM_SIZE + 1]; int error = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); serial_buf = (struct scsi_vpd_unit_serial_number *) malloc(sizeof(*serial_buf)); if (serial_buf == NULL) { cam_freeccb(ccb); warnx("can't malloc memory for serial number"); return(1); } scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)serial_buf, /* inq_len */ sizeof(*serial_buf), /* evpd */ 1, /* page_code */ SVPD_UNIT_SERIAL_NUMBER, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error getting serial number"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); free(serial_buf); return(1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } cam_freeccb(ccb); if (error != 0) { free(serial_buf); return(error); } bcopy(serial_buf->serial_num, serial_num, serial_buf->length); serial_num[serial_buf->length] = '\0'; if ((arglist & CAM_ARG_GET_STDINQ) || (arglist & CAM_ARG_GET_XFERRATE)) fprintf(stdout, "%s%d: Serial Number ", device->device_name, device->dev_unit_num); fprintf(stdout, "%.60s\n", serial_num); free(serial_buf); return(0); } int camxferrate(struct cam_device *device) { struct ccb_pathinq cpi; u_int32_t freq = 0; u_int32_t speed = 0; union ccb *ccb; u_int mb; int retval = 0; if ((retval = get_cpi(device, &cpi)) != 0) return (1); ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char error_string[] = "error getting transfer settings"; if (retval < 0) warn(error_string); else warnx(error_string); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto xferrate_bailout; } speed = cpi.base_transfer_speed; freq = 0; if (ccb->cts.transport == XPORT_SPI) { struct 
ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { freq = scsi_calc_syncsrate(spi->sync_period); speed = freq; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { speed *= (0x01 << spi->bus_width); } } else if (ccb->cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &ccb->cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_SPEED) speed = fc->bitrate; } else if (ccb->cts.transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &ccb->cts.xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) speed = sas->bitrate; } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; if (pata->valid & CTS_ATA_VALID_MODE) speed = ata_mode2speed(pata->mode); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; if (sata->valid & CTS_SATA_VALID_REVISION) speed = ata_revision2speed(sata->revision); } mb = speed / 1000; if (mb > 0) { fprintf(stdout, "%s%d: %d.%03dMB/s transfers", device->device_name, device->dev_unit_num, mb, speed % 1000); } else { fprintf(stdout, "%s%d: %dKB/s transfers", device->device_name, device->dev_unit_num, speed); } if (ccb->cts.transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &ccb->cts.xport_specific.spi; if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) fprintf(stdout, " (%d.%03dMHz, offset %d", freq / 1000, freq % 1000, spi->sync_offset); if (((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) && (spi->bus_width > 0)) { if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ", "); } else { fprintf(stdout, " ("); } fprintf(stdout, "%dbit)", 8 * (0x01 << spi->bus_width)); } else if (((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) && (spi->sync_offset != 0)) { fprintf(stdout, ")"); } } else if (ccb->cts.transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &ccb->cts.xport_specific.ata; printf(" ("); if (pata->valid & CTS_ATA_VALID_MODE) printf("%s, ", ata_mode2string(pata->mode)); if ((pata->valid & CTS_ATA_VALID_ATAPI) && pata->atapi != 0) printf("ATAPI %dbytes, ", pata->atapi); if (pata->valid & CTS_ATA_VALID_BYTECOUNT) printf("PIO %dbytes", pata->bytecount); printf(")"); } else if (ccb->cts.transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &ccb->cts.xport_specific.sata; printf(" ("); if (sata->valid & CTS_SATA_VALID_REVISION) printf("SATA %d.x, ", sata->revision); else printf("SATA, "); if (sata->valid & CTS_SATA_VALID_MODE) printf("%s, ", ata_mode2string(sata->mode)); if ((sata->valid & CTS_SATA_VALID_ATAPI) && sata->atapi != 0) printf("ATAPI %dbytes, ", sata->atapi); if (sata->valid & CTS_SATA_VALID_BYTECOUNT) printf("PIO %dbytes", sata->bytecount); printf(")"); } if (ccb->cts.protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &ccb->cts.proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { if (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) { fprintf(stdout, ", Command Queueing Enabled"); } } } fprintf(stdout, "\n"); xferrate_bailout: cam_freeccb(ccb); return(retval); } static void atahpa_print(struct ata_params *parm, u_int64_t hpasize, int header) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); if (header) { printf("\nFeature " "Support Enabled 
Value\n"); } printf("Host Protected Area (HPA) "); if (parm->support.command1 & ATA_SUPPORT_PROTECTED) { u_int64_t lba = lbasize48 ? lbasize48 : lbasize; printf("yes %s %ju/%ju\n", (hpasize > lba) ? "yes" : "no ", lba, hpasize); printf("HPA - Security "); if (parm->support.command1 & ATA_SUPPORT_MAXSECURITY) printf("yes\n"); else printf("no\n"); } else { printf("no\n"); } } static int atasata(struct ata_params *parm) { if (parm->satacapabilities != 0xffff && parm->satacapabilities != 0x0000) return 1; return 0; } static void atacapprint(struct ata_params *parm) { u_int32_t lbasize = (u_int32_t)parm->lba_size_1 | ((u_int32_t)parm->lba_size_2 << 16); u_int64_t lbasize48 = ((u_int64_t)parm->lba_size48_1) | ((u_int64_t)parm->lba_size48_2 << 16) | ((u_int64_t)parm->lba_size48_3 << 32) | ((u_int64_t)parm->lba_size48_4 << 48); printf("\n"); printf("protocol "); printf("ATA/ATAPI-%d", ata_version(parm->version_major)); if (parm->satacapabilities && parm->satacapabilities != 0xffff) { if (parm->satacapabilities & ATA_SATA_GEN3) printf(" SATA 3.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN2) printf(" SATA 2.x\n"); else if (parm->satacapabilities & ATA_SATA_GEN1) printf(" SATA 1.x\n"); else printf(" SATA\n"); } else printf("\n"); printf("device model %.40s\n", parm->model); printf("firmware revision %.8s\n", parm->revision); printf("serial number %.20s\n", parm->serial); if (parm->enabled.extension & ATA_SUPPORT_64BITWWN) { printf("WWN %04x%04x%04x%04x\n", parm->wwn[0], parm->wwn[1], parm->wwn[2], parm->wwn[3]); } if (parm->enabled.extension & ATA_SUPPORT_MEDIASN) { printf("media serial number %.30s\n", parm->media_serial); } printf("cylinders %d\n", parm->cylinders); printf("heads %d\n", parm->heads); printf("sectors/track %d\n", parm->sectors); printf("sector size logical %u, physical %lu, offset %lu\n", ata_logical_sector_size(parm), (unsigned long)ata_physical_sector_size(parm), (unsigned long)ata_logical_sector_offset(parm)); if (parm->config == ATA_PROTO_CFA || (parm->support.command2 & ATA_SUPPORT_CFA)) printf("CFA supported\n"); printf("LBA%ssupported ", parm->capabilities1 & ATA_SUPPORT_LBA ? " " : " not "); if (lbasize) printf("%d sectors\n", lbasize); else printf("\n"); printf("LBA48%ssupported ", parm->support.command2 & ATA_SUPPORT_ADDRESS48 ? " " : " not "); if (lbasize48) printf("%ju sectors\n", (uintmax_t)lbasize48); else printf("\n"); printf("PIO supported PIO"); switch (ata_max_pmode(parm)) { case ATA_PIO4: printf("4"); break; case ATA_PIO3: printf("3"); break; case ATA_PIO2: printf("2"); break; case ATA_PIO1: printf("1"); break; default: printf("0"); } if ((parm->capabilities1 & ATA_SUPPORT_IORDY) == 0) printf(" w/o IORDY"); printf("\n"); printf("DMA%ssupported ", parm->capabilities1 & ATA_SUPPORT_DMA ? 
" " : " not "); if (parm->capabilities1 & ATA_SUPPORT_DMA) { if (parm->mwdmamodes & 0xff) { printf("WDMA"); if (parm->mwdmamodes & 0x04) printf("2"); else if (parm->mwdmamodes & 0x02) printf("1"); else if (parm->mwdmamodes & 0x01) printf("0"); printf(" "); } if ((parm->atavalid & ATA_FLAG_88) && (parm->udmamodes & 0xff)) { printf("UDMA"); if (parm->udmamodes & 0x40) printf("6"); else if (parm->udmamodes & 0x20) printf("5"); else if (parm->udmamodes & 0x10) printf("4"); else if (parm->udmamodes & 0x08) printf("3"); else if (parm->udmamodes & 0x04) printf("2"); else if (parm->udmamodes & 0x02) printf("1"); else if (parm->udmamodes & 0x01) printf("0"); printf(" "); } } printf("\n"); if (parm->media_rotation_rate == 1) { printf("media RPM non-rotating\n"); } else if (parm->media_rotation_rate >= 0x0401 && parm->media_rotation_rate <= 0xFFFE) { printf("media RPM %d\n", parm->media_rotation_rate); } printf("\nFeature " "Support Enabled Value Vendor\n"); printf("read ahead %s %s\n", parm->support.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_LOOKAHEAD ? "yes" : "no"); printf("write cache %s %s\n", parm->support.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_WRITECACHE ? "yes" : "no"); printf("flush cache %s %s\n", parm->support.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_FLUSHCACHE ? "yes" : "no"); printf("overlap %s\n", parm->capabilities1 & ATA_SUPPORT_OVERLAP ? "yes" : "no"); printf("Tagged Command Queuing (TCQ) %s %s", parm->support.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_QUEUED ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_QUEUED) { printf(" %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("\n"); printf("Native Command Queuing (NCQ) "); if (parm->satacapabilities != 0xffff && (parm->satacapabilities & ATA_SUPPORT_NCQ)) { printf("yes %d tags\n", ATA_QUEUE_LEN(parm->queue) + 1); } else printf("no\n"); printf("NCQ Queue Management %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_QMANAGEMENT ? "yes" : "no"); printf("NCQ Streaming %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_NCQ_STREAM ? "yes" : "no"); printf("Receive & Send FPDMA Queued %s\n", atasata(parm) && parm->satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED ? "yes" : "no"); printf("SMART %s %s\n", parm->support.command1 & ATA_SUPPORT_SMART ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SMART ? "yes" : "no"); printf("microcode download %s %s\n", parm->support.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_MICROCODE ? "yes" : "no"); printf("security %s %s\n", parm->support.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_SECURITY ? "yes" : "no"); printf("power management %s %s\n", parm->support.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no", parm->enabled.command1 & ATA_SUPPORT_POWERMGT ? "yes" : "no"); printf("advanced power management %s %s", parm->support.command2 & ATA_SUPPORT_APM ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_APM ? "yes" : "no"); if (parm->support.command2 & ATA_SUPPORT_APM) { printf(" %d/0x%02X\n", parm->apm_value & 0xff, parm->apm_value & 0xff); } else printf("\n"); printf("automatic acoustic management %s %s", parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC ? "yes" :"no", parm->enabled.command2 & ATA_SUPPORT_AUTOACOUSTIC ? 
"yes" :"no"); if (parm->support.command2 & ATA_SUPPORT_AUTOACOUSTIC) { printf(" %d/0x%02X %d/0x%02X\n", ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_CURRENT(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic), ATA_ACOUSTIC_VENDOR(parm->acoustic)); } else printf("\n"); printf("media status notification %s %s\n", parm->support.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_NOTIFY ? "yes" : "no"); printf("power-up in Standby %s %s\n", parm->support.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no", parm->enabled.command2 & ATA_SUPPORT_STANDBY ? "yes" : "no"); printf("write-read-verify %s %s", parm->support2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_WRITEREADVERIFY ? "yes" : "no"); if (parm->support2 & ATA_SUPPORT_WRITEREADVERIFY) { printf(" %d/0x%x\n", parm->wrv_mode, parm->wrv_mode); } else printf("\n"); printf("unload %s %s\n", parm->support.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_UNLOAD ? "yes" : "no"); printf("general purpose logging %s %s\n", parm->support.extension & ATA_SUPPORT_GENLOG ? "yes" : "no", parm->enabled.extension & ATA_SUPPORT_GENLOG ? "yes" : "no"); printf("free-fall %s %s\n", parm->support2 & ATA_SUPPORT_FREEFALL ? "yes" : "no", parm->enabled2 & ATA_SUPPORT_FREEFALL ? "yes" : "no"); printf("Data Set Management (DSM/TRIM) "); if (parm->support_dsm & ATA_SUPPORT_DSM_TRIM) { printf("yes\n"); printf("DSM - max 512byte blocks "); if (parm->max_dsm_blocks == 0x00) printf("yes not specified\n"); else printf("yes %d\n", parm->max_dsm_blocks); printf("DSM - deterministic read "); if (parm->support3 & ATA_SUPPORT_DRAT) { if (parm->support3 & ATA_SUPPORT_RZAT) printf("yes zeroed\n"); else printf("yes any value\n"); } else { printf("no\n"); } } else { printf("no\n"); } } static int scsi_cam_pass_16_send(struct cam_device *device, union ccb *ccb, int quiet) { struct ata_pass_16 *ata_pass_16; struct ata_cmd ata_cmd; ata_pass_16 = (struct ata_pass_16 *)ccb->csio.cdb_io.cdb_bytes; ata_cmd.command = ata_pass_16->command; ata_cmd.control = ata_pass_16->control; ata_cmd.features = ata_pass_16->features; if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s via pass_16 with timeout of %u msecs", ata_op_string(&ata_cmd), ccb->csio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s via pass_16", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if (!(ata_pass_16->flags & AP_FLAG_CHK_COND) && (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s via pass_16 failed", ata_op_string(&ata_cmd)); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_cam_send(struct cam_device *device, union ccb *ccb, int quiet) { if (arglist & CAM_ARG_VERBOSE) { warnx("sending ATA %s with timeout of %u msecs", ata_op_string(&(ccb->ataio.cmd)), ccb->ataio.ccb_h.timeout); } /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warn("error sending ATA %s", 
ata_op_string(&(ccb->ataio.cmd))); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (quiet != 1 || arglist & CAM_ARG_VERBOSE) { warnx("ATA %s failed", ata_op_string(&(ccb->ataio.cmd))); } if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } return (1); } return (0); } static int ata_do_pass_16(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { if (data_ptr != NULL) { ata_flags |= AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT; if (flags & CAM_DIR_OUT) ata_flags |= AP_FLAG_TDIR_TO_DEV; else ata_flags |= AP_FLAG_TDIR_FROM_DEV; } else { ata_flags |= AP_FLAG_TLEN_NO_DATA; } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_ata_pass_16(&ccb->csio, retries, NULL, flags, tag_action, protocol, ata_flags, features, sector_count, lba, command, /*control*/0, data_ptr, dxfer_len, /*sense_len*/SSD_FULL_SIZE, timeout); return scsi_cam_pass_16_send(device, ccb, quiet); } static int ata_try_pass_16(struct cam_device *device) { struct ccb_pathinq cpi; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } if (cpi.protocol == PROTO_SCSI) { /* possibly compatible with pass_16 */ return (1); } /* likely not compatible with pass_16 */ return (0); } static int ata_do_28bit_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int32_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int quiet) { switch (ata_try_pass_16(device)) { case -1: return (1); case 1: /* Try using SCSI Passthrough */ return ata_do_pass_16(device, ccb, retries, flags, protocol, 0, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, quiet); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - sizeof(struct ccb_hdr)); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); return ata_cam_send(device, ccb, quiet); } static int ata_do_cmd(struct cam_device *device, union ccb *ccb, int retries, u_int32_t flags, u_int8_t protocol, u_int8_t ata_flags, u_int8_t tag_action, u_int8_t command, u_int8_t features, u_int64_t lba, u_int8_t sector_count, u_int8_t *data_ptr, u_int16_t dxfer_len, int timeout, int force48bit) { int retval; retval = ata_try_pass_16(device); if (retval == -1) return (1); if (retval == 1) { int error; /* Try using SCSI Passthrough */ error = ata_do_pass_16(device, ccb, retries, flags, protocol, ata_flags, tag_action, command, features, lba, sector_count, data_ptr, dxfer_len, timeout, 0); if (ata_flags & AP_FLAG_CHK_COND) { /* Decode ata_res from sense data */ struct ata_res_pass16 *res_pass16; struct ata_res *res; u_int i; u_int16_t *ptr; /* sense_data is 4 byte aligned */ ptr = (uint16_t*)(uintptr_t)&ccb->csio.sense_data; for (i = 0; i < sizeof(*res_pass16) / 2; i++) ptr[i] = le16toh(ptr[i]); /* sense_data is 4 byte aligned */ res_pass16 = (struct ata_res_pass16 *)(uintptr_t) &ccb->csio.sense_data; res = &ccb->ataio.res; res->flags = res_pass16->flags; res->status = res_pass16->status; res->error =
res_pass16->error; res->lba_low = res_pass16->lba_low; res->lba_mid = res_pass16->lba_mid; res->lba_high = res_pass16->lba_high; res->device = res_pass16->device; res->lba_low_exp = res_pass16->lba_low_exp; res->lba_mid_exp = res_pass16->lba_mid_exp; res->lba_high_exp = res_pass16->lba_high_exp; res->sector_count = res_pass16->sector_count; res->sector_count_exp = res_pass16->sector_count_exp; } return (error); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_ataio) - sizeof(struct ccb_hdr)); cam_fill_ataio(&ccb->ataio, retries, NULL, flags, tag_action, data_ptr, dxfer_len, timeout); if (force48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); if (ata_flags & AP_FLAG_CHK_COND) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; return ata_cam_send(device, ccb, 0); } static void dump_data(uint16_t *ptr, uint32_t len) { u_int i; for (i = 0; i < len / 2; i++) { if ((i % 8) == 0) printf(" %3d: ", i); printf("%04hx ", ptr[i]); if ((i % 8) == 7) printf("\n"); } if ((i % 8) != 7) printf("\n"); } static int atahpa_proc_resp(struct cam_device *device, union ccb *ccb, int is48bit, u_int64_t *hpasize) { struct ata_res *res; res = &ccb->ataio.res; if (res->status & ATA_STATUS_ERROR) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); printf("error = 0x%02x, sector_count = 0x%04x, " "device = 0x%02x, status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (res->error & ATA_ERROR_ID_NOT_FOUND) { warnx("Max address has already been set since " "last power-on or hardware reset"); } return (1); } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw native max data:\n", device->device_name, device->dev_unit_num); /* res is 4 byte aligned */ dump_data((uint16_t*)(uintptr_t)res, sizeof(struct ata_res)); printf("error = 0x%02x, sector_count = 0x%04x, device = 0x%02x, " "status = 0x%02x\n", res->error, res->sector_count, res->device, res->status); } if (hpasize != NULL) { if (is48bit) { *hpasize = (((u_int64_t)((res->lba_high_exp << 16) | (res->lba_mid_exp << 8) | res->lba_low_exp) << 24) | ((res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low)) + 1; } else { *hpasize = (((res->device & 0x0f) << 24) | (res->lba_high << 16) | (res->lba_mid << 8) | res->lba_low) + 1; } } return (0); } static int ata_read_native_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, struct ata_params *parm, u_int64_t *hpasize) { int error; u_int cmd, is48bit; u_int8_t protocol; is48bit = parm->support.command2 & ATA_SUPPORT_ADDRESS48; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_READ_NATIVE_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_READ_NATIVE_MAX_ADDRESS; } error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, hpasize); } static int atahpa_set_max(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, u_int64_t maxsize, int persist) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; if (is48bit) { cmd = ATA_SET_MAX_ADDRESS48; protocol |= AP_EXTEND; } else { cmd = ATA_SET_MAX_ADDRESS; } /* lba's are zero indexed so the max lba is requested max - 1 */ if (maxsize) maxsize--; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_MAX_ADDR, /*lba*/maxsize, /*sector_count*/persist, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_password(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_SET_PWD, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_LOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_unlock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit, struct ata_set_max_pwd *pwd) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_PIO_OUT; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_UNLOCK, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t*)pwd, /*dxfer_len*/sizeof(struct ata_set_max_pwd), timeout ? timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } static int atahpa_freeze_lock(struct cam_device *device, int retry_count, u_int32_t timeout, union ccb *ccb, int is48bit) { int error; u_int cmd; u_int8_t protocol; protocol = AP_PROTO_NON_DATA; cmd = (is48bit) ? ATA_SET_MAX_ADDRESS48 : ATA_SET_MAX_ADDRESS; error = ata_do_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/protocol, /*ata_flags*/AP_FLAG_CHK_COND, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/ATA_HPA_FEAT_FREEZE, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, timeout ? 
timeout : 1000, is48bit); if (error) return (error); return atahpa_proc_resp(device, ccb, is48bit, NULL); } int ata_do_identify(struct cam_device *device, int retry_count, int timeout, union ccb *ccb, struct ata_params** ident_bufp) { struct ata_params *ident_buf; struct ccb_pathinq cpi; struct ccb_getdev cgd; u_int i, error; uint16_t *ptr; u_int8_t command, retry_command; if (get_cpi(device, &cpi) != 0) { warnx("couldn't get CPI"); return (-1); } /* Neither PROTO_ATAPI nor PROTO_SATAPM is used in cpi.protocol */ if (cpi.protocol == PROTO_ATA) { if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (-1); } command = (cgd.protocol == PROTO_ATA) ? ATA_ATA_IDENTIFY : ATA_ATAPI_IDENTIFY; retry_command = 0; } else { /* We don't know which for sure so try both */ command = ATA_ATA_IDENTIFY; retry_command = ATA_ATAPI_IDENTIFY; } ptr = (uint16_t *)calloc(1, sizeof(struct ata_params)); if (ptr == NULL) { warnx("can't calloc memory for identify"); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t)sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); if (error != 0) { if (retry_command == 0) { free(ptr); return (1); } error = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_IN, /*protocol*/AP_PROTO_PIO_IN, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/retry_command, /*features*/0, /*lba*/0, /*sector_count*/(u_int8_t) sizeof(struct ata_params), /*data_ptr*/(u_int8_t *)ptr, /*dxfer_len*/sizeof(struct ata_params), /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/0); if (error != 0) { free(ptr); return (1); } } error = 1; for (i = 0; i < sizeof(struct ata_params) / 2; i++) { ptr[i] = le16toh(ptr[i]); if (ptr[i] != 0) error = 0; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s%d: Raw identify data:\n", device->device_name, device->dev_unit_num); dump_data(ptr, sizeof(struct ata_params)); } /* check for invalid (all zero) response */ if (error != 0) { warnx("Invalid identify response detected"); free(ptr); return (error); } ident_buf = (struct ata_params *)ptr; if (strncmp(ident_buf->model, "FX", 2) && strncmp(ident_buf->model, "NEC", 3) && strncmp(ident_buf->model, "Pioneer", 7) && strncmp(ident_buf->model, "SHARP", 5)) { ata_bswap(ident_buf->model, sizeof(ident_buf->model)); ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); ata_bswap(ident_buf->media_serial, sizeof(ident_buf->media_serial)); } ata_btrim(ident_buf->model, sizeof(ident_buf->model)); ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); ata_btrim(ident_buf->media_serial, sizeof(ident_buf->media_serial)); ata_bpack(ident_buf->media_serial, ident_buf->media_serial, sizeof(ident_buf->media_serial)); *ident_bufp = ident_buf; return (0); } static int ataidentify(struct cam_device *device, int retry_count, int timeout) { union ccb *ccb; struct ata_params *ident_buf; u_int64_t hpasize; if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } if
(ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); return (1); } if (ident_buf->support.command1 & ATA_SUPPORT_PROTECTED) { if (ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize) != 0) { cam_freeccb(ccb); return (1); } } else { hpasize = 0; } printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); atacapprint(ident_buf); atahpa_print(ident_buf, hpasize, 0); free(ident_buf); cam_freeccb(ccb); return (0); } #endif /* MINIMALISTIC */ #ifndef MINIMALISTIC enum { ATA_SECURITY_ACTION_PRINT, ATA_SECURITY_ACTION_FREEZE, ATA_SECURITY_ACTION_UNLOCK, ATA_SECURITY_ACTION_DISABLE, ATA_SECURITY_ACTION_ERASE, ATA_SECURITY_ACTION_ERASE_ENHANCED, ATA_SECURITY_ACTION_SET_PASSWORD }; static void atasecurity_print_time(u_int16_t tw) { if (tw == 0) printf("unspecified"); else if (tw >= 255) printf("> 508 min"); else printf("%i min", 2 * tw); } static u_int32_t atasecurity_erase_timeout_msecs(u_int16_t timeout) { if (timeout == 0) return 2 * 3600 * 1000; /* default: two hours */ else if (timeout > 255) return (508 + 60) * 60 * 1000; /* spec says > 508 minutes */ return ((2 * timeout) + 5) * 60 * 1000; /* add a 5min margin */ } static void atasecurity_notify(u_int8_t command, struct ata_security_password *pwd) { struct ata_cmd cmd; bzero(&cmd, sizeof(cmd)); cmd.command = command; printf("Issuing %s", ata_op_string(&cmd)); if (pwd != NULL) { char pass[sizeof(pwd->password)+1]; /* pwd->password may not be null terminated */ pass[sizeof(pwd->password)] = '\0'; strncpy(pass, pwd->password, sizeof(pwd->password)); printf(" password='%s', user='%s'", pass, (pwd->ctrl & ATA_SECURITY_PASSWORD_MASTER) ? "master" : "user"); if (command == ATA_SECURITY_SET_PASSWORD) { printf(", mode='%s'", (pwd->ctrl & ATA_SECURITY_LEVEL_MAXIMUM) ? 
"maximum" : "high"); } } printf("\n"); } static int atasecurity_freeze(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_FREEZE_LOCK, NULL); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_FREEZE_LOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); } static int atasecurity_unlock(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_UNLOCK, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_UNLOCK, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_disable(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_DISABLE_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_DISABLE_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static int atasecurity_erase_confirm(struct cam_device *device, struct ata_params* ident_buf) { printf("\nYou are about to ERASE ALL DATA from the following" " device:\n%s%d,%s%d: ", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to ERASE ALL DATA? 
(yes/no) "); if (fgets(str, sizeof(str), stdin) != NULL) { if (strncasecmp(str, "yes", 3) == 0) { return (1); } else if (strncasecmp(str, "no", 2) == 0) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atasecurity_erase(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, u_int32_t erase_timeout, struct ata_security_password *pwd, int quiet) { int error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_PREPARE, NULL); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_PREPARE, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout, /*quiet*/0); if (error != 0) return error; if (quiet == 0) atasecurity_notify(ATA_SECURITY_ERASE_UNIT, pwd); error = ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_ERASE_UNIT, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/erase_timeout, /*quiet*/0); if (error == 0 && quiet == 0) printf("\nErase Complete\n"); return error; } static int atasecurity_set_password(struct cam_device *device, union ccb *ccb, int retry_count, u_int32_t timeout, struct ata_security_password *pwd, int quiet) { if (quiet == 0) atasecurity_notify(ATA_SECURITY_SET_PASSWORD, pwd); return ata_do_28bit_cmd(device, ccb, retry_count, /*flags*/CAM_DIR_OUT, /*protocol*/AP_PROTO_PIO_OUT, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SECURITY_SET_PASSWORD, /*features*/0, /*lba*/0, /*sector_count*/0, /*data_ptr*/(u_int8_t *)pwd, /*dxfer_len*/sizeof(*pwd), /*timeout*/timeout, /*quiet*/0); } static void atasecurity_print(struct ata_params *parm) { printf("\nSecurity Option Value\n"); if (arglist & CAM_ARG_VERBOSE) { printf("status %04x\n", parm->security_status); } printf("supported %s\n", parm->security_status & ATA_SECURITY_SUPPORTED ? "yes" : "no"); if (!(parm->security_status & ATA_SECURITY_SUPPORTED)) return; printf("enabled %s\n", parm->security_status & ATA_SECURITY_ENABLED ? "yes" : "no"); printf("drive locked %s\n", parm->security_status & ATA_SECURITY_LOCKED ? "yes" : "no"); printf("security config frozen %s\n", parm->security_status & ATA_SECURITY_FROZEN ? "yes" : "no"); printf("count expired %s\n", parm->security_status & ATA_SECURITY_COUNT_EXP ? "yes" : "no"); printf("security level %s\n", parm->security_status & ATA_SECURITY_LEVEL ? "maximum" : "high"); printf("enhanced erase supported %s\n", parm->security_status & ATA_SECURITY_ENH_SUPP ? "yes" : "no"); printf("erase time "); atasecurity_print_time(parm->erase_time); printf("\n"); printf("enhanced erase time "); atasecurity_print_time(parm->enhanced_erase_time); printf("\n"); printf("master password rev %04x%s\n", parm->master_passwd_revision, parm->master_passwd_revision == 0x0000 || parm->master_passwd_revision == 0xFFFF ? " (unsupported)" : ""); } /* * Validates and copies the password in optarg to the passed buffer. * If the password in optarg is the same length as the buffer then * the data will still be copied but no null termination will occur. 
*/ static int ata_getpwd(u_int8_t *passwd, int max, char opt) { int len; len = strlen(optarg); if (len > max) { warnx("-%c password is too long", opt); return (1); } else if (len == 0) { warnx("-%c password is missing", opt); return (1); } else if (optarg[0] == '-'){ warnx("-%c password starts with '-' (generic arg?)", opt); return (1); } else if (strlen(passwd) != 0 && strcmp(passwd, optarg) != 0) { warnx("-%c password conflicts with existing password from -%c", opt, pwd_opt); return (1); } /* Callers pass in a buffer which does NOT need to be terminated */ strncpy(passwd, optarg, max); pwd_opt = opt; return (0); } enum { ATA_HPA_ACTION_PRINT, ATA_HPA_ACTION_SET_MAX, ATA_HPA_ACTION_SET_PWD, ATA_HPA_ACTION_LOCK, ATA_HPA_ACTION_UNLOCK, ATA_HPA_ACTION_FREEZE_LOCK }; static int atahpa_set_confirm(struct cam_device *device, struct ata_params* ident_buf, u_int64_t maxsize, int persist) { printf("\nYou are about to configure HPA to limit the user accessible\n" "sectors to %ju %s on the device:\n%s%d,%s%d: ", maxsize, persist ? "persistently" : "temporarily", device->device_name, device->dev_unit_num, device->given_dev_name, device->given_unit_number); ata_print_ident(ident_buf); for(;;) { char str[50]; printf("\nAre you SURE you want to configure HPA? (yes/no) "); if (NULL != fgets(str, sizeof(str), stdin)) { if (0 == strncasecmp(str, "yes", 3)) { return (1); } else if (0 == strncasecmp(str, "no", 2)) { return (0); } else { printf("Please answer \"yes\" or " "\"no\"\n"); } } } /* NOTREACHED */ return (0); } static int atahpa(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; struct ccb_getdev cgd; struct ata_set_max_pwd pwd; int error, confirm, quiet, c, action, actions, persist; int security, is48bit, pwdsize; u_int64_t hpasize, maxsize; actions = 0; confirm = 0; quiet = 0; maxsize = 0; persist = 0; security = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print hpa information */ action = ATA_HPA_ACTION_PRINT; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 's': action = ATA_HPA_ACTION_SET_MAX; maxsize = strtoumax(optarg, NULL, 0); actions++; break; case 'p': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_SET_PWD; security = 1; actions++; break; case 'l': action = ATA_HPA_ACTION_LOCK; security = 1; actions++; break; case 'U': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_HPA_ACTION_UNLOCK; security = 1; actions++; break; case 'f': action = ATA_HPA_ACTION_FREEZE_LOCK; security = 1; actions++; break; case 'P': persist = 1; break; case 'y': confirm++; break; case 'q': quiet++; break; } } if (actions > 1) { warnx("too many hpa actions specified"); return (1); } if (get_cgd(device, &cgd) != 0) { warnx("couldn't get CGD"); return (1); } ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_HPA_ACTION_PRINT) { error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) atahpa_print(ident_buf, hpasize, 1); cam_freeccb(ccb); free(ident_buf); return (error); } if (!(ident_buf->support.command1 & ATA_SUPPORT_PROTECTED)) { warnx("HPA is not 
supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } if (security && !(ident_buf->support.command1 & ATA_SUPPORT_MAXSECURITY)) { warnx("HPA Security is not supported by this device"); cam_freeccb(ccb); free(ident_buf); return (1); } is48bit = ident_buf->support.command2 & ATA_SUPPORT_ADDRESS48; /* * The ATA spec requires: * 1. Read native max addr is called directly before set max addr * 2. Read native max addr is NOT called before any other set max call */ switch(action) { case ATA_HPA_ACTION_SET_MAX: if (confirm == 0 && atahpa_set_confirm(device, ident_buf, maxsize, persist) == 0) { cam_freeccb(ccb); free(ident_buf); return (1); } error = ata_read_native_max(device, retry_count, timeout, ccb, ident_buf, &hpasize); if (error == 0) { error = atahpa_set_max(device, retry_count, timeout, ccb, is48bit, maxsize, persist); if (error == 0) { /* redo identify to get new lba values */ error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); atahpa_print(ident_buf, hpasize, 1); } } break; case ATA_HPA_ACTION_SET_PWD: error = atahpa_password(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA password has been set\n"); break; case ATA_HPA_ACTION_LOCK: error = atahpa_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been locked\n"); break; case ATA_HPA_ACTION_UNLOCK: error = atahpa_unlock(device, retry_count, timeout, ccb, is48bit, &pwd); if (error == 0) printf("HPA has been unlocked\n"); break; case ATA_HPA_ACTION_FREEZE_LOCK: error = atahpa_freeze_lock(device, retry_count, timeout, ccb, is48bit); if (error == 0) printf("HPA has been frozen\n"); break; default: errx(1, "Option currently not supported"); } cam_freeccb(ccb); free(ident_buf); return (error); } static int atasecurity(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { union ccb *ccb; struct ata_params *ident_buf; int error, confirm, quiet, c, action, actions, setpwd; int security_enabled, erase_timeout, pwdsize; struct ata_security_password pwd; actions = 0; setpwd = 0; erase_timeout = 0; confirm = 0; quiet = 0; memset(&pwd, 0, sizeof(pwd)); /* default action is to print security information */ action = ATA_SECURITY_ACTION_PRINT; /* user is master by default as its safer that way */ pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwdsize = sizeof(pwd.password); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': action = ATA_SECURITY_ACTION_FREEZE; actions++; break; case 'U': if (strcasecmp(optarg, "user") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_USER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_MASTER; } else if (strcasecmp(optarg, "master") == 0) { pwd.ctrl |= ATA_SECURITY_PASSWORD_MASTER; pwd.ctrl &= ~ATA_SECURITY_PASSWORD_USER; } else { warnx("-U argument '%s' is invalid (must be " "'user' or 'master')", optarg); return (1); } break; case 'l': if (strcasecmp(optarg, "high") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_HIGH; pwd.ctrl &= ~ATA_SECURITY_LEVEL_MAXIMUM; } else if (strcasecmp(optarg, "maximum") == 0) { pwd.ctrl |= ATA_SECURITY_LEVEL_MAXIMUM; pwd.ctrl &= ~ATA_SECURITY_LEVEL_HIGH; } else { warnx("-l argument '%s' is unknown (must be " "'high' or 'maximum')", optarg); return (1); } break; case 'k': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_UNLOCK; actions++; break; case 'd': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_DISABLE; actions++; break; case 'e': if (ata_getpwd(pwd.password, 
pwdsize, c) != 0) return (1); action = ATA_SECURITY_ACTION_ERASE; actions++; break; case 'h': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); pwd.ctrl |= ATA_SECURITY_ERASE_ENHANCED; action = ATA_SECURITY_ACTION_ERASE_ENHANCED; actions++; break; case 's': if (ata_getpwd(pwd.password, pwdsize, c) != 0) return (1); setpwd = 1; if (action == ATA_SECURITY_ACTION_PRINT) action = ATA_SECURITY_ACTION_SET_PASSWORD; /* * Don't increment action as this can be combined * with other actions. */ break; case 'y': confirm++; break; case 'q': quiet++; break; case 'T': erase_timeout = atoi(optarg) * 1000; break; } } if (actions > 1) { warnx("too many security actions specified"); return (1); } if ((ccb = cam_getccb(device)) == NULL) { warnx("couldn't allocate CCB"); return (1); } error = ata_do_identify(device, retry_count, timeout, ccb, &ident_buf); if (error != 0) { cam_freeccb(ccb); return (1); } if (quiet == 0) { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); } if (action == ATA_SECURITY_ACTION_PRINT) { atasecurity_print(ident_buf); free(ident_buf); cam_freeccb(ccb); return (0); } if ((ident_buf->support.command1 & ATA_SUPPORT_SECURITY) == 0) { warnx("Security not supported"); free(ident_buf); cam_freeccb(ccb); return (1); } /* default timeout is 15 seconds, the same as Linux hdparm */ timeout = timeout ? timeout : 15 * 1000; security_enabled = ident_buf->security_status & ATA_SECURITY_ENABLED; /* first set the password if requested */ if (setpwd == 1) { /* if erasing, confirm the erase before setting the password */ if (confirm == 0 && (action == ATA_SECURITY_ACTION_ERASE_ENHANCED || action == ATA_SECURITY_ACTION_ERASE) && atasecurity_erase_confirm(device, ident_buf) == 0) { cam_freeccb(ccb); free(ident_buf); return (error); } if (pwd.ctrl & ATA_SECURITY_PASSWORD_MASTER) { pwd.revision = ident_buf->master_passwd_revision; if (pwd.revision != 0 && pwd.revision != 0xffff && --pwd.revision == 0) { pwd.revision = 0xfffe; } } error = atasecurity_set_password(device, ccb, retry_count, timeout, &pwd, quiet); if (error != 0) { cam_freeccb(ccb); free(ident_buf); return (error); } security_enabled = 1; } switch(action) { case ATA_SECURITY_ACTION_FREEZE: error = atasecurity_freeze(device, ccb, retry_count, timeout, quiet); break; case ATA_SECURITY_ACTION_UNLOCK: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } else { warnx("Can't unlock, drive is not locked"); error = 1; } } else { warnx("Can't unlock, security is disabled"); error = 1; } break; case ATA_SECURITY_ACTION_DISABLE: if (security_enabled) { /* First unlock the drive if it's locked */ if (ident_buf->security_status & ATA_SECURITY_LOCKED) { error = atasecurity_unlock(device, ccb, retry_count, timeout, &pwd, quiet); } if (error == 0) { error = atasecurity_disable(device, ccb, retry_count, timeout, &pwd, quiet); } } else { warnx("Can't disable security (already disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE: if (security_enabled) { if (erase_timeout == 0) { erase_timeout = atasecurity_erase_timeout_msecs( ident_buf->erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Can't secure erase (security is disabled)"); error = 1; } break; case ATA_SECURITY_ACTION_ERASE_ENHANCED: if (security_enabled) { if (ident_buf->security_status & ATA_SECURITY_ENH_SUPP) { if (erase_timeout == 0) { erase_timeout =
atasecurity_erase_timeout_msecs( ident_buf->enhanced_erase_time); } error = atasecurity_erase(device, ccb, retry_count, timeout, erase_timeout, &pwd, quiet); } else { warnx("Enhanced erase is not supported"); error = 1; } } else { warnx("Can't secure erase (enhanced), " "(security is disabled)"); error = 1; } break; } cam_freeccb(ccb); free(ident_buf); return (error); } #endif /* MINIMALISTIC */ /* * Parse out a bus, or a bus, target and lun in the following * format: * bus * bus:target * bus:target:lun * * Returns the number of parsed components, or 0. */ static int parse_btl(char *tstr, path_id_t *bus, target_id_t *target, lun_id_t *lun, cam_argmask *arglst) { char *tmpstr; int convs = 0; while (isspace(*tstr) && (*tstr != '\0')) tstr++; tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *bus = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_BUS; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *target = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_TARGET; convs++; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')) { *lun = strtol(tmpstr, NULL, 0); *arglst |= CAM_ARG_LUN; convs++; } } } return convs; } static int dorescan_or_reset(int argc, char **argv, int rescan) { static const char must[] = "you must specify \"all\", a bus, or a bus:target:lun to %s"; int rv, error = 0; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr; if (argc < 3) { warnx(must, rescan? "rescan" : "reset"); return(1); } tstr = argv[optind]; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncasecmp(tstr, "all", strlen("all")) == 0) arglist |= CAM_ARG_BUS; else { rv = parse_btl(argv[optind], &bus, &target, &lun, &arglist); if (rv != 1 && rv != 3) { warnx(must, rescan? "rescan" : "reset"); return(1); } } if ((arglist & CAM_ARG_BUS) && (arglist & CAM_ARG_TARGET) && (arglist & CAM_ARG_LUN)) error = scanlun_or_reset_dev(bus, target, lun, rescan); else error = rescan_or_reset_bus(bus, rescan); return(error); } static int rescan_or_reset_bus(path_id_t bus, int rescan) { union ccb ccb, matchccb; int fd, retval; int bufsize; retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } if (bus != CAM_BUS_WILDCARD) { ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { fprintf(stdout, "%s of bus %d was successful\n", rescan ? "Re-scan" : "Reset", bus); } else { fprintf(stdout, "%s of bus %d returned error %#x\n", rescan ? "Re-scan" : "Reset", bus, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } close(fd); return(retval); } /* * The right way to handle this is to modify the xpt so that it can * handle a wildcarded bus in a rescan or reset CCB. At the moment * that isn't implemented, so instead we enumerate the busses and * send the rescan or reset to those busses in the case where the * given bus is -1 (wildcard). We don't send a rescan or reset * to the xpt bus; sending a rescan to the xpt bus is effectively a * no-op, sending a reset to the xpt bus would result in a status of * CAM_REQ_INVALID.
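* To that end, the code below issues an XPT_DEV_MATCH CCB with a single * BUS_MATCH_ANY pattern to enumerate every bus path ID, then sends one * XPT_SCAN_BUS or XPT_RESET_BUS CCB per discovered bus, skipping * CAM_XPT_PATH_ID.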
*/ bzero(&(&matchccb.ccb_h)[1], sizeof(struct ccb_dev_match) - sizeof(struct ccb_hdr)); matchccb.ccb_h.func_code = XPT_DEV_MATCH; matchccb.ccb_h.path_id = CAM_BUS_WILDCARD; bufsize = sizeof(struct dev_match_result) * 20; matchccb.cdm.match_buf_len = bufsize; matchccb.cdm.matches=(struct dev_match_result *)malloc(bufsize); if (matchccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); retval = 1; goto bailout; } matchccb.cdm.num_matches = 0; matchccb.cdm.num_patterns = 1; matchccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern); matchccb.cdm.patterns = (struct dev_match_pattern *)malloc( matchccb.cdm.pattern_buf_len); if (matchccb.cdm.patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } matchccb.cdm.patterns[0].type = DEV_MATCH_BUS; matchccb.cdm.patterns[0].pattern.bus_pattern.flags = BUS_MATCH_ANY; do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &matchccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((matchccb.ccb_h.status != CAM_REQ_CMP) || ((matchccb.cdm.status != CAM_DEV_MATCH_LAST) && (matchccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", matchccb.ccb_h.status, matchccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < matchccb.cdm.num_matches; i++) { struct bus_match_result *bus_result; /* This shouldn't happen. */ if (matchccb.cdm.matches[i].type != DEV_MATCH_BUS) continue; bus_result = &matchccb.cdm.matches[i].result.bus_result; /* * We don't want to rescan or reset the xpt bus. * See above. */ if (bus_result->path_id == CAM_XPT_PATH_ID) continue; ccb.ccb_h.func_code = rescan ? XPT_SCAN_BUS : XPT_RESET_BUS; ccb.ccb_h.path_id = bus_result->path_id; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); retval = 1; goto bailout; } if ((ccb.ccb_h.status & CAM_STATUS_MASK) ==CAM_REQ_CMP){ fprintf(stdout, "%s of bus %d was successful\n", rescan? "Re-scan" : "Reset", bus_result->path_id); } else { /* * Don't bail out just yet, maybe the other * rescan or reset commands will complete * successfully. */ fprintf(stderr, "%s of bus %d returned error " "%#x\n", rescan? "Re-scan" : "Reset", bus_result->path_id, ccb.ccb_h.status & CAM_STATUS_MASK); retval = 1; } } } while ((matchccb.ccb_h.status == CAM_REQ_CMP) && (matchccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); if (matchccb.cdm.patterns != NULL) free(matchccb.cdm.patterns); if (matchccb.cdm.matches != NULL) free(matchccb.cdm.matches); return(retval); } static int scanlun_or_reset_dev(path_id_t bus, target_id_t target, lun_id_t lun, int scan) { union ccb ccb; struct cam_device *device; int fd; device = NULL; if (bus == CAM_BUS_WILDCARD) { warnx("invalid bus number %d", bus); return(1); } if (target == CAM_TARGET_WILDCARD) { warnx("invalid target number %d", target); return(1); } if (lun == CAM_LUN_WILDCARD) { warnx("invalid lun number %jx", (uintmax_t)lun); return(1); } fd = -1; bzero(&ccb, sizeof(union ccb)); if (scan) { if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s\n", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } } else { device = cam_open_btl(bus, target, lun, O_RDWR, NULL); if (device == NULL) { warnx("%s", cam_errbuf); return(1); } } ccb.ccb_h.func_code = (scan)? 
XPT_SCAN_LUN : XPT_RESET_DEV; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; ccb.ccb_h.timeout = 5000; ccb.crcn.flags = CAM_FLAG_NONE; /* run this at a low priority */ ccb.ccb_h.pinfo.priority = 5; if (scan) { if (ioctl(fd, CAMIOCOMMAND, &ccb) < 0) { warn("CAMIOCOMMAND ioctl failed"); close(fd); return(1); } } else { if (cam_send_ccb(device, &ccb) < 0) { warn("error sending XPT_RESET_DEV CCB"); cam_close_device(device); return(1); } } if (scan) close(fd); else cam_close_device(device); /* * An error code of CAM_BDR_SENT is normal for a BDR request. */ if (((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) || ((!scan) && ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_BDR_SENT))) { fprintf(stdout, "%s of %d:%d:%jx was successful\n", scan? "Re-scan" : "Reset", bus, target, (uintmax_t)lun); return(0); } else { fprintf(stdout, "%s of %d:%d:%jx returned error %#x\n", scan? "Re-scan" : "Reset", bus, target, (uintmax_t)lun, ccb.ccb_h.status & CAM_STATUS_MASK); return(1); } } #ifndef MINIMALISTIC static struct scsi_nv defect_list_type_map[] = { { "block", SRDD10_BLOCK_FORMAT }, { "extbfi", SRDD10_EXT_BFI_FORMAT }, { "extphys", SRDD10_EXT_PHYS_FORMAT }, { "longblock", SRDD10_LONG_BLOCK_FORMAT }, { "bfi", SRDD10_BYTES_FROM_INDEX_FORMAT }, { "phys", SRDD10_PHYSICAL_SECTOR_FORMAT } }; static int readdefects(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb = NULL; struct scsi_read_defect_data_hdr_10 *hdr10 = NULL; struct scsi_read_defect_data_hdr_12 *hdr12 = NULL; size_t hdr_size = 0, entry_size = 0; int use_12byte = 0; int hex_format = 0; u_int8_t *defect_list = NULL; u_int8_t list_format = 0; int list_type_set = 0; u_int32_t dlist_length = 0; u_int32_t returned_length = 0, valid_len = 0; u_int32_t num_returned = 0, num_valid = 0; u_int32_t max_possible_size = 0, hdr_max = 0; u_int32_t starting_offset = 0; u_int8_t returned_format, returned_type; unsigned int i; int summary = 0, quiet = 0; int c, error = 0; int lists_specified = 0; int get_length = 1, first_pass = 1; int mads = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c){ case 'f': { scsi_nv_status status; int entry_num = 0; status = scsi_get_nv(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); if (status == SCSI_NV_FOUND) { list_format = defect_list_type_map[ entry_num].value; list_type_set = 1; } else { warnx("%s: %s %s option %s", __func__, (status == SCSI_NV_AMBIGUOUS) ? "ambiguous" : "invalid", "defect list type", optarg); error = 1; goto defect_bailout; } break; } case 'G': arglist |= CAM_ARG_GLIST; break; case 'P': arglist |= CAM_ARG_PLIST; break; case 'q': quiet = 1; break; case 's': summary = 1; break; case 'S': { char *endptr; starting_offset = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { error = 1; warnx("invalid starting offset %s", optarg); goto defect_bailout; } break; } case 'X': hex_format = 1; break; default: break; } } if (list_type_set == 0) { error = 1; warnx("no defect list format specified"); goto defect_bailout; } if (arglist & CAM_ARG_PLIST) { list_format |= SRDD10_PLIST; lists_specified++; } if (arglist & CAM_ARG_GLIST) { list_format |= SRDD10_GLIST; lists_specified++; } /* * This implies a summary, and was the previous behavior. */ if (lists_specified == 0) summary = 1; ccb = cam_getccb(device); retry_12byte: /* * We start off asking for just the header to determine how much * defect data is available. 
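* (The header alone carries the total length: per SBC, the 10 byte * command returns a 4 byte header with a 2 byte length field, while the * 12 byte command returns an 8 byte header with a 4 byte length field.)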
Some Hitachi drives return an error * if you ask for more data than the drive has. Once we know the * length, we retry the command with the returned length. */ if (use_12byte == 0) dlist_length = sizeof(*hdr10); else dlist_length = sizeof(*hdr12); retry: if (defect_list != NULL) { free(defect_list); defect_list = NULL; } defect_list = malloc(dlist_length); if (defect_list == NULL) { warnx("can't malloc memory for defect list"); error = 1; goto defect_bailout; } next_batch: bzero(defect_list, dlist_length); /* * cam_getccb() zeros the CCB header only. So we need to zero the * payload portion of the ccb. */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_read_defects(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*list_format*/ list_format, /*addr_desc_index*/ starting_offset, /*data_ptr*/ defect_list, /*dxfer_len*/ dlist_length, /*minimum_cmd_size*/ use_12byte ? 12 : 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (cam_send_ccb(device, ccb) < 0) { perror("error reading defect list"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto defect_bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (use_12byte == 0) { hdr10 = (struct scsi_read_defect_data_hdr_10 *)defect_list; hdr_size = sizeof(*hdr10); hdr_max = SRDDH10_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_2btoul(hdr10->length); returned_format = hdr10->format; } else { returned_length = 0; returned_format = 0; } } else { hdr12 = (struct scsi_read_defect_data_hdr_12 *)defect_list; hdr_size = sizeof(*hdr12); hdr_max = SRDDH12_MAX_LENGTH; if (valid_len >= hdr_size) { returned_length = scsi_4btoul(hdr12->length); returned_format = hdr12->format; } else { returned_length = 0; returned_format = 0; } } returned_type = returned_format & SRDDH10_DLIST_FORMAT_MASK; switch (returned_type) { case SRDD10_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_block); break; case SRDD10_LONG_BLOCK_FORMAT: entry_size = sizeof(struct scsi_defect_desc_long_block); break; case SRDD10_EXT_PHYS_FORMAT: case SRDD10_PHYSICAL_SECTOR_FORMAT: entry_size = sizeof(struct scsi_defect_desc_phys_sector); break; case SRDD10_EXT_BFI_FORMAT: case SRDD10_BYTES_FROM_INDEX_FORMAT: entry_size = sizeof(struct scsi_defect_desc_bytes_from_index); break; default: warnx("Unknown defect format 0x%x\n", returned_type); error = 1; goto defect_bailout; break; } max_possible_size = (hdr_max / entry_size) * entry_size; num_returned = returned_length / entry_size; num_valid = min(returned_length, valid_len - hdr_size); num_valid /= entry_size; if (get_length != 0) { get_length = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * If the drive is reporting that it just doesn't * support the defect list format, go ahead and use * the length it reported. Otherwise, the length * may not be valid, so use the maximum. 
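* The cases below key on the sense key and ASC/ASCQ pair: RECOVERED * ERROR with 0x1c/0x00 means the requested format is unsupported but the * reported length is still usable, while RECOVERED ERROR with 0x1f/0x00 * (partial defect list) and ILLEGAL REQUEST with 0x24/0x00 (invalid * field in CDB) both mean the list cannot fit the 10 byte command and * force a retry with the 12 byte variant.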
*/ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c) && (ascq == 0x00) && (returned_length > 0)) { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1f) && (ascq == 0x00) && (returned_length > 0)) { /* Partial defect list transfer */ /* * Hitachi drives return this error * along with a partial defect list if they * have more defects than the 10 byte * command can support. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else if ((sense_key == SSD_KEY_ILLEGAL_REQUEST) && (asc == 0x24) && (ascq == 0x00)) { /* Invalid field in CDB */ /* * SBC-3 says that if the drive has more * defects than can be reported with the * 10 byte command, it should return this * error and no data. Retry with the 12 * byte command. */ if (use_12byte == 0) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } else { /* * If we got a SCSI error and no valid length, * just use the 10 byte maximum. The 12 * byte maximum is too large. */ if (returned_length == 0) dlist_length = SRDD10_MAX_LENGTH; else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP){ error = 1; warnx("Error reading defect header"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } else { if ((use_12byte == 0) && (returned_length >= max_possible_size)) { get_length = 1; use_12byte = 1; goto retry_12byte; } dlist_length = returned_length + hdr_size; } if (summary != 0) { fprintf(stdout, "%u", num_returned); if (quiet == 0) { fprintf(stdout, " defect%s", (num_returned != 1) ? "s" : ""); } fprintf(stdout, "\n"); goto defect_bailout; } /* * We always limit the list length to the 10-byte maximum * length (0xffff). The reason is that some controllers * can't handle larger I/Os, and we can transfer the entire * 10 byte list in one shot. For drives that support the 12 * byte read defects command, we'll step through the list * by specifying a starting offset. For drives that don't * support the 12 byte command's starting offset, we'll * just display the first 64K. */ dlist_length = min(dlist_length, SRDD10_MAX_LENGTH); goto retry; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI spec, if the disk doesn't support * the requested format, it will generally return a sense * key of RECOVERED ERROR, and an additional sense code * of "DEFECT LIST NOT FOUND". HGST drives also return * Primary/Grown defect list not found errors. So just * check for an ASC of 0x1c. 
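* (ASC 0x1c covers ASCQ 0x00 "DEFECT LIST NOT FOUND" as well as the * 0x01/0x02 "PRIMARY/GROWN DEFECT LIST NOT FOUND" variants, which is * why only the ASC is compared here.)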
*/ if ((sense_key == SSD_KEY_RECOVERED_ERROR) && (asc == 0x1c)) { const char *format_str; format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), list_format & SRDD10_DLIST_FORMAT_MASK); warnx("requested defect format %s not available", format_str ? format_str : "unknown"); format_str = scsi_nv_to_str(defect_list_type_map, sizeof(defect_list_type_map) / sizeof(defect_list_type_map[0]), returned_type); if (format_str != NULL) { warnx("Device returned %s format", format_str); } else { error = 1; warnx("Device returned unknown defect" " data format %#x", returned_type); goto defect_bailout; } } else { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } } else if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = 1; warnx("Error returned from read defect data command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto defect_bailout; } if (first_pass != 0) { fprintf(stderr, "Got %d defect", num_returned); if ((lists_specified == 0) || (num_returned == 0)) { fprintf(stderr, "s.\n"); goto defect_bailout; } else if (num_returned == 1) fprintf(stderr, ":\n"); else fprintf(stderr, "s:\n"); first_pass = 0; } /* * XXX KDM I should probably clean up the printout format for the * disk defects. */ switch (returned_type) { case SRDD10_PHYSICAL_SECTOR_FORMAT: case SRDD10_EXT_PHYS_FORMAT: { struct scsi_defect_desc_phys_sector *dlist; dlist = (struct scsi_defect_desc_phys_sector *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t sector; sector = scsi_4btoul(dlist[i].sector); if (returned_type == SRDD10_EXT_PHYS_FORMAT) { mads = (sector & SDD_EXT_PHYS_MADS) ? 0 : 1; sector &= ~SDD_EXT_PHYS_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].sector), mads ? " - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_BYTES_FROM_INDEX_FORMAT: case SRDD10_EXT_BFI_FORMAT: { struct scsi_defect_desc_bytes_from_index *dlist; dlist = (struct scsi_defect_desc_bytes_from_index *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { uint32_t bfi; bfi = scsi_4btoul(dlist[i].bytes_from_index); if (returned_type == SRDD10_EXT_BFI_FORMAT) { mads = (bfi & SDD_EXT_BFI_MADS) ? 1 : 0; bfi &= ~SDD_EXT_BFI_FLAG_MASK; } if (hex_format == 0) fprintf(stdout, "%d:%d:%d%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? " - " : "\n"); else fprintf(stdout, "0x%x:0x%x:0x%x%s", scsi_3btoul(dlist[i].cylinder), dlist[i].head, scsi_4btoul(dlist[i].bytes_from_index), mads ? 
" - " : "\n"); mads = 0; } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDDH10_BLOCK_FORMAT: { struct scsi_defect_desc_block *dlist; dlist = (struct scsi_defect_desc_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%u\n", scsi_4btoul(dlist[i].address)); else fprintf(stdout, "0x%x\n", scsi_4btoul(dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } case SRDD10_LONG_BLOCK_FORMAT: { struct scsi_defect_desc_long_block *dlist; dlist = (struct scsi_defect_desc_long_block *) (defect_list + hdr_size); for (i = 0; i < num_valid; i++) { if (hex_format == 0) fprintf(stdout, "%ju\n", (uintmax_t)scsi_8btou64( dlist[i].address)); else fprintf(stdout, "0x%jx\n", (uintmax_t)scsi_8btou64( dlist[i].address)); } if (num_valid < num_returned) { starting_offset += num_valid; goto next_batch; } break; } default: fprintf(stderr, "Unknown defect format 0x%x\n", returned_type); error = 1; break; } defect_bailout: if (defect_list != NULL) free(defect_list); if (ccb != NULL) cam_freeccb(ccb); return(error); } #endif /* MINIMALISTIC */ #if 0 void reassignblocks(struct cam_device *device, u_int32_t *blocks, int num_blocks) { union ccb *ccb; ccb = cam_getccb(device); cam_freeccb(ccb); } #endif #ifndef MINIMALISTIC void mode_sense(struct cam_device *device, int mode_page, int page_control, int dbd, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_sense: couldn't allocate CCB"); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_mode_sense(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* dbd */ dbd, /* page_code */ page_control << 6, /* page */ mode_page, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode sense command"); else errx(1, "error sending mode sense command"); } cam_freeccb(ccb); } void mode_select(struct cam_device *device, int save_pages, int retry_count, int timeout, u_int8_t *data, int datalen) { union ccb *ccb; int retval; ccb = cam_getccb(device); if (ccb == NULL) errx(1, "mode_select: couldn't allocate CCB"); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); scsi_mode_select(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* scsi_page_fmt */ 1, /* save_pages */ save_pages, /* param_buf */ data, /* param_len */ datalen, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } cam_freeccb(ccb); cam_close_device(device); if (retval < 0) err(1, "error sending mode select command"); else errx(1, "error sending mode select command"); } cam_freeccb(ccb); } void modepage(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, mode_page = -1, page_control = 0; int binary = 0, list = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'b': binary = 1; break; case 'd': arglist |= CAM_ARG_DBD; break; case 'e': arglist |= CAM_ARG_MODE_EDIT; break; case 'l': list = 1; break; case 'm': mode_page = strtol(optarg, NULL, 0); if (mode_page < 0) errx(1, "invalid mode page %d", mode_page); break; case 'P': page_control = strtol(optarg, NULL, 0); if ((page_control < 0) || (page_control > 3)) errx(1, "invalid page control field %d", page_control); arglist |= CAM_ARG_PAGE_CNTL; break; default: break; } } if (mode_page == -1 && list == 0) errx(1, "you must specify a mode page!"); if (list) { mode_list(device, page_control, arglist & CAM_ARG_DBD, retry_count, timeout); } else { mode_edit(device, mode_page, page_control, arglist & CAM_ARG_DBD, arglist & CAM_ARG_MODE_EDIT, binary, retry_count, timeout); } } static int scsicmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int32_t flags = CAM_DIR_NONE; u_int8_t *data_ptr = NULL; u_int8_t cdb[20]; u_int8_t atacmd[12]; struct get_hook hook; int c, data_bytes = 0; int cdb_len = 0; int atacmd_len = 0; int dmacmd = 0; int fpdmacmd = 0; int need_res = 0; char *datastr = NULL, *tstr, *resstr = NULL; int error = 0; int fd_data = 0, fd_res = 0; int retval; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsicmd: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; atacmd_len = buff_encode_visit(atacmd, sizeof(atacmd), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. */ optind += hook.got; break; case 'c': tstr = optarg; while (isspace(*tstr) && (*tstr != '\0')) tstr++; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; cdb_len = buff_encode_visit(cdb, sizeof(cdb), tstr, iget, &hook); /* * Increment optind by the number of arguments the * encoding routine processed. After each call to * getopt(3), optind points to the argument that * getopt should process _next_. In this case, * that means it points to the first command string * argument, if there is one. Once we increment * this, it should point to either the next command * line argument, or it should be past the end of * the list. 
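* (For instance, each field in the format string that pulls its * value from the argument list consumes one command line argument; * hook.got reports how many were consumed, while a purely literal * hex CDB string consumes none.)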
*/ optind += hook.got; break; case 'd': dmacmd = 1; break; case 'f': fpdmacmd = 1; break; case 'i': if (arglist & CAM_ARG_CMD_OUT) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_IN; flags = CAM_DIR_IN; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of input bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } break; case 'o': if (arglist & CAM_ARG_CMD_IN) { warnx("command must either be " "read or write, not both"); error = 1; goto scsicmd_bailout; } arglist |= CAM_ARG_CMD_OUT; flags = CAM_DIR_OUT; data_bytes = strtol(optarg, NULL, 0); if (data_bytes <= 0) { warnx("invalid number of output bytes %d", data_bytes); error = 1; goto scsicmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); data_ptr = (u_int8_t *)malloc(data_bytes); if (data_ptr == NULL) { warnx("can't malloc memory for data_ptr"); error = 1; goto scsicmd_bailout; } bzero(data_ptr, data_bytes); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_data = 1; else buff_encode_visit(data_ptr, data_bytes, datastr, iget, &hook); optind += hook.got; break; case 'r': need_res = 1; hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; resstr = cget(&hook, NULL); if ((resstr != NULL) && (resstr[0] == '-')) fd_res = 1; optind += hook.got; break; default: break; } } /* * If fd_data is set, and we're writing to the device, we need to * read the data the user wants written from stdin. */ if ((fd_data == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto scsicmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (arglist & CAM_ARG_ERR_RECOVER) flags |= CAM_PASS_ERR_RECOVER; /* Disable freezing the device queue */ flags |= CAM_DEV_QFRZDIS; if (cdb_len) { /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. * The next 5 bits are the command code. * Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((cdb[0] >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* computed by buff_encode_visit */ break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } /* * We should probably use csio_build_visit or something like that * here, but it's easier to encode arguments as you go. The * alternative would be skipping the CDB argument and then encoding * it here, since we've got the data buffer argument by now. 
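* As a worked example of the group code fixup above, assuming a * user-supplied opcode of 0x28 (READ(10)): (0x28 >> 5) & 0x7 is * group 1, so cdb_len is forced to 10 regardless of how many bytes * were actually typed on the command line.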
*/ bcopy(cdb, &ccb->csio.cdb_io.cdb_bytes, cdb_len); cam_fill_csio(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*sense_len*/ SSD_FULL_SIZE, /*cdb_len*/ cdb_len, /*timeout*/ timeout ? timeout : 5000); } else { atacmd_len = 12; bcopy(atacmd, &ccb->ataio.cmd.command, atacmd_len); if (need_res) ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; if (dmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; if (fpdmacmd) ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ data_bytes, /*timeout*/ timeout ? timeout : 5000); } if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsicmd_bailout; } if (atacmd_len && need_res) { if (fd_res == 0) { buff_decode_visit(&ccb->ataio.res.status, 11, resstr, arg_put, NULL); fprintf(stdout, "\n"); } else { fprintf(stdout, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", ccb->ataio.res.status, ccb->ataio.res.error, ccb->ataio.res.lba_low, ccb->ataio.res.lba_mid, ccb->ataio.res.lba_high, ccb->ataio.res.device, ccb->ataio.res.lba_low_exp, ccb->ataio.res.lba_mid_exp, ccb->ataio.res.lba_high_exp, ccb->ataio.res.sector_count, ccb->ataio.res.sector_count_exp); fflush(stdout); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (arglist & CAM_ARG_CMD_IN) && (data_bytes > 0)) { if (fd_data == 0) { buff_decode_visit(data_ptr, data_bytes, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = data_bytes; u_int8_t *buf_ptr = data_ptr; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(1, buf_ptr, amt_to_write)) > 0;) { amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto scsicmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", data_bytes - amt_to_write, data_bytes); } } } scsicmd_bailout: if ((data_bytes > 0) && (data_ptr != NULL)) free(data_ptr); cam_freeccb(ccb); return(error); } static int camdebug(int argc, char **argv, char *combinedopt) { int c, fd; path_id_t bus = CAM_BUS_WILDCARD; target_id_t target = CAM_TARGET_WILDCARD; lun_id_t lun = CAM_LUN_WILDCARD; char *tstr, *tmpstr = NULL; union ccb ccb; int error = 0; bzero(&ccb, sizeof(union ccb)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'I': arglist |= CAM_ARG_DEBUG_INFO; ccb.cdbg.flags |= CAM_DEBUG_INFO; break; case 'P': arglist |= CAM_ARG_DEBUG_PERIPH; ccb.cdbg.flags |= CAM_DEBUG_PERIPH; break; case 'S': arglist |= CAM_ARG_DEBUG_SUBTRACE; ccb.cdbg.flags |= CAM_DEBUG_SUBTRACE; break; case 'T': arglist |= CAM_ARG_DEBUG_TRACE; ccb.cdbg.flags |= CAM_DEBUG_TRACE; break; case 'X': arglist |= CAM_ARG_DEBUG_XPT; ccb.cdbg.flags |= CAM_DEBUG_XPT; break; case 'c': arglist |= CAM_ARG_DEBUG_CDB; ccb.cdbg.flags |= CAM_DEBUG_CDB; break; case 'p': arglist |= CAM_ARG_DEBUG_PROBE; ccb.cdbg.flags |= CAM_DEBUG_PROBE; break; default: break; } } if ((fd = open(XPT_DEVICE, O_RDWR)) < 0) { warnx("error opening transport layer device %s", XPT_DEVICE); warn("%s", XPT_DEVICE); return(1); } argc -= optind; argv +=
optind; if (argc <= 0) { warnx("you must specify \"off\", \"all\" or a bus,"); warnx("bus:target, or bus:target:lun"); close(fd); return(1); } tstr = *argv; while (isspace(*tstr) && (*tstr != '\0')) tstr++; if (strncmp(tstr, "off", 3) == 0) { ccb.cdbg.flags = CAM_DEBUG_NONE; arglist &= ~(CAM_ARG_DEBUG_INFO|CAM_ARG_DEBUG_PERIPH| CAM_ARG_DEBUG_TRACE|CAM_ARG_DEBUG_SUBTRACE| CAM_ARG_DEBUG_XPT|CAM_ARG_DEBUG_PROBE); } else if (strncmp(tstr, "all", 3) != 0) { tmpstr = (char *)strtok(tstr, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ bus = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_BUS; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ target = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_TARGET; tmpstr = (char *)strtok(NULL, ":"); if ((tmpstr != NULL) && (*tmpstr != '\0')){ lun = strtol(tmpstr, NULL, 0); arglist |= CAM_ARG_LUN; } } } else { error = 1; warnx("you must specify \"all\", \"off\", or a bus,"); warnx("bus:target, or bus:target:lun to debug"); } } if (error == 0) { ccb.ccb_h.func_code = XPT_DEBUG; ccb.ccb_h.path_id = bus; ccb.ccb_h.target_id = target; ccb.ccb_h.target_lun = lun; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("CAMIOCOMMAND ioctl failed"); error = 1; } if (error == 0) { if ((ccb.ccb_h.status & CAM_STATUS_MASK) == CAM_FUNC_NOTAVAIL) { warnx("CAM debugging not available"); warnx("you need to put options CAMDEBUG in" " your kernel config file!"); error = 1; } else if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_DEBUG CCB failed with status %#x", ccb.ccb_h.status); error = 1; } else { if (ccb.cdbg.flags == CAM_DEBUG_NONE) { fprintf(stderr, "Debugging turned off\n"); } else { fprintf(stderr, "Debugging enabled for " "%d:%d:%jx\n", bus, target, (uintmax_t)lun); } } } close(fd); } return(error); } static int tagcontrol(struct cam_device *device, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int numtags = -1; int retval = 0; int quiet = 0; char pathstr[1024]; ccb = cam_getccb(device); if (ccb == NULL) { warnx("tagcontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'N': numtags = strtol(optarg, NULL, 0); if (numtags < 0) { warnx("tag count %d is < 0", numtags); retval = 1; goto tagcontrol_bailout; } break; case 'q': quiet++; break; default: break; } } cam_path_string(device, pathstr, sizeof(pathstr)); if (numtags >= 0) { bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_relsim) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_REL_SIMQ; ccb->ccb_h.flags = CAM_DEV_QFREEZE; ccb->crs.release_flags = RELSIM_ADJUST_OPENINGS; ccb->crs.openings = numtags; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_REL_SIMQ CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_REL_SIMQ CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (quiet == 0) fprintf(stdout, "%stagged openings now %d\n", pathstr, ccb->crs.openings); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_getdevstats) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GDEV_STATS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GDEV_STATS CCB"); retval = 1; goto tagcontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GDEV_STATS CCB failed"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto tagcontrol_bailout; } if (arglist & CAM_ARG_VERBOSE) { fprintf(stdout, "%s", 
pathstr); fprintf(stdout, "dev_openings %d\n", ccb->cgds.dev_openings); fprintf(stdout, "%s", pathstr); fprintf(stdout, "dev_active %d\n", ccb->cgds.dev_active); fprintf(stdout, "%s", pathstr); fprintf(stdout, "allocated %d\n", ccb->cgds.allocated); fprintf(stdout, "%s", pathstr); fprintf(stdout, "queued %d\n", ccb->cgds.queued); fprintf(stdout, "%s", pathstr); fprintf(stdout, "held %d\n", ccb->cgds.held); fprintf(stdout, "%s", pathstr); fprintf(stdout, "mintags %d\n", ccb->cgds.mintags); fprintf(stdout, "%s", pathstr); fprintf(stdout, "maxtags %d\n", ccb->cgds.maxtags); } else { if (quiet == 0) { fprintf(stdout, "%s", pathstr); fprintf(stdout, "device openings: "); } fprintf(stdout, "%d\n", ccb->cgds.dev_openings + ccb->cgds.dev_active); } tagcontrol_bailout: cam_freeccb(ccb); return(retval); } static void cts_print(struct cam_device *device, struct ccb_trans_settings *cts) { char pathstr[1024]; cam_path_string(device, pathstr, sizeof(pathstr)); if (cts->transport == XPORT_SPI) { struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { fprintf(stdout, "%ssync parameter: %d\n", pathstr, spi->sync_period); if (spi->sync_offset != 0) { u_int freq; freq = scsi_calc_syncsrate(spi->sync_period); fprintf(stdout, "%sfrequency: %d.%03dMHz\n", pathstr, freq / 1000, freq % 1000); } } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { fprintf(stdout, "%soffset: %d\n", pathstr, spi->sync_offset); } if (spi->valid & CTS_SPI_VALID_BUS_WIDTH) { fprintf(stdout, "%sbus width: %d bits\n", pathstr, (0x01 << spi->bus_width) * 8); } if (spi->valid & CTS_SPI_VALID_DISC) { fprintf(stdout, "%sdisconnection is %s\n", pathstr, (spi->flags & CTS_SPI_FLAGS_DISC_ENB) ? "enabled" : "disabled"); } } if (cts->transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWNN) fprintf(stdout, "%sWWNN: 0x%llx\n", pathstr, (long long) fc->wwnn); if (fc->valid & CTS_FC_VALID_WWPN) fprintf(stdout, "%sWWPN: 0x%llx\n", pathstr, (long long) fc->wwpn); if (fc->valid & CTS_FC_VALID_PORT) fprintf(stdout, "%sPortID: 0x%x\n", pathstr, fc->port); if (fc->valid & CTS_FC_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, fc->bitrate / 1000, fc->bitrate % 1000); } if (cts->transport == XPORT_SAS) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; if (sas->valid & CTS_SAS_VALID_SPEED) fprintf(stdout, "%stransfer speed: %d.%03dMB/s\n", pathstr, sas->bitrate / 1000, sas->bitrate % 1000); } if (cts->transport == XPORT_ATA) { struct ccb_trans_settings_pata *pata = &cts->xport_specific.ata; if ((pata->valid & CTS_ATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(pata->mode)); } if ((pata->valid & CTS_ATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, pata->atapi); } if ((pata->valid & CTS_ATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO transaction length: %d\n", pathstr, pata->bytecount); } } if (cts->transport == XPORT_SATA) { struct ccb_trans_settings_sata *sata = &cts->xport_specific.sata; if ((sata->valid & CTS_SATA_VALID_REVISION) != 0) { fprintf(stdout, "%sSATA revision: %d.x\n", pathstr, sata->revision); } if ((sata->valid & CTS_SATA_VALID_MODE) != 0) { fprintf(stdout, "%sATA mode: %s\n", pathstr, ata_mode2string(sata->mode)); } if ((sata->valid & CTS_SATA_VALID_ATAPI) != 0) { fprintf(stdout, "%sATAPI packet length: %d\n", pathstr, sata->atapi); } if ((sata->valid & CTS_SATA_VALID_BYTECOUNT) != 0) { fprintf(stdout, "%sPIO 
transaction length: %d\n", pathstr, sata->bytecount); } if ((sata->valid & CTS_SATA_VALID_PM) != 0) { fprintf(stdout, "%sPMP presence: %d\n", pathstr, sata->pm_present); } if ((sata->valid & CTS_SATA_VALID_TAGS) != 0) { fprintf(stdout, "%sNumber of tags: %d\n", pathstr, sata->tags); } if ((sata->valid & CTS_SATA_VALID_CAPS) != 0) { fprintf(stdout, "%sSATA capabilities: %08x\n", pathstr, sata->caps); } } if (cts->protocol == PROTO_ATA) { struct ccb_trans_settings_ata *ata = &cts->proto_specific.ata; if (ata->valid & CTS_ATA_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (ata->flags & CTS_ATA_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } if (cts->protocol == PROTO_SCSI) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; if (scsi->valid & CTS_SCSI_VALID_TQ) { fprintf(stdout, "%stagged queueing: %s\n", pathstr, (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) ? "enabled" : "disabled"); } } } /* * Get a path inquiry CCB for the specified device. */ static int get_cpi(struct cam_device *device, struct ccb_pathinq *cpi) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cpi: couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { warn("get_cpi: error sending Path Inquiry CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cpi_bailout; } bcopy(&ccb->cpi, cpi, sizeof(struct ccb_pathinq)); get_cpi_bailout: cam_freeccb(ccb); return(retval); } /* * Get a get device CCB for the specified device. */ static int get_cgd(struct cam_device *device, struct ccb_getdev *cgd) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_cgd: couldn't allocate CCB"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_getdev) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GDEV_TYPE; if (cam_send_ccb(device, ccb) < 0) { warn("get_cgd: error sending Get Device Type CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_cgd_bailout; } bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev)); get_cgd_bailout: cam_freeccb(ccb); return(retval); } /* * Returns 1 if the device has the VPD page, 0 if it does not, and -1 on an * error.
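* A typical use is probing for the ATA Information VPD page, as * get_device_type() below does: * * if (dev_has_vpd_page(dev, SVPD_ATA_INFORMATION, 0, 5000, 0) == 1) * ... device sits behind a SCSI-to-ATA translation layer ... * * (hypothetical caller; the retry count, timeout and verbosity are * whatever the caller wants).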
*/ int dev_has_vpd_page(struct cam_device *dev, uint8_t page_id, int retry_count, int timeout, int verbosemode) { union ccb *ccb = NULL; struct scsi_vpd_supported_page_list sup_pages; int i; int retval = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warn("Unable to allocate CCB"); retval = -1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); bzero(&sup_pages, sizeof(sup_pages)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)&sup_pages, /* inq_len */ sizeof(sup_pages), /* evpd */ 1, /* page_code */ SVPD_SUPPORTED_PAGE_LIST, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { cam_freeccb(ccb); ccb = NULL; retval = -1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = -1; goto bailout; } for (i = 0; i < sup_pages.length; i++) { if (sup_pages.list[i] == page_id) { retval = 1; goto bailout; } } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * devtype is filled in with the type of device. * Returns 0 for success, non-zero for failure. */ int get_device_type(struct cam_device *dev, int retry_count, int timeout, int verbosemode, camcontrol_devtype *devtype) { struct ccb_getdev cgd; int retval = 0; retval = get_cgd(dev, &cgd); if (retval != 0) goto bailout; switch (cgd.protocol) { case PROTO_SCSI: break; case PROTO_ATA: case PROTO_ATAPI: case PROTO_SATAPM: *devtype = CC_DT_ATA; goto bailout; break; /*NOTREACHED*/ default: *devtype = CC_DT_UNKNOWN; goto bailout; break; /*NOTREACHED*/ } /* * Check for the ATA Information VPD page (0x89). If this is an * ATA device behind a SCSI to ATA translation layer, this VPD page * should be present. * * If that VPD page isn't present, or we get an error back from the * INQUIRY command, we'll just treat it as a normal SCSI device. 
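* (SAT requires a SCSI to ATA translation layer to implement the * ATA Information VPD page, which is why its absence is treated * here as "no translation layer present".)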
*/ retval = dev_has_vpd_page(dev, SVPD_ATA_INFORMATION, retry_count, timeout, verbosemode); if (retval == 1) *devtype = CC_DT_ATA_BEHIND_SCSI; else *devtype = CC_DT_SCSI; retval = 0; bailout: return (retval); } -void +int build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, uint16_t features, - uint16_t sector_count, uint64_t lba, uint8_t command, uint8_t *data_ptr, - uint16_t dxfer_len, uint8_t sense_len, uint32_t timeout, + uint16_t sector_count, uint64_t lba, uint8_t command, uint32_t auxiliary, + uint8_t *data_ptr, uint32_t dxfer_len, uint8_t *cdb_storage, + size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout, int is48bit, camcontrol_devtype devtype) { + int retval = 0; + if (devtype == CC_DT_ATA) { cam_fill_ataio(&ccb->ataio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, /*timeout*/ timeout); if (is48bit || lba > ATA_MAX_28BIT_LBA) ata_48bit_cmd(&ccb->ataio, command, features, lba, sector_count); else ata_28bit_cmd(&ccb->ataio, command, features, lba, sector_count); + + if (auxiliary != 0) { + ccb->ataio.ata_flags |= ATA_FLAG_AUX; + ccb->ataio.aux = auxiliary; + } + + if (ata_flags & AP_FLAG_CHK_COND) + ccb->ataio.cmd.flags |= CAM_ATAIO_NEEDRESULT; + + if ((protocol & AP_PROTO_MASK) == AP_PROTO_DMA) + ccb->ataio.cmd.flags |= CAM_ATAIO_DMA; + else if ((protocol & AP_PROTO_MASK) == AP_PROTO_FPDMA) + ccb->ataio.cmd.flags |= CAM_ATAIO_FPDMA; } else { if (is48bit || lba > ATA_MAX_28BIT_LBA) protocol |= AP_EXTEND; - scsi_ata_pass_16(&ccb->csio, + retval = scsi_ata_pass(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*tag_action*/ tag_action, /*protocol*/ protocol, /*ata_flags*/ ata_flags, /*features*/ features, /*sector_count*/ sector_count, /*lba*/ lba, /*command*/ command, + /*device*/ 0, + /*icc*/ 0, + /*auxiliary*/ auxiliary, /*control*/ 0, /*data_ptr*/ data_ptr, /*dxfer_len*/ dxfer_len, + /*cdb_storage*/ cdb_storage, + /*cdb_storage_len*/ cdb_storage_len, + /*minimum_cmd_size*/ 0, /*sense_len*/ sense_len, /*timeout*/ timeout); } + + return (retval); } +int +get_ata_status(struct cam_device *dev, union ccb *ccb, uint8_t *error, + uint16_t *count, uint64_t *lba, uint8_t *device, uint8_t *status) +{ + int retval = 0; + switch (ccb->ccb_h.func_code) { + case XPT_SCSI_IO: { + uint8_t opcode; + int error_code = 0, sense_key = 0, asc = 0, ascq = 0; + + /* + * In this case, we have a SCSI ATA PASS-THROUGH command, 12 + * or 16 byte, and need to see which one it was before we go + * looking for the ATA result in the sense data. + */ + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + opcode = ccb->csio.cdb_io.cdb_ptr[0]; + else + opcode = ccb->csio.cdb_io.cdb_bytes[0]; + if ((opcode != ATA_PASS_12) + && (opcode != ATA_PASS_16)) { + retval = 1; + warnx("%s: unsupported opcode %02x", __func__, opcode); + goto bailout; + } + + retval = scsi_extract_sense_ccb(ccb, &error_code, &sense_key, + &asc, &ascq); + /* Note: the _ccb() variant returns 0 for an error */ + if (retval == 0) { + retval = 1; + goto bailout; + } else + retval = 0; + + switch (error_code) { + case SSD_DESC_CURRENT_ERROR: + case SSD_DESC_DEFERRED_ERROR: { + struct scsi_sense_data_desc *sense; + struct scsi_sense_ata_ret_desc *desc; + uint8_t *desc_ptr; + + sense = (struct scsi_sense_data_desc *) + &ccb->csio.sense_data; + + desc_ptr = scsi_find_desc(sense, ccb->csio.sense_len - + ccb->csio.sense_resid, SSD_DESC_ATA); + if (desc_ptr == NULL) { + cam_error_print(dev, ccb, CAM_ESF_ALL, + CAM_EPF_ALL, stderr); + retval = 1; + goto
bailout; + } + desc = (struct scsi_sense_ata_ret_desc *)desc_ptr; + + *error = desc->error; + *count = (desc->count_15_8 << 8) | + desc->count_7_0; + *lba = ((uint64_t)desc->lba_47_40 << 40) | + ((uint64_t)desc->lba_39_32 << 32) | + (desc->lba_31_24 << 24) | + (desc->lba_23_16 << 16) | + (desc->lba_15_8 << 8) | + desc->lba_7_0; + *device = desc->device; + *status = desc->status; + + /* + * If the extend bit isn't set, the result is for a + * 12-byte ATA PASS-THROUGH command or a 16 or 32 byte + * command without the extend bit set. This means + * that the device is supposed to return 28-bit + * status. The count field is only 8 bits, and the + * LBA field is only 28 bits. + */ + if ((desc->flags & SSD_DESC_ATA_FLAG_EXTEND) == 0) { + *count &= 0xff; + *lba &= 0x0fffffff; + } + break; + } + case SSD_CURRENT_ERROR: + case SSD_DEFERRED_ERROR: { +#if 0 + struct scsi_sense_data_fixed *sense; +#endif + /* + * XXX KDM need to support fixed sense data. + */ + warnx("%s: Fixed sense data not supported yet", + __func__); + retval = 1; + goto bailout; + break; /*NOTREACHED*/ + } + default: + retval = 1; + goto bailout; + break; + } + + break; + } + case XPT_ATA_IO: { + struct ata_res *res; + + /* + * In this case, we have an ATA command, and we need to + * fill in the requested values from the result register + * set. + */ + res = &ccb->ataio.res; + *error = res->error; + *status = res->status; + *device = res->device; + *count = res->sector_count; + *lba = (res->lba_high << 16) | + (res->lba_mid << 8) | + (res->lba_low); + if (res->flags & CAM_ATAIO_48BIT) { + *count |= (res->sector_count_exp << 8); + *lba |= (res->lba_low_exp << 24) | + ((uint64_t)res->lba_mid_exp << 32) | + ((uint64_t)res->lba_high_exp << 40); + } else { + *lba |= (res->device & 0xf) << 24; + } + break; + } + default: + retval = 1; + break; + } +bailout: + return (retval); +} + static void cpi_print(struct ccb_pathinq *cpi) { char adapter_str[1024]; int i; snprintf(adapter_str, sizeof(adapter_str), "%s%d:", cpi->dev_name, cpi->unit_number); fprintf(stdout, "%s SIM/HBA version: %d\n", adapter_str, cpi->version_num); for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_inquiry) == 0) continue; fprintf(stdout, "%s supports ", adapter_str); switch(i) { case PI_MDP_ABLE: str = "MDP message"; break; case PI_WIDE_32: str = "32 bit wide SCSI"; break; case PI_WIDE_16: str = "16 bit wide SCSI"; break; case PI_SDTR_ABLE: str = "SDTR message"; break; case PI_LINKED_CDB: str = "linked CDBs"; break; case PI_TAG_ABLE: str = "tag queue messages"; break; case PI_SOFT_RST: str = "soft reset alternative"; break; case PI_SATAPM: str = "SATA Port Multiplier"; break; default: str = "unknown PI bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->hba_misc) == 0) continue; fprintf(stdout, "%s ", adapter_str); switch(i) { case PIM_SCANHILO: str = "bus scans from high ID to low ID"; break; case PIM_NOREMOVE: str = "removable devices not included in scan"; break; case PIM_NOINITIATOR: str = "initiator role not supported"; break; case PIM_NOBUSRESET: str = "user has disabled initial BUS RESET or" " controller is in target/mixed mode"; break; case PIM_NO_6_BYTE: str = "do not send 6-byte commands"; break; case PIM_SEQSCAN: str = "scan bus sequentially"; break; default: str = "unknown PIM bit set"; break; } fprintf(stdout, "%s\n", str); } for (i = 1; i < 0xff; i = i << 1) { const char *str; if ((i & cpi->target_sprt) == 0) continue; fprintf(stdout, "%s supports ", adapter_str);
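/* Translate each PIT_* target mode capability bit into text. */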
switch(i) { case PIT_PROCESSOR: str = "target mode processor mode"; break; case PIT_PHASE: str = "target mode phase cog. mode"; break; case PIT_DISCONNECT: str = "disconnects in target mode"; break; case PIT_TERM_IO: str = "terminate I/O message in target mode"; break; case PIT_GRP_6: str = "group 6 commands in target mode"; break; case PIT_GRP_7: str = "group 7 commands in target mode"; break; default: str = "unknown PIT bit set"; break; } fprintf(stdout, "%s\n", str); } fprintf(stdout, "%s HBA engine count: %d\n", adapter_str, cpi->hba_eng_cnt); fprintf(stdout, "%s maximum target: %d\n", adapter_str, cpi->max_target); fprintf(stdout, "%s maximum LUN: %d\n", adapter_str, cpi->max_lun); fprintf(stdout, "%s highest path ID in subsystem: %d\n", adapter_str, cpi->hpath_id); fprintf(stdout, "%s initiator ID: %d\n", adapter_str, cpi->initiator_id); fprintf(stdout, "%s SIM vendor: %s\n", adapter_str, cpi->sim_vid); fprintf(stdout, "%s HBA vendor: %s\n", adapter_str, cpi->hba_vid); fprintf(stdout, "%s HBA vendor ID: 0x%04x\n", adapter_str, cpi->hba_vendor); fprintf(stdout, "%s HBA device ID: 0x%04x\n", adapter_str, cpi->hba_device); fprintf(stdout, "%s HBA subvendor ID: 0x%04x\n", adapter_str, cpi->hba_subvendor); fprintf(stdout, "%s HBA subdevice ID: 0x%04x\n", adapter_str, cpi->hba_subdevice); fprintf(stdout, "%s bus ID: %d\n", adapter_str, cpi->bus_id); fprintf(stdout, "%s base transfer speed: ", adapter_str); if (cpi->base_transfer_speed > 1000) fprintf(stdout, "%d.%03dMB/sec\n", cpi->base_transfer_speed / 1000, cpi->base_transfer_speed % 1000); else fprintf(stdout, "%dKB/sec\n", cpi->base_transfer_speed); fprintf(stdout, "%s maximum transfer size: %u bytes\n", adapter_str, cpi->maxio); } static int get_print_cts(struct cam_device *device, int user_settings, int quiet, struct ccb_trans_settings *cts) { int retval; union ccb *ccb; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("get_print_cts: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS; if (user_settings == 0) ccb->cts.type = CTS_TYPE_CURRENT_SETTINGS; else ccb->cts.type = CTS_TYPE_USER_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_GET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_GET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto get_print_cts_bailout; } if (quiet == 0) cts_print(device, &ccb->cts); if (cts != NULL) bcopy(&ccb->cts, cts, sizeof(struct ccb_trans_settings)); get_print_cts_bailout: cam_freeccb(ccb); return(retval); } static int ratecontrol(struct cam_device *device, int retry_count, int timeout, int argc, char **argv, char *combinedopt) { int c; union ccb *ccb; int user_settings = 0; int retval = 0; int disc_enable = -1, tag_enable = -1; int mode = -1; int offset = -1; double syncrate = -1; int bus_width = -1; int quiet = 0; int change_settings = 0, send_tur = 0; struct ccb_pathinq cpi; ccb = cam_getccb(device); if (ccb == NULL) { warnx("ratecontrol: error allocating ccb"); return(1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': send_tur = 1; break; case 'c': user_settings = 0; break; case 'D': if (strncasecmp(optarg, "enable", 6) == 0)
disc_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) disc_enable = 0; else { warnx("-D argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'M': mode = ata_string2mode(optarg); if (mode < 0) { warnx("unknown mode '%s'", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'O': offset = strtol(optarg, NULL, 0); if (offset < 0) { warnx("offset value %d is < 0", offset); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'q': quiet++; break; case 'R': syncrate = atof(optarg); if (syncrate < 0) { warnx("sync rate %f is < 0", syncrate); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'T': if (strncasecmp(optarg, "enable", 6) == 0) tag_enable = 1; else if (strncasecmp(optarg, "disable", 7) == 0) tag_enable = 0; else { warnx("-T argument \"%s\" is unknown", optarg); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; case 'U': user_settings = 1; break; case 'W': bus_width = strtol(optarg, NULL, 0); if (bus_width < 0) { warnx("bus width %d is < 0", bus_width); retval = 1; goto ratecontrol_bailout; } change_settings = 1; break; default: break; } } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_pathinq) - sizeof(struct ccb_hdr)); /* * Grab path inquiry information, so we can determine whether * or not the initiator is capable of the things that the user * requests. */ ccb->ccb_h.func_code = XPT_PATH_INQ; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_PATH_INQ CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_PATH_INQ CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } bcopy(&ccb->cpi, &cpi, sizeof(struct ccb_pathinq)); bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_trans_settings) - sizeof(struct ccb_hdr)); if (quiet == 0) { fprintf(stdout, "%s parameters:\n", user_settings ? 
"User" : "Current"); } retval = get_print_cts(device, user_settings, quiet, &ccb->cts); if (retval != 0) goto ratecontrol_bailout; if (arglist & CAM_ARG_VERBOSE) cpi_print(&cpi); if (change_settings) { int didsettings = 0; struct ccb_trans_settings_spi *spi = NULL; struct ccb_trans_settings_pata *pata = NULL; struct ccb_trans_settings_sata *sata = NULL; struct ccb_trans_settings_ata *ata = NULL; struct ccb_trans_settings_scsi *scsi = NULL; if (ccb->cts.transport == XPORT_SPI) spi = &ccb->cts.xport_specific.spi; if (ccb->cts.transport == XPORT_ATA) pata = &ccb->cts.xport_specific.ata; if (ccb->cts.transport == XPORT_SATA) sata = &ccb->cts.xport_specific.sata; if (ccb->cts.protocol == PROTO_ATA) ata = &ccb->cts.proto_specific.ata; if (ccb->cts.protocol == PROTO_SCSI) scsi = &ccb->cts.proto_specific.scsi; ccb->cts.xport_specific.valid = 0; ccb->cts.proto_specific.valid = 0; if (spi && disc_enable != -1) { spi->valid |= CTS_SPI_VALID_DISC; if (disc_enable == 0) spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; else spi->flags |= CTS_SPI_FLAGS_DISC_ENB; didsettings++; } if (tag_enable != -1) { if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0) { warnx("HBA does not support tagged queueing, " "so you cannot modify tag settings"); retval = 1; goto ratecontrol_bailout; } if (ata) { ata->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) ata->flags &= ~CTS_ATA_FLAGS_TAG_ENB; else ata->flags |= CTS_ATA_FLAGS_TAG_ENB; didsettings++; } else if (scsi) { scsi->valid |= CTS_SCSI_VALID_TQ; if (tag_enable == 0) scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; else scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; didsettings++; } } if (spi && offset != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing offset"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->sync_offset = offset; didsettings++; } if (spi && syncrate != -1) { int prelim_sync_period; if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_SYNC_RATE; /* * The sync rate the user gives us is in MHz. * We need to translate it into KHz for this * calculation. */ syncrate *= 1000; /* * Next, we calculate a "preliminary" sync period * in tenths of a nanosecond. 
*/ if (syncrate == 0) prelim_sync_period = 0; else prelim_sync_period = 10000000 / syncrate; spi->sync_period = scsi_calc_syncparam(prelim_sync_period); didsettings++; } if (sata && syncrate != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user rate " "settings for SATA"); retval = 1; goto ratecontrol_bailout; } sata->revision = ata_speed2revision(syncrate * 100); if (sata->revision < 0) { warnx("Invalid rate %f", syncrate); retval = 1; goto ratecontrol_bailout; } sata->valid |= CTS_SATA_VALID_REVISION; didsettings++; } if ((pata || sata) && mode != -1) { if ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { warnx("HBA is not capable of changing " "transfer rates"); retval = 1; goto ratecontrol_bailout; } if (!user_settings) { warnx("You can modify only user mode " "settings for ATA/SATA"); retval = 1; goto ratecontrol_bailout; } if (pata) { pata->mode = mode; pata->valid |= CTS_ATA_VALID_MODE; } else { sata->mode = mode; sata->valid |= CTS_SATA_VALID_MODE; } didsettings++; } /* * The bus_width argument goes like this: * 0 == 8 bit * 1 == 16 bit * 2 == 32 bit * Therefore, if you shift the number of bits given on the * command line right by 4, you should get the correct * number. */ if (spi && bus_width != -1) { /* * We might as well validate things here with a * decipherable error message, rather than what * will probably be an indecipherable error message * by the time it gets back to us. */ if ((bus_width == 16) && ((cpi.hba_inquiry & PI_WIDE_16) == 0)) { warnx("HBA does not support 16 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width == 32) && ((cpi.hba_inquiry & PI_WIDE_32) == 0)) { warnx("HBA does not support 32 bit bus width"); retval = 1; goto ratecontrol_bailout; } else if ((bus_width != 8) && (bus_width != 16) && (bus_width != 32)) { warnx("Invalid bus width %d", bus_width); retval = 1; goto ratecontrol_bailout; } spi->valid |= CTS_SPI_VALID_BUS_WIDTH; spi->bus_width = bus_width >> 4; didsettings++; } if (didsettings == 0) { goto ratecontrol_bailout; } ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS; if (cam_send_ccb(device, ccb) < 0) { perror("error sending XPT_SET_TRAN_SETTINGS CCB"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("XPT_SET_TRANS_SETTINGS CCB failed"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto ratecontrol_bailout; } } if (send_tur) { retval = testunitready(device, retry_count, timeout, (arglist & CAM_ARG_VERBOSE) ? 0 : 1); /* * If the TUR didn't succeed, just bail. 
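* (The TUR is also what makes the new settings take effect * immediately; see the -a handling above, which sets send_tur.)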
*/ if (retval != 0) { if (quiet == 0) fprintf(stderr, "Test Unit Ready failed\n"); goto ratecontrol_bailout; } } if ((change_settings || send_tur) && !quiet && (ccb->cts.transport == XPORT_ATA || ccb->cts.transport == XPORT_SATA || send_tur)) { fprintf(stdout, "New parameters:\n"); retval = get_print_cts(device, user_settings, 0, NULL); } ratecontrol_bailout: cam_freeccb(ccb); return(retval); } static int scsiformat(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; struct format_defect_list_header fh; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsiformat: error allocating ccb"); return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'q': quiet++; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsiformat: error sending inquiry"); goto scsiformat_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsiformat_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current format timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. */ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } /* * Keep this outside the if block below to silence any unused * variable warnings. */ bzero(&fh, sizeof(fh)); /* * If we're in immediate mode, we've got to include the format * header */ if (immediate != 0) { fh.byte2 = FU_DLH_IMMED; data_ptr = (u_int8_t *)&fh; dxfer_len = sizeof(fh); byte2 = FU_FMT_DATA; } else if (quiet == 0) { fprintf(stdout, "Formatting..."); fflush(stdout); } scsi_format_unit(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* ileave */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((immediate == 0) && ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP))) { const char errstr[] = "error sending format command"; if (retval < 0) warn(errstr); else warnx(errstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. 
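* (While a format is in progress the drive reports a progress * fraction out of 65536 in the sense key specific bytes; a value * of 0x8000, for example, prints as 50.00 % in the loop below.)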
If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Format Complete\n"); } goto scsiformat_bailout; } doreport: do { cam_status status; bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish formatting. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsiformat_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-2 and SCSI-3 specs, a * drive that is in the middle of a format should * return NOT READY with an ASC of "logical unit * not ready, format in progress". The sense key * specific bytes will then be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x04)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rFormatting: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during format:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but format will " "proceed."); warnx("will exit when format is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during format"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsiformat_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nFormat Complete\n"); scsiformat_bailout: cam_freeccb(ccb); return(error); } static int scsisanitize(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; u_int8_t action = 0; int c; int ycount = 0, quiet = 0; int error = 0, retval = 0; int use_timeout = 10800 * 1000; int immediate = 1; int invert = 0; int passes = 0; int ause = 0; int fd = -1; const char *pattern = NULL; u_int8_t *data_ptr = NULL; u_int32_t dxfer_len = 0; u_int8_t byte2 = 0; int num_warnings = 0; int reportonly = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("scsisanitize: error allocating ccb"); 
return(1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch(c) { case 'a': if (strcasecmp(optarg, "overwrite") == 0) action = SSZ_SERVICE_ACTION_OVERWRITE; else if (strcasecmp(optarg, "block") == 0) action = SSZ_SERVICE_ACTION_BLOCK_ERASE; else if (strcasecmp(optarg, "crypto") == 0) action = SSZ_SERVICE_ACTION_CRYPTO_ERASE; else if (strcasecmp(optarg, "exitfailure") == 0) action = SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE; else { warnx("invalid service operation \"%s\"", optarg); error = 1; goto scsisanitize_bailout; } break; case 'c': passes = strtol(optarg, NULL, 0); if (passes < 1 || passes > 31) { warnx("invalid passes value %d", passes); error = 1; goto scsisanitize_bailout; } break; case 'I': invert = 1; break; case 'P': pattern = optarg; break; case 'q': quiet++; break; case 'U': ause = 1; break; case 'r': reportonly = 1; break; case 'w': immediate = 0; break; case 'y': ycount++; break; } } if (reportonly) goto doreport; if (action == 0) { warnx("an action is required"); error = 1; goto scsisanitize_bailout; } else if (action == SSZ_SERVICE_ACTION_OVERWRITE) { struct scsi_sanitize_parameter_list *pl; struct stat sb; ssize_t sz, amt; if (pattern == NULL) { warnx("overwrite action requires -P argument"); error = 1; goto scsisanitize_bailout; } fd = open(pattern, O_RDONLY); if (fd < 0) { warn("cannot open pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } if (fstat(fd, &sb) < 0) { warn("cannot stat pattern file %s", pattern); error = 1; goto scsisanitize_bailout; } sz = sb.st_size; if (sz > SSZPL_MAX_PATTERN_LENGTH) { warnx("pattern file size exceeds maximum value %d", SSZPL_MAX_PATTERN_LENGTH); error = 1; goto scsisanitize_bailout; } dxfer_len = sizeof(*pl) + sz; data_ptr = calloc(1, dxfer_len); if (data_ptr == NULL) { warnx("cannot allocate parameter list buffer"); error = 1; goto scsisanitize_bailout; } amt = read(fd, data_ptr + sizeof(*pl), sz); if (amt < 0) { warn("cannot read pattern file"); error = 1; goto scsisanitize_bailout; } else if (amt != sz) { warnx("short pattern file read"); error = 1; goto scsisanitize_bailout; } pl = (struct scsi_sanitize_parameter_list *)data_ptr; if (passes == 0) pl->byte1 = 1; else pl->byte1 = passes; if (invert != 0) pl->byte1 |= SSZPL_INVERT; scsi_ulto2b(sz, pl->length); } else { const char *arg; if (passes != 0) arg = "-c"; else if (invert != 0) arg = "-I"; else if (pattern != NULL) arg = "-P"; else arg = NULL; if (arg != NULL) { warnx("%s argument only valid with overwrite " "operation", arg); error = 1; goto scsisanitize_bailout; } } if (quiet == 0) { fprintf(stdout, "You are about to REMOVE ALL DATA from the " "following device:\n"); error = scsidoinquiry(device, argc, argv, combinedopt, retry_count, timeout); if (error != 0) { warnx("scsisanitize: error sending inquiry"); goto scsisanitize_bailout; } } if (ycount == 0) { if (!get_confirmation()) { error = 1; goto scsisanitize_bailout; } } if (timeout != 0) use_timeout = timeout; if (quiet == 0) { fprintf(stdout, "Current sanitize timeout is %d seconds\n", use_timeout / 1000); } /* * If the user hasn't disabled questions and didn't specify a * timeout on the command line, ask them if they want the current * timeout. 
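* (The 10800 * 1000 default above is three hours in milliseconds; * an overwrite sanitize of a large drive can genuinely take that * long.)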
*/ if ((ycount == 0) && (timeout == 0)) { char str[1024]; int new_timeout = 0; fprintf(stdout, "Enter new timeout in seconds or press\n" "return to keep the current timeout [%d] ", use_timeout / 1000); if (fgets(str, sizeof(str), stdin) != NULL) { if (str[0] != '\0') new_timeout = atoi(str); } if (new_timeout != 0) { use_timeout = new_timeout * 1000; fprintf(stdout, "Using new timeout value %d\n", use_timeout / 1000); } } byte2 = action; if (ause != 0) byte2 |= SSZ_UNRESTRICTED_EXIT; if (immediate != 0) byte2 |= SSZ_IMMED; scsi_sanitize(&ccb->csio, /* retries */ retry_count, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* byte2 */ byte2, /* control */ 0, /* data_ptr */ data_ptr, /* dxfer_len */ dxfer_len, /* sense_len */ SSD_FULL_SIZE, /* timeout */ use_timeout); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending sanitize command"); error = 1; goto scsisanitize_bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) { sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); if (sense_key == SSD_KEY_ILLEGAL_REQUEST && asc == 0x20 && ascq == 0x00) warnx("sanitize is not supported by " "this device"); else warnx("error sanitizing this device"); } else warnx("error sanitizing this device"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } /* * If we ran in non-immediate mode, we already checked for errors * above and printed out any necessary information. If we're in * immediate mode, we need to loop through and get status * information periodically. */ if (immediate == 0) { if (quiet == 0) { fprintf(stdout, "Sanitize Complete\n"); } goto scsisanitize_bailout; } doreport: do { cam_status status; bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); /* * There's really no need to do error recovery or * retries here, since we're just going to sit in a * loop and wait for the device to finish sanitizing. */ scsi_test_unit_ready(&ccb->csio, /* retries */ 0, /* cbfcnp */ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* sense_len */ SSD_FULL_SIZE, /* timeout */ 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(device, ccb); /* * If we get an error from the ioctl, bail out. SCSI * errors are expected. */ if (retval < 0) { warn("error sending CAMIOCOMMAND ioctl"); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto scsisanitize_bailout; } status = ccb->ccb_h.status & CAM_STATUS_MASK; if ((status != CAM_REQ_CMP) && (status == CAM_SCSI_STATUS_ERROR) && ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)) { struct scsi_sense_data *sense; int error_code, sense_key, asc, ascq; sense = &ccb->csio.sense_data; scsi_extract_sense_len(sense, ccb->csio.sense_len - ccb->csio.sense_resid, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); /* * According to the SCSI-3 spec, a drive that is in the * middle of a sanitize should return NOT READY with an * ASC of "logical unit not ready, sanitize in * progress". 
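* (That is asc 0x04 with ascq 0x1b, exactly what the test below * checks for.)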
The sense key specific bytes will then * be a progress indicator. */ if ((sense_key == SSD_KEY_NOT_READY) && (asc == 0x04) && (ascq == 0x1b)) { uint8_t sks[3]; if ((scsi_get_sks(sense, ccb->csio.sense_len - ccb->csio.sense_resid, sks) == 0) && (quiet == 0)) { int val; u_int64_t percentage; val = scsi_2btoul(&sks[1]); percentage = 10000 * val; fprintf(stdout, "\rSanitizing: %ju.%02u %% " "(%d/%d) done", (uintmax_t)(percentage / (0x10000 * 100)), (unsigned)((percentage / 0x10000) % 100), val, 0x10000); fflush(stdout); } else if ((quiet == 0) && (++num_warnings <= 1)) { warnx("Unexpected SCSI Sense Key " "Specific value returned " "during sanitize:"); scsi_sense_print(device, &ccb->csio, stderr); warnx("Unable to print status " "information, but sanitize will " "proceed."); warnx("will exit when sanitize is " "complete"); } sleep(1); } else { warnx("Unexpected SCSI error during sanitize"); cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } else if (status != CAM_REQ_CMP) { warnx("Unexpected CAM status %#x", status); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); error = 1; goto scsisanitize_bailout; } } while((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP); if (quiet == 0) fprintf(stdout, "\nSanitize Complete\n"); scsisanitize_bailout: if (fd >= 0) close(fd); if (data_ptr != NULL) free(data_ptr); cam_freeccb(ccb); return(error); } static int scsireportluns(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int c, countonly, lunsonly; struct scsi_report_luns_data *lundata; int alloc_len; uint8_t report_type; uint32_t list_len, i, j; int retval; retval = 0; lundata = NULL; report_type = RPL_REPORT_DEFAULT; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); countonly = 0; lunsonly = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'c': countonly++; break; case 'l': lunsonly++; break; case 'r': if (strcasecmp(optarg, "default") == 0) report_type = RPL_REPORT_DEFAULT; else if (strcasecmp(optarg, "wellknown") == 0) report_type = RPL_REPORT_WELLKNOWN; else if (strcasecmp(optarg, "all") == 0) report_type = RPL_REPORT_ALL; else { warnx("%s: invalid report type \"%s\"", __func__, optarg); retval = 1; goto bailout; } break; default: break; } } if ((countonly != 0) && (lunsonly != 0)) { warnx("%s: you can only specify one of -c or -l", __func__); retval = 1; goto bailout; } /* * According to SPC-4, the allocation length must be at least 16 * bytes -- enough for the header and one LUN. */ alloc_len = sizeof(*lundata) + 8; retry: lundata = malloc(alloc_len); if (lundata == NULL) { warn("%s: error mallocing %d bytes", __func__, alloc_len); retval = 1; goto bailout; } scsi_report_luns(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*select_report*/ report_type, /*rpl_buf*/ lundata, /*alloc_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ?
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending REPORT LUNS command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } list_len = scsi_4btoul(lundata->length); /* * If we need to list the LUNs, and our allocation * length was too short, reallocate and retry. */ if ((countonly == 0) && (list_len > (alloc_len - sizeof(*lundata)))) { alloc_len = list_len + sizeof(*lundata); free(lundata); goto retry; } if (lunsonly == 0) fprintf(stdout, "%u LUN%s found\n", list_len / 8, ((list_len / 8) > 1) ? "s" : ""); if (countonly != 0) goto bailout; for (i = 0; i < (list_len / 8); i++) { int no_more; no_more = 0; for (j = 0; j < sizeof(lundata->luns[i].lundata); j += 2) { if (j != 0) fprintf(stdout, ","); switch (lundata->luns[i].lundata[j] & RPL_LUNDATA_ATYP_MASK) { case RPL_LUNDATA_ATYP_PERIPH: if ((lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK) != 0) fprintf(stdout, "%d:", lundata->luns[i].lundata[j] & RPL_LUNDATA_PERIPH_BUS_MASK); else if ((j == 0) && ((lundata->luns[i].lundata[j+2] & RPL_LUNDATA_PERIPH_BUS_MASK) == 0)) no_more = 1; fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); break; case RPL_LUNDATA_ATYP_FLAT: { uint8_t tmplun[2]; tmplun[0] = lundata->luns[i].lundata[j] & RPL_LUNDATA_FLAT_LUN_MASK; tmplun[1] = lundata->luns[i].lundata[j+1]; fprintf(stdout, "%d", scsi_2btoul(tmplun)); no_more = 1; break; } case RPL_LUNDATA_ATYP_LUN: fprintf(stdout, "%d:%d:%d", (lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_BUS_MASK) >> 5, lundata->luns[i].lundata[j] & RPL_LUNDATA_LUN_TARG_MASK, lundata->luns[i].lundata[j+1] & RPL_LUNDATA_LUN_LUN_MASK); break; case RPL_LUNDATA_ATYP_EXTLUN: { int field_len_code, eam_code; eam_code = lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_EAM_MASK; field_len_code = (lundata->luns[i].lundata[j] & RPL_LUNDATA_EXT_LEN_MASK) >> 4; if ((eam_code == RPL_LUNDATA_EXT_EAM_WK) && (field_len_code == 0x00)) { fprintf(stdout, "%d", lundata->luns[i].lundata[j+1]); } else if ((eam_code == RPL_LUNDATA_EXT_EAM_NOT_SPEC) && (field_len_code == 0x03)) { uint8_t tmp_lun[8]; /* * This format takes up all 8 bytes. * If we aren't starting at offset 0, * that's a bug. */ if (j != 0) { fprintf(stdout, "Invalid " "offset %d for " "Extended LUN not " "specified format", j); no_more = 1; break; } bzero(tmp_lun, sizeof(tmp_lun)); bcopy(&lundata->luns[i].lundata[j+1], &tmp_lun[1], sizeof(tmp_lun) - 1); fprintf(stdout, "%#jx", (uintmax_t)scsi_8btou64(tmp_lun)); no_more = 1; } else { fprintf(stderr, "Unknown Extended LUN " "Address method %#x, length " "code %#x", eam_code, field_len_code); no_more = 1; } break; } default: fprintf(stderr, "Unknown LUN address method " "%#x\n", lundata->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK); break; } /* * For the flat addressing method, there are no * other levels after it.
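scsireportluns() leans on the REPORT LUNS header for sizing: the first pass uses the SPC-4 minimum allocation length, and when the returned length field reports more data than the buffer could hold, the buffer is reallocated and the command reissued via the retry label above. The same probe-and-grow pattern in isolation, with a hypothetical fetch() standing in for the pass-through command:

#include <stdlib.h>

/*
 * Allocate, issue, grow, retry.  fetch() is a stand-in for the SCSI
 * command and returns the total number of bytes the target has
 * available, which may exceed the buffer we offered it.
 */
static void *
fetch_all(size_t (*fetch)(void *buf, size_t len), size_t min_len, size_t *lenp)
{
	size_t alloc_len = min_len;
	void *buf;

	for (;;) {
		if ((buf = malloc(alloc_len)) == NULL)
			return (NULL);
		*lenp = fetch(buf, alloc_len);
		if (*lenp <= alloc_len)
			return (buf);		/* everything fit */
		alloc_len = *lenp;		/* grow to what was reported */
		free(buf);
	}
}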
*/ if (no_more != 0) break; } fprintf(stdout, "\n"); } bailout: cam_freeccb(ccb); free(lundata); return (retval); } static int scsireadcapacity(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int blocksizeonly, humanize, numblocks, quiet, sizeonly, baseten; struct scsi_read_capacity_data rcap; struct scsi_read_capacity_data_long rcaplong; uint64_t maxsector; uint32_t block_len; int retval; int c; blocksizeonly = 0; humanize = 0; numblocks = 0; quiet = 0; sizeonly = 0; baseten = 0; retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'b': blocksizeonly++; break; case 'h': humanize++; baseten = 0; break; case 'H': humanize++; baseten++; break; case 'N': numblocks++; break; case 'q': quiet++; break; case 's': sizeonly++; break; default: break; } } if ((blocksizeonly != 0) && (numblocks != 0)) { warnx("%s: you can only specify one of -b or -N", __func__); retval = 1; goto bailout; } if ((blocksizeonly != 0) && (sizeonly != 0)) { warnx("%s: you can only specify one of -b or -s", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (quiet != 0)) { warnx("%s: you can only specify one of -h/-H or -q", __func__); retval = 1; goto bailout; } if ((humanize != 0) && (blocksizeonly != 0)) { warnx("%s: you can only specify one of -h/-H or -b", __func__); retval = 1; goto bailout; } scsi_read_capacity(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, &rcap, SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_4btoul(rcap.addr); block_len = scsi_4btoul(rcap.length); /* * A last block of 2^32-1 means that the true capacity is over 2TB, * and we need to issue the long READ CAPACITY to get the real * capacity. Otherwise, we're all set. */ if (maxsector != 0xffffffff) goto do_print; scsi_read_capacity_16(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladdr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)&rcaplong, /*rcap_buf_len*/ sizeof(rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (arglist & CAM_ARG_ERR_RECOVER) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { warn("error sending READ CAPACITY (16) command"); if (arglist & CAM_ARG_VERBOSE) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } maxsector = scsi_8btou64(rcaplong.addr); block_len = scsi_4btoul(rcaplong.length); do_print: if (blocksizeonly == 0) { /* * Humanize implies !quiet, and also implies numblocks. 
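As the comment above says, READ CAPACITY(10) can only express a 32-bit last LBA, so 0xffffffff is effectively a sentinel meaning "ask READ CAPACITY(16) instead". The size computation both paths feed, as a tiny hypothetical helper:

#include <stdint.h>

#define	RC10_OVERFLOW	0xffffffffU	/* READ CAPACITY(10) last-LBA sentinel */

/* maxsector is the address of the last block, so the count is maxsector + 1. */
static uint64_t
capacity_bytes(uint64_t maxsector, uint32_t block_len)
{
	return ((maxsector + 1) * (uint64_t)block_len);
}

When the 10-byte reply equals RC10_OVERFLOW, camcontrol repeats the question with scsi_read_capacity_16() before doing this arithmetic.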
*/ if (humanize != 0) { char tmpstr[6]; int64_t tmpbytes; int ret; tmpbytes = (maxsector + 1) * block_len; ret = humanize_number(tmpstr, sizeof(tmpstr), tmpbytes, "", HN_AUTOSCALE, HN_B | HN_DECIMAL | ((baseten != 0) ? HN_DIVISOR_1000 : 0)); if (ret == -1) { warnx("%s: humanize_number failed!", __func__); retval = 1; goto bailout; } fprintf(stdout, "Device Size: %s%s", tmpstr, (sizeonly == 0) ? ", " : "\n"); } else if (numblocks != 0) { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Blocks: " : "", (uintmax_t)maxsector + 1, (sizeonly == 0) ? ", " : "\n"); } else { fprintf(stdout, "%s%ju%s", (quiet == 0) ? "Last Block: " : "", (uintmax_t)maxsector, (sizeonly == 0) ? ", " : "\n"); } } if (sizeonly == 0) fprintf(stdout, "%s%u%s\n", (quiet == 0) ? "Block Length: " : "", block_len, (quiet == 0) ? " bytes" : ""); bailout: cam_freeccb(ccb); return (retval); } static int smpcmd(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { int c, error = 0; union ccb *ccb; uint8_t *smp_request = NULL, *smp_response = NULL; int request_size = 0, response_size = 0; int fd_request = 0, fd_response = 0; char *datastr = NULL; struct get_hook hook; int retval; int flags = 0; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'R': arglist |= CAM_ARG_CMD_IN; response_size = strtol(optarg, NULL, 0); if (response_size <= 0) { warnx("invalid number of response bytes %d", response_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; optind++; datastr = cget(&hook, NULL); /* * If the user supplied "-" instead of a format, he * wants the data to be written to stdout. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_response = 1; smp_response = (u_int8_t *)malloc(response_size); if (smp_response == NULL) { warn("can't malloc memory for SMP response"); error = 1; goto smpcmd_bailout; } break; case 'r': arglist |= CAM_ARG_CMD_OUT; request_size = strtol(optarg, NULL, 0); if (request_size <= 0) { warnx("invalid number of request bytes %d", request_size); error = 1; goto smpcmd_bailout; } hook.argc = argc - optind; hook.argv = argv + optind; hook.got = 0; datastr = cget(&hook, NULL); smp_request = (u_int8_t *)malloc(request_size); if (smp_request == NULL) { warn("can't malloc memory for SMP request"); error = 1; goto smpcmd_bailout; } bzero(smp_request, request_size); /* * If the user supplied "-" instead of a format, he * wants the data to be read from stdin. */ if ((datastr != NULL) && (datastr[0] == '-')) fd_request = 1; else buff_encode_visit(smp_request, request_size, datastr, iget, &hook); optind += hook.got; break; default: break; } } /* * If fd_request is set, and we're writing to the device, we need to * read the data the user wants written from stdin.
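One caveat with the stdin loop below: it only treats a return of -1 as fatal, so a premature EOF (read() returning 0 before request_size bytes arrive) would spin forever. A defensive variant that also fails on EOF, offered as a sketch rather than a drop-in replacement:

#include <unistd.h>
#include <stdint.h>

/* Read exactly len bytes from fd; return 0 on success, -1 on error or EOF. */
static int
read_full(int fd, uint8_t *buf, size_t len)
{
	while (len > 0) {
		ssize_t n = read(fd, buf, len);

		if (n <= 0)		/* -1 is an error, 0 is premature EOF */
			return (-1);
		buf += n;
		len -= (size_t)n;
	}
	return (0);
}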
*/ if ((fd_request == 1) && (arglist & CAM_ARG_CMD_OUT)) { ssize_t amt_read; int amt_to_read = request_size; u_int8_t *buf_ptr = smp_request; for (amt_read = 0; amt_to_read > 0; amt_read = read(STDIN_FILENO, buf_ptr, amt_to_read)) { if (amt_read == -1) { warn("error reading data from stdin"); error = 1; goto smpcmd_bailout; } amt_to_read -= amt_read; buf_ptr += amt_read; } } if (((arglist & CAM_ARG_CMD_IN) == 0) || ((arglist & CAM_ARG_CMD_OUT) == 0)) { warnx("%s: need both the request (-r) and response (-R) " "arguments", __func__); error = 1; goto smpcmd_bailout; } flags |= CAM_DEV_QFRZDIS; cam_fill_smpio(&ccb->smpio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*flags*/ flags, /*smp_request*/ smp_request, /*smp_request_len*/ request_size, /*smp_response*/ smp_response, /*smp_response_len*/ response_size, /*timeout*/ timeout ? timeout : 5000); ccb->smpio.flags = SMP_FLAG_NONE; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } } if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (response_size > 0)) { if (fd_response == 0) { buff_decode_visit(smp_response, response_size, datastr, arg_put, NULL); fprintf(stdout, "\n"); } else { ssize_t amt_written; int amt_to_write = response_size; u_int8_t *buf_ptr = smp_response; for (amt_written = 0; (amt_to_write > 0) && (amt_written = write(STDOUT_FILENO, buf_ptr, amt_to_write)) > 0;){ amt_to_write -= amt_written; buf_ptr += amt_written; } if (amt_written == -1) { warn("error writing data to stdout"); error = 1; goto smpcmd_bailout; } else if ((amt_written == 0) && (amt_to_write > 0)) { warnx("only wrote %u bytes out of %u", response_size - amt_to_write, response_size); } } } smpcmd_bailout: if (ccb != NULL) cam_freeccb(ccb); if (smp_request != NULL) free(smp_request); if (smp_response != NULL) free(smp_response); return (error); } static int smpreportgeneral(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_general_request *request = NULL; struct smp_report_general_response *response = NULL; struct sbuf *sb = NULL; int error = 0; int c, long_response = 0; int retval; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); error = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*response)); error = 1; goto bailout; } try_long: smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, /*request_len*/ sizeof(*request), (uint8_t *)response, /*response_len*/ sizeof(*response), /*long_response*/ long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } error = 1; goto bailout; } /* * If the device supports the long response bit, try again and see * if we can get all of the data. */ if ((response->long_response & SMP_RG_LONG_RESPONSE) && (long_response == 0)) { ccb->ccb_h.status = CAM_REQ_INPROG; bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); long_response = 1; goto try_long; } /* * XXX KDM detect and decode SMP errors here. */ sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_general_sbuf(response, sizeof(*response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); if (sb != NULL) sbuf_delete(sb); return (error); } static struct camcontrol_opts phy_ops[] = { {"nop", SMP_PC_PHY_OP_NOP, CAM_ARG_NONE, NULL}, {"linkreset", SMP_PC_PHY_OP_LINK_RESET, CAM_ARG_NONE, NULL}, {"hardreset", SMP_PC_PHY_OP_HARD_RESET, CAM_ARG_NONE, NULL}, {"disable", SMP_PC_PHY_OP_DISABLE, CAM_ARG_NONE, NULL}, {"clearerrlog", SMP_PC_PHY_OP_CLEAR_ERR_LOG, CAM_ARG_NONE, NULL}, {"clearaffiliation", SMP_PC_PHY_OP_CLEAR_AFFILIATON, CAM_ARG_NONE,NULL}, {"sataportsel", SMP_PC_PHY_OP_TRANS_SATA_PSS, CAM_ARG_NONE, NULL}, {"clearitnl", SMP_PC_PHY_OP_CLEAR_STP_ITN_LS, CAM_ARG_NONE, NULL}, {"setdevname", SMP_PC_PHY_OP_SET_ATT_DEV_NAME, CAM_ARG_NONE, NULL}, {NULL, 0, 0, NULL} }; static int smpphycontrol(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_phy_control_request *request = NULL; struct smp_phy_control_response *response = NULL; int long_response = 0; int retval = 0; int phy = -1; uint32_t phy_operation = SMP_PC_PHY_OP_NOP; int phy_op_set = 0; uint64_t attached_dev_name = 0; int dev_name_set = 0; uint32_t min_plr = 0, max_plr = 0; uint32_t pp_timeout_val = 0; int slumber_partial = 0; int set_pp_timeout_val = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
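smp_report_general_sbuf() above renders the decoded response through sbuf(9), leaving the caller to drive the allocate/finish/print/delete sequence. The bare-bones lifecycle looks like this (error handling trimmed for brevity; userland consumers link with -lsbuf):

#include <sys/sbuf.h>
#include <stdio.h>

static void
sbuf_lifecycle(void)
{
	struct sbuf *sb = sbuf_new_auto();	/* auto-growing buffer */

	sbuf_printf(sb, "%d phys\n", 8);
	if (sbuf_finish(sb) == 0)		/* terminate; 0 on success */
		printf("%s", sbuf_data(sb));
	sbuf_delete(sb);
}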
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'a': case 'A': case 's': case 'S': { int enable = -1; if (strcasecmp(optarg, "enable") == 0) enable = 1; else if (strcasecmp(optarg, "disable") == 0) enable = 2; else { warnx("%s: Invalid argument %s", __func__, optarg); retval = 1; goto bailout; } switch (c) { case 's': slumber_partial |= enable << SMP_PC_SAS_SLUMBER_SHIFT; break; case 'S': slumber_partial |= enable << SMP_PC_SAS_PARTIAL_SHIFT; break; case 'a': slumber_partial |= enable << SMP_PC_SATA_SLUMBER_SHIFT; break; case 'A': slumber_partial |= enable << SMP_PC_SATA_PARTIAL_SHIFT; break; default: warnx("%s: programmer error", __func__); retval = 1; goto bailout; break; /*NOTREACHED*/ } break; } case 'd': attached_dev_name = (uintmax_t)strtoumax(optarg, NULL,0); dev_name_set = 1; break; case 'l': long_response = 1; break; case 'm': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ min_plr = strtoul(optarg, NULL, 0); if ((min_plr == 0) || (min_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, min_plr); retval = 1; goto bailout; } break; case 'M': /* * We don't do extensive checking here, so this * will continue to work when new speeds come out. */ max_plr = strtoul(optarg, NULL, 0); if ((max_plr == 0) || (max_plr > 0xf)) { warnx("%s: invalid link rate %x", __func__, max_plr); retval = 1; goto bailout; } break; case 'o': { camcontrol_optret optreturn; cam_argmask argnums; const char *subopt; if (phy_op_set != 0) { warnx("%s: only one phy operation argument " "(-o) allowed", __func__); retval = 1; goto bailout; } phy_op_set = 1; /* * Allow the user to specify the phy operation * numerically, as well as with a name. This will * future-proof it a bit, so options that are added * in future specs can be used. 
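Concretely, the -o handler below first tries the argument as a number and only then consults the phy_ops table, so an operation code from a newer SAS spec still works before camcontrol learns a name for it. A condensed sketch of that dual parse (the table type and helper are hypothetical):

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

struct name_num {
	const char	*name;
	unsigned	 value;
};

/* Accept either a raw number such as "0x1" or a name; -1 if unknown. */
static int
parse_op(const char *arg, const struct name_num *table)
{
	if (isdigit((unsigned char)arg[0]))
		return ((int)strtoul(arg, NULL, 0));
	for (; table->name != NULL; table++) {
		if (strcmp(table->name, arg) == 0)
			return ((int)table->value);
	}
	return (-1);
}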
*/ if (isdigit(optarg[0])) { phy_operation = strtoul(optarg, NULL, 0); if ((phy_operation == 0) || (phy_operation > 0xff)) { warnx("%s: invalid phy operation %#x", __func__, phy_operation); retval = 1; goto bailout; } break; } optreturn = getoption(phy_ops, optarg, &phy_operation, &argnums, &subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("%s: ambiguous option %s", __func__, optarg); usage(0); retval = 1; goto bailout; } else if (optreturn == CC_OR_NOT_FOUND) { warnx("%s: option %s not found", __func__, optarg); usage(0); retval = 1; goto bailout; } break; } case 'p': phy = atoi(optarg); break; case 'T': pp_timeout_val = strtoul(optarg, NULL, 0); if (pp_timeout_val > 15) { warnx("%s: invalid partial pathway timeout " "value %u, need a value less than 16", __func__, pp_timeout_val); retval = 1; goto bailout; } set_pp_timeout_val = 1; break; default: break; } } if (phy == -1) { warnx("%s: a PHY (-p phy) argument is required",__func__); retval = 1; goto bailout; } if (((dev_name_set != 0) && (phy_operation != SMP_PC_PHY_OP_SET_ATT_DEV_NAME)) || ((phy_operation == SMP_PC_PHY_OP_SET_ATT_DEV_NAME) && (dev_name_set == 0))) { warnx("%s: -d name and -o setdevname arguments both " "required to set device name", __func__); retval = 1; goto bailout; } request = malloc(sizeof(*request)); if (request == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); retval = 1; goto bailout; } response = malloc(sizeof(*response)); if (response == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*request)); retval = 1; goto bailout; } smp_phy_control(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, request, sizeof(*request), (uint8_t *)response, sizeof(*response), long_response, /*expected_exp_change_count*/ 0, phy, phy_operation, (set_pp_timeout_val != 0) ? 1 : 0, attached_dev_name, min_plr, max_plr, slumber_partial, pp_timeout_val, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { /* * Use CAM_EPF_NORMAL so we only get one line of * SMP command decoding. */ cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_NORMAL, stderr); } retval = 1; goto bailout; } /* XXX KDM print out something here for success? */ bailout: if (ccb != NULL) cam_freeccb(ccb); if (request != NULL) free(request); if (response != NULL) free(response); return (retval); } static int smpmaninfo(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; struct smp_report_manuf_info_request request; struct smp_report_manuf_info_response response; struct sbuf *sb = NULL; int long_response = 0; int retval = 0; int c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. 
*/ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; default: break; } } bzero(&request, sizeof(request)); bzero(&response, sizeof(response)); smp_report_manuf_info(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, &request, sizeof(request), (uint8_t *)&response, sizeof(response), long_response, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } sb = sbuf_new_auto(); if (sb == NULL) { warnx("%s: error allocating sbuf", __func__); goto bailout; } smp_report_manuf_info_sbuf(&response, sizeof(response), sb); if (sbuf_finish(sb) != 0) { warnx("%s: sbuf_finish", __func__); goto bailout; } printf("%s", sbuf_data(sb)); bailout: if (ccb != NULL) cam_freeccb(ccb); if (sb != NULL) sbuf_delete(sb); return (retval); } static int getdevid(struct cam_devitem *item) { int retval = 0; union ccb *ccb = NULL; struct cam_device *dev; dev = cam_open_btl(item->dev_match.path_id, item->dev_match.target_id, item->dev_match.target_lun, O_RDWR, NULL); if (dev == NULL) { warnx("%s", cam_errbuf); retval = 1; goto bailout; } item->device_id_len = 0; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); retval = 1; goto bailout; } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); /* * On the first try, we just probe for the size of the data, and * then allocate that much memory and try again. */ retry: ccb->ccb_h.func_code = XPT_DEV_ADVINFO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->cdai.flags = CDAI_FLAG_NONE; ccb->cdai.buftype = CDAI_TYPE_SCSI_DEVID; ccb->cdai.bufsiz = item->device_id_len; if (item->device_id_len != 0) ccb->cdai.buf = (uint8_t *)item->device_id; if (cam_send_ccb(dev, ccb) < 0) { warn("%s: error sending XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } if (ccb->ccb_h.status != CAM_REQ_CMP) { warnx("%s: CAM status %#x", __func__, ccb->ccb_h.status); retval = 1; goto bailout; } if (item->device_id_len == 0) { /* * This is our first time through. Allocate the buffer, * and then go back to get the data. */ if (ccb->cdai.provsiz == 0) { warnx("%s: invalid .provsiz field returned with " "XPT_GDEV_ADVINFO CCB", __func__); retval = 1; goto bailout; } item->device_id_len = ccb->cdai.provsiz; item->device_id = malloc(item->device_id_len); if (item->device_id == NULL) { warn("%s: unable to allocate %d bytes", __func__, item->device_id_len); retval = 1; goto bailout; } ccb->ccb_h.status = CAM_REQ_INPROG; goto retry; } bailout: if (dev != NULL) cam_close_device(dev); if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * XXX KDM merge this code with getdevtree()? 
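getdevid() above makes two XPT_DEV_ADVINFO passes: the first with a zero-length buffer purely to learn provsiz, the second with a buffer of exactly that size. The shape of that protocol, reduced to a sketch where query() stands in for resending the CCB:

#include <stdlib.h>

/*
 * Two-pass size probe: ask with no buffer to learn the provided
 * size, then allocate and ask again.  query() returns the device's
 * "provsiz" answer and, when given a buffer, fills it.
 */
static void *
get_advinfo(size_t (*query)(void *buf, size_t bufsiz), size_t *lenp)
{
	size_t provsiz = query(NULL, 0);	/* pass 1: size only */
	void *buf;

	if (provsiz == 0 || (buf = malloc(provsiz)) == NULL)
		return (NULL);
	(void)query(buf, provsiz);		/* pass 2: the data */
	*lenp = provsiz;
	return (buf);
}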
*/ static int buildbusdevlist(struct cam_devlist *devlist) { union ccb ccb; int bufsize, fd = -1; struct dev_match_pattern *patterns; struct cam_devitem *item = NULL; int skip_device = 0; int retval = 0; if ((fd = open(XPT_DEVICE, O_RDWR)) == -1) { warn("couldn't open %s", XPT_DEVICE); return(1); } bzero(&ccb, sizeof(union ccb)); ccb.ccb_h.path_id = CAM_XPT_PATH_ID; ccb.ccb_h.target_id = CAM_TARGET_WILDCARD; ccb.ccb_h.target_lun = CAM_LUN_WILDCARD; ccb.ccb_h.func_code = XPT_DEV_MATCH; bufsize = sizeof(struct dev_match_result) * 100; ccb.cdm.match_buf_len = bufsize; ccb.cdm.matches = (struct dev_match_result *)malloc(bufsize); if (ccb.cdm.matches == NULL) { warnx("can't malloc memory for matches"); close(fd); return(1); } ccb.cdm.num_matches = 0; ccb.cdm.num_patterns = 2; ccb.cdm.pattern_buf_len = sizeof(struct dev_match_pattern) * ccb.cdm.num_patterns; patterns = (struct dev_match_pattern *)malloc(ccb.cdm.pattern_buf_len); if (patterns == NULL) { warnx("can't malloc memory for patterns"); retval = 1; goto bailout; } ccb.cdm.patterns = patterns; bzero(patterns, ccb.cdm.pattern_buf_len); patterns[0].type = DEV_MATCH_DEVICE; patterns[0].pattern.device_pattern.flags = DEV_MATCH_PATH; patterns[0].pattern.device_pattern.path_id = devlist->path_id; patterns[1].type = DEV_MATCH_PERIPH; patterns[1].pattern.periph_pattern.flags = PERIPH_MATCH_PATH; patterns[1].pattern.periph_pattern.path_id = devlist->path_id; /* * We do the ioctl multiple times if necessary, in case there are * more than 100 nodes in the EDT. */ do { unsigned int i; if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1) { warn("error sending CAMIOCOMMAND ioctl"); retval = 1; goto bailout; } if ((ccb.ccb_h.status != CAM_REQ_CMP) || ((ccb.cdm.status != CAM_DEV_MATCH_LAST) && (ccb.cdm.status != CAM_DEV_MATCH_MORE))) { warnx("got CAM error %#x, CDM error %d\n", ccb.ccb_h.status, ccb.cdm.status); retval = 1; goto bailout; } for (i = 0; i < ccb.cdm.num_matches; i++) { switch (ccb.cdm.matches[i].type) { case DEV_MATCH_DEVICE: { struct device_match_result *dev_result; dev_result = &ccb.cdm.matches[i].result.device_result; if (dev_result->flags & DEV_RESULT_UNCONFIGURED) { skip_device = 1; break; } else skip_device = 0; item = malloc(sizeof(*item)); if (item == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*item)); retval = 1; goto bailout; } bzero(item, sizeof(*item)); bcopy(dev_result, &item->dev_match, sizeof(*dev_result)); STAILQ_INSERT_TAIL(&devlist->dev_queue, item, links); if (getdevid(item) != 0) { retval = 1; goto bailout; } break; } case DEV_MATCH_PERIPH: { struct periph_match_result *periph_result; periph_result = &ccb.cdm.matches[i].result.periph_result; if (skip_device != 0) break; item->num_periphs++; item->periph_matches = realloc( item->periph_matches, item->num_periphs * sizeof(struct periph_match_result)); if (item->periph_matches == NULL) { warn("%s: error allocating periph " "list", __func__); retval = 1; goto bailout; } bcopy(periph_result, &item->periph_matches[ item->num_periphs - 1], sizeof(*periph_result)); break; } default: fprintf(stderr, "%s: unexpected match " "type %d\n", __func__, ccb.cdm.matches[i].type); retval = 1; goto bailout; break; /*NOTREACHED*/ } } } while ((ccb.ccb_h.status == CAM_REQ_CMP) && (ccb.cdm.status == CAM_DEV_MATCH_MORE)); bailout: if (fd != -1) close(fd); free(patterns); free(ccb.cdm.matches); if (retval != 0) freebusdevlist(devlist); return (retval); } static void freebusdevlist(struct cam_devlist *devlist) { struct cam_devitem *item, *item2; STAILQ_FOREACH_SAFE(item, 
&devlist->dev_queue, links, item2) { STAILQ_REMOVE(&devlist->dev_queue, item, cam_devitem, links); free(item->device_id); free(item->periph_matches); free(item); } } static struct cam_devitem * findsasdevice(struct cam_devlist *devlist, uint64_t sasaddr) { struct cam_devitem *item; STAILQ_FOREACH(item, &devlist->dev_queue, links) { struct scsi_vpd_id_descriptor *idd; /* * XXX KDM look for LUN IDs as well? */ idd = scsi_get_devid(item->device_id, item->device_id_len, scsi_devid_is_sas_target); if (idd == NULL) continue; if (scsi_8btou64(idd->identifier) == sasaddr) return (item); } return (NULL); } static int smpphylist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { struct smp_report_general_request *rgrequest = NULL; struct smp_report_general_response *rgresponse = NULL; struct smp_discover_request *disrequest = NULL; struct smp_discover_response *disresponse = NULL; struct cam_devlist devlist; union ccb *ccb; int long_response = 0; int num_phys = 0; int quiet = 0; int retval; int i, c; /* * Note that at the moment we don't support sending SMP CCBs to * devices that aren't probed by CAM. */ ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating CCB", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); STAILQ_INIT(&devlist.dev_queue); rgrequest = malloc(sizeof(*rgrequest)); if (rgrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgrequest)); retval = 1; goto bailout; } rgresponse = malloc(sizeof(*rgresponse)); if (rgresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*rgresponse)); retval = 1; goto bailout; } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': long_response = 1; break; case 'q': quiet = 1; break; default: break; } } smp_report_general(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, rgrequest, /*request_len*/ sizeof(*rgrequest), (uint8_t *)rgresponse, /*response_len*/ sizeof(*rgresponse), /*long_response*/ long_response, timeout); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (((retval = cam_send_ccb(device, ccb)) < 0) || ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } num_phys = rgresponse->num_phys; if (num_phys == 0) { if (quiet == 0) fprintf(stdout, "%s: No Phys reported\n", __func__); retval = 1; goto bailout; } devlist.path_id = device->path_id; retval = buildbusdevlist(&devlist); if (retval != 0) goto bailout; if (quiet == 0) { fprintf(stdout, "%d PHYs:\n", num_phys); fprintf(stdout, "PHY Attached SAS Address\n"); } disrequest = malloc(sizeof(*disrequest)); if (disrequest == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disrequest)); retval = 1; goto bailout; } disresponse = malloc(sizeof(*disresponse)); if (disresponse == NULL) { warn("%s: unable to allocate %zd bytes", __func__, sizeof(*disresponse)); retval = 1; goto bailout; } for (i = 0; i < num_phys; i++) { struct cam_devitem *item; struct device_match_result *dev_match; char vendor[16], product[48], revision[16]; char tmpstr[256]; int j; bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); ccb->ccb_h.status = CAM_REQ_INPROG; ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; smp_discover(&ccb->smpio, retry_count, /*cbfcnp*/ NULL, disrequest, sizeof(*disrequest), (uint8_t 
*)disresponse, sizeof(*disresponse), long_response, /*ignore_zone_group*/ 0, /*phy*/ i, timeout); if (((retval = cam_send_ccb(device, ccb)) < 0) || (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (disresponse->function_result != SMP_FR_PHY_VACANT))) { const char warnstr[] = "error sending command"; if (retval < 0) warn(warnstr); else warnx(warnstr); if (arglist & CAM_ARG_VERBOSE) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); } retval = 1; goto bailout; } if (disresponse->function_result == SMP_FR_PHY_VACANT) { if (quiet == 0) fprintf(stdout, "%3d \n", i); continue; } if (disresponse->attached_device == SMP_DIS_AD_TYPE_NONE) { item = NULL; } else { item = findsasdevice(&devlist, scsi_8btou64(disresponse->attached_sas_address)); } if ((quiet == 0) || (item != NULL)) { fprintf(stdout, "%3d 0x%016jx", i, (uintmax_t)scsi_8btou64( disresponse->attached_sas_address)); if (item == NULL) { fprintf(stdout, "\n"); continue; } } else if (quiet != 0) continue; dev_match = &item->dev_match; if (dev_match->protocol == PROTO_SCSI) { cam_strvis(vendor, dev_match->inq_data.vendor, sizeof(dev_match->inq_data.vendor), sizeof(vendor)); cam_strvis(product, dev_match->inq_data.product, sizeof(dev_match->inq_data.product), sizeof(product)); cam_strvis(revision, dev_match->inq_data.revision, sizeof(dev_match->inq_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s %s>", vendor, product, revision); } else if ((dev_match->protocol == PROTO_ATA) || (dev_match->protocol == PROTO_SATAPM)) { cam_strvis(product, dev_match->ident_data.model, sizeof(dev_match->ident_data.model), sizeof(product)); cam_strvis(revision, dev_match->ident_data.revision, sizeof(dev_match->ident_data.revision), sizeof(revision)); sprintf(tmpstr, "<%s %s>", product, revision); } else { sprintf(tmpstr, "<>"); } fprintf(stdout, " %-33s ", tmpstr); /* * If we have 0 periphs, that's a bug... */ if (item->num_periphs == 0) { fprintf(stdout, "\n"); continue; } fprintf(stdout, "("); for (j = 0; j < item->num_periphs; j++) { if (j > 0) fprintf(stdout, ","); fprintf(stdout, "%s%d", item->periph_matches[j].periph_name, item->periph_matches[j].unit_number); } fprintf(stdout, ")\n"); } bailout: if (ccb != NULL) cam_freeccb(ccb); free(rgrequest); free(rgresponse); free(disrequest); free(disresponse); freebusdevlist(&devlist); return (retval); } static int atapm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int t = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 't': t = atoi(optarg); break; default: break; } } if (strcmp(argv[1], "idle") == 0) { if (t == -1) cmd = ATA_IDLE_IMMEDIATE; else cmd = ATA_IDLE_CMD; } else if (strcmp(argv[1], "standby") == 0) { if (t == -1) cmd = ATA_STANDBY_IMMEDIATE; else cmd = ATA_STANDBY_CMD; } else { cmd = ATA_SLEEP; t = -1; } if (t < 0) sc = 0; else if (t <= (240 * 5)) sc = (t + 4) / 5; else if (t <= (252 * 5)) /* special encoding for 21 minutes */ sc = 252; else if (t <= (11 * 30 * 60)) sc = (t - 1) / (30 * 60) + 241; else sc = 253; retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/cmd, /*features*/0, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? 
timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } static int ataaxm(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout) { union ccb *ccb; int retval = 0; int l = -1; int c; u_char cmd, sc; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'l': l = atoi(optarg); break; default: break; } } sc = 0; if (strcmp(argv[1], "apm") == 0) { if (l == -1) cmd = 0x85; else { cmd = 0x05; sc = l; } } else /* aam */ { if (l == -1) cmd = 0xC2; else { cmd = 0x42; sc = l; } } retval = ata_do_28bit_cmd(device, ccb, /*retries*/retry_count, /*flags*/CAM_DIR_NONE, /*protocol*/AP_PROTO_NON_DATA, /*tag_action*/MSG_SIMPLE_Q_TAG, /*command*/ATA_SETFEATURES, /*features*/cmd, /*lba*/0, /*sector_count*/sc, /*data_ptr*/NULL, /*dxfer_len*/0, /*timeout*/timeout ? timeout : 30 * 1000, /*quiet*/1); cam_freeccb(ccb); return (retval); } int scsigetopcodes(struct cam_device *device, int opcode_set, int opcode, int show_sa_errors, int sa_set, int service_action, int timeout_desc, int retry_count, int timeout, int verbosemode, uint32_t *fill_len, uint8_t **data_ptr) { union ccb *ccb = NULL; uint8_t *buf = NULL; uint32_t alloc_len = 0, num_opcodes; uint32_t valid_len = 0; uint32_t avail_len = 0; struct scsi_report_supported_opcodes_all *all_hdr; struct scsi_report_supported_opcodes_one *one; int options = 0; int retval = 0; /* * Make it clear that we haven't yet allocated or filled anything. */ *fill_len = 0; *data_ptr = NULL; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); if (opcode_set != 0) { options |= RSO_OPTIONS_OC; num_opcodes = 1; alloc_len = sizeof(*one) + CAM_MAX_CDBLEN; } else { num_opcodes = 256; alloc_len = sizeof(*all_hdr) + (num_opcodes * sizeof(struct scsi_report_supported_opcodes_descr)); } if (timeout_desc != 0) { options |= RSO_RCTD; alloc_len += num_opcodes * sizeof(struct scsi_report_supported_opcodes_timeout); } if (sa_set != 0) { options |= RSO_OPTIONS_OC_SA; if (show_sa_errors != 0) options &= ~RSO_OPTIONS_OC; } retry_alloc: if (buf != NULL) { free(buf); buf = NULL; } buf = malloc(alloc_len); if (buf == NULL) { warn("Unable to allocate %u bytes", alloc_len); retval = 1; goto bailout; } bzero(buf, alloc_len); scsi_report_supported_opcodes(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*options*/ options, /*req_opcode*/ opcode, /*req_service_action*/ service_action, /*data_ptr*/ buf, /*dxfer_len*/ alloc_len, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? 
timeout : 10000); ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(device, ccb) < 0) { perror("error sending REPORT SUPPORTED OPERATION CODES"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (verbosemode != 0) cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } valid_len = ccb->csio.dxfer_len - ccb->csio.resid; if (((options & RSO_OPTIONS_MASK) == RSO_OPTIONS_ALL) && (valid_len >= sizeof(*all_hdr))) { all_hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(all_hdr->length) + sizeof(*all_hdr); } else if (((options & RSO_OPTIONS_MASK) != RSO_OPTIONS_ALL) && (valid_len >= sizeof(*one))) { uint32_t cdb_length; one = (struct scsi_report_supported_opcodes_one *)buf; cdb_length = scsi_2btoul(one->cdb_length); avail_len = sizeof(*one) + cdb_length; if (one->support & RSO_ONE_CTDP) { struct scsi_report_supported_opcodes_timeout *td; td = (struct scsi_report_supported_opcodes_timeout *) &buf[avail_len]; if (valid_len >= (avail_len + sizeof(td->length))) { avail_len += scsi_2btoul(td->length) + sizeof(td->length); } else { avail_len += sizeof(*td); } } } /* * avail_len could be zero if we didn't get enough data back from * the target to determine how much is actually available. */ if ((avail_len != 0) && (avail_len > valid_len)) { alloc_len = avail_len; goto retry_alloc; } *fill_len = valid_len; *data_ptr = buf; bailout: if (retval != 0) free(buf); cam_freeccb(ccb); return (retval); } static int scsiprintoneopcode(struct cam_device *device, int req_opcode, int sa_set, int req_sa, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_one *one; struct scsi_report_supported_opcodes_timeout *td; uint32_t cdb_len = 0, td_len = 0; const char *op_desc = NULL; unsigned int i; int retval = 0; one = (struct scsi_report_supported_opcodes_one *)buf; /* * If we don't have the full single opcode descriptor, no point in * continuing. */ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_length)) { warnx("Only %u bytes returned, not enough to verify support", valid_len); retval = 1; goto bailout; } op_desc = scsi_op_desc(req_opcode, &device->inq_data); printf("%s (0x%02x)", op_desc != NULL ? op_desc : "UNKNOWN", req_opcode); if (sa_set != 0) printf(", SA 0x%x", req_sa); printf(": "); switch (one->support & RSO_ONE_SUP_MASK) { case RSO_ONE_SUP_UNAVAIL: printf("No command support information currently available\n"); break; case RSO_ONE_SUP_NOT_SUP: printf("Command not supported\n"); retval = 1; goto bailout; break; /*NOTREACHED*/ case RSO_ONE_SUP_AVAIL: printf("Command is supported, complies with a SCSI standard\n"); break; case RSO_ONE_SUP_VENDOR: printf("Command is supported, vendor-specific " "implementation\n"); break; default: printf("Unknown command support flags %#x\n", one->support & RSO_ONE_SUP_MASK); break; } /* * If we don't have the CDB length, it isn't exactly an error, the * command probably isn't supported. */ if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one, cdb_usage)) goto bailout; cdb_len = scsi_2btoul(one->cdb_length); /* * If our valid data doesn't include the full reported length, * return. The caller should have detected this and adjusted his * allocation length to get all of the available data. */ if (valid_len < sizeof(*one) + cdb_len) { retval = 1; goto bailout; } /* * If all we have is the opcode, there is no point in printing out * the usage bitmap.
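For reference when reading the bitmap printed below: as we read SPC-4, cdb_usage is a mask the length of the CDB in which a one bit marks a CDB bit the device server actually evaluates, with byte 0 holding the opcode itself. A tiny decoder sketch under that assumption:

#include <stdio.h>
#include <stdint.h>

/* Print which bits of each CDB byte the device claims to evaluate. */
static void
print_usage_map(const uint8_t *usage, unsigned cdb_len)
{
	unsigned i;

	for (i = 1; i < cdb_len; i++)	/* byte 0 is the opcode */
		printf("CDB byte %2u: significant bits %#02x\n", i, usage[i]);
}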
*/ if (cdb_len <= 1) { retval = 1; goto bailout; } printf("CDB usage bitmap:"); for (i = 0; i < cdb_len; i++) { printf(" %02x", one->cdb_usage[i]); } printf("\n"); /* * If we don't have a timeout descriptor, we're done. */ if ((one->support & RSO_ONE_CTDP) == 0) goto bailout; /* * If we don't have enough valid length to include the timeout * descriptor length, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + sizeof(td->length))) goto bailout; td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); /* * If we don't have the full timeout descriptor, we're done. */ if (td_len < sizeof(*td)) goto bailout; /* * If we don't have enough valid length to contain the full timeout * descriptor, we're done. */ if (valid_len < (sizeof(*one) + cdb_len + td_len)) goto bailout; printf("Timeout information:\n"); printf("Command-specific: 0x%02x\n", td->cmd_specific); printf("Nominal timeout: %u seconds\n", scsi_4btoul(td->nominal_time)); printf("Recommended timeout: %u seconds\n", scsi_4btoul(td->recommended_time)); bailout: return (retval); } static int scsiprintopcodes(struct cam_device *device, int td_req, uint8_t *buf, uint32_t valid_len) { struct scsi_report_supported_opcodes_all *hdr; struct scsi_report_supported_opcodes_descr *desc; uint32_t avail_len = 0, used_len = 0; uint8_t *cur_ptr; int retval = 0; if (valid_len < sizeof(*hdr)) { warnx("%s: not enough returned data (%u bytes) for opcode list", __func__, valid_len); retval = 1; goto bailout; } hdr = (struct scsi_report_supported_opcodes_all *)buf; avail_len = scsi_4btoul(hdr->length); avail_len += sizeof(hdr->length); /* * Take the lesser of the amount of data the drive claims is * available, and the amount of data the HBA says was returned. */ avail_len = MIN(avail_len, valid_len); used_len = sizeof(hdr->length); printf("%-6s %4s %8s ", "Opcode", "SA", "CDB len"); if (td_req != 0) printf("%5s %6s %6s ", "CS", "Nom", "Rec"); printf(" Description\n"); while ((avail_len - used_len) > sizeof(*desc)) { struct scsi_report_supported_opcodes_timeout *td; uint32_t td_len; const char *op_desc = NULL; cur_ptr = &buf[used_len]; desc = (struct scsi_report_supported_opcodes_descr *)cur_ptr; op_desc = scsi_op_desc(desc->opcode, &device->inq_data); if (op_desc == NULL) op_desc = "UNKNOWN"; printf("0x%02x %#4x %8u ", desc->opcode, scsi_2btoul(desc->service_action), scsi_2btoul(desc->cdb_length)); used_len += sizeof(*desc); if ((desc->flags & RSO_CTDP) == 0) { printf(" %s\n", op_desc); continue; } /* * If we don't have enough space to fit a timeout * descriptor, then we're done. */ if (avail_len - used_len < sizeof(*td)) { used_len = avail_len; printf(" %s\n", op_desc); continue; } cur_ptr = &buf[used_len]; td = (struct scsi_report_supported_opcodes_timeout *)cur_ptr; td_len = scsi_2btoul(td->length); td_len += sizeof(td->length); used_len += td_len; /* * If the given timeout descriptor length is less than what * we understand, skip it.
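Both print paths walk length-prefixed timeout descriptors and clamp to the smaller of what the drive claims and what the HBA actually returned before trusting any descriptor's own length field. The same bounds-checked walk in generic form (visit() is a hypothetical callback):

#include <stddef.h>
#include <stdint.h>

/*
 * Walk descriptors in buf[0..valid_len): each begins with a 2-byte
 * big-endian length that does not count itself.  Stop as soon as a
 * descriptor would run past the validated data.
 */
static void
walk_descriptors(const uint8_t *buf, size_t valid_len,
    void (*visit)(const uint8_t *descr, size_t len))
{
	size_t off = 0;

	while (off + 2 <= valid_len) {
		size_t dlen = (((size_t)buf[off] << 8) | buf[off + 1]) + 2;

		if (dlen > valid_len - off)
			break;			/* truncated descriptor */
		visit(&buf[off], dlen);
		off += dlen;
	}
}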
if (td_len < sizeof(*td)) { printf(" %s\n", op_desc); continue; } printf(" 0x%02x %6u %6u %s\n", td->cmd_specific, scsi_4btoul(td->nominal_time), scsi_4btoul(td->recommended_time), op_desc); } bailout: return (retval); } static int scsiopcodes(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbosemode) { int c; uint32_t opcode = 0, service_action = 0; int td_set = 0, opcode_set = 0, sa_set = 0; int show_sa_errors = 1; uint32_t valid_len = 0; uint8_t *buf = NULL; char *endptr; int retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'N': show_sa_errors = 0; break; case 'o': opcode = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid opcode \"%s\", must be a number", optarg); retval = 1; goto bailout; } if (opcode > 0xff) { warnx("Invalid opcode %#x, must be between " "0 and 0xff inclusive", opcode); retval = 1; goto bailout; } opcode_set = 1; break; case 's': service_action = strtoul(optarg, &endptr, 0); if (*endptr != '\0') { warnx("Invalid service action \"%s\", must " "be a number", optarg); retval = 1; goto bailout; } if (service_action > 0xffff) { warnx("Invalid service action %#x, must " "be between 0 and 0xffff inclusive", service_action); retval = 1; goto bailout; } sa_set = 1; break; case 'T': td_set = 1; break; default: break; } } if ((sa_set != 0) && (opcode_set == 0)) { warnx("You must specify an opcode with -o if a service " "action is given"); retval = 1; goto bailout; } retval = scsigetopcodes(device, opcode_set, opcode, show_sa_errors, sa_set, service_action, td_set, retry_count, timeout, verbosemode, &valid_len, &buf); if (retval != 0) goto bailout; if ((opcode_set != 0) || (sa_set != 0)) { retval = scsiprintoneopcode(device, opcode, sa_set, service_action, buf, valid_len); } else { retval = scsiprintopcodes(device, td_set, buf, valid_len); } bailout: free(buf); return (retval); } #endif /* MINIMALISTIC */ static int scsireprobe(struct cam_device *device) { union ccb *ccb; int retval = 0; ccb = cam_getccb(device); if (ccb == NULL) { warnx("%s: error allocating ccb", __func__); return (1); } bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); ccb->ccb_h.func_code = XPT_REPROBE_LUN; if (cam_send_ccb(device, ccb) < 0) { warn("error sending XPT_REPROBE_LUN CCB"); retval = 1; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } bailout: cam_freeccb(ccb); return (retval); } void usage(int printlong) { fprintf(printlong ?
stdout : stderr, "usage: camcontrol [device id][generic args][command args]\n" " camcontrol devlist [-b] [-v]\n" #ifndef MINIMALISTIC " camcontrol periphlist [dev_id][-n dev_name] [-u unit]\n" " camcontrol tur [dev_id][generic args]\n" " camcontrol inquiry [dev_id][generic args] [-D] [-S] [-R]\n" " camcontrol identify [dev_id][generic args] [-v]\n" " camcontrol reportluns [dev_id][generic args] [-c] [-l] [-r report]\n" " camcontrol readcap [dev_id][generic args] [-b] [-h] [-H] [-N]\n" " [-q] [-s]\n" " camcontrol start [dev_id][generic args]\n" " camcontrol stop [dev_id][generic args]\n" " camcontrol load [dev_id][generic args]\n" " camcontrol eject [dev_id][generic args]\n" " camcontrol reprobe [dev_id][generic args]\n" #endif /* MINIMALISTIC */ " camcontrol rescan <all | bus[:target:lun]>\n" " camcontrol reset <all | bus[:target:lun]>\n" #ifndef MINIMALISTIC " camcontrol defects [dev_id][generic args] <-f format> [-P][-G]\n" " [-q][-s][-S offset][-X]\n" " camcontrol modepage [dev_id][generic args] <-m page | -l>\n" " [-P pagectl][-e | -b][-d]\n" " camcontrol cmd [dev_id][generic args]\n" " <-a cmd [args] | -c cmd [args]>\n" " [-d] [-f] [-i len fmt|-o len fmt [args]] [-r fmt]\n" " camcontrol smpcmd [dev_id][generic args]\n" " <-r len fmt [args]> <-R len fmt [args]>\n" " camcontrol smprg [dev_id][generic args][-l]\n" " camcontrol smppc [dev_id][generic args] <-p phy> [-l]\n" " [-o operation][-d name][-m rate][-M rate]\n" " [-T pp_timeout][-a enable|disable]\n" " [-A enable|disable][-s enable|disable]\n" " [-S enable|disable]\n" " camcontrol smpphylist [dev_id][generic args][-l][-q]\n" " camcontrol smpmaninfo [dev_id][generic args][-l]\n" " camcontrol debug [-I][-P][-T][-S][-X][-c]\n" " <all|off|bus[:target[:lun]]>\n" " camcontrol tags [dev_id][generic args] [-N tags] [-q] [-v]\n" " camcontrol negotiate [dev_id][generic args] [-a][-c]\n" " [-D <enable|disable>][-M mode][-O offset]\n" " [-q][-R syncrate][-v][-T <enable|disable>]\n" " [-U][-W bus_width]\n" " camcontrol format [dev_id][generic args][-q][-r][-w][-y]\n" " camcontrol sanitize [dev_id][generic args]\n" " [-a overwrite|block|crypto|exitfailure]\n" " [-c passes][-I][-P pattern][-q][-U][-r][-w]\n" " [-y]\n" " camcontrol idle [dev_id][generic args][-t time]\n" " camcontrol standby [dev_id][generic args][-t time]\n" " camcontrol sleep [dev_id][generic args]\n" " camcontrol apm [dev_id][generic args][-l level]\n" " camcontrol aam [dev_id][generic args][-l level]\n" " camcontrol fwdownload [dev_id][generic args] <-f fw_image> [-q]\n" " [-s][-y]\n" " camcontrol security [dev_id][generic args]\n" " <-d pwd | -e pwd | -f | -h pwd | -k pwd>\n" " [-l <high|maximum>] [-q] [-s pwd] [-T timeout]\n" " [-U <user|master>] [-y]\n" " camcontrol hpa [dev_id][generic args] [-f] [-l] [-P] [-p pwd]\n" " [-q] [-s max_sectors] [-U pwd] [-y]\n" " camcontrol persist [dev_id][generic args] <-i action|-o action>\n" " [-a][-I tid][-k key][-K sa_key][-p][-R rtp]\n" " [-s scope][-S][-T type][-U]\n" " camcontrol attrib [dev_id][generic args] <-r action|-w attr>\n" " [-a attr_num][-c][-e elem][-F form1,form2]\n" " [-p part][-s start][-T type][-V vol]\n" " camcontrol opcodes [dev_id][generic args][-o opcode][-s SA]\n" " [-N][-T]\n" +" camcontrol zone [dev_id][generic args]<-c cmd> [-a] [-l LBA]\n" +" [-o rep_opts] [-P print_opts]\n" +" camcontrol epc [dev_id][generic_args]<-c cmd> [-d] [-D] [-e]\n" +" [-H] [-p power_cond] [-P] [-r rst_src] [-s]\n" +" [-S power_src] [-T timer]\n" #endif /* MINIMALISTIC */ " camcontrol help\n"); if (!printlong) return; #ifndef MINIMALISTIC fprintf(stdout, "Specify one of the following options:\n" "devlist list all CAM devices\n" "periphlist list all CAM peripheral
drivers attached to a device\n" "tur send a test unit ready to the named device\n" "inquiry send a SCSI inquiry command to the named device\n" "identify send an ATA identify command to the named device\n" "reportluns send a SCSI report luns command to the device\n" "readcap send a SCSI read capacity command to the device\n" "start send a Start Unit command to the device\n" "stop send a Stop Unit command to the device\n" "load send a Start Unit command to the device with the load bit set\n" "eject send a Stop Unit command to the device with the eject bit set\n" "reprobe update capacity information of the given device\n" "rescan rescan all busses, the given bus, or bus:target:lun\n" "reset reset all busses, the given bus, or bus:target:lun\n" "defects read the defect list of the specified device\n" "modepage display or edit (-e) the given mode page\n" "cmd send the given SCSI command, may need -i or -o as well\n" "smpcmd send the given SMP command, requires -r and -R\n" "smprg send the SMP Report General command\n" "smppc send the SMP PHY Control command, requires -p\n" "smpphylist display phys attached to a SAS expander\n" "smpmaninfo send the SMP Report Manufacturer Info command\n" "debug turn debugging on/off for a bus, target, or lun, or all devices\n" "tags report or set the number of transaction slots for a device\n" "negotiate report or set device negotiation parameters\n" "format send the SCSI FORMAT UNIT command to the named device\n" "sanitize send the SCSI SANITIZE command to the named device\n" "idle send the ATA IDLE command to the named device\n" "standby send the ATA STANDBY command to the named device\n" "sleep send the ATA SLEEP command to the named device\n" "fwdownload program firmware of the named device with the given image\n" "security report or send ATA security commands to the named device\n" "persist send the SCSI PERSISTENT RESERVE IN or OUT commands\n" "attrib send the SCSI READ or WRITE ATTRIBUTE commands\n" "opcodes send the SCSI REPORT SUPPORTED OPCODES command\n" +"zone manage Zoned Block (Shingled) devices\n" +"epc send ATA Extended Power Conditions commands\n" "help this message\n" "Device Identifiers:\n" "bus:target specify the bus and target, lun defaults to 0\n" "bus:target:lun specify the bus, target and lun\n" "deviceUNIT specify the device name, like \"da4\" or \"cd2\"\n" "Generic arguments:\n" "-v be verbose, print out sense information\n" "-t timeout command timeout in seconds, overrides default timeout\n" "-n dev_name specify device name, e.g. \"da\", \"cd\"\n" "-u unit specify unit number, e.g.
\"0\", \"5\"\n" "-E have the kernel attempt to perform SCSI error recovery\n" "-C count specify the SCSI command retry count (needs -E to work)\n" "modepage arguments:\n" "-l list all available mode pages\n" "-m page specify the mode page to view or edit\n" "-e edit the specified mode page\n" "-b force view to binary mode\n" "-d disable block descriptors for mode sense\n" "-P pgctl page control field 0-3\n" "defects arguments:\n" "-f format specify defect list format (block, bfi or phys)\n" "-G get the grown defect list\n" "-P get the permanent defect list\n" "inquiry arguments:\n" "-D get the standard inquiry data\n" "-S get the serial number\n" "-R get the transfer rate, etc.\n" "reportluns arguments:\n" "-c only report a count of available LUNs\n" "-l only print out luns, and not a count\n" "-r specify \"default\", \"wellknown\" or \"all\"\n" "readcap arguments\n" "-b only report the blocksize\n" "-h human readable device size, base 2\n" "-H human readable device size, base 10\n" "-N print the number of blocks instead of last block\n" "-q quiet, print numbers only\n" "-s only report the last block/device size\n" "cmd arguments:\n" "-c cdb [args] specify the SCSI CDB\n" "-i len fmt specify input data and input data format\n" "-o len fmt [args] specify output data and output data fmt\n" "smpcmd arguments:\n" "-r len fmt [args] specify the SMP command to be sent\n" "-R len fmt [args] specify SMP response format\n" "smprg arguments:\n" "-l specify the long response format\n" "smppc arguments:\n" "-p phy specify the PHY to operate on\n" "-l specify the long request/response format\n" "-o operation specify the phy control operation\n" "-d name set the attached device name\n" "-m rate set the minimum physical link rate\n" "-M rate set the maximum physical link rate\n" "-T pp_timeout set the partial pathway timeout value\n" "-a enable|disable enable or disable SATA slumber\n" "-A enable|disable enable or disable SATA partial phy power\n" "-s enable|disable enable or disable SAS slumber\n" "-S enable|disable enable or disable SAS partial phy power\n" "smpphylist arguments:\n" "-l specify the long response format\n" "-q only print phys with attached devices\n" "smpmaninfo arguments:\n" "-l specify the long response format\n" "debug arguments:\n" "-I CAM_DEBUG_INFO -- scsi commands, errors, data\n" "-T CAM_DEBUG_TRACE -- routine flow tracking\n" "-S CAM_DEBUG_SUBTRACE -- internal routine command flow\n" "-c CAM_DEBUG_CDB -- print out SCSI CDBs only\n" "tags arguments:\n" "-N tags specify the number of tags to use for this device\n" "-q be quiet, don't report the number of tags\n" "-v report a number of tag-related parameters\n" "negotiate arguments:\n" "-a send a test unit ready after negotiation\n" "-c report/set current negotiation settings\n" "-D \"enable\" or \"disable\" disconnection\n" "-M mode set ATA mode\n" "-O offset set command delay offset\n" "-q be quiet, don't report anything\n" "-R syncrate synchronization rate in MHz\n" "-T \"enable\" or \"disable\" tagged queueing\n" "-U report/set user negotiation settings\n" "-W bus_width set the bus width in bits (8, 16 or 32)\n" "-v also print a Path Inquiry CCB for the controller\n" "format arguments:\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-w don't send immediate format command\n" "-y don't ask any questions\n" "sanitize arguments:\n" "-a operation operation mode: overwrite, block, crypto or exitfailure\n" "-c passes overwrite passes to perform (1 to 31)\n" "-I invert overwrite pattern after each 
pass\n" "-P pattern path to overwrite pattern file\n" "-q be quiet, don't print status messages\n" "-r run in report only mode\n" "-U run operation in unrestricted completion exit mode\n" "-w don't send immediate sanitize command\n" "-y don't ask any questions\n" "idle/standby arguments:\n" "-t number of seconds before respective state.\n" "fwdownload arguments:\n" "-f fw_image path to firmware image file\n" "-q don't print informational messages, only errors\n" "-s run in simulation mode\n" "-v print info for every firmware segment sent to device\n" "-y don't ask any questions\n" "security arguments:\n" "-d pwd disable security using the given password for the selected\n" " user\n" "-e pwd erase the device using the given pwd for the selected user\n" "-f freeze the security configuration of the specified device\n" "-h pwd enhanced erase the device using the given pwd for the\n" " selected user\n" "-k pwd unlock the device using the given pwd for the selected\n" " user\n" "-l specifies which security level to set: high or maximum\n" "-q be quiet, do not print any status messages\n" "-s pwd password the device (enable security) using the given\n" " pwd for the selected user\n" "-T timeout overrides the timeout (seconds) used for erase operation\n" "-U specifies which user to set: user or master\n" "-y don't ask any questions\n" "hpa arguments:\n" "-f freeze the HPA configuration of the device\n" "-l lock the HPA configuration of the device\n" "-P make the HPA max sectors persist\n" "-p pwd Set the HPA configuration password required for unlock\n" " calls\n" "-q be quiet, do not print any status messages\n" "-s sectors configures the maximum user accessible sectors of the\n" " device\n" "-U pwd unlock the HPA configuration of the device\n" "-y don't ask any questions\n" "persist arguments:\n" "-i action specify read_keys, read_reservation, report_cap, or\n" " read_full_status\n" "-o action specify register, register_ignore, reserve, release,\n" " clear, preempt, preempt_abort, register_move, replace_lost\n" "-a set the All Target Ports (ALL_TG_PT) bit\n" "-I tid specify a Transport ID, e.g.: sas,0x1234567812345678\n" "-k key specify the Reservation Key\n" "-K sa_key specify the Service Action Reservation Key\n" "-p set the Activate Persist Through Power Loss bit\n" "-R rtp specify the Relative Target Port\n" "-s scope specify the scope: lun, extent, element or a number\n" "-S specify Transport ID for register, requires -I\n" "-T res_type specify the reservation type: read_shared, wr_ex, rd_ex,\n" " ex_ac, wr_ex_ro, ex_ac_ro, wr_ex_ar, ex_ac_ar\n" "-U unregister the current initiator for register_move\n" "attrib arguments:\n" "-r action specify attr_values, attr_list, lv_list, part_list, or\n" " supp_attr\n" "-w attr specify an attribute to write, one -w argument per attr\n" "-a attr_num only display this attribute number\n" "-c get cached attributes\n" "-e elem_addr request attributes for the given element in a changer\n" "-F form1,form2 output format, comma separated list: text_esc, text_raw,\n" " nonascii_esc, nonascii_trim, nonascii_raw, field_all,\n" " field_none, field_desc, field_num, field_size, field_rw\n" "-p partition request attributes for the given partition\n" "-s start_attr request attributes starting at the given number\n" "-T elem_type specify the element type (used with -e)\n" "-V logical_vol specify the logical volume ID\n" "opcodes arguments:\n" "-o opcode specify the individual opcode to list\n" "-s service_action specify the service action for the opcode\n" "-N do not 
return SCSI error for unsupported SA\n" "-T request nominal and recommended timeout values\n" +"zone arguments:\n" +"-c cmd required: rz, open, close, finish, or rwp\n" +"-a apply the action to all zones\n" +"-l LBA specify the zone starting LBA\n" +"-o rep_opts report zones options: all, empty, imp_open, exp_open,\n" +" closed, full, ro, offline, reset, nonseq, nonwp\n" +"-P print_opt report zones printing: normal, summary, script\n" +"epc arguments:\n" +"-c cmd required: restore, goto, timer, state, enable, disable,\n" +" source, status, list\n" +"-d disable power mode (timer, state)\n" +"-D delayed entry (goto)\n" +"-e enable power mode (timer, state)\n" +"-H hold power mode (goto)\n" +"-p power_cond Idle_a, Idle_b, Idle_c, Standby_y, Standby_z (timer,\n" +" state, goto)\n" +"-P only display power mode (status)\n" +"-r rst_src restore settings from: default, saved (restore)\n" +"-s save mode (timer, state, restore)\n" +"-S power_src set power source: battery, nonbattery (source)\n" +"-T timer set timer, seconds, .1 sec resolution (timer)\n" ); #endif /* MINIMALISTIC */ } int main(int argc, char **argv) { int c; char *device = NULL; int unit = 0; struct cam_device *cam_dev = NULL; int timeout = 0, retry_count = 1; camcontrol_optret optreturn; char *tstr; const char *mainopt = "C:En:t:u:v"; const char *subopt = NULL; char combinedopt[256]; int error = 0, optstart = 2; int devopen = 1; #ifndef MINIMALISTIC path_id_t bus; target_id_t target; lun_id_t lun; #endif /* MINIMALISTIC */ cmdlist = CAM_CMD_NONE; arglist = CAM_ARG_NONE; if (argc < 2) { usage(0); exit(1); } /* * Get the base option. */ optreturn = getoption(option_table,argv[1], &cmdlist, &arglist,&subopt); if (optreturn == CC_OR_AMBIGUOUS) { warnx("ambiguous option %s", argv[1]); usage(0); exit(1); } else if (optreturn == CC_OR_NOT_FOUND) { warnx("option %s not found", argv[1]); usage(0); exit(1); } /* * Ahh, getopt(3) is a pain. * * This is a gross hack. There really aren't many other good * options (excuse the pun) for parsing options in a situation like * this. getopt is kinda braindead, so you end up having to run * through the options twice, and give each invocation of getopt * the option string for the other invocation. * * You would think that you could just have two groups of options. * The first group would get parsed by the first invocation of * getopt, and the second group would get parsed by the second * invocation of getopt. It doesn't quite work out that way. When * the first invocation of getopt finishes, it leaves optind pointing * to the argument _after_ the first argument in the second group. * So when the second invocation of getopt comes around, it doesn't * recognize the first argument it gets and then bails out. * * A nice alternative would be to have a flag for getopt that says * "just keep parsing arguments even when you encounter an unknown * argument", but there isn't one. So there's no real clean way to * easily parse two sets of arguments without having one invocation * of getopt know about the other. * * Without this hack, the first invocation of getopt would work as * long as the generic arguments are first, but the second invocation * (in the subfunction) would fail in one of two ways. In the case * where you don't set optreset, it would fail because optind may be * pointing to the argument after the one it should be pointing at. * In the case where you do set optreset, and reset optind, it would * fail because getopt would run into the first set of options, which * it doesn't understand. 
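 * * As a concrete example (hypothetical invocation, for illustration): * for "camcontrol epc da0 -v -c status", the first getopt() pass below * must step over "-c status", which only the epc() subfunction * understands, while still picking up the generic -v; handing both * passes the combined option string is what makes that work.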
* * All of this would "sort of" work if you could somehow figure out * whether optind had been incremented one option too far. The * mechanics of that, however, are more daunting than just giving * both invocations all of the expected options for either invocation. * * Needless to say, I wouldn't mind if someone invented a better * (non-GPL!) command line parsing interface than getopt. I * wouldn't mind if someone added more knobs to getopt to make it * work better. Who knows, I may talk myself into doing it someday, * if the standards weenies let me. As it is, it just leads to * hackery like this and causes people to avoid it in some cases. * * KDM, September 8th, 1998 */ if (subopt != NULL) snprintf(combinedopt, sizeof(combinedopt), "%s%s", mainopt, subopt); else snprintf(combinedopt, sizeof(combinedopt), "%s", mainopt); /* * For these options we do not parse optional device arguments and * we do not open a passthrough device. */ if ((cmdlist == CAM_CMD_RESCAN) || (cmdlist == CAM_CMD_RESET) || (cmdlist == CAM_CMD_DEVTREE) || (cmdlist == CAM_CMD_USAGE) || (cmdlist == CAM_CMD_DEBUG)) devopen = 0; #ifndef MINIMALISTIC if ((devopen == 1) && (argc > 2 && argv[2][0] != '-')) { char name[30]; int rv; if (isdigit((unsigned char)argv[2][0])) { /* device specified as bus:target[:lun] */ rv = parse_btl(argv[2], &bus, &target, &lun, &arglist); if (rv < 2) errx(1, "numeric device specification must " "be either bus:target, or " "bus:target:lun"); /* default to 0 if lun was not specified */ if ((arglist & CAM_ARG_LUN) == 0) { lun = 0; arglist |= CAM_ARG_LUN; } optstart++; } else { if (cam_get_device(argv[2], name, sizeof name, &unit) == -1) errx(1, "%s", cam_errbuf); device = strdup(name); arglist |= CAM_ARG_DEVICE | CAM_ARG_UNIT; optstart++; } } #endif /* MINIMALISTIC */ /* * Start getopt processing at argv[2/3], since we've already * accepted argv[1..2] as the command name, and as a possible * device name. */ optind = optstart; /* * Now we run through the argument list looking for generic * options, and ignoring options that possibly belong to * subfunctions. */ while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'C': retry_count = strtol(optarg, NULL, 0); if (retry_count < 0) errx(1, "retry count %d is < 0", retry_count); arglist |= CAM_ARG_RETRIES; break; case 'E': arglist |= CAM_ARG_ERR_RECOVER; break; case 'n': arglist |= CAM_ARG_DEVICE; tstr = optarg; while (isspace((unsigned char)*tstr) && (*tstr != '\0')) tstr++; device = (char *)strdup(tstr); break; case 't': timeout = strtol(optarg, NULL, 0); if (timeout < 0) errx(1, "invalid timeout %d", timeout); /* Convert the timeout from seconds to ms */ timeout *= 1000; arglist |= CAM_ARG_TIMEOUT; break; case 'u': arglist |= CAM_ARG_UNIT; unit = strtol(optarg, NULL, 0); break; case 'v': arglist |= CAM_ARG_VERBOSE; break; default: break; } } #ifndef MINIMALISTIC /* * For most commands we'll want to open the passthrough device * associated with the specified device. In the case of the rescan * commands, we don't use a passthrough device at all, just the * transport layer device. */ if (devopen == 1) { if (((arglist & (CAM_ARG_BUS|CAM_ARG_TARGET)) == 0) && (((arglist & CAM_ARG_DEVICE) == 0) || ((arglist & CAM_ARG_UNIT) == 0))) { errx(1, "subcommand \"%s\" requires a valid device " "identifier", argv[1]); } if ((cam_dev = ((arglist & (CAM_ARG_BUS | CAM_ARG_TARGET)) ? cam_open_btl(bus, target, lun, O_RDWR, NULL) : cam_open_spec_device(device, unit, O_RDWR, NULL))) == NULL) errx(1, "%s", cam_errbuf); } #endif /* MINIMALISTIC */ /* * Reset optind back to optstart, and reset getopt, so these routines * can parse the arguments again.
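 * (optreset is the BSD extension that tells getopt(3) to restart * scanning; setting it together with optind lets each subcommand * handler reparse argv from optstart with the same combined option * string.)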
*/ optind = optstart; optreset = 1; switch(cmdlist) { #ifndef MINIMALISTIC case CAM_CMD_DEVLIST: error = getdevlist(cam_dev); break; case CAM_CMD_HPA: error = atahpa(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; #endif /* MINIMALISTIC */ case CAM_CMD_DEVTREE: error = getdevtree(argc, argv, combinedopt); break; #ifndef MINIMALISTIC case CAM_CMD_TUR: error = testunitready(cam_dev, retry_count, timeout, 0); break; case CAM_CMD_INQUIRY: error = scsidoinquiry(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDENTIFY: error = ataidentify(cam_dev, retry_count, timeout); break; case CAM_CMD_STARTSTOP: error = scsistart(cam_dev, arglist & CAM_ARG_START_UNIT, arglist & CAM_ARG_EJECT, retry_count, timeout); break; #endif /* MINIMALISTIC */ case CAM_CMD_RESCAN: error = dorescan_or_reset(argc, argv, 1); break; case CAM_CMD_RESET: error = dorescan_or_reset(argc, argv, 0); break; #ifndef MINIMALISTIC case CAM_CMD_READ_DEFECTS: error = readdefects(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_MODE_PAGE: modepage(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SCSI_CMD: error = scsicmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_CMD: error = smpcmd(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_RG: error = smpreportgeneral(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PC: error = smpphycontrol(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_PHYLIST: error = smpphylist(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SMP_MANINFO: error = smpmaninfo(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_DEBUG: error = camdebug(argc, argv, combinedopt); break; case CAM_CMD_TAG: error = tagcontrol(cam_dev, argc, argv, combinedopt); break; case CAM_CMD_RATE: error = ratecontrol(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_FORMAT: error = scsiformat(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_REPORTLUNS: error = scsireportluns(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_READCAP: error = scsireadcapacity(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_IDLE: case CAM_CMD_STANDBY: case CAM_CMD_SLEEP: error = atapm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_APM: case CAM_CMD_AAM: error = ataaxm(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_SECURITY: error = atasecurity(cam_dev, retry_count, timeout, argc, argv, combinedopt); break; case CAM_CMD_DOWNLOAD_FW: error = fwdownload(cam_dev, argc, argv, combinedopt, arglist & CAM_ARG_VERBOSE, retry_count, timeout); break; case CAM_CMD_SANITIZE: error = scsisanitize(cam_dev, argc, argv, combinedopt, retry_count, timeout); break; case CAM_CMD_PERSIST: error = scsipersist(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_ATTRIB: error = scsiattrib(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE, arglist & CAM_ARG_ERR_RECOVER); break; case CAM_CMD_OPCODES: error = scsiopcodes(cam_dev, argc, argv, combinedopt, retry_count, timeout, arglist & CAM_ARG_VERBOSE); break; case CAM_CMD_REPROBE: error = scsireprobe(cam_dev); break; - + case CAM_CMD_ZONE: + error = zone(cam_dev, argc, argv, 
combinedopt, + retry_count, timeout, arglist & CAM_ARG_VERBOSE); + break; + case CAM_CMD_EPC: + error = epc(cam_dev, argc, argv, combinedopt, + retry_count, timeout, arglist & CAM_ARG_VERBOSE); + break; #endif /* MINIMALISTIC */ case CAM_CMD_USAGE: usage(1); break; default: usage(0); error = 1; break; } if (cam_dev != NULL) cam_close_device(cam_dev); exit(error); } Index: head/sbin/camcontrol/camcontrol.h =================================================================== --- head/sbin/camcontrol/camcontrol.h (revision 300206) +++ head/sbin/camcontrol/camcontrol.h (revision 300207) @@ -1,102 +1,110 @@ /* * Copyright (c) 1998 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _CAMCONTROL_H #define _CAMCONTROL_H typedef enum { CC_OR_NOT_FOUND, CC_OR_AMBIGUOUS, CC_OR_FOUND } camcontrol_optret; typedef enum { CC_DT_NONE, CC_DT_SCSI, CC_DT_ATA_BEHIND_SCSI, CC_DT_ATA, CC_DT_UNKNOWN } camcontrol_devtype; /* * get_hook: Structure for evaluating args in a callback. 
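 * * The argc/argv pair tracks the remaining arguments for the cmd * subcommand's CDB encode/decode callbacks (cget(), iget(), and * arg_put(), declared below), and got counts how many of them have * been consumed.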
*/ struct get_hook { int argc; char **argv; int got; }; extern int verbose; int ata_do_identify(struct cam_device *device, int retry_count, int timeout, union ccb *ccb, struct ata_params **ident_bufp); int dev_has_vpd_page(struct cam_device *dev, uint8_t page_id, int retry_count, int timeout, int verbosemode); int get_device_type(struct cam_device *dev, int retry_count, int timeout, int verbosemode, camcontrol_devtype *devtype); -void build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, - uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, - uint16_t features, uint16_t sector_count, uint64_t lba, - uint8_t command, uint8_t *data_ptr, uint16_t dxfer_len, - uint8_t sense_len, uint32_t timeout, int is48bit, - camcontrol_devtype devtype); +int build_ata_cmd(union ccb *ccb, uint32_t retry_count, uint32_t flags, + uint8_t tag_action, uint8_t protocol, uint8_t ata_flags, + uint16_t features, uint16_t sector_count, uint64_t lba, + uint8_t command, uint32_t auxiliary, uint8_t *data_ptr, + uint32_t dxfer_len, uint8_t *cdb_storage, + size_t cdb_storage_len, uint8_t sense_len, uint32_t timeout, + int is48bit, camcontrol_devtype devtype); +int get_ata_status(struct cam_device *dev, union ccb *ccb, uint8_t *error, + uint16_t *count, uint64_t *lba, uint8_t *device, + uint8_t *status); int camxferrate(struct cam_device *device); int fwdownload(struct cam_device *device, int argc, char **argv, char *combinedopt, int printerrors, int retry_count, int timeout); +int zone(struct cam_device *device, int argc, char **argv, char *combinedopt, + int retry_count, int timeout, int verbosemode); +int epc(struct cam_device *device, int argc, char **argv, char *combinedopt, + int retry_count, int timeout, int verbosemode); void mode_sense(struct cam_device *device, int mode_page, int page_control, int dbd, int retry_count, int timeout, u_int8_t *data, int datalen); void mode_select(struct cam_device *device, int save_pages, int retry_count, int timeout, u_int8_t *data, int datalen); void mode_edit(struct cam_device *device, int page, int page_control, int dbd, int edit, int binary, int retry_count, int timeout); void mode_list(struct cam_device *device, int page_control, int dbd, int retry_count, int timeout); int scsidoinquiry(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout); int scsigetopcodes(struct cam_device *device, int opcode_set, int opcode, int show_sa_errors, int sa_set, int service_action, int timeout_desc, int retry_count, int timeout, int verbosemode, uint32_t *fill_len, uint8_t **data_ptr); int scsipersist(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbose, int err_recover); int scsiattrib(struct cam_device *device, int argc, char **argv, char *combinedopt, int retry_count, int timeout, int verbose, int err_recover); char *cget(void *hook, char *name); int iget(void *hook, char *name); void arg_put(void *hook, int letter, void *arg, int count, char *name); int get_confirmation(void); void usage(int printlong); #endif /* _CAMCONTROL_H */ Index: head/sbin/camcontrol/epc.c =================================================================== --- head/sbin/camcontrol/epc.c (nonexistent) +++ head/sbin/camcontrol/epc.c (revision 300207) @@ -0,0 +1,857 @@ +/*- + * Copyright (c) 2016 Spectra Logic Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * Authors: Ken Merry (Spectra Logic Corporation) + */ +/* + * ATA Extended Power Conditions (EPC) support + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "camcontrol.h" + +typedef enum { + EPC_ACTION_NONE = 0x00, + EPC_ACTION_LIST = 0x01, + EPC_ACTION_TIMER_SET = 0x02, + EPC_ACTION_IMMEDIATE = 0x03, + EPC_ACTION_GETMODE = 0x04 +} epc_action; + +static struct scsi_nv epc_flags[] = { + { "Supported", ATA_PCL_COND_SUPPORTED }, + { "Saveable", ATA_PCL_COND_SAVEABLE }, + { "Changeable", ATA_PCL_COND_CHANGEABLE }, + { "Default Timer Enabled", ATA_PCL_DEFAULT_TIMER_EN }, + { "Saved Timer Enabled", ATA_PCL_SAVED_TIMER_EN }, + { "Current Timer Enabled", ATA_PCL_CURRENT_TIMER_EN }, + { "Hold Power Condition Not Supported", ATA_PCL_HOLD_PC_NOT_SUP } +}; + +static struct scsi_nv epc_power_cond_map[] = { + { "Standby_z", ATA_EPC_STANDBY_Z }, + { "z", ATA_EPC_STANDBY_Z }, + { "Standby_y", ATA_EPC_STANDBY_Y }, + { "y", ATA_EPC_STANDBY_Y }, + { "Idle_a", ATA_EPC_IDLE_A }, + { "a", ATA_EPC_IDLE_A }, + { "Idle_b", ATA_EPC_IDLE_B }, + { "b", ATA_EPC_IDLE_B }, + { "Idle_c", ATA_EPC_IDLE_C }, + { "c", ATA_EPC_IDLE_C } +}; + +static struct scsi_nv epc_rst_val[] = { + { "default", ATA_SF_EPC_RST_DFLT }, + { "saved", 0 } +}; + +static struct scsi_nv epc_ps_map[] = { + { "unknown", ATA_SF_EPC_SRC_UNKNOWN }, + { "battery", ATA_SF_EPC_SRC_BAT }, + { "notbattery", ATA_SF_EPC_SRC_NOT_BAT }, + /* Alias: the usage text spells this "nonbattery". */ + { "nonbattery", ATA_SF_EPC_SRC_NOT_BAT } +}; + +/* + * These aren't subcommands of the EPC SET FEATURES subcommand, but rather + * commands that determine the current capabilities and status of the drive. + * The EPC subcommands are limited to 4 bits, so we won't collide with any + * future values.
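+ * + * Only the ATA_SF_EPC_* values are ever sent to the drive (via + * epc_set_features()); CCTL_EPC_GET_STATUS and CCTL_EPC_LIST are + * dispatched entirely within epc() to epc_getmode() and epc_list().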
+ */ +#define CCTL_EPC_GET_STATUS 0x8001 +#define CCTL_EPC_LIST 0x8002 + +static struct scsi_nv epc_cmd_map[] = { + { "restore", ATA_SF_EPC_RESTORE }, + { "goto", ATA_SF_EPC_GOTO }, + { "timer", ATA_SF_EPC_SET_TIMER }, + { "state", ATA_SF_EPC_SET_STATE }, + { "enable", ATA_SF_EPC_ENABLE }, + { "disable", ATA_SF_EPC_DISABLE }, + { "source", ATA_SF_EPC_SET_SOURCE }, + { "status", CCTL_EPC_GET_STATUS }, + { "list", CCTL_EPC_LIST } +}; + +static int epc_list(struct cam_device *device, camcontrol_devtype devtype, + union ccb *ccb, int retry_count, int timeout); +static void epc_print_pcl_desc(struct ata_power_cond_log_desc *desc, + const char *prefix); +static int epc_getmode(struct cam_device *device, camcontrol_devtype devtype, + union ccb *ccb, int retry_count, int timeout, + int power_only); +static int epc_set_features(struct cam_device *device, + camcontrol_devtype devtype, union ccb *ccb, + int retry_count, int timeout, int action, + int power_cond, int timer, int enable, int save, + int delayed_entry, int hold, int power_src, + int restore_src); + +static void +epc_print_pcl_desc(struct ata_power_cond_log_desc *desc, const char *prefix) +{ + int first; + unsigned int i, num_printed, max_chars; + + first = 1; + max_chars = 75; + + num_printed = printf("%sFlags: ", prefix); + for (i = 0; i < (sizeof(epc_flags) / sizeof(epc_flags[0])); i++) { + if ((desc->flags & epc_flags[i].value) == 0) + continue; + if (first == 0) { + num_printed += printf(", "); + } + if ((num_printed + strlen(epc_flags[i].name)) > max_chars) { + printf("\n"); + num_printed = printf("%s ", prefix); + } + num_printed += printf("%s", epc_flags[i].name); + first = 0; + } + if (first != 0) + printf("None"); + printf("\n"); + + /* The log's timer fields are in 100 millisecond units. */ + printf("%sDefault timer setting: %.1f sec\n", prefix, + le32dec(desc->default_timer) / 10.0); + printf("%sSaved timer setting: %.1f sec\n", prefix, + le32dec(desc->saved_timer) / 10.0); + printf("%sCurrent timer setting: %.1f sec\n", prefix, + le32dec(desc->current_timer) / 10.0); + printf("%sNominal time to active: %.1f sec\n", prefix, + le32dec(desc->nom_time_to_active) / 10.0); + printf("%sMinimum timer: %.1f sec\n", prefix, + le32dec(desc->min_timer) / 10.0); + printf("%sMaximum timer: %.1f sec\n", prefix, + le32dec(desc->max_timer) / 10.0); + printf("%sNumber of transitions to power condition: %u\n", prefix, + le32dec(desc->num_transitions_to_pc)); + printf("%sHours in power condition: %u\n", prefix, + le32dec(desc->hours_in_pc)); +} + +static int +epc_list(struct cam_device *device, camcontrol_devtype devtype, union ccb *ccb, + int retry_count, int timeout) +{ + struct ata_power_cond_log_idle *idle_log; + struct ata_power_cond_log_standby *standby_log; + uint8_t log_buf[sizeof(*idle_log) + sizeof(*standby_log)]; + uint16_t log_addr = ATA_POWER_COND_LOG; + uint16_t page_number = ATA_PCL_IDLE; + uint64_t lba; + int error = 0; + + lba = (((uint64_t)page_number & 0xff00) << 32) | + ((page_number & 0x00ff) << 8) | + (log_addr & 0xff); + + error = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_IN | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ AP_PROTO_DMA | AP_EXTEND, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TLEN_SECT_CNT | + AP_FLAG_TDIR_FROM_DEV, + /*features*/ 0, + /*sector_count*/ 2, + /*lba*/ lba, + /*command*/ ATA_READ_LOG_DMA_EXT, + /*auxiliary*/ 0, + /*data_ptr*/ log_buf, + /*dxfer_len*/ sizeof(log_buf), + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, +
/*timeout*/ timeout ? timeout : 60000, + /*is48bit*/ 1, + /*devtype*/ devtype); + + if (error != 0) { + warnx("%s: build_ata_cmd() failed, likely programmer error", + __func__); + goto bailout; + } + + if (retry_count > 0) + ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; + + error = cam_send_ccb(device, ccb); + if (error != 0) { + warn("error sending ATA READ LOG EXT CCB"); + error = 1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL,stderr); + error = 1; + goto bailout; + } + + idle_log = (struct ata_power_cond_log_idle *)log_buf; + standby_log = + (struct ata_power_cond_log_standby *)&log_buf[sizeof(*idle_log)]; + + printf("ATA Power Conditions Log:\n"); + printf(" Idle power conditions page:\n"); + printf(" Idle A condition:\n"); + epc_print_pcl_desc(&idle_log->idle_a_desc, " "); + printf(" Idle B condition:\n"); + epc_print_pcl_desc(&idle_log->idle_b_desc, " "); + printf(" Idle C condition:\n"); + epc_print_pcl_desc(&idle_log->idle_c_desc, " "); + printf(" Standby power conditions page:\n"); + printf(" Standby Y condition:\n"); + epc_print_pcl_desc(&standby_log->standby_y_desc, " "); + printf(" Standby Z condition:\n"); + epc_print_pcl_desc(&standby_log->standby_z_desc, " "); +bailout: + return (error); +} + +static int +epc_getmode(struct cam_device *device, camcontrol_devtype devtype, + union ccb *ccb, int retry_count, int timeout, int power_only) +{ + struct ata_params *ident = NULL; + struct ata_identify_log_sup_cap sup_cap; + const char *mode_name = NULL; + uint8_t error = 0, ata_device = 0, status = 0; + uint16_t count = 0; + uint64_t lba = 0; + uint32_t page_number, log_address; + uint64_t caps = 0; + int avail_bytes = 0; + int res_available = 0; + int retval; + + retval = 0; + + if (power_only != 0) + goto check_power_mode; + + /* + * Get standard ATA Identify data. + */ + retval = ata_do_identify(device, retry_count, timeout, ccb, &ident); + if (retval != 0) { + warnx("Couldn't get identify data"); + goto bailout; + } + + /* + * Get the ATA Identify Data Log (0x30), + * Supported Capabilities Page (0x03). + */ + log_address = ATA_IDENTIFY_DATA_LOG; + page_number = ATA_IDL_SUP_CAP; + lba = (((uint64_t)page_number & 0xff00) << 32) | + ((page_number & 0x00ff) << 8) | + (log_address & 0xff); + + bzero(&sup_cap, sizeof(sup_cap)); + /* + * XXX KDM check the supported protocol. + */ + retval = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_IN | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ AP_PROTO_DMA | + AP_EXTEND, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TLEN_SECT_CNT | + AP_FLAG_TDIR_FROM_DEV, + /*features*/ 0, + /*sector_count*/ 1, + /*lba*/ lba, + /*command*/ ATA_READ_LOG_DMA_EXT, + /*auxiliary*/ 0, + /*data_ptr*/ (uint8_t *)&sup_cap, + /*dxfer_len*/ sizeof(sup_cap), + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? 
timeout : 60000, + /*is48bit*/ 1, + /*devtype*/ devtype); + + if (retval != 0) { + warnx("%s: build_ata_cmd() failed, likely a programmer error", + __func__); + goto bailout; + } + + if (retry_count > 0) + ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; + + retval = cam_send_ccb(device, ccb); + if (retval != 0) { + warn("error sending ATA READ LOG CCB"); + retval = 1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL,stderr); + retval = 1; + goto bailout; + } + + if (ccb->ccb_h.func_code == XPT_SCSI_IO) { + avail_bytes = ccb->csio.dxfer_len - ccb->csio.resid; + } else { + avail_bytes = ccb->ataio.dxfer_len - ccb->ataio.resid; + } + if (avail_bytes < (int)sizeof(sup_cap)) { + warnx("Couldn't get enough of the ATA Supported " + "Capabilities log, %d bytes returned", avail_bytes); + retval = 1; + goto bailout; + } + caps = le64dec(sup_cap.sup_cap); + if ((caps & ATA_SUP_CAP_VALID) == 0) { + warnx("Supported capabilities bits are not valid"); + retval = 1; + goto bailout; + } + + printf("APM: %sSupported, %sEnabled\n", + (ident->support.command2 & ATA_SUPPORT_APM) ? "" : "NOT ", + (ident->enabled.command2 & ATA_SUPPORT_APM) ? "" : "NOT "); + printf("EPC: %sSupported, %sEnabled\n", + (ident->support2 & ATA_SUPPORT_EPC) ? "" : "NOT ", + (ident->enabled2 & ATA_ENABLED_EPC) ? "" : "NOT "); + printf("Low Power Standby %sSupported\n", + (caps & ATA_SC_LP_STANDBY_SUP) ? "" : "NOT "); + printf("Set EPC Power Source %sSupported\n", + (caps & ATA_SC_SET_EPC_PS_SUP) ? "" : "NOT "); + + +check_power_mode: + + retval = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_NONE | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ AP_PROTO_NON_DATA | + AP_EXTEND, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TLEN_NO_DATA | + AP_FLAG_CHK_COND, + /*features*/ ATA_SF_EPC, + /*sector_count*/ 0, + /*lba*/ 0, + /*command*/ ATA_CHECK_POWER_MODE, + /*auxiliary*/ 0, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? timeout : 60000, + /*is48bit*/ 0, + /*devtype*/ devtype); + + if (retval != 0) { + warnx("%s: build_ata_cmd() failed, likely a programmer error", + __func__); + goto bailout; + } + + if (retry_count > 0) + ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; + + retval = cam_send_ccb(device, ccb); + if (retval != 0) { + warn("error sending ATA CHECK POWER MODE CCB"); + retval = 1; + goto bailout; + } + + /* + * Check to see whether we got the requested ATA result if this + * is an SCSI ATA PASS-THROUGH command. 
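+ * + * A CHECK CONDITION with sense key RECOVERED ERROR and ASC/ASCQ + * 0x00/0x1d ("ATA PASS-THROUGH INFORMATION AVAILABLE") is how a SAT + * layer reports the ATA result registers when AP_FLAG_CHK_COND is set; + * it indicates that the result is available, not that the command + * failed.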
+ */ + if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) + && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)) { + int error_code, sense_key, asc, ascq; + + retval = scsi_extract_sense_ccb(ccb, &error_code, + &sense_key, &asc, &ascq); + if (retval == 0) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, + stderr); + retval = 1; + goto bailout; + } + if ((sense_key == SSD_KEY_RECOVERED_ERROR) + && (asc == 0x00) + && (ascq == 0x1d)) { + res_available = 1; + } + + } + if (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) + && (res_available == 0)) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); + retval = 1; + goto bailout; + } + + retval = get_ata_status(device, ccb, &error, &count, &lba, &ata_device, + &status); + if (retval != 0) { + warnx("Unable to get ATA CHECK POWER MODE result"); + retval = 1; + goto bailout; + } + + mode_name = scsi_nv_to_str(epc_power_cond_map, + sizeof(epc_power_cond_map) / sizeof(epc_power_cond_map[0]), count); + printf("Current power state: "); + /* Note: ident can be null in power_only mode */ + if ((ident == NULL) + || (ident->enabled2 & ATA_ENABLED_EPC)) { + if (mode_name != NULL) + printf("%s", mode_name); + else if (count == 0xff) { + printf("PM0:Active or PM1:Idle"); + } + } else { + switch (count) { + case 0x00: + printf("PM2:Standby"); + break; + case 0x80: + printf("PM1:Idle"); + break; + case 0xff: + printf("PM0:Active or PM1:Idle"); + break; + } + } + printf(" (0x%02x)\n", count); + + if (power_only != 0) + goto bailout; + + if (caps & ATA_SC_LP_STANDBY_SUP) { + uint32_t wait_mode; + + wait_mode = (lba >> 20) & 0xff; + if (wait_mode == 0xff) { + printf("Device not waiting to enter lower power " + "condition\n"); + } else { + mode_name = scsi_nv_to_str(epc_power_cond_map, + sizeof(epc_power_cond_map) / + sizeof(epc_power_cond_map[0]), wait_mode); + printf("Device waiting to enter mode %s (0x%02x)\n", + (mode_name != NULL) ? mode_name : "Unknown", + wait_mode); + } + printf("Device is %sheld in the current power condition\n", + (lba & 0x80000) ? "" : "NOT "); + } +bailout: + return (retval); + +} + +static int +epc_set_features(struct cam_device *device, camcontrol_devtype devtype, + union ccb *ccb, int retry_count, int timeout, int action, + int power_cond, int timer, int enable, int save, + int delayed_entry, int hold, int power_src, int restore_src) +{ + uint64_t lba; + uint16_t count = 0; + int error; + + error = 0; + + lba = action; + + switch (action) { + case ATA_SF_EPC_SET_TIMER: + lba |= ((timer << ATA_SF_EPC_TIMER_SHIFT) & + ATA_SF_EPC_TIMER_MASK); + /* FALLTHROUGH */ + case ATA_SF_EPC_SET_STATE: + lba |= (enable ? ATA_SF_EPC_TIMER_EN : 0) | + (save ? ATA_SF_EPC_TIMER_SAVE : 0); + count = power_cond; + break; + case ATA_SF_EPC_GOTO: + count = power_cond; + lba |= (delayed_entry ? ATA_SF_EPC_GOTO_DELAY : 0) | + (hold ? ATA_SF_EPC_GOTO_HOLD : 0); + break; + case ATA_SF_EPC_RESTORE: + lba |= restore_src | + (save ?
ATA_SF_EPC_RST_SAVE : 0); + break; + case ATA_SF_EPC_ENABLE: + case ATA_SF_EPC_DISABLE: + break; + case ATA_SF_EPC_SET_SOURCE: + count = power_src; + break; + } + + error = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_NONE | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ AP_PROTO_NON_DATA | AP_EXTEND, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TLEN_NO_DATA | + AP_FLAG_TDIR_FROM_DEV, + /*features*/ ATA_SF_EPC, + /*sector_count*/ count, + /*lba*/ lba, + /*command*/ ATA_SETFEATURES, + /*auxiliary*/ 0, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? timeout : 60000, + /*is48bit*/ 1, + /*devtype*/ devtype); + + if (error != 0) { + warnx("%s: build_ata_cmd() failed, likely a programmer error", + __func__); + goto bailout; + } + + if (retry_count > 0) + ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; + + error = cam_send_ccb(device, ccb); + if (error != 0) { + warn("error sending ATA SET FEATURES CCB"); + error = 1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL,stderr); + error = 1; + goto bailout; + } + +bailout: + return (error); +} + +int +epc(struct cam_device *device, int argc, char **argv, char *combinedopt, + int retry_count, int timeout, int verbosemode __unused) +{ + union ccb *ccb = NULL; + int error = 0; + int c; + int action = -1; + camcontrol_devtype devtype; + double timer_val = -1; + int timer_tenths = 0, power_cond = -1; + int delayed_entry = 0, hold = 0; + int enable = -1, save = 0; + int restore_src = -1; + int power_src = -1; + int power_only = 0; + + + ccb = cam_getccb(device); + if (ccb == NULL) { + warnx("%s: error allocating CCB", __func__); + error = 1; + goto bailout; + } + + bzero(&(&ccb->ccb_h)[1], + sizeof(union ccb) - sizeof(struct ccb_hdr)); + + while ((c = getopt(argc, argv, combinedopt)) != -1) { + switch (c) { + case 'c': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(epc_cmd_map, + (sizeof(epc_cmd_map) / sizeof(epc_cmd_map[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + action = epc_cmd_map[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "epc command", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'd': + enable = 0; + break; + case 'D': + delayed_entry = 1; + break; + case 'e': + enable = 1; + break; + case 'H': + hold = 1; + break; + case 'p': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(epc_power_cond_map, + (sizeof(epc_power_cond_map) / + sizeof(epc_power_cond_map[0])), optarg, + &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + power_cond =epc_power_cond_map[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "power condition", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'P': + power_only = 1; + break; + case 'r': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(epc_rst_val, + (sizeof(epc_rst_val) / + sizeof(epc_rst_val[0])), optarg, + &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + restore_src = epc_rst_val[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? 
+ "ambiguous" : "invalid", + "restore value source", optarg); + error = 1; + goto bailout; + } + break; + } + case 's': + save = 1; + break; + case 'S': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(epc_ps_map, + (sizeof(epc_ps_map) / sizeof(epc_ps_map[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + power_src = epc_ps_map[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "power source", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'T': { + char *endptr; + + timer_val = strtod(optarg, &endptr); + if (timer_val < 0) { + warnx("Invalid timer value %f", timer_val); + error = 1; + goto bailout; + } else if (*endptr != '\0') { + warnx("Invalid timer value %s", optarg); + error = 1; + goto bailout; + } + timer_tenths = timer_val * 10; + break; + } + default: + break; + } + } + + if (action == -1) { + warnx("Must specify an action"); + error = 1; + goto bailout; + } + + error = get_device_type(device, retry_count, timeout, + /*printerrors*/ 1, &devtype); + if (error != 0) + errx(1, "Unable to determine device type"); + + switch (devtype) { + case CC_DT_ATA: + case CC_DT_ATA_BEHIND_SCSI: + break; + default: + warnx("The epc subcommand only works with ATA protocol " + "devices"); + error = 1; + goto bailout; + break; /*NOTREACHED*/ + } + + switch (action) { + case ATA_SF_EPC_SET_TIMER: + if (timer_val == -1) { + warnx("Must specify a timer value (-T time)"); + error = 1; + } + case ATA_SF_EPC_SET_STATE: + if (enable == -1) { + warnx("Must specify enable (-e) or disable (-d)"); + error = 1; + } + /* FALLTHROUGH */ + case ATA_SF_EPC_GOTO: + if (power_cond == -1) { + warnx("Must specify a power condition with -p"); + error = 1; + } + if (error != 0) + goto bailout; + break; + case ATA_SF_EPC_SET_SOURCE: + if (power_src == -1) { + warnx("Must specify a power source (-S battery or " + "-S notbattery) value"); + error = 1; + goto bailout; + } + break; + case ATA_SF_EPC_RESTORE: + if (restore_src == -1) { + warnx("Must specify a source for restored value, " + "-r default or -r saved"); + error = 1; + goto bailout; + } + break; + case ATA_SF_EPC_ENABLE: + case ATA_SF_EPC_DISABLE: + case CCTL_EPC_GET_STATUS: + case CCTL_EPC_LIST: + default: + break; + } + + switch (action) { + case CCTL_EPC_GET_STATUS: + error = epc_getmode(device, devtype, ccb, retry_count, timeout, + power_only); + break; + case CCTL_EPC_LIST: + error = epc_list(device, devtype, ccb, retry_count, timeout); + break; + case ATA_SF_EPC_RESTORE: + case ATA_SF_EPC_GOTO: + case ATA_SF_EPC_SET_TIMER: + case ATA_SF_EPC_SET_STATE: + case ATA_SF_EPC_ENABLE: + case ATA_SF_EPC_DISABLE: + case ATA_SF_EPC_SET_SOURCE: + error = epc_set_features(device, devtype, ccb, retry_count, + timeout, action, power_cond, timer_tenths, enable, save, + delayed_entry, hold, power_src, restore_src); + break; + default: + warnx("Not implemented yet"); + error = 1; + goto bailout; + break; + } + + +bailout: + if (ccb != NULL) + cam_freeccb(ccb); + + return (error); +} Property changes on: head/sbin/camcontrol/epc.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sbin/camcontrol/fwdownload.c =================================================================== 
--- head/sbin/camcontrol/fwdownload.c (revision 300206) +++ head/sbin/camcontrol/fwdownload.c (revision 300207) @@ -1,1038 +1,1056 @@ /*- * Copyright (c) 2011 Sandvine Incorporated. All rights reserved. * Copyright (c) 2002-2011 Andre Albsmeier * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This software is derived from Andre Albsmeier's fwprog.c which contained * the following note: * * Many thanks goes to Marc Frajola from * TeraSolutions for the initial idea and his programme for upgrading * the firmware of I*M DDYS drives. */ /* * BEWARE: * * The fact that you see your favorite vendor listed below does not * imply that your equipment won't break when you use this software * with it. It only means that the firmware of at least one device type * of each vendor listed has been programmed successfully using this code. * * The -s option simulates a download but does nothing apart from that. * It can be used to check what chunk sizes would have been used with the * specified device. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "progress.h" #include "camcontrol.h" #define WB_TIMEOUT 50000 /* 50 seconds */ typedef enum { VENDOR_HGST, VENDOR_HITACHI, VENDOR_HP, VENDOR_IBM, VENDOR_PLEXTOR, VENDOR_QUALSTAR, VENDOR_QUANTUM, VENDOR_SAMSUNG, VENDOR_SEAGATE, VENDOR_SMART, VENDOR_ATA, VENDOR_UNKNOWN } fw_vendor_t; /* * FW_TUR_READY: The drive must return good status for a test unit ready. * * FW_TUR_NOT_READY: The drive must return not ready status for a test unit * ready. You may want this in a removable media drive. * * FW_TUR_NA: It doesn't matter whether the drive is ready or not. * This may be the case for a removable media drive. */ typedef enum { FW_TUR_NONE, FW_TUR_READY, FW_TUR_NOT_READY, FW_TUR_NA } fw_tur_status; /* * FW_TIMEOUT_DEFAULT: Attempt to probe for a WRITE BUFFER timeout * value from the drive. If we get an answer, * use the Recommended timeout. Otherwise, * use the default value from the table. * * FW_TIMEOUT_DEV_REPORTED: The timeout value was probed directly from * the device. * * FW_TIMEOUT_NO_PROBE: Do not ask the device for a WRITE BUFFER * timeout value. Use the device-specific * value. 
* * FW_TIMEOUT_USER_SPEC: The user specified a timeout on the command * line with the -t option. This overrides any * probe or default timeout. */ typedef enum { FW_TIMEOUT_DEFAULT, FW_TIMEOUT_DEV_REPORTED, FW_TIMEOUT_NO_PROBE, FW_TIMEOUT_USER_SPEC } fw_timeout_type; /* * type: Enumeration for the particular vendor. * * pattern: Pattern to match for the Vendor ID from the SCSI * Inquiry data. * * dev_type: SCSI device type to match, or T_ANY to match any * device from the given vendor. Note that if there * is a specific device type listed for a particular * vendor, it must be listed before a T_ANY entry. * * max_pkt_size: Maximum packet size when talking to a device. Note * that although large data sizes may be supported by * the target device, they may not be supported by the * OS or the controller. * * cdb_byte2: This specifies byte 2 (byte 1 when counting from 0) * of the CDB. This is generally the WRITE BUFFER mode. * * cdb_byte2_last: This specifies byte 2 for the last chunk of the * download. * * inc_cdb_buffer_id: Increment the buffer ID by 1 for each chunk sent * down to the drive. * * inc_cdb_offset: Increment the offset field in the CDB with the byte * offset into the firmware file. * * tur_status: Pay attention to whether the device is ready before * upgrading the firmware, or not. See above for the * values. */ struct fw_vendor { fw_vendor_t type; const char *pattern; int dev_type; int max_pkt_size; u_int8_t cdb_byte2; u_int8_t cdb_byte2_last; int inc_cdb_buffer_id; int inc_cdb_offset; fw_tur_status tur_status; int timeout_ms; fw_timeout_type timeout_type; }; /* * Vendor notes: * * HGST: The packets need to be sent in multiples of 4K. * * IBM: For LTO and TS drives, the buffer ID is ignored in mode 7 (and * some other modes). It treats the request as a firmware download. * The offset (and therefore the length of each chunk sent) needs * to be a multiple of the offset boundary specified for firmware * (buffer ID 4) in the read buffer command. At least for LTO-6, * that seems to be 0, but using a 32K chunk size should satisfy * most any alignment requirement. * * SmrtStor: Mode 5 is also supported, but since the firmware is 400KB or * so, we can't fit it in a single request in most cases. */ static struct fw_vendor vendors_list[] = { {VENDOR_HGST, "HGST", T_DIRECT, 0x1000, 0x07, 0x07, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_HITACHI, "HITACHI", T_ANY, 0x8000, 0x05, 0x05, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_HP, "HP", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_IBM, "IBM", T_SEQUENTIAL, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_NA, 300 * 1000, FW_TIMEOUT_DEFAULT}, {VENDOR_IBM, "IBM", T_ANY, 0x8000, 0x05, 0x05, 1, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_PLEXTOR, "PLEXTOR", T_ANY, 0x2000, 0x04, 0x05, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_QUALSTAR, "QUALSTAR", T_ANY, 0x2030, 0x05, 0x05, 0, 0, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_QUANTUM, "QUANTUM", T_ANY, 0x2000, 0x04, 0x05, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SAMSUNG, "SAMSUNG", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SEAGATE, "SEAGATE", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, {VENDOR_SMART, "SmrtStor", T_DIRECT, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_DEFAULT}, /* * We match any ATA device. 
This is really just a placeholder, * since we won't actually send a WRITE BUFFER with any of the * listed parameters. If a SATA device is behind a SAS controller, * the SCSI to ATA translation code (at least for LSI) doesn't * generally translate a SCSI WRITE BUFFER into an ATA DOWNLOAD * MICROCODE command. So, we use the SCSI ATA PASS_THROUGH command * to send the ATA DOWNLOAD MICROCODE command instead. */ {VENDOR_ATA, "ATA", T_ANY, 0x8000, 0x07, 0x07, 0, 1, FW_TUR_READY, WB_TIMEOUT, FW_TIMEOUT_NO_PROBE}, {VENDOR_UNKNOWN, NULL, T_ANY, 0x0000, 0x00, 0x00, 0, 0, FW_TUR_NONE, WB_TIMEOUT, FW_TIMEOUT_DEFAULT} }; struct fw_timeout_desc { fw_timeout_type timeout_type; const char *timeout_desc; }; static const struct fw_timeout_desc fw_timeout_desc_table[] = { { FW_TIMEOUT_DEFAULT, "the default" }, { FW_TIMEOUT_DEV_REPORTED, "recommended by this particular device" }, { FW_TIMEOUT_NO_PROBE, "the default" }, { FW_TIMEOUT_USER_SPEC, "what was specified on the command line" } }; #ifndef ATA_DOWNLOAD_MICROCODE #define ATA_DOWNLOAD_MICROCODE 0x92 #endif #define USE_OFFSETS_FEATURE 0x3 #ifndef LOW_SECTOR_SIZE #define LOW_SECTOR_SIZE 512 #endif #define ATA_MAKE_LBA(o, p) \ ((((((o) / LOW_SECTOR_SIZE) >> 8) & 0xff) << 16) | \ ((((o) / LOW_SECTOR_SIZE) & 0xff) << 8) | \ ((((p) / LOW_SECTOR_SIZE) >> 8) & 0xff)) #define ATA_MAKE_SECTORS(p) (((p) / 512) & 0xff) #ifndef UNKNOWN_MAX_PKT_SIZE #define UNKNOWN_MAX_PKT_SIZE 0x8000 #endif static struct fw_vendor *fw_get_vendor(struct cam_device *cam_dev, struct ata_params *ident_buf); static int fw_get_timeout(struct cam_device *cam_dev, struct fw_vendor *vp, int retry_count, int timeout); static int fw_validate_ibm(struct cam_device *dev, int retry_count, int timeout, int fd, char *buf, const char *fw_img_path, int quiet); static char *fw_read_img(struct cam_device *dev, int retry_count, int timeout, int quiet, const char *fw_img_path, struct fw_vendor *vp, int *num_bytes); static int fw_check_device_ready(struct cam_device *dev, camcontrol_devtype devtype, struct fw_vendor *vp, int printerrors, int timeout); static int fw_download_img(struct cam_device *cam_dev, struct fw_vendor *vp, char *buf, int img_size, int sim_mode, int printerrors, int quiet, int retry_count, int timeout, const char */*name*/, camcontrol_devtype devtype); /* * Find entry in vendors list that belongs to * the vendor of given cam device. */ static struct fw_vendor * fw_get_vendor(struct cam_device *cam_dev, struct ata_params *ident_buf) { char vendor[42]; struct fw_vendor *vp; if (cam_dev == NULL) return (NULL); if (ident_buf != NULL) { cam_strvis((u_char *)vendor, ident_buf->model, sizeof(ident_buf->model), sizeof(vendor)); for (vp = vendors_list; vp->pattern != NULL; vp++) { if (vp->type == VENDOR_ATA) return (vp); } } else { cam_strvis((u_char *)vendor, (u_char *)cam_dev->inq_data.vendor, sizeof(cam_dev->inq_data.vendor), sizeof(vendor)); } for (vp = vendors_list; vp->pattern != NULL; vp++) { if (!cam_strmatch((const u_char *)vendor, (const u_char *)vp->pattern, strlen(vendor))) { if ((vp->dev_type == T_ANY) || (vp->dev_type == SID_TYPE(&cam_dev->inq_data))) break; } } return (vp); } static int fw_get_timeout(struct cam_device *cam_dev, struct fw_vendor *vp, int retry_count, int timeout) { struct scsi_report_supported_opcodes_one *one; struct scsi_report_supported_opcodes_timeout *td; uint8_t *buf = NULL; uint32_t fill_len = 0, cdb_len = 0, rec_timeout = 0; int retval = 0; /* * If the user has specified a timeout on the command line, we let * him override any default or probed value. 
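 * * Otherwise, if probing is enabled for this vendor entry, we ask the * drive for a recommended WRITE BUFFER timeout via REPORT SUPPORTED * OPERATION CODES with a timeout descriptor (see the scsigetopcodes() * call below), and fall back to the table default when the drive does * not supply one.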
*/ if (timeout != 0) { vp->timeout_type = FW_TIMEOUT_USER_SPEC; vp->timeout_ms = timeout; goto bailout; } /* * Check to see whether we should probe for a timeout for this * device. */ if (vp->timeout_type == FW_TIMEOUT_NO_PROBE) goto bailout; retval = scsigetopcodes(/*device*/ cam_dev, /*opcode_set*/ 1, /*opcode*/ WRITE_BUFFER, /*show_sa_errors*/ 1, /*sa_set*/ 0, /*service_action*/ 0, /*timeout_desc*/ 1, /*retry_count*/ retry_count, /*timeout*/ 10000, /*verbose*/ 0, /*fill_len*/ &fill_len, /*data_ptr*/ &buf); /* * It isn't an error if we can't get a timeout descriptor. We just * continue on with the default timeout. */ if (retval != 0) { retval = 0; goto bailout; } /* * Even if the drive didn't return a SCSI error, if we don't have * enough data to contain the one opcode descriptor, the CDB * structure and a timeout descriptor, we don't have the timeout * value we're looking for. So we'll just fall back to the * default value. */ if (fill_len < (sizeof(*one) + sizeof(struct scsi_write_buffer) + sizeof(*td))) goto bailout; one = (struct scsi_report_supported_opcodes_one *)buf; /* * If the drive claims to not support the WRITE BUFFER command... * fall back to the default timeout value and let things fail on * the actual firmware download. */ if ((one->support & RSO_ONE_SUP_MASK) == RSO_ONE_SUP_NOT_SUP) goto bailout; cdb_len = scsi_2btoul(one->cdb_length); td = (struct scsi_report_supported_opcodes_timeout *) &buf[sizeof(*one) + cdb_len]; rec_timeout = scsi_4btoul(td->recommended_time); /* * If the recommended timeout is 0, then the device has probably * returned a bogus value. */ if (rec_timeout == 0) goto bailout; /* CAM timeouts are in ms */ rec_timeout *= 1000; vp->timeout_ms = rec_timeout; vp->timeout_type = FW_TIMEOUT_DEV_REPORTED; bailout: return (retval); } #define SVPD_IBM_FW_DESIGNATION 0x03 /* * IBM LTO and TS tape drives have an INQUIRY VPD page 0x3 with the following * format: */ struct fw_ibm_tape_fw_designation { uint8_t device; uint8_t page_code; uint8_t reserved; uint8_t length; uint8_t ascii_length; uint8_t reserved2[3]; uint8_t load_id[4]; uint8_t fw_rev[4]; uint8_t ptf_number[4]; uint8_t patch_number[4]; uint8_t ru_name[8]; uint8_t lib_seq_num[5]; }; /* * The firmware for IBM tape drives has the following header format. The * load_id and ru_name in the header file should match what is returned in * VPD page 0x3. */ struct fw_ibm_tape_fw_header { uint8_t unspec[4]; uint8_t length[4]; /* Firmware and header! */ uint8_t load_id[4]; uint8_t fw_rev[4]; uint8_t reserved[8]; uint8_t ru_name[8]; }; static int fw_validate_ibm(struct cam_device *dev, int retry_count, int timeout, int fd, char *buf, const char *fw_img_path, int quiet) { union ccb *ccb; struct fw_ibm_tape_fw_designation vpd_page; struct fw_ibm_tape_fw_header *header; char drive_rev[sizeof(vpd_page.fw_rev) + 1]; char file_rev[sizeof(vpd_page.fw_rev) + 1]; int retval = 1; ccb = cam_getccb(dev); if (ccb == NULL) { warnx("couldn't allocate CCB"); goto bailout; } /* cam_getccb cleans up the header, caller has to zero the payload */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); bzero(&vpd_page, sizeof(vpd_page)); scsi_inquiry(&ccb->csio, /*retries*/ retry_count, /*cbfcnp*/ NULL, /* tag_action */ MSG_SIMPLE_Q_TAG, /* inq_buf */ (u_int8_t *)&vpd_page, /* inq_len */ sizeof(vpd_page), /* evpd */ 1, /* page_code */ SVPD_IBM_FW_DESIGNATION, /* sense_len */ SSD_FULL_SIZE, /* timeout */ timeout ? 
timeout : 5000); /* Disable freezing the device queue */ ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; if (retry_count != 0) ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; if (cam_send_ccb(dev, ccb) < 0) { warn("error getting firmware designation page"); cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); cam_freeccb(ccb); ccb = NULL; goto bailout; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); goto bailout; } /* * Read the firmware header only. */ if (read(fd, buf, sizeof(*header)) != sizeof(*header)) { warn("unable to read %zu bytes from %s", sizeof(*header), fw_img_path); goto bailout; } /* Rewind the file back to 0 for the full file read. */ if (lseek(fd, 0, SEEK_SET) == -1) { warn("Unable to lseek"); goto bailout; } header = (struct fw_ibm_tape_fw_header *)buf; bzero(drive_rev, sizeof(drive_rev)); bcopy(vpd_page.fw_rev, drive_rev, sizeof(vpd_page.fw_rev)); bzero(file_rev, sizeof(file_rev)); bcopy(header->fw_rev, file_rev, sizeof(header->fw_rev)); if (quiet == 0) { fprintf(stdout, "Current Drive Firmware version: %s\n", drive_rev); fprintf(stdout, "Firmware File version: %s\n", file_rev); } /* * For IBM tape drives the load ID and RU name reported by the * drive should match what is in the firmware file. */ if (bcmp(vpd_page.load_id, header->load_id, MIN(sizeof(vpd_page.load_id), sizeof(header->load_id))) != 0) { warnx("Drive Firmware load ID 0x%x does not match firmware " "file load ID 0x%x", scsi_4btoul(vpd_page.load_id), scsi_4btoul(header->load_id)); goto bailout; } if (bcmp(vpd_page.ru_name, header->ru_name, MIN(sizeof(vpd_page.ru_name), sizeof(header->ru_name))) != 0) { warnx("Drive Firmware RU name 0x%jx does not match firmware " "file RU name 0x%jx", (uintmax_t)scsi_8btou64(vpd_page.ru_name), (uintmax_t)scsi_8btou64(header->ru_name)); goto bailout; } if (quiet == 0) fprintf(stdout, "Firmware file is valid for this drive.\n"); retval = 0; bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * Allocate a buffer and read fw image file into it * from given path. Number of bytes read is stored * in num_bytes. */ static char * fw_read_img(struct cam_device *dev, int retry_count, int timeout, int quiet, const char *fw_img_path, struct fw_vendor *vp, int *num_bytes) { int fd; struct stat stbuf; char *buf; off_t img_size; int skip_bytes = 0; if ((fd = open(fw_img_path, O_RDONLY)) < 0) { warn("Could not open image file %s", fw_img_path); return (NULL); } if (fstat(fd, &stbuf) < 0) { warn("Could not stat image file %s", fw_img_path); goto bailout1; } if ((img_size = stbuf.st_size) == 0) { warnx("Zero length image file %s", fw_img_path); goto bailout1; } if ((buf = malloc(img_size)) == NULL) { warnx("Could not allocate buffer to read image file %s", fw_img_path); goto bailout1; } /* Skip headers if applicable. 
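 * * (Seagate images may carry an 80-byte header, detected either by the * "SEAGATE,SEAGATE " signature or by the image size modulo 512; * Qualstar images skip img_size % 1030 leading bytes; IBM tape images * are validated against VPD page 0x3 by fw_validate_ibm() rather than * trimmed.)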
*/ switch (vp->type) { case VENDOR_SEAGATE: if (read(fd, buf, 16) != 16) { warn("Could not read image file %s", fw_img_path); goto bailout; } if (lseek(fd, 0, SEEK_SET) == -1) { warn("Unable to lseek"); goto bailout; } if ((strncmp(buf, "SEAGATE,SEAGATE ", 16) == 0) || (img_size % 512 == 80)) skip_bytes = 80; break; case VENDOR_QUALSTAR: skip_bytes = img_size % 1030; break; case VENDOR_IBM: { if (vp->dev_type != T_SEQUENTIAL) break; if (fw_validate_ibm(dev, retry_count, timeout, fd, buf, fw_img_path, quiet) != 0) goto bailout; break; } default: break; } if (skip_bytes != 0) { fprintf(stdout, "Skipping %d byte header.\n", skip_bytes); if (lseek(fd, skip_bytes, SEEK_SET) == -1) { warn("Could not lseek"); goto bailout; } img_size -= skip_bytes; } /* Read image into a buffer. */ if (read(fd, buf, img_size) != img_size) { warn("Could not read image file %s", fw_img_path); goto bailout; } *num_bytes = img_size; close(fd); return (buf); bailout: free(buf); bailout1: close(fd); *num_bytes = 0; return (NULL); } /* * Returns 0 for "success", where success means that the device has met the * requirement in the vendor structure for being ready or not ready when * firmware is downloaded. * * Returns 1 for a failure to be ready to accept a firmware download. * (e.g., a drive needs to be ready, but returns not ready) * * Returns -1 for any other failure. */ static int fw_check_device_ready(struct cam_device *dev, camcontrol_devtype devtype, struct fw_vendor *vp, int printerrors, int timeout) { union ccb *ccb; int retval = 0; uint16_t *ptr = NULL; size_t dxfer_len = 0; if ((ccb = cam_getccb(dev)) == NULL) { warnx("Could not allocate CCB"); retval = -1; goto bailout; } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); if (devtype != CC_DT_SCSI) { dxfer_len = sizeof(struct ata_params); ptr = (uint16_t *)malloc(dxfer_len); if (ptr == NULL) { warnx("can't malloc memory for identify"); retval = -1; goto bailout; } bzero(ptr, dxfer_len); } switch (devtype) { case CC_DT_SCSI: scsi_test_unit_ready(&ccb->csio, /*retries*/ 0, /*cbfcnp*/ NULL, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ 5000); break; case CC_DT_ATA_BEHIND_SCSI: case CC_DT_ATA: { - build_ata_cmd(ccb, + retval = build_ata_cmd(ccb, /*retries*/ 1, /*flags*/ CAM_DIR_IN, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*protocol*/ AP_PROTO_PIO_IN, /*ata_flags*/ AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_FROM_DEV, /*features*/ 0, /*sector_count*/ (uint8_t) dxfer_len, /*lba*/ 0, /*command*/ ATA_ATA_IDENTIFY, + /*auxiliary*/ 0, /*data_ptr*/ (uint8_t *)ptr, /*dxfer_len*/ dxfer_len, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : 30 * 1000, /*is48bit*/ 0, /*devtype*/ devtype); + if (retval != 0) { + retval = -1; + warnx("%s: build_ata_cmd() failed, likely " + "programmer error", __func__); + goto bailout; + } break; } default: warnx("Unknown disk type %d", devtype); retval = -1; goto bailout; break; /*NOTREACHED*/ } ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; retval = cam_send_ccb(dev, ccb); if (retval != 0) { warn("error sending %s CCB", (devtype == CC_DT_SCSI) ?
"Test Unit Ready" : "Identify"); retval = -1; goto bailout; } if (((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) && (vp->tur_status == FW_TUR_READY)) { warnx("Device is not ready"); if (printerrors) cam_error_print(dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } else if (((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) && (vp->tur_status == FW_TUR_NOT_READY)) { warnx("Device cannot have media loaded when firmware is " "downloaded"); retval = 1; goto bailout; } bailout: if (ccb != NULL) cam_freeccb(ccb); return (retval); } /* * Download firmware stored in buf to cam_dev. If simulation mode * is enabled, only show what packet sizes would be sent to the * device, but do not send any actual packets. */ static int fw_download_img(struct cam_device *cam_dev, struct fw_vendor *vp, char *buf, int img_size, int sim_mode, int printerrors, int quiet, int retry_count, int timeout, const char *imgname, camcontrol_devtype devtype) { struct scsi_write_buffer cdb; progress_t progress; int size = 0; union ccb *ccb = NULL; int pkt_count = 0; int max_pkt_size; u_int32_t pkt_size = 0; char *pkt_ptr = buf; u_int32_t offset; int last_pkt = 0; int retval = 0; /* * Check to see whether the device is ready to accept a firmware * download. */ retval = fw_check_device_ready(cam_dev, devtype, vp, printerrors, timeout); if (retval != 0) goto bailout; if ((ccb = cam_getccb(cam_dev)) == NULL) { warnx("Could not allocate CCB"); retval = 1; goto bailout; } bzero(&(&ccb->ccb_h)[1], sizeof(union ccb) - sizeof(struct ccb_hdr)); max_pkt_size = vp->max_pkt_size; if (max_pkt_size == 0) max_pkt_size = UNKNOWN_MAX_PKT_SIZE; pkt_size = max_pkt_size; progress_init(&progress, imgname, size = img_size); /* Download single fw packets. */ do { if (img_size <= max_pkt_size) { last_pkt = 1; pkt_size = img_size; } progress_update(&progress, size - img_size); if (((sim_mode == 0) && (quiet == 0)) || ((sim_mode != 0) && (printerrors == 0))) progress_draw(&progress); bzero(&cdb, sizeof(cdb)); switch (devtype) { case CC_DT_SCSI: cdb.opcode = WRITE_BUFFER; cdb.control = 0; /* Parameter list length. */ scsi_ulto3b(pkt_size, &cdb.length[0]); offset = vp->inc_cdb_offset ? (pkt_ptr - buf) : 0; scsi_ulto3b(offset, &cdb.offset[0]); cdb.byte2 = last_pkt ? vp->cdb_byte2_last : vp->cdb_byte2; cdb.buffer_id = vp->inc_cdb_buffer_id ? pkt_count : 0; /* Zero out payload of ccb union after ccb header. */ bzero(&(&ccb->ccb_h)[1], sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); /* * Copy previously constructed cdb into ccb_scsiio * struct. */ bcopy(&cdb, &ccb->csio.cdb_io.cdb_bytes[0], sizeof(struct scsi_write_buffer)); /* Fill rest of ccb_scsiio struct. */ cam_fill_csio(&ccb->csio, /* ccb_scsiio*/ retry_count, /* retries*/ NULL, /* cbfcnp*/ CAM_DIR_OUT | CAM_DEV_QFRZDIS, /* flags*/ CAM_TAG_ACTION_NONE, /* tag_action*/ (u_char *)pkt_ptr, /* data_ptr*/ pkt_size, /* dxfer_len*/ SSD_FULL_SIZE, /* sense_len*/ sizeof(struct scsi_write_buffer), /* cdb_len*/ timeout ?
timeout : WB_TIMEOUT); /* timeout*/ break; case CC_DT_ATA: case CC_DT_ATA_BEHIND_SCSI: { uint32_t off; off = (uint32_t)(pkt_ptr - buf); - build_ata_cmd(ccb, + retval = build_ata_cmd(ccb, /*retry_count*/ retry_count, /*flags*/ CAM_DIR_OUT | CAM_DEV_QFRZDIS, /*tag_action*/ CAM_TAG_ACTION_NONE, /*protocol*/ AP_PROTO_PIO_OUT, /*ata_flags*/ AP_FLAG_BYT_BLOK_BYTES | AP_FLAG_TLEN_SECT_CNT | AP_FLAG_TDIR_TO_DEV, /*features*/ USE_OFFSETS_FEATURE, /*sector_count*/ ATA_MAKE_SECTORS(pkt_size), /*lba*/ ATA_MAKE_LBA(off, pkt_size), /*command*/ ATA_DOWNLOAD_MICROCODE, + /*auxiliary*/ 0, /*data_ptr*/ (uint8_t *)pkt_ptr, /*dxfer_len*/ pkt_size, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ timeout ? timeout : WB_TIMEOUT, /*is48bit*/ 0, /*devtype*/ devtype); + + if (retval != 0) { + warnx("%s: build_ata_cmd() failed, likely " + "programmer error", __func__); + goto bailout; + } break; } default: warnx("Unknown device type %d", devtype); retval = 1; goto bailout; break; /*NOTREACHED*/ } if (!sim_mode) { /* Execute the command. */ if (cam_send_ccb(cam_dev, ccb) < 0 || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { warnx("Error writing image to device"); if (printerrors) cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); retval = 1; goto bailout; } } else if (printerrors) { cam_error_print(cam_dev, ccb, CAM_ESF_COMMAND, 0, stdout); } /* Prepare next round. */ pkt_count++; pkt_ptr += pkt_size; img_size -= pkt_size; } while(!last_pkt); bailout: if (quiet == 0) progress_complete(&progress, size - img_size); if (ccb != NULL) cam_freeccb(ccb); return (retval); } int fwdownload(struct cam_device *device, int argc, char **argv, char *combinedopt, int printerrors, int retry_count, int timeout) { struct fw_vendor *vp; char *fw_img_path = NULL; struct ata_params *ident_buf = NULL; camcontrol_devtype devtype; char *buf = NULL; int img_size; int c; int sim_mode = 0; int confirmed = 0; int quiet = 0; int retval = 0; while ((c = getopt(argc, argv, combinedopt)) != -1) { switch (c) { case 'f': fw_img_path = optarg; break; case 'q': quiet = 1; break; case 's': sim_mode = 1; break; case 'y': confirmed = 1; break; default: break; } } if (fw_img_path == NULL) errx(1, "you must specify a firmware image file using -f " "option"); retval = get_device_type(device, retry_count, timeout, printerrors, &devtype); if (retval != 0) errx(1, "Unable to determine device type"); if ((devtype == CC_DT_ATA) || (devtype == CC_DT_ATA_BEHIND_SCSI)) { union ccb *ccb; ccb = cam_getccb(device); if (ccb == NULL) { warnx("couldn't allocate CCB"); retval = 1; goto bailout; } if (ata_do_identify(device, retry_count, timeout, ccb, &ident_buf) != 0) { cam_freeccb(ccb); retval = 1; goto bailout; } } else if (devtype != CC_DT_SCSI) errx(1, "Unsupported device type %d", devtype); vp = fw_get_vendor(device, ident_buf); /* * Bail out if we have an unknown vendor and this isn't an ATA * disk. For a SCSI disk, we have no chance of working properly * with the default values in the VENDOR_UNKNOWN case. For an ATA * disk connected via an ATA transport, we may work for drives that * support the ATA_DOWNLOAD_MICROCODE command. 
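* For example (hypothetical devices): an unrecognized SATA disk on an
* AHCI port (probed as CC_DT_ATA) still gets a chance via the generic
* ATA_DOWNLOAD_MICROCODE path, while an unrecognized drive on a
* SCSI/SAS attachment (CC_DT_SCSI) is rejected below, because the
* WRITE BUFFER parameters in the default vendor entry cannot be
* trusted for an arbitrary drive.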
*/ if (((vp == NULL) || (vp->type == VENDOR_UNKNOWN)) && (devtype == CC_DT_SCSI)) errx(1, "Unsupported device"); retval = fw_get_timeout(device, vp, retry_count, timeout); if (retval != 0) { warnx("Unable to get a firmware download timeout value"); goto bailout; } buf = fw_read_img(device, retry_count, timeout, quiet, fw_img_path, vp, &img_size); if (buf == NULL) { retval = 1; goto bailout; } if (!confirmed) { fprintf(stdout, "You are about to download firmware image (%s)" " into the following device:\n", fw_img_path); if (devtype == CC_DT_SCSI) { if (scsidoinquiry(device, argc, argv, combinedopt, 0, 5000) != 0) { warnx("Error sending inquiry"); retval = 1; goto bailout; } } else { printf("%s%d: ", device->device_name, device->dev_unit_num); ata_print_ident(ident_buf); camxferrate(device); free(ident_buf); } fprintf(stdout, "Using a timeout of %u ms, which is %s.\n", vp->timeout_ms, fw_timeout_desc_table[vp->timeout_type].timeout_desc); fprintf(stdout, "\nIt may damage your drive. "); if (!get_confirmation()) { retval = 1; goto bailout; } } if ((sim_mode != 0) && (quiet == 0)) fprintf(stdout, "Running in simulation mode\n"); if (fw_download_img(device, vp, buf, img_size, sim_mode, printerrors, quiet, retry_count, vp->timeout_ms, fw_img_path, devtype) != 0) { fprintf(stderr, "Firmware download failed\n"); retval = 1; goto bailout; } else if (quiet == 0) fprintf(stdout, "Firmware download successful\n"); bailout: free(buf); return (retval); } Index: head/sbin/camcontrol/zone.c =================================================================== --- head/sbin/camcontrol/zone.c (nonexistent) +++ head/sbin/camcontrol/zone.c (revision 300207) @@ -0,0 +1,676 @@ +/*- + * Copyright (c) 2015, 2016 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * Authors: Ken Merry (Spectra Logic Corporation) + */ +/* + * SCSI and ATA Shingled Media Recording (SMR) support for camcontrol(8). + * This is an implementation of the SCSI ZBC and ATA ZAC specs. 
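+ *
+ * Example invocations of the new "camcontrol zone" subcommand (the
+ * device name here is hypothetical):
+ *
+ *   camcontrol zone ada0 -c rz -P summary
+ *   camcontrol zone ada0 -c open -l 0x40000
+ *
+ * The first prints a summary of the drive's zone report; the second
+ * explicitly opens the zone starting at LBA 0x40000.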
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "camcontrol.h" + +static struct scsi_nv zone_cmd_map[] = { + { "rz", ZBC_IN_SA_REPORT_ZONES }, + { "reportzones", ZBC_IN_SA_REPORT_ZONES }, + { "close", ZBC_OUT_SA_CLOSE }, + { "finish", ZBC_OUT_SA_FINISH }, + { "open", ZBC_OUT_SA_OPEN }, + { "rwp", ZBC_OUT_SA_RWP } +}; + +static struct scsi_nv zone_rep_opts[] = { + { "all", ZBC_IN_REP_ALL_ZONES }, + { "empty", ZBC_IN_REP_EMPTY }, + { "imp_open", ZBC_IN_REP_IMP_OPEN }, + { "exp_open", ZBC_IN_REP_EXP_OPEN }, + { "closed", ZBC_IN_REP_CLOSED }, + { "full", ZBC_IN_REP_FULL }, + { "readonly", ZBC_IN_REP_READONLY }, + { "ro", ZBC_IN_REP_READONLY }, + { "offline", ZBC_IN_REP_OFFLINE }, + { "rwp", ZBC_IN_REP_RESET }, + { "reset", ZBC_IN_REP_RESET }, + { "nonseq", ZBC_IN_REP_NON_SEQ }, + { "nonwp", ZBC_IN_REP_NON_WP } +}; + +typedef enum { + ZONE_OF_NORMAL = 0x00, + ZONE_OF_SUMMARY = 0x01, + ZONE_OF_SCRIPT = 0x02 +} zone_output_flags; + +static struct scsi_nv zone_print_opts[] = { + { "normal", ZONE_OF_NORMAL }, + { "summary", ZONE_OF_SUMMARY }, + { "script", ZONE_OF_SCRIPT } +}; + +#define ZAC_ATA_SECTOR_COUNT(bcount) (((bcount) / 512) & 0xffff) + +typedef enum { + ZONE_PRINT_OK, + ZONE_PRINT_MORE_DATA, + ZONE_PRINT_ERROR +} zone_print_status; + +typedef enum { + ZONE_FW_START, + ZONE_FW_LEN, + ZONE_FW_WP, + ZONE_FW_TYPE, + ZONE_FW_COND, + ZONE_FW_SEQ, + ZONE_FW_RESET, + ZONE_NUM_FIELDS +} zone_field_widths; + +zone_print_status zone_rz_print(uint8_t *data_ptr, uint32_t valid_len, + int ata_format, zone_output_flags out_flags, + int first_pass, uint64_t *next_start_lba); + + +zone_print_status +zone_rz_print(uint8_t *data_ptr, uint32_t valid_len, int ata_format, + zone_output_flags out_flags, int first_pass, + uint64_t *next_start_lba) +{ + struct scsi_report_zones_hdr *hdr = NULL; + struct scsi_report_zones_desc *desc = NULL; + uint32_t hdr_len, len; + uint64_t max_lba, next_lba = 0; + int more_data = 0; + zone_print_status status = ZONE_PRINT_OK; + char tmpstr[80]; + int field_widths[ZONE_NUM_FIELDS]; + char word_sep; + + if (valid_len < sizeof(*hdr)) { + status = ZONE_PRINT_ERROR; + goto bailout; + } + + hdr = (struct scsi_report_zones_hdr *)data_ptr; + + field_widths[ZONE_FW_START] = 11; + field_widths[ZONE_FW_LEN] = 6; + field_widths[ZONE_FW_WP] = 11; + field_widths[ZONE_FW_TYPE] = 13; + field_widths[ZONE_FW_COND] = 13; + field_widths[ZONE_FW_SEQ] = 14; + field_widths[ZONE_FW_RESET] = 16; + + if (ata_format == 0) { + hdr_len = scsi_4btoul(hdr->length); + max_lba = scsi_8btou64(hdr->maximum_lba); + } else { + hdr_len = le32dec(hdr->length); + max_lba = le64dec(hdr->maximum_lba); + } + + if (hdr_len > (valid_len + sizeof(*hdr))) { + more_data = 1; + status = ZONE_PRINT_MORE_DATA; + } + + len = MIN(valid_len - sizeof(*hdr), hdr_len); + + if (out_flags == ZONE_OF_SCRIPT) + word_sep = '_'; + else + word_sep = ' '; + + if ((out_flags != ZONE_OF_SCRIPT) + && (first_pass != 0)) { + printf("%zu zones, Maximum LBA %#jx (%ju)\n", + hdr_len / sizeof(*desc), (uintmax_t)max_lba, + (uintmax_t)max_lba); + + switch (hdr->byte4 & SRZ_SAME_MASK) { + case SRZ_SAME_ALL_DIFFERENT: + printf("Zone lengths and types may vary\n"); + break; + case SRZ_SAME_ALL_SAME: + printf("Zone lengths and types are all the same\n"); + break; + case 
SRZ_SAME_LAST_DIFFERENT: + printf("Zone types are the same, last zone length " + "differs\n"); + break; + case SRZ_SAME_TYPES_DIFFERENT: + printf("Zone lengths are the same, types vary\n"); + break; + default: + printf("Unknown SAME field value %#x\n", + hdr->byte4 & SRZ_SAME_MASK); + break; + } + } + if (out_flags == ZONE_OF_SUMMARY) { + status = ZONE_PRINT_OK; + goto bailout; + } + + if ((out_flags == ZONE_OF_NORMAL) + && (first_pass != 0)) { + printf("%*s %*s %*s %*s %*s %*s %*s\n", + field_widths[ZONE_FW_START], "Start LBA", + field_widths[ZONE_FW_LEN], "Length", + field_widths[ZONE_FW_WP], "WP LBA", + field_widths[ZONE_FW_TYPE], "Zone Type", + field_widths[ZONE_FW_COND], "Condition", + field_widths[ZONE_FW_SEQ], "Sequential", + field_widths[ZONE_FW_RESET], "Reset"); + } + + for (desc = &hdr->desc_list[0]; len >= sizeof(*desc); + len -= sizeof(*desc), desc++) { + uint64_t length, start_lba, wp_lba; + + if (ata_format == 0) { + length = scsi_8btou64(desc->zone_length); + start_lba = scsi_8btou64(desc->zone_start_lba); + wp_lba = scsi_8btou64(desc->write_pointer_lba); + } else { + length = le64dec(desc->zone_length); + start_lba = le64dec(desc->zone_start_lba); + wp_lba = le64dec(desc->write_pointer_lba); + } + + printf("%#*jx, %*ju, %#*jx, ", field_widths[ZONE_FW_START], + (uintmax_t)start_lba, field_widths[ZONE_FW_LEN], + (uintmax_t)length, field_widths[ZONE_FW_WP], + (uintmax_t)wp_lba); + + switch (desc->zone_type & SRZ_TYPE_MASK) { + case SRZ_TYPE_CONVENTIONAL: + snprintf(tmpstr, sizeof(tmpstr), "Conventional"); + break; + case SRZ_TYPE_SEQ_PREFERRED: + case SRZ_TYPE_SEQ_REQUIRED: + snprintf(tmpstr, sizeof(tmpstr), "Seq%c%s", + word_sep, ((desc->zone_type & SRZ_TYPE_MASK) == + SRZ_TYPE_SEQ_PREFERRED) ? "Preferred" : + "Required"); + break; + default: + snprintf(tmpstr, sizeof(tmpstr), "Zone%ctype%c%#x", + word_sep, word_sep,desc->zone_type & + SRZ_TYPE_MASK); + break; + } + printf("%*s, ", field_widths[ZONE_FW_TYPE], tmpstr); + + switch (desc->zone_flags & SRZ_ZONE_COND_MASK) { + case SRZ_ZONE_COND_NWP: + snprintf(tmpstr, sizeof(tmpstr), "NWP"); + break; + case SRZ_ZONE_COND_EMPTY: + snprintf(tmpstr, sizeof(tmpstr), "Empty"); + break; + case SRZ_ZONE_COND_IMP_OPEN: + snprintf(tmpstr, sizeof(tmpstr), "Implicit%cOpen", + word_sep); + break; + case SRZ_ZONE_COND_EXP_OPEN: + snprintf(tmpstr, sizeof(tmpstr), "Explicit%cOpen", + word_sep); + break; + case SRZ_ZONE_COND_CLOSED: + snprintf(tmpstr, sizeof(tmpstr), "Closed"); + break; + case SRZ_ZONE_COND_READONLY: + snprintf(tmpstr, sizeof(tmpstr), "Readonly"); + break; + case SRZ_ZONE_COND_FULL: + snprintf(tmpstr, sizeof(tmpstr), "Full"); + break; + case SRZ_ZONE_COND_OFFLINE: + snprintf(tmpstr, sizeof(tmpstr), "Offline"); + break; + default: + snprintf(tmpstr, sizeof(tmpstr), "%#x", + desc->zone_flags & SRZ_ZONE_COND_MASK); + break; + } + + printf("%*s, ", field_widths[ZONE_FW_COND], tmpstr); + + if (desc->zone_flags & SRZ_ZONE_NON_SEQ) + snprintf(tmpstr, sizeof(tmpstr), "Non%cSequential", + word_sep); + else + snprintf(tmpstr, sizeof(tmpstr), "Sequential"); + + printf("%*s, ", field_widths[ZONE_FW_SEQ], tmpstr); + + if (desc->zone_flags & SRZ_ZONE_RESET) + snprintf(tmpstr, sizeof(tmpstr), "Reset%cNeeded", + word_sep); + else + snprintf(tmpstr, sizeof(tmpstr), "No%cReset%cNeeded", + word_sep, word_sep); + + printf("%*s\n", field_widths[ZONE_FW_RESET], tmpstr); + + next_lba = start_lba + length; + } +bailout: + *next_start_lba = next_lba; + + return (status); +} + +int +zone(struct cam_device *device, int argc, char **argv, char 
*combinedopt, + int retry_count, int timeout, int verbosemode __unused) +{ + union ccb *ccb = NULL; + int action = -1, rep_option = -1; + int all_zones = 0; + uint64_t lba = 0; + int error = 0; + uint8_t *data_ptr = NULL; + uint32_t alloc_len = 65536, valid_len = 0; + camcontrol_devtype devtype; + int ata_format = 0, use_ncq = 0; + int first_pass = 1; + zone_print_status zp_status; + zone_output_flags out_flags = ZONE_OF_NORMAL; + uint8_t *cdb_storage = NULL; + int cdb_storage_len = 32; + int c; + + ccb = cam_getccb(device); + if (ccb == NULL) { + warnx("%s: error allocating CCB", __func__); + error = 1; + goto bailout; + } + + bzero(&(&ccb->ccb_h)[1], + sizeof(union ccb) - sizeof(struct ccb_hdr)); + + while ((c = getopt(argc, argv, combinedopt)) != -1) { + switch (c) { + case 'a': + all_zones = 1; + break; + case 'c': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_cmd_map, + (sizeof(zone_cmd_map) / sizeof(zone_cmd_map[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + action = zone_cmd_map[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "zone command", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'l': { + char *endptr; + + lba = strtoull(optarg, &endptr, 0); + if (*endptr != '\0') { + warnx("%s: invalid lba argument %s", __func__, + optarg); + error = 1; + goto bailout; + } + break; + } + case 'N': + use_ncq = 1; + break; + case 'o': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_rep_opts, + (sizeof(zone_rep_opts) /sizeof(zone_rep_opts[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + rep_option = zone_rep_opts[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "report zones", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'P': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_print_opts, + (sizeof(zone_print_opts) / + sizeof(zone_print_opts[0])), optarg, &entry_num, + SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + out_flags = zone_print_opts[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "print", + optarg); + error = 1; + goto bailout; + } + break; + } + default: + break; + } + } + if (action == -1) { + warnx("%s: must specify -c <zone command>", __func__); + error = 1; + goto bailout; + } + error = get_device_type(device, retry_count, timeout, + /*printerrors*/ 1, &devtype); + if (error != 0) + errx(1, "Unable to determine device type"); + + if (action == ZBC_IN_SA_REPORT_ZONES) { + + data_ptr = malloc(alloc_len); + if (data_ptr == NULL) + err(1, "unable to allocate %u bytes", alloc_len); + +restart_report: + bzero(data_ptr, alloc_len); + + switch (devtype) { + case CC_DT_SCSI: + scsi_zbc_in(&ccb->csio, + /*retries*/ retry_count, + /*cbfcnp*/ NULL, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*service_action*/ action, + /*zone_start_lba*/ lba, + /*zone_options*/ (rep_option != -1) ? + rep_option : 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ alloc_len, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? timeout : 60000); + break; + case CC_DT_ATA: + case CC_DT_ATA_BEHIND_SCSI: { + uint8_t command = 0; + uint8_t protocol = 0; + uint16_t features = 0, sector_count = 0; + uint32_t auxiliary = 0; + + /* + * XXX KDM support the partial bit?
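+ * (In ZBC REPORT ZONES, setting the PARTIAL bit makes the
+ * returned zone list length reflect only the descriptors that
+ * fit in the allocation length rather than all matching zones;
+ * it is left clear here.)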
+ */ + if (use_ncq == 0) { + command = ATA_ZAC_MANAGEMENT_IN; + features = action; + if (rep_option != -1) + features |= (rep_option << 8); + sector_count = ZAC_ATA_SECTOR_COUNT(alloc_len); + protocol = AP_PROTO_DMA; + } else { + cdb_storage = calloc(cdb_storage_len, 1); + if (cdb_storage == NULL) + err(1, "couldn't allocate memory"); + + command = ATA_RECV_FPDMA_QUEUED; + features = ZAC_ATA_SECTOR_COUNT(alloc_len); + sector_count = ATA_RFPDMA_ZAC_MGMT_IN << 8; + auxiliary = action & 0xf; + if (rep_option != -1) + auxiliary |= rep_option << 8; + protocol = AP_PROTO_FPDMA; + } + + error = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_IN | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ protocol, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TLEN_SECT_CNT | + AP_FLAG_TDIR_FROM_DEV, + /*features*/ features, + /*sector_count*/ sector_count, + /*lba*/ lba, + /*command*/ command, + /*auxiliary*/ auxiliary, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ ZAC_ATA_SECTOR_COUNT(alloc_len)*512, + /*cdb_storage*/ cdb_storage, + /*cdb_storage_len*/ cdb_storage_len, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? timeout : 60000, + /*is48bit*/ 1, + /*devtype*/ devtype); + + if (error != 0) { + warnx("%s: build_ata_cmd() failed, likely " + "programmer error", __func__); + goto bailout; + } + + ata_format = 1; + + break; + } + default: + warnx("%s: Unknown device type %d", __func__,devtype); + error = 1; + goto bailout; + break; /*NOTREACHED*/ + } + } else { + /* + * XXX KDM the current methodology is to always send ATA + * commands to ATA devices. Need to figure out how to + * detect whether a SCSI to ATA translation layer will + * translate ZBC IN/OUT commands to the appropriate ZAC + * command. + */ + switch (devtype) { + case CC_DT_SCSI: + scsi_zbc_out(&ccb->csio, + /*retries*/ retry_count, + /*cbfcnp*/ NULL, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*service_action*/ action, + /*zone_id*/ lba, + /*zone_flags*/ (all_zones != 0) ? ZBC_OUT_ALL : 0, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? timeout : 60000); + break; + case CC_DT_ATA: + case CC_DT_ATA_BEHIND_SCSI: { + uint8_t command = 0; + uint8_t protocol = 0; + uint16_t features = 0, sector_count = 0; + uint32_t auxiliary = 0; + + /* + * Note that we're taking advantage of the fact + * that the action numbers are the same between the + * ZBC and ZAC specs. + */ + + if (use_ncq == 0) { + protocol = AP_PROTO_NON_DATA; + command = ATA_ZAC_MANAGEMENT_OUT; + features = action & 0xf; + if (all_zones != 0) + features |= (ZBC_OUT_ALL << 8); + } else { + cdb_storage = calloc(cdb_storage_len, 1); + if (cdb_storage == NULL) + err(1, "couldn't allocate memory"); + + protocol = AP_PROTO_FPDMA; + command = ATA_NCQ_NON_DATA; + features = ATA_NCQ_ZAC_MGMT_OUT; + auxiliary = action & 0xf; + if (all_zones != 0) + auxiliary |= (ZBC_OUT_ALL << 8); + } + + + error = build_ata_cmd(ccb, + /*retry_count*/ retry_count, + /*flags*/ CAM_DIR_NONE | CAM_DEV_QFRZDIS, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*protocol*/ AP_PROTO_NON_DATA, + /*ata_flags*/ AP_FLAG_BYT_BLOK_BYTES | + AP_FLAG_TLEN_NO_DATA, + /*features*/ features, + /*sector_count*/ sector_count, + /*lba*/ lba, + /*command*/ command, + /*auxiliary*/ auxiliary, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*cdb_storage*/ cdb_storage, + /*cdb_storage_len*/ cdb_storage_len, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout ? 
timeout : 60000, + /*is48bit*/ 1, + /*devtype*/ devtype); + if (error != 0) { + warnx("%s: build_ata_cmd() failed, likely " + "programmer error", __func__); + goto bailout; + } + ata_format = 1; + break; + } + default: + warnx("%s: Unknown device type %d", __func__,devtype); + error = 1; + goto bailout; + break; /*NOTREACHED*/ + } + } + + ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; + if (retry_count > 0) + ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; + + error = cam_send_ccb(device, ccb); + if (error != 0) { + warn("error sending %s %s CCB", (devtype == CC_DT_SCSI) ? + "ZBC" : "ZAC Management", + (action == ZBC_IN_SA_REPORT_ZONES) ? "In" : "Out"); + error = -1; + goto bailout; + } + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + cam_error_print(device, ccb, CAM_ESF_ALL, CAM_EPF_ALL,stderr); + error = 1; + goto bailout; + } + + /* + * If we aren't reading the list of zones, we're done. + */ + if (action != ZBC_IN_SA_REPORT_ZONES) + goto bailout; + + if (ccb->ccb_h.func_code == XPT_SCSI_IO) + valid_len = ccb->csio.dxfer_len - ccb->csio.resid; + else + valid_len = ccb->ataio.dxfer_len - ccb->ataio.resid; + + zp_status = zone_rz_print(data_ptr, valid_len, ata_format, out_flags, + first_pass, &lba); + + if (zp_status == ZONE_PRINT_MORE_DATA) { + bzero(ccb, sizeof(*ccb)); + first_pass = 0; + goto restart_report; + } else if (zp_status == ZONE_PRINT_ERROR) + error = 1; +bailout: + if (ccb != NULL) + cam_freeccb(ccb); + + free(data_ptr); + free(cdb_storage); + + return (error); +} Property changes on: head/sbin/camcontrol/zone.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/cam/ata/ata_all.c =================================================================== --- head/sys/cam/ata/ata_all.c (revision 300206) +++ head/sys/cam/ata/ata_all.c (revision 300207) @@ -1,895 +1,1120 @@ /*- * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #else #include #include #include #include #ifndef min #define min(a,b) (((a)<(b))?(a):(b)) #endif #endif #include #include #include #include #include #include #include #include int ata_version(int ver) { int bit; if (ver == 0xffff) return 0; for (bit = 15; bit >= 0; bit--) if (ver & (1<<bit)) return bit; return 0; } char * ata_op_string(struct ata_cmd *cmd) { if (cmd->control & 0x04) return ("SOFT_RESET"); switch (cmd->command) { case 0x00: switch (cmd->features) { case 0x00: return ("NOP FLUSHQUEUE"); case 0x01: return ("NOP AUTOPOLL"); } return ("NOP"); case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR"); case 0x06: switch (cmd->features) { case 0x01: return ("DSM TRIM"); } return "DSM"; case 0x08: return ("DEVICE_RESET"); case 0x20: return ("READ"); case 0x24: return ("READ48"); case 0x25: return ("READ_DMA48"); case 0x26: return ("READ_DMA_QUEUED48"); case 0x27: return ("READ_NATIVE_MAX_ADDRESS48"); case 0x29: return ("READ_MUL48"); case 0x2a: return ("READ_STREAM_DMA48"); case 0x2b: return ("READ_STREAM48"); case 0x2f: return ("READ_LOG_EXT"); case 0x30: return ("WRITE"); case 0x34: return ("WRITE48"); case 0x35: return ("WRITE_DMA48"); case 0x36: return ("WRITE_DMA_QUEUED48"); case 0x37: return ("SET_MAX_ADDRESS48"); case 0x39: return ("WRITE_MUL48"); case 0x3a: return ("WRITE_STREAM_DMA48"); case 0x3b: return ("WRITE_STREAM48"); case 0x3d: return ("WRITE_DMA_FUA48"); case 0x3e: return ("WRITE_DMA_QUEUED_FUA48"); case 0x3f: return ("WRITE_LOG_EXT"); case 0x40: return ("READ_VERIFY"); case 0x42: return ("READ_VERIFY48"); + case 0x44: return ("ZERO_EXT"); case 0x45: switch (cmd->features) { case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO"); case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED"); } return "WRITE_UNCORRECTABLE48"; + case 0x47: return ("READ_LOG_DMA_EXT"); + case 0x4a: return ("ZAC_MANAGEMENT_IN"); case 0x51: return ("CONFIGURE_STREAM"); case 0x60: return ("READ_FPDMA_QUEUED"); case 0x61: return ("WRITE_FPDMA_QUEUED"); - case 0x63: return ("NCQ_NON_DATA"); - case 0x64: return ("SEND_FPDMA_QUEUED"); - case 0x65: return ("RECEIVE_FPDMA_QUEUED"); + case 0x63: + switch (cmd->features & 0xf) { + case 0x00: return ("NCQ_NON_DATA ABORT NCQ QUEUE"); + case 0x01: return ("NCQ_NON_DATA DEADLINE HANDLING"); + case 0x05: return ("NCQ_NON_DATA SET FEATURES"); + /* + * XXX KDM need common decoding between NCQ and non-NCQ + * versions of SET FEATURES.
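+ * A shared table would also let the feature being set be
+ * named here the way it is for the non-NCQ SET FEATURES
+ * case (0xef) below.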
+ */ + case 0x06: return ("NCQ_NON_DATA ZERO EXT"); + case 0x07: return ("NCQ_NON_DATA ZAC MANAGEMENT OUT"); + } + return ("NCQ_NON_DATA"); + case 0x64: + switch (cmd->sector_count_exp & 0xf) { + case 0x00: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT"); + case 0x02: return ("SEND_FPDMA_QUEUED WRITE LOG DMA EXT"); + case 0x03: return ("SEND_FPDMA_QUEUED ZAC MANAGEMENT OUT"); + case 0x04: return ("SEND_FPDMA_QUEUED DATA SET MANAGEMENT XL"); + } + return ("SEND_FPDMA_QUEUED"); + case 0x65: + switch (cmd->sector_count_exp & 0xf) { + case 0x01: return ("RECEIVE_FPDMA_QUEUED READ LOG DMA EXT"); + case 0x02: return ("RECEIVE_FPDMA_QUEUED ZAC MANAGEMENT IN"); + } + return ("RECEIVE_FPDMA_QUEUED"); case 0x67: if (cmd->features == 0xec) return ("SEP_ATTN IDENTIFY"); switch (cmd->lba_low) { case 0x00: return ("SEP_ATTN READ BUFFER"); case 0x02: return ("SEP_ATTN RECEIVE DIAGNOSTIC RESULTS"); case 0x80: return ("SEP_ATTN WRITE BUFFER"); case 0x82: return ("SEP_ATTN SEND DIAGNOSTIC"); } return ("SEP_ATTN"); case 0x70: return ("SEEK"); case 0x87: return ("CFA_TRANSLATE_SECTOR"); case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC"); case 0x92: return ("DOWNLOAD_MICROCODE"); + case 0x9a: return ("ZAC_MANAGEMENT_OUT"); case 0xa0: return ("PACKET"); case 0xa1: return ("ATAPI_IDENTIFY"); case 0xa2: return ("SERVICE"); case 0xb0: switch(cmd->features) { case 0xd0: return ("SMART READ ATTR VALUES"); case 0xd1: return ("SMART READ ATTR THRESHOLDS"); case 0xd3: return ("SMART SAVE ATTR VALUES"); case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE"); case 0xd5: return ("SMART READ LOG DATA"); case 0xd8: return ("SMART ENABLE OPERATION"); case 0xd9: return ("SMART DISABLE OPERATION"); case 0xda: return ("SMART RETURN STATUS"); } return ("SMART"); case 0xb1: return ("DEVICE CONFIGURATION"); case 0xc0: return ("CFA_ERASE"); case 0xc4: return ("READ_MUL"); case 0xc5: return ("WRITE_MUL"); case 0xc6: return ("SET_MULTI"); case 0xc7: return ("READ_DMA_QUEUED"); case 0xc8: return ("READ_DMA"); case 0xca: return ("WRITE_DMA"); case 0xcc: return ("WRITE_DMA_QUEUED"); case 0xcd: return ("CFA_WRITE_MULTIPLE_WITHOUT_ERASE"); case 0xce: return ("WRITE_MUL_FUA48"); case 0xd1: return ("CHECK_MEDIA_CARD_TYPE"); case 0xda: return ("GET_MEDIA_STATUS"); case 0xde: return ("MEDIA_LOCK"); case 0xdf: return ("MEDIA_UNLOCK"); case 0xe0: return ("STANDBY_IMMEDIATE"); case 0xe1: return ("IDLE_IMMEDIATE"); case 0xe2: return ("STANDBY"); case 0xe3: return ("IDLE"); case 0xe4: return ("READ_BUFFER/PM"); case 0xe5: return ("CHECK_POWER_MODE"); case 0xe6: return ("SLEEP"); case 0xe7: return ("FLUSHCACHE"); case 0xe8: return ("WRITE_PM"); case 0xea: return ("FLUSHCACHE48"); case 0xec: return ("ATA_IDENTIFY"); case 0xed: return ("MEDIA_EJECT"); case 0xef: + /* + * XXX KDM need common decoding between NCQ and non-NCQ + * versions of SET FEATURES. 
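+ * The feature codes decoded below are the SET FEATURES codes
+ * from the ACS specifications; anything unlisted falls through
+ * to the plain "SETFEATURES" string.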
+ */ switch (cmd->features) { - case 0x03: return ("SETFEATURES SET TRANSFER MODE"); - case 0x02: return ("SETFEATURES ENABLE WCACHE"); - case 0x82: return ("SETFEATURES DISABLE WCACHE"); - case 0x06: return ("SETFEATURES ENABLE PUIS"); - case 0x86: return ("SETFEATURES DISABLE PUIS"); - case 0x07: return ("SETFEATURES SPIN-UP"); - case 0x10: return ("SETFEATURES ENABLE SATA FEATURE"); - case 0x90: return ("SETFEATURES DISABLE SATA FEATURE"); - case 0xaa: return ("SETFEATURES ENABLE RCACHE"); - case 0x55: return ("SETFEATURES DISABLE RCACHE"); + case 0x02: return ("SETFEATURES ENABLE WCACHE"); + case 0x03: return ("SETFEATURES SET TRANSFER MODE"); + case 0x04: return ("SETFEATURES ENABLE APM"); + case 0x06: return ("SETFEATURES ENABLE PUIS"); + case 0x07: return ("SETFEATURES SPIN-UP"); + case 0x0b: return ("SETFEATURES ENABLE WRITE READ VERIFY"); + case 0x0c: return ("SETFEATURES ENABLE DEVICE LIFE CONTROL"); + case 0x10: return ("SETFEATURES ENABLE SATA FEATURE"); + case 0x41: return ("SETFEATURES ENABLE FREEFALL CONTROL"); + case 0x43: return ("SETFEATURES SET MAX HOST INT SECT TIMES"); + case 0x45: return ("SETFEATURES SET RATE BASIS"); + case 0x4a: return ("SETFEATURES EXTENDED POWER CONDITIONS"); + case 0x55: return ("SETFEATURES DISABLE RCACHE"); case 0x5d: return ("SETFEATURES ENABLE RELIRQ"); - case 0xdd: return ("SETFEATURES DISABLE RELIRQ"); case 0x5e: return ("SETFEATURES ENABLE SRVIRQ"); + case 0x62: return ("SETFEATURES LONG PHYS SECT ALIGN ERC"); + case 0x63: return ("SETFEATURES DSN"); + case 0x66: return ("SETFEATURES DISABLE DEFAULTS"); + case 0x82: return ("SETFEATURES DISABLE WCACHE"); + case 0x85: return ("SETFEATURES DISABLE APM"); + case 0x86: return ("SETFEATURES DISABLE PUIS"); + case 0x8b: return ("SETFEATURES DISABLE WRITE READ VERIFY"); + case 0x8c: return ("SETFEATURES DISABLE DEVICE LIFE CONTROL"); + case 0x90: return ("SETFEATURES DISABLE SATA FEATURE"); + case 0xaa: return ("SETFEATURES ENABLE RCACHE"); + case 0xC1: return ("SETFEATURES DISABLE FREEFALL CONTROL"); + case 0xC3: return ("SETFEATURES SENSE DATA REPORTING"); + case 0xC4: return ("SETFEATURES NCQ SENSE DATA RETURN"); + case 0xCC: return ("SETFEATURES ENABLE DEFAULTS"); + case 0xdd: return ("SETFEATURES DISABLE RELIRQ"); case 0xde: return ("SETFEATURES DISABLE SRVIRQ"); - } - return "SETFEATURES"; + } + return "SETFEATURES"; case 0xf1: return ("SECURITY_SET_PASSWORD"); case 0xf2: return ("SECURITY_UNLOCK"); case 0xf3: return ("SECURITY_ERASE_PREPARE"); case 0xf4: return ("SECURITY_ERASE_UNIT"); case 0xf5: return ("SECURITY_FREEZE_LOCK"); case 0xf6: return ("SECURITY_DISABLE_PASSWORD"); case 0xf8: return ("READ_NATIVE_MAX_ADDRESS"); case 0xf9: return ("SET_MAX_ADDRESS"); } return "UNKNOWN"; } char * ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, cmd_string, len, SBUF_FIXEDLEN); ata_cmd_sbuf(cmd, &sb); error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } void ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb) { sbuf_printf(sb, "%02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x %02x", cmd->command, cmd->features, cmd->lba_low, cmd->lba_mid, cmd->lba_high, cmd->device, cmd->lba_low_exp, cmd->lba_mid_exp, cmd->lba_high_exp, cmd->features_exp, cmd->sector_count, cmd->sector_count_exp); } char * ata_res_string(struct ata_res *res, char *res_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, res_string, len, SBUF_FIXEDLEN); 
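/* Decode the ATA result registers into the caller-supplied fixed-size buffer. */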
ata_res_sbuf(res, &sb); error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } int ata_res_sbuf(struct ata_res *res, struct sbuf *sb) { sbuf_printf(sb, "%02x %02x %02x %02x " "%02x %02x %02x %02x %02x %02x %02x", res->status, res->error, res->lba_low, res->lba_mid, res->lba_high, res->device, res->lba_low_exp, res->lba_mid_exp, res->lba_high_exp, res->sector_count, res->sector_count_exp); return (0); } /* * ata_command_sbuf() returns 0 for success and -1 for failure. */ int ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb) { sbuf_printf(sb, "%s. ACB: ", ata_op_string(&ataio->cmd)); ata_cmd_sbuf(&ataio->cmd, sb); return(0); } /* * ata_status_sbuf() returns 0 for success and -1 for failure. */ int ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb) { sbuf_printf(sb, "ATA status: %02x (%s%s%s%s%s%s%s%s)", ataio->res.status, (ataio->res.status & 0x80) ? "BSY " : "", (ataio->res.status & 0x40) ? "DRDY " : "", (ataio->res.status & 0x20) ? "DF " : "", (ataio->res.status & 0x10) ? "SERV " : "", (ataio->res.status & 0x08) ? "DRQ " : "", (ataio->res.status & 0x04) ? "CORR " : "", (ataio->res.status & 0x02) ? "IDX " : "", (ataio->res.status & 0x01) ? "ERR" : ""); if (ataio->res.status & 1) { sbuf_printf(sb, ", error: %02x (%s%s%s%s%s%s%s%s)", ataio->res.error, (ataio->res.error & 0x80) ? "ICRC " : "", (ataio->res.error & 0x40) ? "UNC " : "", (ataio->res.error & 0x20) ? "MC " : "", (ataio->res.error & 0x10) ? "IDNF " : "", (ataio->res.error & 0x08) ? "MCR " : "", (ataio->res.error & 0x04) ? "ABRT " : "", (ataio->res.error & 0x02) ? "NM " : "", (ataio->res.error & 0x01) ? "ILI" : ""); } return(0); } void ata_print_ident(struct ata_params *ident_data) { const char *proto; char product[48], revision[16], ata[12], sata[12]; cam_strvis(product, ident_data->model, sizeof(ident_data->model), sizeof(product)); cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision), sizeof(revision)); proto = (ident_data->config == ATA_PROTO_CFA) ? "CFA" : (ident_data->config & ATA_PROTO_ATAPI) ?
"ATAPI" : "ATA"; if (ata_version(ident_data->version_major) == 0) { snprintf(ata, sizeof(ata), "%s", proto); } else if (ata_version(ident_data->version_major) <= 7) { snprintf(ata, sizeof(ata), "%s-%d", proto, ata_version(ident_data->version_major)); } else if (ata_version(ident_data->version_major) == 8) { snprintf(ata, sizeof(ata), "%s8-ACS", proto); } else { snprintf(ata, sizeof(ata), "ACS-%d %s", ata_version(ident_data->version_major) - 7, proto); } if (ident_data->satacapabilities && ident_data->satacapabilities != 0xffff) { if (ident_data->satacapabilities & ATA_SATA_GEN3) snprintf(sata, sizeof(sata), " SATA 3.x"); else if (ident_data->satacapabilities & ATA_SATA_GEN2) snprintf(sata, sizeof(sata), " SATA 2.x"); else if (ident_data->satacapabilities & ATA_SATA_GEN1) snprintf(sata, sizeof(sata), " SATA 1.x"); else snprintf(sata, sizeof(sata), " SATA"); } else sata[0] = 0; printf("<%s %s> %s%s device\n", product, revision, ata, sata); } void ata_print_ident_short(struct ata_params *ident_data) { char product[48], revision[16]; cam_strvis(product, ident_data->model, sizeof(ident_data->model), sizeof(product)); cam_strvis(revision, ident_data->revision, sizeof(ident_data->revision), sizeof(revision)); printf("<%s %s>", product, revision); } void semb_print_ident(struct sep_identify_data *ident_data) { char vendor[9], product[17], revision[5], fw[5], in[7], ins[5]; cam_strvis(vendor, ident_data->vendor_id, 8, sizeof(vendor)); cam_strvis(product, ident_data->product_id, 16, sizeof(product)); cam_strvis(revision, ident_data->product_rev, 4, sizeof(revision)); cam_strvis(fw, ident_data->firmware_rev, 4, sizeof(fw)); cam_strvis(in, ident_data->interface_id, 6, sizeof(in)); cam_strvis(ins, ident_data->interface_rev, 4, sizeof(ins)); printf("<%s %s %s %s> SEMB %s %s device\n", vendor, product, revision, fw, in, ins); } void semb_print_ident_short(struct sep_identify_data *ident_data) { char vendor[9], product[17], revision[5], fw[5]; cam_strvis(vendor, ident_data->vendor_id, 8, sizeof(vendor)); cam_strvis(product, ident_data->product_id, 16, sizeof(product)); cam_strvis(revision, ident_data->product_rev, 4, sizeof(revision)); cam_strvis(fw, ident_data->firmware_rev, 4, sizeof(fw)); printf("<%s %s %s %s>", vendor, product, revision, fw); } uint32_t ata_logical_sector_size(struct ata_params *ident_data) { if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE && (ident_data->pss & ATA_PSS_LSSABOVE512)) { return (((u_int32_t)ident_data->lss_1 | ((u_int32_t)ident_data->lss_2 << 16)) * 2); } return (512); } uint64_t ata_physical_sector_size(struct ata_params *ident_data) { if ((ident_data->pss & ATA_PSS_VALID_MASK) == ATA_PSS_VALID_VALUE) { if (ident_data->pss & ATA_PSS_MULTLS) { return ((uint64_t)ata_logical_sector_size(ident_data) * (1 << (ident_data->pss & ATA_PSS_LSPPS))); } else { return (uint64_t)ata_logical_sector_size(ident_data); } } return (512); } uint64_t ata_logical_sector_offset(struct ata_params *ident_data) { if ((ident_data->lsalign & 0xc000) == 0x4000) { return ((uint64_t)ata_logical_sector_size(ident_data) * (ident_data->lsalign & 0x3fff)); } return (0); } void ata_28bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features, uint32_t lba, uint8_t sector_count) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = 0; if (cmd == ATA_READ_DMA || cmd == ATA_READ_DMA_QUEUED || cmd == ATA_WRITE_DMA || cmd == ATA_WRITE_DMA_QUEUED) ataio->cmd.flags |= CAM_ATAIO_DMA; ataio->cmd.command = cmd; ataio->cmd.features = features; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid 
= lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA | ((lba >> 24) & 0x0f); ataio->cmd.sector_count = sector_count; } void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features, uint64_t lba, uint16_t sector_count) { ataio->cmd.flags = CAM_ATAIO_48BIT; if (cmd == ATA_READ_DMA48 || cmd == ATA_READ_DMA_QUEUED48 || cmd == ATA_READ_STREAM_DMA48 || cmd == ATA_WRITE_DMA48 || cmd == ATA_WRITE_DMA_FUA48 || cmd == ATA_WRITE_DMA_QUEUED48 || cmd == ATA_WRITE_DMA_QUEUED_FUA48 || cmd == ATA_WRITE_STREAM_DMA48 || - cmd == ATA_DATA_SET_MANAGEMENT) + cmd == ATA_DATA_SET_MANAGEMENT || + cmd == ATA_READ_LOG_DMA_EXT) ataio->cmd.flags |= CAM_ATAIO_DMA; ataio->cmd.command = cmd; ataio->cmd.features = features; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid = lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA; ataio->cmd.lba_low_exp = lba >> 24; ataio->cmd.lba_mid_exp = lba >> 32; ataio->cmd.lba_high_exp = lba >> 40; ataio->cmd.features_exp = features >> 8; ataio->cmd.sector_count = sector_count; ataio->cmd.sector_count_exp = sector_count >> 8; ataio->cmd.control = 0; } void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint64_t lba, uint16_t sector_count) { ataio->cmd.flags = CAM_ATAIO_48BIT | CAM_ATAIO_FPDMA; ataio->cmd.command = cmd; ataio->cmd.features = sector_count; ataio->cmd.lba_low = lba; ataio->cmd.lba_mid = lba >> 8; ataio->cmd.lba_high = lba >> 16; ataio->cmd.device = ATA_DEV_LBA; ataio->cmd.lba_low_exp = lba >> 24; ataio->cmd.lba_mid_exp = lba >> 32; ataio->cmd.lba_high_exp = lba >> 40; ataio->cmd.features_exp = sector_count >> 8; ataio->cmd.sector_count = 0; ataio->cmd.sector_count_exp = 0; ataio->cmd.control = 0; } void ata_reset_cmd(struct ccb_ataio *ataio) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT; ataio->cmd.control = 0x04; } void ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_NEEDRESULT; ataio->cmd.command = ATA_READ_PM; ataio->cmd.features = reg; ataio->cmd.device = port & 0x0f; } void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val) { bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = 0; ataio->cmd.command = ATA_WRITE_PM; ataio->cmd.features = reg; ataio->cmd.sector_count = val; ataio->cmd.lba_low = val >> 8; ataio->cmd.lba_mid = val >> 16; ataio->cmd.lba_high = val >> 24; ataio->cmd.device = port & 0x0f; } void +ata_read_log(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint32_t log_address, uint32_t page_number, uint16_t block_count, + uint32_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, + uint32_t timeout) +{ + uint64_t lba; + + cam_fill_ataio(ataio, + /*retries*/ 1, + /*cbfcnp*/ cbfcnp, + /*flags*/ CAM_DIR_IN, + /*tag_action*/ 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ dxfer_len, + /*timeout*/ timeout); + + lba = (((uint64_t)page_number & 0xff00) << 32) | + ((page_number & 0x00ff) << 8) | + (log_address & 0xff); + + ata_48bit_cmd(ataio, + /*cmd*/ (protocol & CAM_ATAIO_DMA) ? 
ATA_READ_LOG_DMA_EXT : + ATA_READ_LOG_EXT, + /*features*/ 0, + /*lba*/ lba, + /*sector_count*/ block_count); +} + +void ata_bswap(int8_t *buf, int len) { u_int16_t *ptr = (u_int16_t*)(buf + len); while (--ptr >= (u_int16_t*)buf) *ptr = be16toh(*ptr); } void ata_btrim(int8_t *buf, int len) { int8_t *ptr; for (ptr = buf; ptr < buf+len; ++ptr) if (!*ptr || *ptr == '_') *ptr = ' '; for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) *ptr = 0; } void ata_bpack(int8_t *src, int8_t *dst, int len) { int i, j, blank; for (i = j = blank = 0 ; i < len; i++) { if (blank && src[i] == ' ') continue; if (blank && src[i] != ' ') { dst[j++] = src[i]; blank = 0; continue; } if (src[i] == ' ') { blank = 1; if (i == 0) continue; } dst[j++] = src[i]; } while (j < len) dst[j++] = 0x00; } int ata_max_pmode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_64_70) { if (ap->apiomodes & 0x02) return ATA_PIO4; if (ap->apiomodes & 0x01) return ATA_PIO3; } if (ap->mwdmamodes & 0x04) return ATA_PIO4; if (ap->mwdmamodes & 0x02) return ATA_PIO3; if (ap->mwdmamodes & 0x01) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) return ATA_PIO2; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) return ATA_PIO1; if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) return ATA_PIO0; return ATA_PIO0; } int ata_max_wmode(struct ata_params *ap) { if (ap->mwdmamodes & 0x04) return ATA_WDMA2; if (ap->mwdmamodes & 0x02) return ATA_WDMA1; if (ap->mwdmamodes & 0x01) return ATA_WDMA0; return -1; } int ata_max_umode(struct ata_params *ap) { if (ap->atavalid & ATA_FLAG_88) { if (ap->udmamodes & 0x40) return ATA_UDMA6; if (ap->udmamodes & 0x20) return ATA_UDMA5; if (ap->udmamodes & 0x10) return ATA_UDMA4; if (ap->udmamodes & 0x08) return ATA_UDMA3; if (ap->udmamodes & 0x04) return ATA_UDMA2; if (ap->udmamodes & 0x02) return ATA_UDMA1; if (ap->udmamodes & 0x01) return ATA_UDMA0; } return -1; } int ata_max_mode(struct ata_params *ap, int maxmode) { if (maxmode == 0) maxmode = ATA_DMA_MAX; if (maxmode >= ATA_UDMA0 && ata_max_umode(ap) > 0) return (min(maxmode, ata_max_umode(ap))); if (maxmode >= ATA_WDMA0 && ata_max_wmode(ap) > 0) return (min(maxmode, ata_max_wmode(ap))); return (min(maxmode, ata_max_pmode(ap))); } char * ata_mode2string(int mode) { switch (mode) { case -1: return "UNSUPPORTED"; case 0: return "NONE"; case ATA_PIO0: return "PIO0"; case ATA_PIO1: return "PIO1"; case ATA_PIO2: return "PIO2"; case ATA_PIO3: return "PIO3"; case ATA_PIO4: return "PIO4"; case ATA_WDMA0: return "WDMA0"; case ATA_WDMA1: return "WDMA1"; case ATA_WDMA2: return "WDMA2"; case ATA_UDMA0: return "UDMA0"; case ATA_UDMA1: return "UDMA1"; case ATA_UDMA2: return "UDMA2"; case ATA_UDMA3: return "UDMA3"; case ATA_UDMA4: return "UDMA4"; case ATA_UDMA5: return "UDMA5"; case ATA_UDMA6: return "UDMA6"; default: if (mode & ATA_DMA_MASK) return "BIOSDMA"; else return "BIOSPIO"; } } int ata_string2mode(char *str) { if (!strcasecmp(str, "PIO0")) return (ATA_PIO0); if (!strcasecmp(str, "PIO1")) return (ATA_PIO1); if (!strcasecmp(str, "PIO2")) return (ATA_PIO2); if (!strcasecmp(str, "PIO3")) return (ATA_PIO3); if (!strcasecmp(str, "PIO4")) return (ATA_PIO4); if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0); if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1); if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2); if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0); if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1); if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1); if 
(!strcasecmp(str, "UDMA2")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2); if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3); if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4); if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5); if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6); if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6); return (-1); } u_int ata_mode2speed(int mode) { switch (mode) { case ATA_PIO0: default: return (3300); case ATA_PIO1: return (5200); case ATA_PIO2: return (8300); case ATA_PIO3: return (11100); case ATA_PIO4: return (16700); case ATA_WDMA0: return (4200); case ATA_WDMA1: return (13300); case ATA_WDMA2: return (16700); case ATA_UDMA0: return (16700); case ATA_UDMA1: return (25000); case ATA_UDMA2: return (33300); case ATA_UDMA3: return (44400); case ATA_UDMA4: return (66700); case ATA_UDMA5: return (100000); case ATA_UDMA6: return (133000); } } u_int ata_revision2speed(int revision) { switch (revision) { case 1: default: return (150000); case 2: return (300000); case 3: return (600000); } } int ata_speed2revision(u_int speed) { switch (speed) { case 0: return (0); case 150000: return (1); case 300000: return (2); case 600000: return (3); default: return (-1); } } int ata_identify_match(caddr_t identbuffer, caddr_t table_entry) { struct scsi_inquiry_pattern *entry; struct ata_params *ident; entry = (struct scsi_inquiry_pattern *)table_entry; ident = (struct ata_params *)identbuffer; if ((cam_strmatch(ident->model, entry->product, sizeof(ident->model)) == 0) && (cam_strmatch(ident->revision, entry->revision, sizeof(ident->revision)) == 0)) { return (0); } return (-1); } int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry) { struct scsi_static_inquiry_pattern *entry; struct ata_params *ident; entry = (struct scsi_static_inquiry_pattern *)table_entry; ident = (struct ata_params *)identbuffer; if ((cam_strmatch(ident->model, entry->product, sizeof(ident->model)) == 0) && (cam_strmatch(ident->revision, entry->revision, sizeof(ident->revision)) == 0)) { return (0); } return (-1); } void semb_receive_diagnostic_results(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, pcv ? page_code : 0, 0x02, length / 4); } void semb_send_diagnostic(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/length ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, length > 0 ? 
data_ptr[0] : 0, 0x82, length / 4); } void semb_read_buffer(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, uint8_t page_code, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, page_code, 0x00, length / 4); } void semb_write_buffer(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t *data_ptr, uint16_t length, uint32_t timeout) { length = min(length, 1020); length = (length + 3) & ~3; cam_fill_ataio(ataio, retries, cbfcnp, /*flags*/length ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, length, timeout); ata_28bit_cmd(ataio, ATA_SEP_ATTN, length > 0 ? data_ptr[0] : 0, 0x80, length / 4); } + +void +ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + int use_ncq, uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr, + uint32_t dxfer_len, uint32_t timeout) +{ + uint8_t command_out, ata_flags; + uint16_t features_out, sectors_out; + uint32_t auxiliary; + + if (use_ncq == 0) { + command_out = ATA_ZAC_MANAGEMENT_OUT; + features_out = (zm_action & 0xf) | (zone_flags << 8); + if (dxfer_len == 0) { + ata_flags = 0; + sectors_out = 0; + } else { + ata_flags = CAM_ATAIO_DMA; + /* XXX KDM use sector count? */ + sectors_out = ((dxfer_len >> 9) & 0xffff); + } + auxiliary = 0; + } else { + if (dxfer_len == 0) { + command_out = ATA_NCQ_NON_DATA; + features_out = ATA_NCQ_ZAC_MGMT_OUT; + sectors_out = 0; + } else { + command_out = ATA_SEND_FPDMA_QUEUED; + + /* Note that we're defaulting to normal priority */ + sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; + + /* + * For SEND FPDMA QUEUED, the transfer length is + * encoded in the FEATURE register, and 0 means + * that 65536 512 byte blocks are to be transferred. + * In practice, it seems unlikely that we'll see + * a transfer that large. + */ + if (dxfer_len == (65536 * 512)) { + features_out = 0; + } else { + /* + * Yes, the caller can theoretically send a + * transfer larger than we can handle. + * Anyone using this function needs enough + * knowledge to avoid doing that. + */ + features_out = ((dxfer_len >> 9) & 0xffff); + } + } + auxiliary = (zm_action & 0xf) | (zone_flags << 8); + + ata_flags = CAM_ATAIO_FPDMA; + } + + cam_fill_ataio(ataio, + /*retries*/ retries, + /*cbfcnp*/ cbfcnp, + /*flags*/ (dxfer_len > 0) ?
CAM_DIR_OUT : CAM_DIR_NONE, + /*tag_action*/ 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ dxfer_len, + /*timeout*/ timeout); + + ata_48bit_cmd(ataio, + /*cmd*/ command_out, + /*features*/ features_out, + /*lba*/ zone_id, + /*sector_count*/ sectors_out); + + ataio->cmd.flags |= ata_flags; + if (auxiliary != 0) { + ataio->ata_flags |= ATA_FLAG_AUX; + ataio->aux = auxiliary; + } +} + +void +ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + int use_ncq, uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, + uint32_t timeout) +{ + uint8_t command_out, ata_flags; + uint16_t features_out, sectors_out; + uint32_t auxiliary; + + if (use_ncq == 0) { + command_out = ATA_ZAC_MANAGEMENT_IN; + /* XXX KDM put a macro here */ + features_out = (zm_action & 0xf) | (zone_flags << 8); + ata_flags = CAM_ATAIO_DMA; + sectors_out = ((dxfer_len >> 9) & 0xffff); + auxiliary = 0; + } else { + command_out = ATA_RECV_FPDMA_QUEUED; + sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; + auxiliary = (zm_action & 0xf) | (zone_flags << 8); + ata_flags = CAM_ATAIO_FPDMA; + /* + * For RECEIVE FPDMA QUEUED, the transfer length is + * encoded in the FEATURE register, and 0 means + * that 65536 512 byte blocks are to be transferred. + * In practice, it is unlikely we will see a transfer that + * large. + */ + if (dxfer_len == (65536 * 512)) { + features_out = 0; + } else { + /* + * Yes, the caller can theoretically request a + * transfer larger than we can handle. + * Anyone using this function needs enough + * knowledge to avoid doing that. + */ + features_out = ((dxfer_len >> 9) & 0xffff); + } + } + + cam_fill_ataio(ataio, + /*retries*/ retries, + /*cbfcnp*/ cbfcnp, + /*flags*/ CAM_DIR_IN, + /*tag_action*/ 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ dxfer_len, + /*timeout*/ timeout); + + ata_48bit_cmd(ataio, + /*cmd*/ command_out, + /*features*/ features_out, + /*lba*/ zone_id, + /*sector_count*/ sectors_out); + + ataio->cmd.flags |= ata_flags; + if (auxiliary != 0) { + ataio->ata_flags |= ATA_FLAG_AUX; + ataio->aux = auxiliary; + } +} Index: head/sys/cam/ata/ata_all.h =================================================================== --- head/sys/cam/ata/ata_all.h (revision 300206) +++ head/sys/cam/ata/ata_all.h (revision 300207) @@ -1,170 +1,187 @@ /*- * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef CAM_ATA_ALL_H #define CAM_ATA_ALL_H 1 #include struct ccb_ataio; struct cam_periph; union ccb; #define SID_DMA48 0x01 /* Abuse inq_flags bit to track enabled DMA48. */ #define SID_AEN 0x04 /* Abuse inq_flags bit to track enabled AEN. */ #define SID_DMA 0x10 /* Abuse inq_flags bit to track enabled DMA. */ struct ata_cmd { u_int8_t flags; /* ATA command flags */ #define CAM_ATAIO_48BIT 0x01 /* Command has 48-bit format */ #define CAM_ATAIO_FPDMA 0x02 /* FPDMA command */ #define CAM_ATAIO_CONTROL 0x04 /* Control, not a command */ #define CAM_ATAIO_NEEDRESULT 0x08 /* Request requires result. */ #define CAM_ATAIO_DMA 0x10 /* DMA command */ u_int8_t command; u_int8_t features; u_int8_t lba_low; u_int8_t lba_mid; u_int8_t lba_high; u_int8_t device; u_int8_t lba_low_exp; u_int8_t lba_mid_exp; u_int8_t lba_high_exp; u_int8_t features_exp; u_int8_t sector_count; u_int8_t sector_count_exp; u_int8_t control; }; struct ata_res { u_int8_t flags; /* ATA command flags */ #define CAM_ATAIO_48BIT 0x01 /* Command has 48-bit format */ u_int8_t status; u_int8_t error; u_int8_t lba_low; u_int8_t lba_mid; u_int8_t lba_high; u_int8_t device; u_int8_t lba_low_exp; u_int8_t lba_mid_exp; u_int8_t lba_high_exp; u_int8_t sector_count; u_int8_t sector_count_exp; }; struct sep_identify_data { uint8_t length; /* Enclosure descriptor length */ uint8_t subenc_id; /* Sub-enclosure identifier */ uint8_t logical_id[8]; /* Enclosure logical identifier (WWN) */ uint8_t vendor_id[8]; /* Vendor identification string */ uint8_t product_id[16]; /* Product identification string */ uint8_t product_rev[4]; /* Product revision string */ uint8_t channel_id; /* Channel identifier */ uint8_t firmware_rev[4];/* Firmware revision */ uint8_t interface_id[6];/* Interface spec ("S-E-S "/"SAF-TE")*/ uint8_t interface_rev[4];/* Interface spec revision */ uint8_t vend_spec[11]; /* Vendor specific information */ }; int ata_version(int ver); char * ata_op_string(struct ata_cmd *cmd); char * ata_cmd_string(struct ata_cmd *cmd, char *cmd_string, size_t len); void ata_cmd_sbuf(struct ata_cmd *cmd, struct sbuf *sb); char * ata_res_string(struct ata_res *res, char *res_string, size_t len); int ata_command_sbuf(struct ccb_ataio *ataio, struct sbuf *sb); int ata_status_sbuf(struct ccb_ataio *ataio, struct sbuf *sb); int ata_res_sbuf(struct ata_res *res, struct sbuf *sb); void ata_print_ident(struct ata_params *ident_data); void ata_print_ident_short(struct ata_params *ident_data); uint32_t ata_logical_sector_size(struct ata_params *ident_data); uint64_t ata_physical_sector_size(struct ata_params *ident_data); uint64_t ata_logical_sector_offset(struct ata_params *ident_data); void ata_28bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint8_t features, uint32_t lba, uint8_t sector_count); void ata_48bit_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint16_t features, uint64_t lba, uint16_t sector_count); void ata_ncq_cmd(struct ccb_ataio *ataio, uint8_t cmd, uint64_t lba, uint16_t sector_count); void ata_reset_cmd(struct ccb_ataio *ataio); void 
ata_pm_read_cmd(struct ccb_ataio *ataio, int reg, int port); void ata_pm_write_cmd(struct ccb_ataio *ataio, int reg, int port, uint32_t val); +void ata_read_log(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint32_t log_address, uint32_t page_number, + uint16_t block_count, uint32_t protocol, + uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout); void ata_bswap(int8_t *buf, int len); void ata_btrim(int8_t *buf, int len); void ata_bpack(int8_t *src, int8_t *dst, int len); int ata_max_pmode(struct ata_params *ap); int ata_max_wmode(struct ata_params *ap); int ata_max_umode(struct ata_params *ap); int ata_max_mode(struct ata_params *ap, int maxmode); char * ata_mode2string(int mode); int ata_string2mode(char *str); u_int ata_mode2speed(int mode); u_int ata_revision2speed(int revision); int ata_speed2revision(u_int speed); int ata_identify_match(caddr_t identbuffer, caddr_t table_entry); int ata_static_identify_match(caddr_t identbuffer, caddr_t table_entry); void semb_print_ident(struct sep_identify_data *ident_data); void semb_print_ident_short(struct sep_identify_data *ident_data); void semb_receive_diagnostic_results(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint32_t timeout); void semb_send_diagnostic(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length, uint32_t timeout); void semb_read_buffer(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint32_t timeout); void semb_write_buffer(struct ccb_ataio *ataio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t *data_ptr, uint16_t param_list_length, + uint32_t timeout); + +void ata_zac_mgmt_out(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + int use_ncq, uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint16_t sector_count, uint8_t *data_ptr, + uint32_t dxfer_len, uint32_t timeout); + +void ata_zac_mgmt_in(struct ccb_ataio *ataio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + int use_ncq, uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, uint32_t timeout); #endif Index: head/sys/cam/ata/ata_da.c =================================================================== --- head/sys/cam/ata/ata_da.c (revision 300206) +++ head/sys/cam/ata/ata_da.c (revision 300207) @@ -1,2428 +1,3478 @@ /*- * Copyright (c) 2009 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
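The ata_read_log() helper declared above is what the new LOGDIR/IDDIR probe states in ata_da.c are built on. A minimal sketch of fetching the General Purpose Log directory, assuming log address 0x00 for the directory (a literal here, not a constant defined in this header) and a hypothetical ccb and callback:

	struct ata_gp_log_dir *logdir;

	logdir = malloc(sizeof(*logdir), M_ATADA, M_NOWAIT | M_ZERO);
	if (logdir != NULL) {
		ata_read_log(&ccb->ataio,
		    /*retries*/ 1,
		    /*cbfcnp*/ adadone,
		    /*log_address*/ 0x00,	/* the GP log directory itself */
		    /*page_number*/ 0,
		    /*block_count*/ 1,		/* one 512-byte log page */
		    /*protocol*/ 0,		/* PIO; CAM_ATAIO_DMA if supported */
		    /*data_ptr*/ (uint8_t *)logdir,
		    /*dxfer_len*/ sizeof(*logdir),
		    /*timeout*/ 30 * 1000);
	}

(M_ATADA is the malloc type that ata_da.c defines below.)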
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ada.h" #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include +#include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include +#include +#include #include #include #include #include /* geometry translation */ #ifdef _KERNEL #define ATA_MAX_28BIT_LBA 268435455UL extern int iosched_debug; typedef enum { ADA_STATE_RAHEAD, ADA_STATE_WCACHE, + ADA_STATE_LOGDIR, + ADA_STATE_IDDIR, + ADA_STATE_SUP_CAP, + ADA_STATE_ZONE, ADA_STATE_NORMAL } ada_state; typedef enum { - ADA_FLAG_CAN_48BIT = 0x0002, - ADA_FLAG_CAN_FLUSHCACHE = 0x0004, - ADA_FLAG_CAN_NCQ = 0x0008, - ADA_FLAG_CAN_DMA = 0x0010, - ADA_FLAG_NEED_OTAG = 0x0020, - ADA_FLAG_WAS_OTAG = 0x0040, - ADA_FLAG_CAN_TRIM = 0x0080, - ADA_FLAG_OPEN = 0x0100, - ADA_FLAG_SCTX_INIT = 0x0200, - ADA_FLAG_CAN_CFA = 0x0400, - ADA_FLAG_CAN_POWERMGT = 0x0800, - ADA_FLAG_CAN_DMA48 = 0x1000, - ADA_FLAG_DIRTY = 0x2000, - ADA_FLAG_CAN_NCQ_TRIM = 0x4000, /* CAN_TRIM also set */ - ADA_FLAG_PIM_CAN_NCQ_TRIM = 0x8000 + ADA_FLAG_CAN_48BIT = 0x00000002, + ADA_FLAG_CAN_FLUSHCACHE = 0x00000004, + ADA_FLAG_CAN_NCQ = 0x00000008, + ADA_FLAG_CAN_DMA = 0x00000010, + ADA_FLAG_NEED_OTAG = 0x00000020, + ADA_FLAG_WAS_OTAG = 0x00000040, + ADA_FLAG_CAN_TRIM = 0x00000080, + ADA_FLAG_OPEN = 0x00000100, + ADA_FLAG_SCTX_INIT = 0x00000200, + ADA_FLAG_CAN_CFA = 0x00000400, + ADA_FLAG_CAN_POWERMGT = 0x00000800, + ADA_FLAG_CAN_DMA48 = 0x00001000, + ADA_FLAG_CAN_LOG = 0x00002000, + ADA_FLAG_CAN_IDLOG = 0x00004000, + ADA_FLAG_CAN_SUPCAP = 0x00008000, + ADA_FLAG_CAN_ZONE = 0x00010000, + ADA_FLAG_CAN_WCACHE = 0x00020000, + ADA_FLAG_CAN_RAHEAD = 0x00040000, + ADA_FLAG_PROBED = 0x00080000, + ADA_FLAG_ANNOUNCED = 0x00100000, + ADA_FLAG_DIRTY = 0x00200000, + ADA_FLAG_CAN_NCQ_TRIM = 0x00400000, /* CAN_TRIM also set */ + ADA_FLAG_PIM_ATA_EXT = 0x00800000 } ada_flags; typedef enum { ADA_Q_NONE = 0x00, ADA_Q_4K = 0x01, ADA_Q_NCQ_TRIM_BROKEN = 0x02, } ada_quirks; #define ADA_Q_BIT_STRING \ "\020" \ "\0014K" \ "\002NCQ_TRIM_BROKEN" typedef enum { ADA_CCB_RAHEAD = 0x01, ADA_CCB_WCACHE = 0x02, ADA_CCB_BUFFER_IO = 0x03, ADA_CCB_DUMP = 0x05, ADA_CCB_TRIM = 0x06, + ADA_CCB_LOGDIR = 0x07, + ADA_CCB_IDDIR = 0x08, + ADA_CCB_SUP_CAP = 0x09, + ADA_CCB_ZONE = 0x0a, ADA_CCB_TYPE_MASK = 0x0F, } ada_ccb_state; +typedef enum { + ADA_ZONE_NONE = 0x00, + ADA_ZONE_DRIVE_MANAGED = 0x01, + ADA_ZONE_HOST_AWARE = 0x02, + ADA_ZONE_HOST_MANAGED = 0x03 +} ada_zone_mode; + +typedef enum { + ADA_ZONE_FLAG_RZ_SUP = 0x0001, + ADA_ZONE_FLAG_OPEN_SUP = 0x0002, + ADA_ZONE_FLAG_CLOSE_SUP = 0x0004, + ADA_ZONE_FLAG_FINISH_SUP = 0x0008, + ADA_ZONE_FLAG_RWP_SUP = 0x0010, + ADA_ZONE_FLAG_SUP_MASK = 
(ADA_ZONE_FLAG_RZ_SUP | + ADA_ZONE_FLAG_OPEN_SUP | + ADA_ZONE_FLAG_CLOSE_SUP | + ADA_ZONE_FLAG_FINISH_SUP | + ADA_ZONE_FLAG_RWP_SUP), + ADA_ZONE_FLAG_URSWRZ = 0x0020, + ADA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, + ADA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, + ADA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, + ADA_ZONE_FLAG_SET_MASK = (ADA_ZONE_FLAG_OPT_SEQ_SET | + ADA_ZONE_FLAG_OPT_NONSEQ_SET | + ADA_ZONE_FLAG_MAX_SEQ_SET) +} ada_zone_flags; + +static struct ada_zone_desc { + ada_zone_flags value; + const char *desc; +} ada_zone_desc_table[] = { + {ADA_ZONE_FLAG_RZ_SUP, "Report Zones" }, + {ADA_ZONE_FLAG_OPEN_SUP, "Open" }, + {ADA_ZONE_FLAG_CLOSE_SUP, "Close" }, + {ADA_ZONE_FLAG_FINISH_SUP, "Finish" }, + {ADA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, +}; + + /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 typedef enum { ADA_DELETE_NONE, ADA_DELETE_DISABLE, ADA_DELETE_CFA_ERASE, ADA_DELETE_DSM_TRIM, ADA_DELETE_NCQ_DSM_TRIM, ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE, ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM, } ada_delete_methods; static const char *ada_delete_method_names[] = { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" }; #if 0 static const char *ada_delete_method_desc[] = { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" }; #endif struct disk_params { u_int8_t heads; u_int8_t secs_per_track; u_int32_t cylinders; u_int32_t secsize; /* Number of bytes/logical sector */ u_int64_t sectors; /* Total number sectors */ }; #define TRIM_MAX_BLOCKS 8 #define TRIM_MAX_RANGES (TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES) struct trim_request { uint8_t data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE]; TAILQ_HEAD(, bio) bps; }; struct ada_softc { struct cam_iosched_softc *cam_iosched; int outstanding_cmds; /* Number of active commands */ int refcount; /* Active xpt_action() calls */ ada_state state; ada_flags flags; + ada_zone_mode zone_mode; + ada_zone_flags zone_flags; + struct ata_gp_log_dir ata_logdir; + int valid_logdir_len; + struct ata_identify_log_pages ata_iddir; + int valid_iddir_len; + uint64_t optimal_seq_zones; + uint64_t optimal_nonseq_zones; + uint64_t max_seq_zones; ada_quirks quirks; ada_delete_methods delete_method; int trim_max_ranges; int read_ahead; int write_cache; int unmappedio; int rotating; #ifdef ADA_TEST_FAILURE int force_read_error; int force_write_error; int periodic_read_error; int periodic_read_count; #endif struct disk_params params; struct disk *disk; struct task sysctl_task; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; struct trim_request trim_req; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int timeouts; u_int errors; u_int invalidations; #endif }; struct ada_quirk_entry { struct scsi_inquiry_pattern inq_pat; ada_quirks quirks; }; static struct ada_quirk_entry ada_quirk_table[] = { { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" }, 
/*quirks*/ADA_Q_4K }, { /* Seagate Barracuda Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" }, /*quirks*/ADA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green/Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Red Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????EX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" }, /*quirks*/ADA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" }, /*quirks*/ADA_Q_4K }, /* SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Corsair Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" 
}, /*quirks*/ADA_Q_4K }, { /* * Crucial M500 SSDs MU07 firmware * NCQ Trim works */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" }, /*quirks*/0 }, { /* * Crucial M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial M550 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial MX100 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/ADA_Q_4K }, { /* * FCCT M500 SSDs * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Micron M500 SSDs firmware MU07 * NCQ Trim works? 
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" }, /*quirks*/0 }, { /* * Micron M500 SSDs all other firmware * NCQ Trim doesn't work */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * Micron M5[15]0 SSDs * NCQ Trim doesn't work, but only on MU01 firmware */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" }, /*quirks*/ADA_Q_NCQ_TRIM_BROKEN }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" }, /*quirks*/ADA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 840 SSDs * 4k optimised, NCQ TRIM Broken (normal TRIM is fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung 850 SSDs * 4k optimised, NCQ TRIM broken (normal TRIM fine) */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised, NCQ believed to be working */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" }, /*quirks*/ADA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * 4k optimised, NCQ believed to be broken since these * appear to be built with the same controllers as the 840/850.
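For context on how ADA_Q_NCQ_TRIM_BROKEN is consumed: condensed from adasetflags() later in this change (softc and cgd as in that function), NCQ TRIM is enabled only when the quirk list, the SIM, and the drive all agree:

	if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
	    (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 &&
	    (cgd->ident_data.satacapabilities2 &
	     ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0)
		softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
	else
		softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;

A quirk entry above can therefore only veto NCQ TRIM; plain DSM TRIM is unaffected.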
*/ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * Samsung PM851 Series SSDs Dell OEM * device model "SAMSUNG SSD PM851 mSATA 256GB" * 4k optimised, NCQ broken */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" }, /*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" }, /*quirks*/ADA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" }, /*quirks*/ADA_Q_4K }, { /* Default */ { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, /*vendor*/"*", /*product*/"*", /*revision*/"*" }, /*quirks*/0 }, }; static disk_strategy_t adastrategy; static dumper_t adadump; static periph_init_t adainit; +static void adadiskgonecb(struct disk *dp); +static periph_oninv_t adaoninvalidate; +static periph_dtor_t adacleanup; static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); +static int adazonemodesysctl(SYSCTL_HANDLER_ARGS); +static int adazonesupsysctl(SYSCTL_HANDLER_ARGS); static void adasysctlinit(void *context, int pending); +static int adagetattr(struct bio *bp); +static void adasetflags(struct ada_softc *softc, + struct ccb_getdev *cgd); static periph_ctor_t adaregister; -static periph_dtor_t adacleanup; +static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, + struct ccb_ataio *ataio); +static void ada_cfaerase(struct ada_softc *softc, struct bio *bp, + struct ccb_ataio *ataio); +static int ada_zone_bio_to_ata(int disk_zone_cmd); +static int ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, + struct bio *bp, int *queue_ccb); static periph_start_t adastart; -static periph_oninv_t adaoninvalidate; +static void adaprobedone(struct cam_periph *periph, union ccb *ccb); +static void adazonedone(struct cam_periph *periph, union ccb *ccb); static void adadone(struct cam_periph *periph, union ccb *done_ccb); static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd); static timeout_t adasendorderedtag; static void adashutdown(void *arg, int howto); static void adasuspend(void *arg); static void adaresume(void *arg); #ifndef ADA_DEFAULT_LEGACY_ALIASES #define ADA_DEFAULT_LEGACY_ALIASES 1 #endif #ifndef ADA_DEFAULT_TIMEOUT #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ #endif #ifndef ADA_DEFAULT_RETRY #define ADA_DEFAULT_RETRY 4 #endif #ifndef ADA_DEFAULT_SEND_ORDERED #define ADA_DEFAULT_SEND_ORDERED 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SHUTDOWN #define ADA_DEFAULT_SPINDOWN_SHUTDOWN 1 #endif #ifndef ADA_DEFAULT_SPINDOWN_SUSPEND #define ADA_DEFAULT_SPINDOWN_SUSPEND 1 #endif #ifndef ADA_DEFAULT_READ_AHEAD #define ADA_DEFAULT_READ_AHEAD 1 #endif #ifndef ADA_DEFAULT_WRITE_CACHE #define ADA_DEFAULT_WRITE_CACHE 1 #endif #define ADA_RA (softc->read_ahead >= 0 ? \ softc->read_ahead : ada_read_ahead) #define ADA_WC (softc->write_cache >= 0 ? \ softc->write_cache : ada_write_cache) /* * Most platforms map firmware geometry to actual, but some don't. If * not overridden, default to nothing. 
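The ADA_RA and ADA_WC macros above implement a per-unit tri-state. A hypothetical walk-through of the three values softc->read_ahead can take (write_cache behaves the same way):

	softc->read_ahead = -1;	/* unset: ADA_RA falls back to the global
				   ada_read_ahead tunable */
	softc->read_ahead = 0;	/* kern.cam.ada.N.read_ahead=0: force off */
	softc->read_ahead = 1;	/* kern.cam.ada.N.read_ahead=1: force on */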
*/ #ifndef ata_disk_firmware_geom_adjust #define ata_disk_firmware_geom_adjust(disk) #endif static int ada_retry_count = ADA_DEFAULT_RETRY; static int ada_default_timeout = ADA_DEFAULT_TIMEOUT; static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED; static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN; static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND; static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD; static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE; static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN, &ada_retry_count, 0, "Normal I/O retry count"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &ada_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &ada_send_ordered, 0, "Send Ordered Tags"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN, &ada_spindown_shutdown, 0, "Spin down upon shutdown"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN, &ada_spindown_suspend, 0, "Spin down upon suspend"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN, &ada_read_ahead, 0, "Enable disk read-ahead"); SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN, &ada_write_cache, 0, "Enable disk write cache"); /* * ADA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. */ #ifndef ADA_ORDEREDTAG_INTERVAL #define ADA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver adadriver = { adainit, "ada", TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0 }; static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS); PERIPHDRIVER_DECLARE(ada, adadriver); +static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers"); + static int adaopen(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return(ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaopen\n")); softc = (struct ada_softc *)periph->softc; softc->flags |= ADA_FLAG_OPEN; cam_periph_unhold(periph); cam_periph_unlock(periph); return (0); } static int adaclose(struct disk *dp) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("adaclose\n")); /* We only sync the cache if the drive is capable of it. 
*/ if ((softc->flags & ADA_FLAG_DIRTY) != 0 && (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 && (periph->flags & CAM_PERIPH_INVALID) == 0 && cam_periph_hold(periph, PRIBIO) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/0, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); else softc->flags &= ~ADA_FLAG_DIRTY; xpt_release_ccb(ccb); cam_periph_unhold(periph); } softc->flags &= ~ADA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void adaschedule(struct cam_periph *periph) { struct ada_softc *softc = (struct ada_softc *)periph->softc; if (softc->state != ADA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void adastrategy(struct bio *bp) { struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp)); /* * If the device has been made invalid, error out */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } + + /* + * Zone commands must be ordered, because they can depend on the + * effects of previously issued commands, and they may affect + * commands after them. + */ + if (bp->bio_cmd == BIO_ZONE) + bp->bio_flags |= BIO_ORDERED; /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. 
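The BIO_ORDERED promotion just added to adastrategy() is the key scheduling change for zoned devices: a FINISH ZONE or RESET WRITE POINTER moves the write pointer that subsequent writes depend on, so zone bios must not be reordered around other I/O. A hypothetical caller-side view (g_alloc_bio() as in GEOM; the zone argument structure comes from the new sys/disk_zone.h):

	struct bio *bp = g_alloc_bio();

	bp->bio_cmd = BIO_ZONE;		/* zone op; arguments in bp->bio_zone */
	/* ... fill in the disk_zone_args ... */

	/* adastrategy() then does the equivalent of: */
	if (bp->bio_cmd == BIO_ZONE)
		bp->bio_flags |= BIO_ORDERED;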
*/ adaschedule(periph); cam_periph_unlock(periph); return; } static int adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct ada_softc *softc; u_int secsize; union ccb ccb; struct disk *dp; uint64_t lba; uint16_t count; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct ada_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; lba = offset / secsize; count = length / secsize; if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } if (length > 0) { xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ccb.ataio, 0, adadone, CAM_DIR_OUT, 0, (u_int8_t *) virtual, length, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count >= 256)) { ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48, 0, lba, count); } else { ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA, 0, lba, count); } xpt_polled_action(&ccb); error = cam_periph_error(&ccb, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) { xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); /* * Tell the drive to flush its internal cache. if we * can't flush in 5s we have big problems. No need to * wait the default 60s to detect problems. */ ccb.ccb_h.ccb_state = ADA_CCB_DUMP; cam_fill_ataio(&ccb.ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0, 5*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0); xpt_polled_action(&ccb); error = cam_periph_error(&ccb, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static void adainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("ada: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (ada_send_ordered) { /* Register our event handlers */ if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(power_resume, adaresume, NULL, EVENTHANDLER_PRI_LAST)) == NULL) printf("adainit: power event registration failed!\n"); if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("adainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void adadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void adaoninvalidate(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; /* * De-register any async callbacks. 
*/ xpt_register_async(0, adaasync, periph, periph->path); #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); disk_gone(softc->disk); } static void adacleanup(struct cam_periph *periph) { struct ada_softc *softc; softc = (struct ada_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... */ if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void adasetdeletemethod(struct ada_softc *softc) { if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM; else if (softc->flags & ADA_FLAG_CAN_TRIM) softc->delete_method = ADA_DELETE_DSM_TRIM; else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) softc->delete_method = ADA_DELETE_CFA_ERASE; else softc->delete_method = ADA_DELETE_NONE; } static void adaasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct ccb_getdev cgd; struct cam_periph *periph; struct ada_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_ATA) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(adaregister, adaoninvalidate, adacleanup, adastart, "ada", CAM_PERIPH_BIO, path, adaasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("adaasync: Unable to attach to new device " "due to status 0x%x\n", status); break; } case AC_GETDEV_CHANGED: { softc = (struct ada_softc *)periph->softc; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); - if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) && - (cgd.inq_flags & SID_DMA)) - softc->flags |= ADA_FLAG_CAN_DMA; - else - softc->flags &= ~ADA_FLAG_CAN_DMA; - if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) { - softc->flags |= ADA_FLAG_CAN_48BIT; - if (cgd.inq_flags & SID_DMA48) - softc->flags |= ADA_FLAG_CAN_DMA48; - else - softc->flags &= ~ADA_FLAG_CAN_DMA48; - } else - softc->flags &= ~(ADA_FLAG_CAN_48BIT | - ADA_FLAG_CAN_DMA48); - if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) && - (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue)) - softc->flags |= ADA_FLAG_CAN_NCQ; - else - softc->flags &= ~ADA_FLAG_CAN_NCQ; + /* + * Set/clear support flags based on the new Identify data. + */ + adasetflags(softc, &cgd); - if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) && - (cgd.inq_flags & SID_DMA)) { - softc->flags |= ADA_FLAG_CAN_TRIM; - /* - * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do - * NCQ trims, if we support trims at all. We also need support from - * the sim do do things properly. Perhaps we should look at log 13 - * dword 0 bit 0 and dword 1 bit 0 are set too... 
- */ - if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 && - (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 && - (cgd.ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 && - (softc->flags & ADA_FLAG_CAN_TRIM) != 0) - softc->flags |= ADA_FLAG_CAN_NCQ_TRIM; - else - softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; - } else - softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM); - adasetdeletemethod(softc); - cam_periph_async(periph, code, path, arg); break; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct ada_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_SENT_BDR: case AC_BUS_RESET: { softc = (struct ada_softc *)periph->softc; cam_periph_async(periph, code, path, arg); if (softc->state != ADA_STATE_NORMAL) break; xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); - if (ADA_RA >= 0 && - cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) + if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) softc->state = ADA_STATE_RAHEAD; - else if (ADA_WC >= 0 && - cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) + else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) softc->state = ADA_STATE_WCACHE; + else if (softc->flags & ADA_FLAG_CAN_LOG) + softc->state = ADA_STATE_LOGDIR; else break; if (cam_periph_acquire(periph) != CAM_REQ_CMP) softc->state = ADA_STATE_NORMAL; else xpt_schedule(periph, CAM_PRIORITY_DEV); } default: cam_periph_async(periph, code, path, arg); break; } } +static int +adazonemodesysctl(SYSCTL_HANDLER_ARGS) +{ + char tmpbuf[40]; + struct ada_softc *softc; + int error; + + softc = (struct ada_softc *)arg1; + + switch (softc->zone_mode) { + case ADA_ZONE_DRIVE_MANAGED: + snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); + break; + case ADA_ZONE_HOST_AWARE: + snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); + break; + case ADA_ZONE_HOST_MANAGED: + snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); + break; + case ADA_ZONE_NONE: + default: + snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); + break; + } + + error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); + + return (error); +} + +static int +adazonesupsysctl(SYSCTL_HANDLER_ARGS) +{ + char tmpbuf[180]; + struct ada_softc *softc; + struct sbuf sb; + int error, first; + unsigned int i; + + softc = (struct ada_softc *)arg1; + + error = 0; + first = 1; + sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); + + for (i = 0; i < sizeof(ada_zone_desc_table) / + sizeof(ada_zone_desc_table[0]); i++) { + if (softc->zone_flags & ada_zone_desc_table[i].value) { + if (first == 0) + sbuf_printf(&sb, ", "); + else + first = 0; + sbuf_cat(&sb, ada_zone_desc_table[i].desc); + } + } + + if (first == 1) + sbuf_printf(&sb, "None"); + + sbuf_finish(&sb); + + error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); + + return (error); +} + + static void adasysctlinit(void *context, int pending) { struct cam_periph *periph; struct ada_softc *softc; char tmpstr[80], tmpstr2[80]; periph = (struct cam_periph *)context; /* periph was held for us when this task was enqueued */ if ((periph->flags & CAM_PERIPH_INVALID) != 0) { cam_periph_release(periph); return; } softc = (struct ada_softc *)periph->softc; - snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number); + snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d",periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d",
periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= ADA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (softc->sysctl_tree == NULL) { printf("adasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW, softc, 0, adadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->read_ahead, 0, "Enable disk read ahead."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->write_cache, 0, "Enable disk write cache."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->rotating, 0, "Rotating media"); + SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), + OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, + softc, 0, adazonemodesysctl, "A", + "Zone Mode"); + SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), + OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, + softc, 0, adazonesupsysctl, "A", + "Zone Support"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, + "Optimal Number of Open Sequential Write Preferred Zones"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "optimal_nonseq_zones", CTLFLAG_RD, + &softc->optimal_nonseq_zones, + "Optimal Number of Non-Sequentially Written Sequential Write " + "Preferred Zones"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, + "Maximum Number of Open Sequential Write Required Zones"); + #ifdef ADA_TEST_FAILURE /* * Add a 'door bell' sysctl which allows one to set it from userland * and cause something bad to happen. For the moment, we only allow * whacking the next read or write. 
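The zone sysctls added above are read-only. A minimal userland sketch of inspecting one of them (unit 0 assumed; the node exists only once an ada instance has attached):

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		char mode[40];
		size_t len = sizeof(mode);

		if (sysctlbyname("kern.cam.ada.0.zone_mode", mode, &len,
		    NULL, 0) == 0)
			printf("ada0 zone mode: %s\n", mode);
		return (0);
	}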
*/ SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_read_error, 0, "Force a read error for the next N reads."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->force_write_error, 0, "Force a write error for the next N writes."); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE, &softc->periodic_read_error, 0, "Force a read error every N reads (don't set too low)."); #endif #ifdef CAM_IO_STATS softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->errors, 0, "Transport errors reported by the SIM."); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE, &softc->invalidations, 0, "Device pack invalidations."); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int adagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct ada_softc *softc; int i, error, value, methods; softc = (struct ada_softc *)arg1; value = softc->delete_method; if (value < 0 || value > ADA_DELETE_MAX) p = "UNKNOWN"; else p = ada_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = 1 << ADA_DELETE_DISABLE; if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) methods |= 1 << ADA_DELETE_CFA_ERASE; if (softc->flags & ADA_FLAG_CAN_TRIM) methods |= 1 << ADA_DELETE_DSM_TRIM; if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM; for (i = 0; i <= ADA_DELETE_MAX; i++) { if (!(methods & (1 << i)) || strcmp(buf, ada_delete_method_names[i]) != 0) continue; softc->delete_method = i; return (0); } return (EINVAL); } +static void +adasetflags(struct ada_softc *softc, struct ccb_getdev *cgd) +{ + if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) && + (cgd->inq_flags & SID_DMA)) + softc->flags |= ADA_FLAG_CAN_DMA; + else + softc->flags &= ~ADA_FLAG_CAN_DMA; + + if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) { + softc->flags |= ADA_FLAG_CAN_48BIT; + if (cgd->inq_flags & SID_DMA48) + softc->flags |= ADA_FLAG_CAN_DMA48; + else + softc->flags &= ~ADA_FLAG_CAN_DMA48; + } else + softc->flags &= ~(ADA_FLAG_CAN_48BIT | ADA_FLAG_CAN_DMA48); + + if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE) + softc->flags |= ADA_FLAG_CAN_FLUSHCACHE; + else + softc->flags &= ~ADA_FLAG_CAN_FLUSHCACHE; + + if (cgd->ident_data.support.command1 & 
ATA_SUPPORT_POWERMGT) + softc->flags |= ADA_FLAG_CAN_POWERMGT; + else + softc->flags &= ~ADA_FLAG_CAN_POWERMGT; + + if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) && + (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue)) + softc->flags |= ADA_FLAG_CAN_NCQ; + else + softc->flags &= ~ADA_FLAG_CAN_NCQ; + + if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) && + (cgd->inq_flags & SID_DMA)) { + softc->flags |= ADA_FLAG_CAN_TRIM; + softc->trim_max_ranges = TRIM_MAX_RANGES; + if (cgd->ident_data.max_dsm_blocks != 0) { + softc->trim_max_ranges = + min(cgd->ident_data.max_dsm_blocks * + ATA_DSM_BLK_RANGES, softc->trim_max_ranges); + } + /* + * If we can do RCVSND_FPDMA_QUEUED commands, we may be able + * to do NCQ trims, if we support trims at all. We also need + * support from the SIM to do things properly. Perhaps we + * should also check that log 13 dword 0 bit 0 and dword 1 + * bit 0 are set too... + */ + if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 && + (softc->flags & ADA_FLAG_PIM_ATA_EXT) != 0 && + (cgd->ident_data.satacapabilities2 & + ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 && + (softc->flags & ADA_FLAG_CAN_TRIM) != 0) + softc->flags |= ADA_FLAG_CAN_NCQ_TRIM; + else + softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; + } else + softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM); + + if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA) + softc->flags |= ADA_FLAG_CAN_CFA; + else + softc->flags &= ~ADA_FLAG_CAN_CFA; + + /* + * Now that we've set the appropriate flags, set up the delete + * method. + */ + adasetdeletemethod(softc); + + if (cgd->ident_data.support.extension & ATA_SUPPORT_GENLOG) + softc->flags |= ADA_FLAG_CAN_LOG; + else + softc->flags &= ~ADA_FLAG_CAN_LOG; + + if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == + ATA_SUPPORT_ZONE_HOST_AWARE) + softc->zone_mode = ADA_ZONE_HOST_AWARE; + else if ((cgd->ident_data.support3 & ATA_SUPPORT_ZONE_MASK) == + ATA_SUPPORT_ZONE_DEV_MANAGED) + softc->zone_mode = ADA_ZONE_DRIVE_MANAGED; + else + softc->zone_mode = ADA_ZONE_NONE; + + if (cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) + softc->flags |= ADA_FLAG_CAN_RAHEAD; + else + softc->flags &= ~ADA_FLAG_CAN_RAHEAD; + + if (cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) + softc->flags |= ADA_FLAG_CAN_WCACHE; + else + softc->flags &= ~ADA_FLAG_CAN_WCACHE; +} + static cam_status adaregister(struct cam_periph *periph, void *arg) { struct ada_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char announce_buf[80]; struct disk_params *dp; caddr_t match; u_int maxio; int quirks; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("adaregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("adaregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("adaregister: Unable to probe new device.
" "Unable to allocate iosched memory\n"); return(CAM_REQ_CMP_ERR); } - if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) && - (cgd->inq_flags & SID_DMA)) - softc->flags |= ADA_FLAG_CAN_DMA; - if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) { - softc->flags |= ADA_FLAG_CAN_48BIT; - if (cgd->inq_flags & SID_DMA48) - softc->flags |= ADA_FLAG_CAN_DMA48; - } - if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE) - softc->flags |= ADA_FLAG_CAN_FLUSHCACHE; - if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT) - softc->flags |= ADA_FLAG_CAN_POWERMGT; - if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) && - (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue)) - softc->flags |= ADA_FLAG_CAN_NCQ; - if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) && - (cgd->inq_flags & SID_DMA)) { - softc->flags |= ADA_FLAG_CAN_TRIM; - softc->trim_max_ranges = TRIM_MAX_RANGES; - if (cgd->ident_data.max_dsm_blocks != 0) { - softc->trim_max_ranges = - min(cgd->ident_data.max_dsm_blocks * - ATA_DSM_BLK_RANGES, softc->trim_max_ranges); - } - } - if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA) - softc->flags |= ADA_FLAG_CAN_CFA; + /* + * Set support flags based on the Identify data. + */ + adasetflags(softc, cgd); - adasetdeletemethod(softc); - periph->softc = softc; /* * See if this device has any quirks. */ match = cam_quirkmatch((caddr_t)&cgd->ident_data, (caddr_t)ada_quirk_table, nitems(ada_quirk_table), sizeof(*ada_quirk_table), ata_identify_match); if (match != NULL) softc->quirks = ((struct ada_quirk_entry *)match)->quirks; else softc->quirks = ADA_Q_NONE; bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph); /* * Register this media as a disk */ (void)cam_periph_hold(periph, PRIBIO); cam_periph_unlock(periph); snprintf(announce_buf, sizeof(announce_buf), "kern.cam.ada.%d.quirks", periph->unit_number); quirks = softc->quirks; TUNABLE_INT_FETCH(announce_buf, &quirks); softc->quirks = quirks; softc->read_ahead = -1; snprintf(announce_buf, sizeof(announce_buf), "kern.cam.ada.%d.read_ahead", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead); softc->write_cache = -1; snprintf(announce_buf, sizeof(announce_buf), "kern.cam.ada.%d.write_cache", periph->unit_number); TUNABLE_INT_FETCH(announce_buf, &softc->write_cache); /* Disable queue sorting for non-rotational media by default. */ if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) { softc->rotating = 0; } else { softc->rotating = 1; } cam_iosched_set_sort_queue(softc->cam_iosched, softc->rotating ? 
-1 : 0); adagetparams(periph, cgd); softc->disk = disk_alloc(); softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate; softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, softc->params.secsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = adaopen; softc->disk->d_close = adaclose; softc->disk->d_strategy = adastrategy; softc->disk->d_getattr = adagetattr; softc->disk->d_dump = adadump; softc->disk->d_gone = adadiskgonecb; softc->disk->d_name = "ada"; softc->disk->d_drv1 = periph; maxio = cpi.maxio; /* Honor max I/O size of SIM */ if (maxio == 0) maxio = DFLTPHYS; /* traditional default */ else if (maxio > MAXPHYS) maxio = MAXPHYS; /* for safety */ if (softc->flags & ADA_FLAG_CAN_48BIT) maxio = min(maxio, 65536 * softc->params.secsize); else /* 28bit ATA command limit */ maxio = min(maxio, 256 * softc->params.secsize); softc->disk->d_maxsize = maxio; softc->disk->d_unit = periph->unit_number; - softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION; + softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if (softc->flags & ADA_FLAG_CAN_TRIM) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = softc->params.secsize * ATA_DSM_RANGE_MAX * softc->trim_max_ranges; } else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT)) { softc->disk->d_flags |= DISKFLAG_CANDELETE; softc->disk->d_delmaxsize = 256 * softc->params.secsize; } else softc->disk->d_delmaxsize = maxio; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; softc->unmappedio = 1; } - /* - * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do - * NCQ trims, if we support trims at all. We also need support from - * the sim do do things properly. Perhaps we should look at log 13 - * dword 0 bit 0 and dword 1 bit 0 are set too... 
- */ - if (cpi.hba_misc & PIM_ATA_EXT) - softc->flags |= ADA_FLAG_PIM_CAN_NCQ_TRIM; - if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 && - (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 && - (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 && - (softc->flags & ADA_FLAG_CAN_TRIM) != 0) - softc->flags |= ADA_FLAG_CAN_NCQ_TRIM; strlcpy(softc->disk->d_descr, cgd->ident_data.model, MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model))); strlcpy(softc->disk->d_ident, cgd->ident_data.serial, MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial))); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = (off_t)softc->params.sectors * softc->params.secsize; if (ata_physical_sector_size(&cgd->ident_data) != softc->params.secsize) { softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data); softc->disk->d_stripeoffset = (softc->disk->d_stripesize - ata_logical_sector_offset(&cgd->ident_data)) % softc->disk->d_stripesize; } else if (softc->quirks & ADA_Q_4K) { softc->disk->d_stripesize = 4096; softc->disk->d_stripeoffset = 0; } softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; ata_disk_firmware_geom_adjust(softc->disk); - adasetdeletemethod(softc); /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * adadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); - cam_periph_unhold(periph); dp = &softc->params; snprintf(announce_buf, sizeof(announce_buf), "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, ADA_Q_BIT_STRING); /* * Create our sysctl variables, now that we know * we have successfully attached. */ if (cam_periph_acquire(periph) == CAM_REQ_CMP) taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); /* * Add async callbacks for bus reset and * bus device reset calls. I don't bother * checking if this fails as, in most cases, * the system will function just fine without * them and the only alternative would be to * not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED, adaasync, periph, periph->path); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. 
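For orientation, adaregister() now hands off to a longer probe chain instead of going straight to ADA_STATE_NORMAL. A condensed, hypothetical rendering of the sequencing (the real transitions live in adastart()/adadone()):

	/*
	 * RAHEAD -> WCACHE -> LOGDIR -> IDDIR -> SUP_CAP -> ZONE -> NORMAL
	 *
	 * Each state is entered only if the matching ADA_FLAG_CAN_* bit
	 * is set; adaprobedone() performs the final transition.
	 */
	if (softc->flags & ADA_FLAG_CAN_LOG)
		softc->state = ADA_STATE_LOGDIR;	/* read the GP log directory */
	else
		adaprobedone(periph, NULL);		/* nothing left to probe */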
*/ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); - if (ADA_RA >= 0 && - cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) { + if (ADA_RA >= 0 && softc->flags & ADA_FLAG_CAN_RAHEAD) { softc->state = ADA_STATE_RAHEAD; - } else if (ADA_WC >= 0 && - cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) { + } else if (ADA_WC >= 0 && softc->flags & ADA_FLAG_CAN_WCACHE) { softc->state = ADA_STATE_WCACHE; + } else if (softc->flags & ADA_FLAG_CAN_LOG) { + softc->state = ADA_STATE_LOGDIR; } else { - softc->state = ADA_STATE_NORMAL; + /* + * Nothing to probe, so we can just transition to the + * normal state. + */ + adaprobedone(periph, NULL); return(CAM_REQ_CMP); } - if (cam_periph_acquire(periph) != CAM_REQ_CMP) - softc->state = ADA_STATE_NORMAL; - else - xpt_schedule(periph, CAM_PRIORITY_DEV); + + xpt_schedule(periph, CAM_PRIORITY_DEV); + return(CAM_REQ_CMP); } static int ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req) { uint64_t lastlba = (uint64_t)-1; int c, lastcount = 0, off, ranges = 0; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); do { uint64_t lba = bp->bio_pblkno; int count = bp->bio_bcount / softc->params.secsize; /* Try to extend the previous range. */ if (lba == lastlba) { c = min(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * ATA_DSM_RANGE_SIZE; req->data[off + 6] = lastcount & 0xff; req->data[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = min(count, ATA_DSM_RANGE_MAX); off = ranges * ATA_DSM_RANGE_SIZE; req->data[off + 0] = lba & 0xff; req->data[off + 1] = (lba >> 8) & 0xff; req->data[off + 2] = (lba >> 16) & 0xff; req->data[off + 3] = (lba >> 24) & 0xff; req->data[off + 4] = (lba >> 32) & 0xff; req->data[off + 5] = (lba >> 40) & 0xff; req->data[off + 6] = c & 0xff; req->data[off + 7] = (c >> 8) & 0xff; lba += c; count -= c; lastcount = c; ranges++; /* * It's the caller's responsibility to ensure the * request will fit so we don't need to check for * overrun here */ } lastlba = lba; TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); bp = cam_iosched_next_trim(softc->cam_iosched); if (bp == NULL) break; if (bp->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp); break; } } while (1); return (ranges); } static void ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT, ATA_DSM_TRIM, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); } static void ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; int ranges; ranges = ada_dsmtrim_req_create(softc, bp, req); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_OUT, 0, req->data, howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE, ada_default_timeout * 1000); ata_ncq_cmd(ataio, ATA_SEND_FPDMA_QUEUED, 0, howmany(ranges, ATA_DSM_BLK_RANGES)); ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM; ataio->ata_flags |= ATA_FLAG_AUX; ataio->aux = 1; } static void ada_cfaerase(struct ada_softc *softc, struct
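/*
 * A note on the wire format built by ada_dsmtrim_req_create() above:
 * each range entry is ATA_DSM_RANGE_SIZE (8) bytes, a 48-bit starting
 * LBA in bytes 0-5 (little endian) followed by a 16-bit sector count
 * in bytes 6-7, so one 512-byte DSM block (ATA_DSM_BLK_SIZE) carries
 * ATA_DSM_BLK_RANGES (64) ranges and a single range can describe at
 * most ATA_DSM_RANGE_MAX (65535) sectors.  A minimal sketch of the
 * packing, using a hypothetical helper name for illustration only:
 *
 *	static void
 *	dsm_pack_range(uint8_t *d, uint64_t lba, uint16_t count)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 6; i++)
 *			d[i] = (lba >> (8 * i)) & 0xff;
 *		d[6] = count & 0xff;
 *		d[7] = (count >> 8) & 0xff;
 *	}
 *
 * For example, a delete of 70000 sectors at LBA 0x1000 becomes two
 * entries: (0x1000, 65535) and (0x1000 + 65535, 4465).
 */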
bio *bp, struct ccb_ataio *ataio) { struct trim_request *req = &softc->trim_req; uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; bzero(req, sizeof(*req)); TAILQ_INIT(&req->bps); TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue); cam_fill_ataio(ataio, ada_retry_count, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (count >= 256) count = 0; ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count); } +static int +ada_zone_bio_to_ata(int disk_zone_cmd) +{ + switch (disk_zone_cmd) { + case DISK_ZONE_OPEN: + return ATA_ZM_OPEN_ZONE; + case DISK_ZONE_CLOSE: + return ATA_ZM_CLOSE_ZONE; + case DISK_ZONE_FINISH: + return ATA_ZM_FINISH_ZONE; + case DISK_ZONE_RWP: + return ATA_ZM_RWP; + } + + return -1; +} + +static int +ada_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, + int *queue_ccb) +{ + struct ada_softc *softc; + int error; + + error = 0; + + if (bp->bio_cmd != BIO_ZONE) { + error = EINVAL; + goto bailout; + } + + softc = periph->softc; + + switch (bp->bio_zone.zone_cmd) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: { + int zone_flags; + int zone_sa; + uint64_t lba; + + zone_sa = ada_zone_bio_to_ata(bp->bio_zone.zone_cmd); + if (zone_sa == -1) { + xpt_print(periph->path, "Cannot translate zone " + "cmd %#x to ATA\n", bp->bio_zone.zone_cmd); + error = EINVAL; + goto bailout; + } + + zone_flags = 0; + lba = bp->bio_zone.zone_params.rwp.id; + + if (bp->bio_zone.zone_params.rwp.flags & + DISK_ZONE_RWP_FLAG_ALL) + zone_flags |= ZBC_OUT_ALL; + + ata_zac_mgmt_out(&ccb->ataio, + /*retries*/ ada_retry_count, + /*cbfcnp*/ adadone, + /*use_ncq*/ (softc->flags & + ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, + /*zm_action*/ zone_sa, + /*zone_id*/ lba, + /*zone_flags*/ zone_flags, + /*sector_count*/ 0, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*timeout*/ ada_default_timeout * 1000); + *queue_ccb = 1; + + break; + } + case DISK_ZONE_REPORT_ZONES: { + uint8_t *rz_ptr; + uint32_t num_entries, alloc_size; + struct disk_zone_report *rep; + + rep = &bp->bio_zone.zone_params.report; + + num_entries = rep->entries_allocated; + if (num_entries == 0) { + xpt_print(periph->path, "No entries allocated for " + "Report Zones request\n"); + error = EINVAL; + goto bailout; + } + alloc_size = sizeof(struct scsi_report_zones_hdr) + + (sizeof(struct scsi_report_zones_desc) * num_entries); + alloc_size = min(alloc_size, softc->disk->d_maxsize); + rz_ptr = malloc(alloc_size, M_ATADA, M_NOWAIT | M_ZERO); + if (rz_ptr == NULL) { + xpt_print(periph->path, "Unable to allocate memory " + "for Report Zones request\n"); + error = ENOMEM; + goto bailout; + } + + ata_zac_mgmt_in(&ccb->ataio, + /*retries*/ ada_retry_count, + /*cbfcnp*/ adadone, + /*use_ncq*/ (softc->flags & + ADA_FLAG_PIM_ATA_EXT) ? 1 : 0, + /*zm_action*/ ATA_ZM_REPORT_ZONES, + /*zone_id*/ rep->starting_id, + /*zone_flags*/ rep->rep_options, + /*data_ptr*/ rz_ptr, + /*dxfer_len*/ alloc_size, + /*timeout*/ ada_default_timeout * 1000); + + /* + * For BIO_ZONE, this isn't normally needed. However, it + * is used by devstat_end_transaction_bio() to determine + * how much data was transferred. + */ + /* + * XXX KDM we have a problem. But I'm not sure how to fix + * it. devstat uses bio_bcount - bio_resid to calculate + * the amount of data transferred. The GEOM disk code + * uses bio_length - bio_resid to calculate the amount of + * data in bio_completed. We have different structure + * sizes above and below the ada(4) driver.
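+ * As a concrete illustration: the buffer filled above is sized in
+ * units of struct scsi_report_zones_desc (plus a header), while the
+ * consumer's view of the same transfer is sized in units of struct
+ * disk_zone_rep_entry; the two descriptors are different sizes, so a
+ * residual computed against one layer is wrong for the other.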
So, if we + * use the sizes above, the amount transferred won't be + * quite accurate for devstat. If we use different sizes + * for bio_bcount and bio_length (above and below + * respectively), then the residual needs to match one or + * the other. Everything is calculated after the bio + * leaves the driver, so changing the values around isn't + * really an option. For now, just set the count to the + * passed in length. This means that the calculations + * above (e.g. bio_completed) will be correct, but the + * amount of data reported to devstat will be slightly + * under or overstated. + */ + bp->bio_bcount = bp->bio_length; + + *queue_ccb = 1; + + break; + } + case DISK_ZONE_GET_PARAMS: { + struct disk_zone_disk_params *params; + + params = &bp->bio_zone.zone_params.disk_params; + bzero(params, sizeof(*params)); + + switch (softc->zone_mode) { + case ADA_ZONE_DRIVE_MANAGED: + params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; + break; + case ADA_ZONE_HOST_AWARE: + params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; + break; + case ADA_ZONE_HOST_MANAGED: + params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; + break; + default: + case ADA_ZONE_NONE: + params->zone_mode = DISK_ZONE_MODE_NONE; + break; + } + + if (softc->zone_flags & ADA_ZONE_FLAG_URSWRZ) + params->flags |= DISK_ZONE_DISK_URSWRZ; + + if (softc->zone_flags & ADA_ZONE_FLAG_OPT_SEQ_SET) { + params->optimal_seq_zones = softc->optimal_seq_zones; + params->flags |= DISK_ZONE_OPT_SEQ_SET; + } + + if (softc->zone_flags & ADA_ZONE_FLAG_OPT_NONSEQ_SET) { + params->optimal_nonseq_zones = + softc->optimal_nonseq_zones; + params->flags |= DISK_ZONE_OPT_NONSEQ_SET; + } + + if (softc->zone_flags & ADA_ZONE_FLAG_MAX_SEQ_SET) { + params->max_seq_zones = softc->max_seq_zones; + params->flags |= DISK_ZONE_MAX_SEQ_SET; + } + if (softc->zone_flags & ADA_ZONE_FLAG_RZ_SUP) + params->flags |= DISK_ZONE_RZ_SUP; + + if (softc->zone_flags & ADA_ZONE_FLAG_OPEN_SUP) + params->flags |= DISK_ZONE_OPEN_SUP; + + if (softc->zone_flags & ADA_ZONE_FLAG_CLOSE_SUP) + params->flags |= DISK_ZONE_CLOSE_SUP; + + if (softc->zone_flags & ADA_ZONE_FLAG_FINISH_SUP) + params->flags |= DISK_ZONE_FINISH_SUP; + + if (softc->zone_flags & ADA_ZONE_FLAG_RWP_SUP) + params->flags |= DISK_ZONE_RWP_SUP; + break; + } + default: + break; + } +bailout: + return (error); +} + static void adastart(struct cam_periph *periph, union ccb *start_ccb) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct ccb_ataio *ataio = &start_ccb->ataio; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n")); switch (softc->state) { case ADA_STATE_NORMAL: { struct bio *bp; u_int8_t tag_code; bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { xpt_release_ccb(start_ccb); break; } if ((bp->bio_flags & BIO_ORDERED) != 0 || (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) { softc->flags &= ~ADA_FLAG_NEED_OTAG; softc->flags |= ADA_FLAG_WAS_OTAG; tag_code = 0; } else { tag_code = 1; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { uint64_t lba = bp->bio_pblkno; uint16_t count = bp->bio_bcount / softc->params.secsize; void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= ADA_FLAG_DIRTY; rw_op = CAM_DIR_OUT; } else { rw_op = CAM_DIR_IN; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= CAM_DATA_BIO; data_ptr = bp; } #ifdef ADA_TEST_FAILURE int fail = 0; /* * Support the failure ioctls. 
If the command is a * read, and there are pending forced read errors, or * if a write and pending write errors, then fail this * operation with EIO. This is useful for testing * purposes. Also, support having every Nth read fail. * * This is a rather blunt tool. */ if (bp->bio_cmd == BIO_READ) { if (softc->force_read_error) { softc->force_read_error--; fail = 1; } if (softc->periodic_read_error > 0) { if (++softc->periodic_read_count >= softc->periodic_read_error) { softc->periodic_read_count = 0; fail = 1; } } } else { if (softc->force_write_error) { softc->force_write_error--; fail = 1; } } if (fail) { biofinish(bp, NULL, EIO); xpt_release_ccb(start_ccb); adaschedule(periph); return; } #endif KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_bcount + bp->bio_ma_offset) / PAGE_SIZE == bp->bio_ma_n, ("Short bio %p", bp)); cam_fill_ataio(ataio, ada_retry_count, adadone, rw_op, 0, data_ptr, bp->bio_bcount, ada_default_timeout*1000); if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) { if (bp->bio_cmd == BIO_READ) { ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED, lba, count); } else { ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED, lba, count); } } else if ((softc->flags & ADA_FLAG_CAN_48BIT) && (lba + count >= ATA_MAX_28BIT_LBA || count > 256)) { if (softc->flags & ADA_FLAG_CAN_DMA48) { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_DMA48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_DMA48, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_48bit_cmd(ataio, ATA_READ_MUL48, 0, lba, count); } else { ata_48bit_cmd(ataio, ATA_WRITE_MUL48, 0, lba, count); } } } else { if (count == 256) count = 0; if (softc->flags & ADA_FLAG_CAN_DMA) { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_DMA, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_DMA, 0, lba, count); } } else { if (bp->bio_cmd == BIO_READ) { ata_28bit_cmd(ataio, ATA_READ_MUL, 0, lba, count); } else { ata_28bit_cmd(ataio, ATA_WRITE_MUL, 0, lba, count); } } } break; } case BIO_DELETE: switch (softc->delete_method) { case ADA_DELETE_NCQ_DSM_TRIM: ada_ncq_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_DSM_TRIM: ada_dsmtrim(softc, bp, ataio); break; case ADA_DELETE_CFA_ERASE: ada_cfaerase(softc, bp, ataio); break; default: biofinish(bp, NULL, EOPNOTSUPP); xpt_release_ccb(start_ccb); adaschedule(periph); return; } start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM; start_ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); goto out; case BIO_FLUSH: cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0); break; + case BIO_ZONE: { + int error, queue_ccb; + + queue_ccb = 0; + + error = ada_zone_cmd(periph, start_ccb, bp, &queue_ccb); + if ((error != 0) + || (queue_ccb == 0)) { + biofinish(bp, NULL, error); + xpt_release_ccb(start_ccb); + return; + } + break; } + } start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; out: start_ccb->ccb_h.ccb_bp = bp; softc->outstanding_cmds++; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ adaschedule(periph); break; } case ADA_STATE_RAHEAD: case ADA_STATE_WCACHE: { cam_fill_ataio(ataio, 1, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->state == ADA_STATE_RAHEAD) { ata_28bit_cmd(ataio, 
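/*
 * An illustrative trace of the read/write dispatch above, with values
 * invented for the example: a 128KB BIO_READ at a low LBA is
 * count = 131072 / 512 = 256 sectors.  With NCQ and a simple tag it
 * becomes ATA_READ_FPDMA_QUEUED.  Without NCQ, the 48-bit opcodes are
 * chosen only when count > 256 or lba + count >= ATA_MAX_28BIT_LBA;
 * otherwise the 28-bit path is taken, where count == 256 is encoded
 * as 0 because the sector count field is only 8 bits wide.
 */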
ATA_SETFEATURES, ADA_RA ? ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD; } else { ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ? ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0); start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE; } start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; xpt_action(start_ccb); break; } + case ADA_STATE_LOGDIR: + { + struct ata_gp_log_dir *log_dir; + + if ((softc->flags & ADA_FLAG_CAN_LOG) == 0) { + adaprobedone(periph, start_ccb); + break; + } + + log_dir = malloc(sizeof(*log_dir), M_ATADA, M_NOWAIT|M_ZERO); + if (log_dir == NULL) { + xpt_print(periph->path, "Couldn't malloc log_dir " + "data\n"); + softc->state = ADA_STATE_NORMAL; + xpt_release_ccb(start_ccb); + break; + } + + + ata_read_log(ataio, + /*retries*/1, + /*cbfcnp*/adadone, + /*log_address*/ ATA_LOG_DIRECTORY, + /*page_number*/ 0, + /*block_count*/ 1, + /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? + CAM_ATAIO_DMA : 0, + /*data_ptr*/ (uint8_t *)log_dir, + /*dxfer_len*/sizeof(*log_dir), + /*timeout*/ada_default_timeout*1000); + + start_ccb->ccb_h.ccb_state = ADA_CCB_LOGDIR; + xpt_action(start_ccb); + break; } + case ADA_STATE_IDDIR: + { + struct ata_identify_log_pages *id_dir; + + id_dir = malloc(sizeof(*id_dir), M_ATADA, M_NOWAIT | M_ZERO); + if (id_dir == NULL) { + xpt_print(periph->path, "Couldn't malloc id_dir " + "data\n"); + adaprobedone(periph, start_ccb); + break; + } + + ata_read_log(ataio, + /*retries*/1, + /*cbfcnp*/adadone, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_PAGE_LIST, + /*block_count*/ 1, + /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? + CAM_ATAIO_DMA : 0, + /*data_ptr*/ (uint8_t *)id_dir, + /*dxfer_len*/ sizeof(*id_dir), + /*timeout*/ada_default_timeout*1000); + + start_ccb->ccb_h.ccb_state = ADA_CCB_IDDIR; + xpt_action(start_ccb); + break; + } + case ADA_STATE_SUP_CAP: + { + struct ata_identify_log_sup_cap *sup_cap; + + sup_cap = malloc(sizeof(*sup_cap), M_ATADA, M_NOWAIT|M_ZERO); + if (sup_cap == NULL) { + xpt_print(periph->path, "Couldn't malloc sup_cap " + "data\n"); + adaprobedone(periph, start_ccb); + break; + } + + ata_read_log(ataio, + /*retries*/1, + /*cbfcnp*/adadone, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_SUP_CAP, + /*block_count*/ 1, + /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? + CAM_ATAIO_DMA : 0, + /*data_ptr*/ (uint8_t *)sup_cap, + /*dxfer_len*/ sizeof(*sup_cap), + /*timeout*/ada_default_timeout*1000); + + start_ccb->ccb_h.ccb_state = ADA_CCB_SUP_CAP; + xpt_action(start_ccb); + break; + } + case ADA_STATE_ZONE: + { + struct ata_zoned_info_log *ata_zone; + + ata_zone = malloc(sizeof(*ata_zone), M_ATADA, M_NOWAIT|M_ZERO); + if (ata_zone == NULL) { + xpt_print(periph->path, "Couldn't malloc ata_zone " + "data\n"); + adaprobedone(periph, start_ccb); + break; + } + + ata_read_log(ataio, + /*retries*/1, + /*cbfcnp*/adadone, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_ZDI, + /*block_count*/ 1, + /*protocol*/ softc->flags & ADA_FLAG_CAN_DMA ? 
+ CAM_ATAIO_DMA : 0, + /*data_ptr*/ (uint8_t *)ata_zone, + /*dxfer_len*/ sizeof(*ata_zone), + /*timeout*/ada_default_timeout*1000); + + start_ccb->ccb_h.ccb_state = ADA_CCB_ZONE; + xpt_action(start_ccb); + break; + } + } } static void +adaprobedone(struct cam_periph *periph, union ccb *ccb) +{ + struct ada_softc *softc; + + softc = (struct ada_softc *)periph->softc; + + if (ccb != NULL) + xpt_release_ccb(ccb); + + softc->state = ADA_STATE_NORMAL; + softc->flags |= ADA_FLAG_PROBED; + adaschedule(periph); + if ((softc->flags & ADA_FLAG_ANNOUNCED) == 0) { + softc->flags |= ADA_FLAG_ANNOUNCED; + cam_periph_unhold(periph); + } else { + cam_periph_release_locked(periph); + } +} + +static void +adazonedone(struct cam_periph *periph, union ccb *ccb) +{ + struct ada_softc *softc; + struct bio *bp; + + softc = periph->softc; + bp = (struct bio *)ccb->ccb_h.ccb_bp; + + switch (bp->bio_zone.zone_cmd) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: + break; + case DISK_ZONE_REPORT_ZONES: { + uint32_t avail_len; + struct disk_zone_report *rep; + struct scsi_report_zones_hdr *hdr; + struct scsi_report_zones_desc *desc; + struct disk_zone_rep_entry *entry; + uint32_t num_alloced, hdr_len, num_avail; + uint32_t num_to_fill, i; + + rep = &bp->bio_zone.zone_params.report; + avail_len = ccb->ataio.dxfer_len - ccb->ataio.resid; + /* + * Note that bio_resid isn't normally used for zone + * commands, but it is used by devstat_end_transaction_bio() + * to determine how much data was transferred. Because + * the size of the SCSI/ATA data structures is different + * than the size of the BIO interface structures, the + * amount of data actually transferred from the drive will + * be different than the amount of data transferred to + * the user. + */ + num_alloced = rep->entries_allocated; + hdr = (struct scsi_report_zones_hdr *)ccb->ataio.data_ptr; + if (avail_len < sizeof(*hdr)) { + /* + * Is there a better error than EIO here? We asked + * for at least the header, and we got less than + * that. + */ + bp->bio_error = EIO; + bp->bio_flags |= BIO_ERROR; + bp->bio_resid = bp->bio_bcount; + break; + } + + hdr_len = le32dec(hdr->length); + if (hdr_len > 0) + rep->entries_available = hdr_len / sizeof(*desc); + else + rep->entries_available = 0; + /* + * NOTE: using the same values for the BIO version of the + * same field as the SCSI/ATA values. This means we could + * get some additional values that aren't defined in bio.h + * if more values of the same field are defined later. + */ + rep->header.same = hdr->byte4 & SRZ_SAME_MASK; + rep->header.maximum_lba = le64dec(hdr->maximum_lba); + /* + * If the drive reports no entries that match the query, + * we're done. + */ + if (hdr_len == 0) { + rep->entries_filled = 0; + bp->bio_resid = bp->bio_bcount; + break; + } + + num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), + hdr_len / sizeof(*desc)); + /* + * If the drive didn't return any data, then we're done. + */ + if (num_avail == 0) { + rep->entries_filled = 0; + bp->bio_resid = bp->bio_bcount; + break; + } + + num_to_fill = min(num_avail, rep->entries_allocated); + /* + * If the user didn't allocate any entries for us to fill, + * we're done. 
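+ * An illustrative walk through the clamping above, with numbers
+ * invented for the example: if the drive reports
+ * hdr_len = 100 * sizeof(*desc) but only
+ * sizeof(*hdr) + 64 * sizeof(*desc) bytes actually arrived, then
+ * num_avail = min(64, 100) = 64; with rep->entries_allocated = 32,
+ * num_to_fill = min(64, 32) = 32 entries are copied out.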
+ */ + if (num_to_fill == 0) { + rep->entries_filled = 0; + bp->bio_resid = bp->bio_bcount; + break; + } + + for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0]; + i < num_to_fill; i++, desc++, entry++) { + /* + * NOTE: we're mapping the values here directly + * from the SCSI/ATA bit definitions to the bio.h + * definitions. There is also a warning in + * disk_zone.h, but the impact is that if + * additional values are added in the SCSI/ATA + * specs these will be visible to consumers of + * this interface. + */ + entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; + entry->zone_condition = + (desc->zone_flags & SRZ_ZONE_COND_MASK) >> + SRZ_ZONE_COND_SHIFT; + entry->zone_flags |= desc->zone_flags & + (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); + entry->zone_length = le64dec(desc->zone_length); + entry->zone_start_lba = le64dec(desc->zone_start_lba); + entry->write_pointer_lba = + le64dec(desc->write_pointer_lba); + } + rep->entries_filled = num_to_fill; + /* + * Note that this residual is accurate from the user's + * standpoint, but the amount transferred isn't accurate + * from the standpoint of what actually came back from the + * drive. + */ + bp->bio_resid = bp->bio_bcount - (num_to_fill * sizeof(*entry)); + break; + } + case DISK_ZONE_GET_PARAMS: + default: + /* + * In theory we should not get a GET_PARAMS bio, since it + * should be handled without queueing the command to the + * drive. + */ + panic("%s: Invalid zone command %d", __func__, + bp->bio_zone.zone_cmd); + break; + } + + if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) + free(ccb->ataio.data_ptr, M_ATADA); +} + + +static void adadone(struct cam_periph *periph, union ccb *done_ccb) { struct ada_softc *softc; struct ccb_ataio *ataio; - struct ccb_getdev *cgd; struct cam_path *path; + uint32_t priority; int state; softc = (struct ada_softc *)periph->softc; ataio = &done_ccb->ataio; path = done_ccb->ccb_h.path; + priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n")); state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK; switch (state) { case ADA_CCB_BUFFER_IO: case ADA_CCB_TRIM: { struct bio *bp; int error; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { error = adaerror(done_ccb, 0, 0); if (error == ERESTART) { /* A retry was scheduled, so just return. */ cam_periph_unlock(periph); return; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); /* * If we get an error on an NCQ DSM TRIM, fall back * to a non-NCQ DSM TRIM forever. Please note that if * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too. * However, for this one trim, we treat it as advisory * and return success up the stack. 
*/ if (state == ADA_CCB_TRIM && error != 0 && (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) { softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM; error = 0; adasetdeletemethod(softc); } } else { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); + error = 0; } bp->bio_error = error; if (error != 0) { bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } else { - if (state == ADA_CCB_TRIM) + if (bp->bio_cmd == BIO_ZONE) + adazonedone(periph, done_ccb); + else if (state == ADA_CCB_TRIM) bp->bio_resid = 0; else bp->bio_resid = ataio->resid; - if (bp->bio_resid > 0) + + if ((bp->bio_resid > 0) + && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; } softc->outstanding_cmds--; if (softc->outstanding_cmds == 0) softc->flags |= ADA_FLAG_WAS_OTAG; cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == ADA_CCB_TRIM) { TAILQ_HEAD(, bio) queue; struct bio *bp1; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue); /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * trim_running set to 0 before the call above to allow * other I/O to progress when many BIO_DELETE requests * are pushed down. We set trim_running to 0 and call * adaschedule again so that we don't stall if there are * no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); adaschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = error; if (error != 0) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { adaschedule(periph); cam_periph_unlock(periph); biodone(bp); } return; } case ADA_CCB_RAHEAD: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { -out: /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the reference on the peripheral. * The peripheral will only go away once the last reference * is removed, and we need it around for the CCB release * operation.
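 * For reference, the full probe chain is RAHEAD -> WCACHE -> LOGDIR
 * -> IDDIR -> SUP_CAP -> ZONE; each state is entered only when the
 * corresponding feature flag is set, and adaprobedone() makes the
 * final transition to ADA_STATE_NORMAL.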
*/ - cgd = (struct ccb_getdev *)done_ccb; - xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL); - cgd->ccb_h.func_code = XPT_GDEV_TYPE; - xpt_action((union ccb *)cgd); - if (ADA_WC >= 0 && - cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) { - softc->state = ADA_STATE_WCACHE; - xpt_release_ccb(done_ccb); - xpt_schedule(periph, CAM_PRIORITY_DEV); - goto out; - } - softc->state = ADA_STATE_NORMAL; + xpt_release_ccb(done_ccb); + softc->state = ADA_STATE_WCACHE; + xpt_schedule(periph, priority); /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); - adaschedule(periph); - cam_periph_release_locked(periph); return; } case ADA_CCB_WCACHE: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (adaerror(done_ccb, 0, 0) == ERESTART) { - goto out; + /* Drop freeze taken due to CAM_DEV_QFREEZE */ + cam_release_devq(path, 0, 0, 0, FALSE); + return; } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { cam_release_devq(path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } - softc->state = ADA_STATE_NORMAL; - /* - * Since our peripheral may be invalidated by an error - * above or an external event, we must release our CCB - * before releasing the reference on the peripheral. - * The peripheral will only go away once the last reference - * is removed, and we need it around for the CCB release - * operation. - */ - xpt_release_ccb(done_ccb); /* Drop freeze taken due to CAM_DEV_QFREEZE */ cam_release_devq(path, 0, 0, 0, FALSE); - adaschedule(periph); - cam_periph_release_locked(periph); + + if (softc->flags & ADA_FLAG_CAN_LOG) { + xpt_release_ccb(done_ccb); + softc->state = ADA_STATE_LOGDIR; + xpt_schedule(periph, priority); + } else { + adaprobedone(periph, done_ccb); + } + return; + } + case ADA_CCB_LOGDIR: + { + int error; + + if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + error = 0; + softc->valid_logdir_len = 0; + bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); + softc->valid_logdir_len = + ataio->dxfer_len - ataio->resid; + if (softc->valid_logdir_len > 0) + bcopy(ataio->data_ptr, &softc->ata_logdir, + min(softc->valid_logdir_len, + sizeof(softc->ata_logdir))); + /* + * Figure out whether the Identify Device log is + * supported. The General Purpose log directory + * has a header, and lists the number of pages + * available for each GP log identified by the + * offset into the list. + */ + if ((softc->valid_logdir_len >= + ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) + && (le16dec(softc->ata_logdir.header) == + ATA_GP_LOG_DIR_VERSION) + && (le16dec(&softc->ata_logdir.num_pages[ + (ATA_IDENTIFY_DATA_LOG * + sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ + softc->flags |= ADA_FLAG_CAN_IDLOG; + } else { + softc->flags &= ~ADA_FLAG_CAN_IDLOG; + } + } else { + error = adaerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA log directory, + * then ATA logs are effectively not + * supported even if the bit is set in the + * identify data. 
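+ * For context, the GP log directory parsed above is a single
+ * 512-byte page: its first 16-bit word is the version
+ * (ATA_GP_LOG_DIR_VERSION) and word N holds the page count for GP
+ * log address N.  Since num_pages[] begins after the 2-byte header,
+ * the byte offset of log N within num_pages[] is (N * 2) - 2, which
+ * is what the ATA_IDENTIFY_DATA_LOG lookup above computes.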
+ */ + softc->flags &= ~(ADA_FLAG_CAN_LOG | + ADA_FLAG_CAN_IDLOG); + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + + + } + + free(ataio->data_ptr, M_ATADA); + + if ((error == 0) + && (softc->flags & ADA_FLAG_CAN_IDLOG)) { + softc->state = ADA_STATE_IDDIR; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + } else + adaprobedone(periph, done_ccb); + + return; + } + case ADA_CCB_IDDIR: { + int error; + + if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + off_t entries_offset, max_entries; + error = 0; + + softc->valid_iddir_len = 0; + bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); + softc->flags &= ~(ADA_FLAG_CAN_SUPCAP | + ADA_FLAG_CAN_ZONE); + softc->valid_iddir_len = + ataio->dxfer_len - ataio->resid; + if (softc->valid_iddir_len > 0) + bcopy(ataio->data_ptr, &softc->ata_iddir, + min(softc->valid_iddir_len, + sizeof(softc->ata_iddir))); + + entries_offset = + __offsetof(struct ata_identify_log_pages,entries); + max_entries = softc->valid_iddir_len - entries_offset; + if ((softc->valid_iddir_len > (entries_offset + 1)) + && (le64dec(softc->ata_iddir.header) == + ATA_IDLOG_REVISION) + && (softc->ata_iddir.entry_count > 0)) { + int num_entries, i; + + num_entries = softc->ata_iddir.entry_count; + num_entries = min(num_entries, + softc->valid_iddir_len - entries_offset); + for (i = 0; i < num_entries && + i < max_entries; i++) { + if (softc->ata_iddir.entries[i] == + ATA_IDL_SUP_CAP) + softc->flags |= + ADA_FLAG_CAN_SUPCAP; + else if (softc->ata_iddir.entries[i]== + ATA_IDL_ZDI) + softc->flags |= + ADA_FLAG_CAN_ZONE; + + if ((softc->flags & + ADA_FLAG_CAN_SUPCAP) + && (softc->flags & + ADA_FLAG_CAN_ZONE)) + break; + } + } + } else { + error = adaerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA Identify Data log + * directory, then it effectively isn't + * supported even if the ATA Log directory + * has a non-zero number of pages present for + * this log. + */ + softc->flags &= ~ADA_FLAG_CAN_IDLOG; + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + + free(ataio->data_ptr, M_ATADA); + + if ((error == 0) + && (softc->flags & ADA_FLAG_CAN_SUPCAP)) { + softc->state = ADA_STATE_SUP_CAP; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + } else + adaprobedone(periph, done_ccb); + return; + } + case ADA_CCB_SUP_CAP: { + int error; + + if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + uint32_t valid_len; + size_t needed_size; + struct ata_identify_log_sup_cap *sup_cap; + error = 0; + + sup_cap = (struct ata_identify_log_sup_cap *) + ataio->data_ptr; + valid_len = ataio->dxfer_len - ataio->resid; + needed_size = + __offsetof(struct ata_identify_log_sup_cap, + sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); + if (valid_len >= needed_size) { + uint64_t zoned, zac_cap; + + zoned = le64dec(sup_cap->zoned_cap); + if (zoned & ATA_ZONED_VALID) { + /* + * This should have already been + * set, because this is also in the + * ATA identify data.
+ */ + if ((zoned & ATA_ZONED_MASK) == + ATA_SUPPORT_ZONE_HOST_AWARE) + softc->zone_mode = + ADA_ZONE_HOST_AWARE; + else if ((zoned & ATA_ZONED_MASK) == + ATA_SUPPORT_ZONE_DEV_MANAGED) + softc->zone_mode = + ADA_ZONE_DRIVE_MANAGED; + } + + zac_cap = le64dec(sup_cap->sup_zac_cap); + if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { + if (zac_cap & ATA_REPORT_ZONES_SUP) + softc->zone_flags |= + ADA_ZONE_FLAG_RZ_SUP; + if (zac_cap & ATA_ND_OPEN_ZONE_SUP) + softc->zone_flags |= + ADA_ZONE_FLAG_OPEN_SUP; + if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) + softc->zone_flags |= + ADA_ZONE_FLAG_CLOSE_SUP; + if (zac_cap & ATA_ND_FINISH_ZONE_SUP) + softc->zone_flags |= + ADA_ZONE_FLAG_FINISH_SUP; + if (zac_cap & ATA_ND_RWP_SUP) + softc->zone_flags |= + ADA_ZONE_FLAG_RWP_SUP; + } else { + /* + * This field was introduced in + * ACS-4, r08 on April 28th, 2015. + * If the drive firmware was written + * to an earlier spec, it won't have + * the field. So, assume all + * commands are supported. + */ + softc->zone_flags |= + ADA_ZONE_FLAG_SUP_MASK; + } + + } + } else { + error = adaerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA Identify Data + * Supported Capabilities page, clear the + * flag... + */ + softc->flags &= ~ADA_FLAG_CAN_SUPCAP; + /* + * And clear zone capabilities. + */ + softc->zone_flags &= ~ADA_ZONE_FLAG_SUP_MASK; + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + + free(ataio->data_ptr, M_ATADA); + + if ((error == 0) + && (softc->flags & ADA_FLAG_CAN_ZONE)) { + softc->state = ADA_STATE_ZONE; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + } else + adaprobedone(periph, done_ccb); + return; + } + case ADA_CCB_ZONE: { + int error; + + if ((ataio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + struct ata_zoned_info_log *zi_log; + uint32_t valid_len; + size_t needed_size; + + zi_log = (struct ata_zoned_info_log *)ataio->data_ptr; + + valid_len = ataio->dxfer_len - ataio->resid; + needed_size = __offsetof(struct ata_zoned_info_log, + version_info) + 1 + sizeof(zi_log->version_info); + if (valid_len >= needed_size) { + uint64_t tmpvar; + + tmpvar = le64dec(zi_log->zoned_cap); + if (tmpvar & ATA_ZDI_CAP_VALID) { + if (tmpvar & ATA_ZDI_CAP_URSWRZ) + softc->zone_flags |= + ADA_ZONE_FLAG_URSWRZ; + else + softc->zone_flags &= + ~ADA_ZONE_FLAG_URSWRZ; + } + tmpvar = le64dec(zi_log->optimal_seq_zones); + if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { + softc->zone_flags |= + ADA_ZONE_FLAG_OPT_SEQ_SET; + softc->optimal_seq_zones = (tmpvar & + ATA_ZDI_OPT_SEQ_MASK); + } else { + softc->zone_flags &= + ~ADA_ZONE_FLAG_OPT_SEQ_SET; + softc->optimal_seq_zones = 0; + } + + tmpvar =le64dec(zi_log->optimal_nonseq_zones); + if (tmpvar & ATA_ZDI_OPT_NS_VALID) { + softc->zone_flags |= + ADA_ZONE_FLAG_OPT_NONSEQ_SET; + softc->optimal_nonseq_zones = + (tmpvar & ATA_ZDI_OPT_NS_MASK); + } else { + softc->zone_flags &= + ~ADA_ZONE_FLAG_OPT_NONSEQ_SET; + softc->optimal_nonseq_zones = 0; + } + + tmpvar = le64dec(zi_log->max_seq_req_zones); + if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { + softc->zone_flags |= + ADA_ZONE_FLAG_MAX_SEQ_SET; + softc->max_seq_zones = + (tmpvar & ATA_ZDI_MAX_SEQ_MASK); + } else { + softc->zone_flags &= + ~ADA_ZONE_FLAG_MAX_SEQ_SET; + softc->max_seq_zones = 0; + } + } + } else { + error = adaerror(done_ccb, CAM_RETRY_SELTO, + 
SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + softc->flags &= ~ADA_FLAG_CAN_ZONE; + softc->flags &= ~ADA_ZONE_FLAG_SET_MASK; + + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + + } + free(ataio->data_ptr, M_ATADA); + + adaprobedone(periph, done_ccb); return; } case ADA_CCB_DUMP: /* No-op. We're polling */ return; default: break; } xpt_release_ccb(done_ccb); } static int adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { #ifdef CAM_IO_STATS struct ada_softc *softc; struct cam_periph *periph; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct ada_softc *)periph->softc; switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: case CAM_ATA_STATUS_ERROR: softc->errors++; break; default: break; } #endif return(cam_periph_error(ccb, cam_flags, sense_flags, NULL)); } static void adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd) { struct ada_softc *softc = (struct ada_softc *)periph->softc; struct disk_params *dp = &softc->params; u_int64_t lbasize48; u_int32_t lbasize; dp->secsize = ata_logical_sector_size(&cgd->ident_data); if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) && cgd->ident_data.current_heads && cgd->ident_data.current_sectors) { dp->heads = cgd->ident_data.current_heads; dp->secs_per_track = cgd->ident_data.current_sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 | ((u_int32_t)cgd->ident_data.current_size_2 << 16); } else { dp->heads = cgd->ident_data.heads; dp->secs_per_track = cgd->ident_data.sectors; dp->cylinders = cgd->ident_data.cylinders; dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track; } lbasize = (u_int32_t)cgd->ident_data.lba_size_1 | ((u_int32_t)cgd->ident_data.lba_size_2 << 16); /* use the 28bit LBA size if valid or bigger than the CHS mapping */ if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize) dp->sectors = lbasize; /* use the 48bit LBA size if valid */ lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) | ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) | ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) | ((u_int64_t)cgd->ident_data.lba_size48_4 << 48); if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) && lbasize48 > ATA_MAX_28BIT_LBA) dp->sectors = lbasize48; } static void adasendorderedtag(void *arg) { struct ada_softc *softc = arg; if (ada_send_ordered) { if (softc->outstanding_cmds > 0) { if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0) softc->flags |= ADA_FLAG_NEED_OTAG; softc->flags &= ~ADA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL, adasendorderedtag, softc); } /* * Step through all ADA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void adaflush(void) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { softc = (struct ada_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. 
*/ if (!cam_periph_owned(periph) && (softc->flags & ADA_FLAG_OPEN)) { adadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. */ if (((softc->flags & ADA_FLAG_OPEN) == 0) || (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 0, adadone, CAM_DIR_NONE, 0, NULL, 0, ada_default_timeout*1000); if (softc->flags & ADA_FLAG_CAN_48BIT) ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0); else ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void adaspindown(uint8_t cmd, int flags) { struct cam_periph *periph; struct ada_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &adadriver) { /* If we paniced with lock held - not recurse here. */ if (cam_periph_owned(periph)) continue; cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it.. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "spin-down\n"); ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); cam_fill_ataio(&ccb->ataio, 0, adadone, CAM_DIR_NONE | flags, 0, NULL, 0, ada_default_timeout*1000); ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0); error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Spin-down disk failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } static void adashutdown(void *arg, int howto) { adaflush(); if (ada_spindown_shutdown != 0 && (howto & (RB_HALT | RB_POWEROFF)) != 0) adaspindown(ATA_STANDBY_IMMEDIATE, 0); } static void adasuspend(void *arg) { adaflush(); if (ada_spindown_suspend != 0) adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE); } static void adaresume(void *arg) { struct cam_periph *periph; struct ada_softc *softc; if (ada_spindown_suspend == 0) return; CAM_PERIPH_FOREACH(periph, &adadriver) { cam_periph_lock(periph); softc = (struct ada_softc *)periph->softc; /* * We only spin-down the drive if it is capable of it.. */ if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) { cam_periph_unlock(periph); continue; } if (bootverbose) xpt_print(periph->path, "resume\n"); /* * Drop freeze taken due to CAM_DEV_QFREEZE flag set on * sleep request. */ cam_release_devq(periph->path, /*relsim_flags*/0, /*openings*/0, /*timeout*/0, /*getcount_only*/0); cam_periph_unlock(periph); } } #endif /* _KERNEL */ Index: head/sys/cam/scsi/scsi_all.c =================================================================== --- head/sys/cam/scsi/scsi_all.c (revision 300206) +++ head/sys/cam/scsi/scsi_all.c (revision 300207) @@ -1,8831 +1,9090 @@ /*- * Implementation of Utility functions for all SCSI device types. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998, 2003 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #else #include #include #include #include #include #endif #include #include #include #include #include #include #include #ifdef _KERNEL #include #include #include #include #else #include #include #ifndef FALSE #define FALSE 0 #endif /* FALSE */ #ifndef TRUE #define TRUE 1 #endif /* TRUE */ #define ERESTART -1 /* restart syscall */ #define EJUSTRETURN -2 /* don't modify regs, just return */ #endif /* !_KERNEL */ /* * This is the default number of milliseconds we wait for devices to settle * after a SCSI bus reset. */ #ifndef SCSI_DELAY #define SCSI_DELAY 2000 #endif /* * All devices need _some_ sort of bus settle delay, so we'll set it to * a minimum value of 100ms. Note that this is pertinent only for SPI- * not transport like Fibre Channel or iSCSI where 'delay' is completely * meaningless. */ #ifndef SCSI_MIN_DELAY #define SCSI_MIN_DELAY 100 #endif /* * Make sure the user isn't using seconds instead of milliseconds. */ #if (SCSI_DELAY < SCSI_MIN_DELAY && SCSI_DELAY != 0) #error "SCSI_DELAY is in milliseconds, not seconds! Please use a larger value" #endif int scsi_delay; static int ascentrycomp(const void *key, const void *member); static int senseentrycomp(const void *key, const void *member); static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *, const struct sense_key_table_entry **, const struct asc_table_entry **); + #ifdef _KERNEL static void init_scsi_delay(void); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS); static int set_scsi_delay(int delay); #endif #if !defined(SCSI_NO_OP_STRINGS) #define D (1 << T_DIRECT) #define T (1 << T_SEQUENTIAL) #define L (1 << T_PRINTER) #define P (1 << T_PROCESSOR) #define W (1 << T_WORM) #define R (1 << T_CDROM) #define O (1 << T_OPTICAL) #define M (1 << T_CHANGER) #define A (1 << T_STORARRAY) #define E (1 << T_ENCLOSURE) #define B (1 << T_RBC) #define K (1 << T_OCRW) #define V (1 << T_ADC) #define F (1 << T_OSD) #define S (1 << T_SCANNER) #define C (1 << T_COMM) #define ALL (D | T | L | P | W | R | O | M | A | E | B | K | V | F | S | C) static struct op_table_entry plextor_cd_ops[] = { { 0xD8, R, "CD-DA READ" } }; static struct scsi_op_quirk_entry scsi_op_quirk_table[] = { { /* * I believe that 0xD8 is the Plextor proprietary command * to read CD-DA data. 
I'm not sure which Plextor CDROM * models support the command, though. I know for sure * that the 4X, 8X, and 12X models do, and presumably the * 12-20X does. I don't know about any earlier models, * though. If anyone has any more complete information, * feel free to change this quirk entry. */ {T_CDROM, SIP_MEDIA_REMOVABLE, "PLEXTOR", "CD-ROM PX*", "*"}, nitems(plextor_cd_ops), plextor_cd_ops } }; static struct op_table_entry scsi_op_codes[] = { /* * From: http://www.t10.org/lists/op-num.txt * Modifications by Kenneth Merry (ken@FreeBSD.ORG) * and Jung-uk Kim (jkim@FreeBSD.org) * * Note: order is important in this table, scsi_op_desc() currently * depends on the opcodes in the table being in order to save * search time. * Note: scanner and comm. devices are carried over from the previous * version because they were removed in the latest spec. */ /* File: OP-NUM.TXT * * SCSI Operation Codes * Numeric Sorted Listing * as of 5/26/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC-2) ----------------- * . L - PRINTER DEVICE (SSC) M = Mandatory * . P - PROCESSOR DEVICE (SPC) O = Optional * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) V = Vendor spec. * . . R - CD/DVE DEVICE (MMC-3) Z = Obsolete * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC-2) * . . . A - STORAGE ARRAY DEVICE (SCC-2) * . . . .E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . .F - OBJECT-BASED STORAGE (OSD) * OP DTLPWROMAEBKVF Description * -- -------------- ---------------------------------------------- */ /* 00 MMMMMMMMMMMMMM TEST UNIT READY */ { 0x00, ALL, "TEST UNIT READY" }, /* 01 M REWIND */ { 0x01, T, "REWIND" }, /* 01 Z V ZZZZ REZERO UNIT */ { 0x01, D | W | R | O | M, "REZERO UNIT" }, /* 02 VVVVVV V */ /* 03 MMMMMMMMMMOMMM REQUEST SENSE */ { 0x03, ALL, "REQUEST SENSE" }, /* 04 M OO FORMAT UNIT */ { 0x04, D | R | O, "FORMAT UNIT" }, /* 04 O FORMAT MEDIUM */ { 0x04, T, "FORMAT MEDIUM" }, /* 04 O FORMAT */ { 0x04, L, "FORMAT" }, /* 05 VMVVVV V READ BLOCK LIMITS */ { 0x05, T, "READ BLOCK LIMITS" }, /* 06 VVVVVV V */ /* 07 OVV O OV REASSIGN BLOCKS */ { 0x07, D | W | O, "REASSIGN BLOCKS" }, /* 07 O INITIALIZE ELEMENT STATUS */ { 0x07, M, "INITIALIZE ELEMENT STATUS" }, /* 08 MOV O OV READ(6) */ { 0x08, D | T | W | O, "READ(6)" }, /* 08 O RECEIVE */ { 0x08, P, "RECEIVE" }, /* 08 GET MESSAGE(6) */ { 0x08, C, "GET MESSAGE(6)" }, /* 09 VVVVVV V */ /* 0A OO O OV WRITE(6) */ { 0x0A, D | T | W | O, "WRITE(6)" }, /* 0A M SEND(6) */ { 0x0A, P, "SEND(6)" }, /* 0A SEND MESSAGE(6) */ { 0x0A, C, "SEND MESSAGE(6)" }, /* 0A M PRINT */ { 0x0A, L, "PRINT" }, /* 0B Z ZOZV SEEK(6) */ { 0x0B, D | W | R | O, "SEEK(6)" }, /* 0B O SET CAPACITY */ { 0x0B, T, "SET CAPACITY" }, /* 0B O SLEW AND PRINT */ { 0x0B, L, "SLEW AND PRINT" }, /* 0C VVVVVV V */ /* 0D VVVVVV V */ /* 0E VVVVVV V */ /* 0F VOVVVV V READ REVERSE(6) */ { 0x0F, T, "READ REVERSE(6)" }, /* 10 VM VVV WRITE FILEMARKS(6) */ { 0x10, T, "WRITE FILEMARKS(6)" }, /* 10 O SYNCHRONIZE BUFFER */ { 0x10, L, "SYNCHRONIZE BUFFER" }, /* 11 VMVVVV SPACE(6) */ { 0x11, T, "SPACE(6)" }, /* 12 MMMMMMMMMMMMMM INQUIRY */ { 0x12, ALL, "INQUIRY" }, /* 13 V VVVV */ /* 13 O VERIFY(6) */ { 0x13, T, "VERIFY(6)" }, /* 14 VOOVVV RECOVER BUFFERED DATA */ { 0x14, T | L, "RECOVER BUFFERED DATA" }, /* 15 OMO O OOOO OO MODE SELECT(6) */ { 0x15, ALL & ~(P | R | B | F), "MODE SELECT(6)" }, /* 16 ZZMZO 
OOOZ O RESERVE(6) */ { 0x16, ALL & ~(R | B | V | F | C), "RESERVE(6)" }, /* 16 Z RESERVE ELEMENT(6) */ { 0x16, M, "RESERVE ELEMENT(6)" }, /* 17 ZZMZO OOOZ O RELEASE(6) */ { 0x17, ALL & ~(R | B | V | F | C), "RELEASE(6)" }, /* 17 Z RELEASE ELEMENT(6) */ { 0x17, M, "RELEASE ELEMENT(6)" }, /* 18 ZZZZOZO Z COPY */ { 0x18, D | T | L | P | W | R | O | K | S, "COPY" }, /* 19 VMVVVV ERASE(6) */ { 0x19, T, "ERASE(6)" }, /* 1A OMO O OOOO OO MODE SENSE(6) */ { 0x1A, ALL & ~(P | R | B | F), "MODE SENSE(6)" }, /* 1B O OOO O MO O START STOP UNIT */ { 0x1B, D | W | R | O | A | B | K | F, "START STOP UNIT" }, /* 1B O M LOAD UNLOAD */ { 0x1B, T | V, "LOAD UNLOAD" }, /* 1B SCAN */ { 0x1B, S, "SCAN" }, /* 1B O STOP PRINT */ { 0x1B, L, "STOP PRINT" }, /* 1B O OPEN/CLOSE IMPORT/EXPORT ELEMENT */ { 0x1B, M, "OPEN/CLOSE IMPORT/EXPORT ELEMENT" }, /* 1C OOOOO OOOM OOO RECEIVE DIAGNOSTIC RESULTS */ { 0x1C, ALL & ~(R | B), "RECEIVE DIAGNOSTIC RESULTS" }, /* 1D MMMMM MMOM MMM SEND DIAGNOSTIC */ { 0x1D, ALL & ~(R | B), "SEND DIAGNOSTIC" }, /* 1E OO OOOO O O PREVENT ALLOW MEDIUM REMOVAL */ { 0x1E, D | T | W | R | O | M | K | F, "PREVENT ALLOW MEDIUM REMOVAL" }, /* 1F */ /* 20 V VVV V */ /* 21 V VVV V */ /* 22 V VVV V */ /* 23 V V V V */ /* 23 O READ FORMAT CAPACITIES */ { 0x23, R, "READ FORMAT CAPACITIES" }, /* 24 V VV SET WINDOW */ { 0x24, S, "SET WINDOW" }, /* 25 M M M M READ CAPACITY(10) */ { 0x25, D | W | O | B, "READ CAPACITY(10)" }, /* 25 O READ CAPACITY */ { 0x25, R, "READ CAPACITY" }, /* 25 M READ CARD CAPACITY */ { 0x25, K, "READ CARD CAPACITY" }, /* 25 GET WINDOW */ { 0x25, S, "GET WINDOW" }, /* 26 V VV */ /* 27 V VV */ /* 28 M MOM MM READ(10) */ { 0x28, D | W | R | O | B | K | S, "READ(10)" }, /* 28 GET MESSAGE(10) */ { 0x28, C, "GET MESSAGE(10)" }, /* 29 V VVO READ GENERATION */ { 0x29, O, "READ GENERATION" }, /* 2A O MOM MO WRITE(10) */ { 0x2A, D | W | R | O | B | K, "WRITE(10)" }, /* 2A SEND(10) */ { 0x2A, S, "SEND(10)" }, /* 2A SEND MESSAGE(10) */ { 0x2A, C, "SEND MESSAGE(10)" }, /* 2B Z OOO O SEEK(10) */ { 0x2B, D | W | R | O | K, "SEEK(10)" }, /* 2B O LOCATE(10) */ { 0x2B, T, "LOCATE(10)" }, /* 2B O POSITION TO ELEMENT */ { 0x2B, M, "POSITION TO ELEMENT" }, /* 2C V OO ERASE(10) */ { 0x2C, R | O, "ERASE(10)" }, /* 2D O READ UPDATED BLOCK */ { 0x2D, O, "READ UPDATED BLOCK" }, /* 2D V */ /* 2E O OOO MO WRITE AND VERIFY(10) */ { 0x2E, D | W | R | O | B | K, "WRITE AND VERIFY(10)" }, /* 2F O OOO VERIFY(10) */ { 0x2F, D | W | R | O, "VERIFY(10)" }, /* 30 Z ZZZ SEARCH DATA HIGH(10) */ { 0x30, D | W | R | O, "SEARCH DATA HIGH(10)" }, /* 31 Z ZZZ SEARCH DATA EQUAL(10) */ { 0x31, D | W | R | O, "SEARCH DATA EQUAL(10)" }, /* 31 OBJECT POSITION */ { 0x31, S, "OBJECT POSITION" }, /* 32 Z ZZZ SEARCH DATA LOW(10) */ { 0x32, D | W | R | O, "SEARCH DATA LOW(10)" }, /* 33 Z OZO SET LIMITS(10) */ { 0x33, D | W | R | O, "SET LIMITS(10)" }, /* 34 O O O O PRE-FETCH(10) */ { 0x34, D | W | O | K, "PRE-FETCH(10)" }, /* 34 M READ POSITION */ { 0x34, T, "READ POSITION" }, /* 34 GET DATA BUFFER STATUS */ { 0x34, S, "GET DATA BUFFER STATUS" }, /* 35 O OOO MO SYNCHRONIZE CACHE(10) */ { 0x35, D | W | R | O | B | K, "SYNCHRONIZE CACHE(10)" }, /* 36 Z O O O LOCK UNLOCK CACHE(10) */ { 0x36, D | W | O | K, "LOCK UNLOCK CACHE(10)" }, /* 37 O O READ DEFECT DATA(10) */ { 0x37, D | O, "READ DEFECT DATA(10)" }, /* 37 O INITIALIZE ELEMENT STATUS WITH RANGE */ { 0x37, M, "INITIALIZE ELEMENT STATUS WITH RANGE" }, /* 38 O O O MEDIUM SCAN */ { 0x38, W | O | K, "MEDIUM SCAN" }, /* 39 ZZZZOZO Z COMPARE */ { 0x39, D | T | L | P | W | R | O | K | S, 
"COMPARE" }, /* 3A ZZZZOZO Z COPY AND VERIFY */ { 0x3A, D | T | L | P | W | R | O | K | S, "COPY AND VERIFY" }, /* 3B OOOOOOOOOOMOOO WRITE BUFFER */ { 0x3B, ALL, "WRITE BUFFER" }, /* 3C OOOOOOOOOO OOO READ BUFFER */ { 0x3C, ALL & ~(B), "READ BUFFER" }, /* 3D O UPDATE BLOCK */ { 0x3D, O, "UPDATE BLOCK" }, /* 3E O O O READ LONG(10) */ { 0x3E, D | W | O, "READ LONG(10)" }, /* 3F O O O WRITE LONG(10) */ { 0x3F, D | W | O, "WRITE LONG(10)" }, /* 40 ZZZZOZOZ CHANGE DEFINITION */ { 0x40, D | T | L | P | W | R | O | M | S | C, "CHANGE DEFINITION" }, /* 41 O WRITE SAME(10) */ { 0x41, D, "WRITE SAME(10)" }, /* 42 O UNMAP */ { 0x42, D, "UNMAP" }, /* 42 O READ SUB-CHANNEL */ { 0x42, R, "READ SUB-CHANNEL" }, /* 43 O READ TOC/PMA/ATIP */ { 0x43, R, "READ TOC/PMA/ATIP" }, /* 44 M M REPORT DENSITY SUPPORT */ { 0x44, T | V, "REPORT DENSITY SUPPORT" }, /* 44 READ HEADER */ /* 45 O PLAY AUDIO(10) */ { 0x45, R, "PLAY AUDIO(10)" }, /* 46 M GET CONFIGURATION */ { 0x46, R, "GET CONFIGURATION" }, /* 47 O PLAY AUDIO MSF */ { 0x47, R, "PLAY AUDIO MSF" }, /* 48 */ /* 49 */ /* 4A M GET EVENT STATUS NOTIFICATION */ { 0x4A, R, "GET EVENT STATUS NOTIFICATION" }, /* 4B O PAUSE/RESUME */ { 0x4B, R, "PAUSE/RESUME" }, /* 4C OOOOO OOOO OOO LOG SELECT */ { 0x4C, ALL & ~(R | B), "LOG SELECT" }, /* 4D OOOOO OOOO OMO LOG SENSE */ { 0x4D, ALL & ~(R | B), "LOG SENSE" }, /* 4E O STOP PLAY/SCAN */ { 0x4E, R, "STOP PLAY/SCAN" }, /* 4F */ /* 50 O XDWRITE(10) */ { 0x50, D, "XDWRITE(10)" }, /* 51 O XPWRITE(10) */ { 0x51, D, "XPWRITE(10)" }, /* 51 O READ DISC INFORMATION */ { 0x51, R, "READ DISC INFORMATION" }, /* 52 O XDREAD(10) */ { 0x52, D, "XDREAD(10)" }, /* 52 O READ TRACK INFORMATION */ { 0x52, R, "READ TRACK INFORMATION" }, /* 53 O RESERVE TRACK */ { 0x53, R, "RESERVE TRACK" }, /* 54 O SEND OPC INFORMATION */ { 0x54, R, "SEND OPC INFORMATION" }, /* 55 OOO OMOOOOMOMO MODE SELECT(10) */ { 0x55, ALL & ~(P), "MODE SELECT(10)" }, /* 56 ZZMZO OOOZ RESERVE(10) */ { 0x56, ALL & ~(R | B | K | V | F | C), "RESERVE(10)" }, /* 56 Z RESERVE ELEMENT(10) */ { 0x56, M, "RESERVE ELEMENT(10)" }, /* 57 ZZMZO OOOZ RELEASE(10) */ { 0x57, ALL & ~(R | B | K | V | F | C), "RELEASE(10)" }, /* 57 Z RELEASE ELEMENT(10) */ { 0x57, M, "RELEASE ELEMENT(10)" }, /* 58 O REPAIR TRACK */ { 0x58, R, "REPAIR TRACK" }, /* 59 */ /* 5A OOO OMOOOOMOMO MODE SENSE(10) */ { 0x5A, ALL & ~(P), "MODE SENSE(10)" }, /* 5B O CLOSE TRACK/SESSION */ { 0x5B, R, "CLOSE TRACK/SESSION" }, /* 5C O READ BUFFER CAPACITY */ { 0x5C, R, "READ BUFFER CAPACITY" }, /* 5D O SEND CUE SHEET */ { 0x5D, R, "SEND CUE SHEET" }, /* 5E OOOOO OOOO M PERSISTENT RESERVE IN */ { 0x5E, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE IN" }, /* 5F OOOOO OOOO M PERSISTENT RESERVE OUT */ { 0x5F, ALL & ~(R | B | K | V | C), "PERSISTENT RESERVE OUT" }, /* 7E OO O OOOO O extended CDB */ { 0x7E, D | T | R | M | A | E | B | V, "extended CDB" }, /* 7F O M variable length CDB (more than 16 bytes) */ { 0x7F, D | F, "variable length CDB (more than 16 bytes)" }, /* 80 Z XDWRITE EXTENDED(16) */ { 0x80, D, "XDWRITE EXTENDED(16)" }, /* 80 M WRITE FILEMARKS(16) */ { 0x80, T, "WRITE FILEMARKS(16)" }, /* 81 Z REBUILD(16) */ { 0x81, D, "REBUILD(16)" }, /* 81 O READ REVERSE(16) */ { 0x81, T, "READ REVERSE(16)" }, /* 82 Z REGENERATE(16) */ { 0x82, D, "REGENERATE(16)" }, /* 83 OOOOO O OO EXTENDED COPY */ { 0x83, D | T | L | P | W | O | K | V, "EXTENDED COPY" }, /* 84 OOOOO O OO RECEIVE COPY RESULTS */ { 0x84, D | T | L | P | W | O | K | V, "RECEIVE COPY RESULTS" }, /* 85 O O O ATA COMMAND PASS THROUGH(16) */ { 0x85, D | R | B, 
"ATA COMMAND PASS THROUGH(16)" }, /* 86 OO OO OOOOOOO ACCESS CONTROL IN */ { 0x86, ALL & ~(L | R | F), "ACCESS CONTROL IN" }, /* 87 OO OO OOOOOOO ACCESS CONTROL OUT */ { 0x87, ALL & ~(L | R | F), "ACCESS CONTROL OUT" }, /* * XXX READ(16)/WRITE(16) were not listed for CD/DVE in op-num.txt * but we had it since r1.40. Do we really want them? */ /* 88 MM O O O READ(16) */ { 0x88, D | T | W | O | B, "READ(16)" }, /* 89 O COMPARE AND WRITE*/ { 0x89, D, "COMPARE AND WRITE" }, /* 8A OM O O O WRITE(16) */ { 0x8A, D | T | W | O | B, "WRITE(16)" }, /* 8B O ORWRITE */ { 0x8B, D, "ORWRITE" }, /* 8C OO O OO O M READ ATTRIBUTE */ { 0x8C, D | T | W | O | M | B | V, "READ ATTRIBUTE" }, /* 8D OO O OO O O WRITE ATTRIBUTE */ { 0x8D, D | T | W | O | M | B | V, "WRITE ATTRIBUTE" }, /* 8E O O O O WRITE AND VERIFY(16) */ { 0x8E, D | W | O | B, "WRITE AND VERIFY(16)" }, /* 8F OO O O O VERIFY(16) */ { 0x8F, D | T | W | O | B, "VERIFY(16)" }, /* 90 O O O O PRE-FETCH(16) */ { 0x90, D | W | O | B, "PRE-FETCH(16)" }, /* 91 O O O O SYNCHRONIZE CACHE(16) */ { 0x91, D | W | O | B, "SYNCHRONIZE CACHE(16)" }, /* 91 O SPACE(16) */ { 0x91, T, "SPACE(16)" }, /* 92 Z O O LOCK UNLOCK CACHE(16) */ { 0x92, D | W | O, "LOCK UNLOCK CACHE(16)" }, /* 92 O LOCATE(16) */ { 0x92, T, "LOCATE(16)" }, /* 93 O WRITE SAME(16) */ { 0x93, D, "WRITE SAME(16)" }, /* 93 M ERASE(16) */ { 0x93, T, "ERASE(16)" }, /* 94 O ZBC OUT */ - { 0x94, D, "ZBC OUT" }, - /* 95 O ZBC OUT */ - { 0x95, D, "ZBC OUT" }, + { 0x94, ALL, "ZBC OUT" }, + /* 95 O ZBC IN */ + { 0x95, ALL, "ZBC IN" }, /* 96 */ /* 97 */ /* 98 */ /* 99 */ /* 9A O WRITE STREAM(16) */ { 0x9A, D, "WRITE STREAM(16)" }, /* 9B OOOOOOOOOO OOO READ BUFFER(16) */ { 0x9B, ALL & ~(B) , "READ BUFFER(16)" }, /* 9C O WRITE ATOMIC(16) */ { 0x9C, D, "WRITE ATOMIC(16)" }, /* 9D SERVICE ACTION BIDIRECTIONAL */ { 0x9D, ALL, "SERVICE ACTION BIDIRECTIONAL" }, /* XXX KDM ALL for this? op-num.txt defines it for none.. */ /* 9E SERVICE ACTION IN(16) */ { 0x9E, ALL, "SERVICE ACTION IN(16)" }, - /* XXX KDM ALL for this? op-num.txt defines it for ADC.. 
*/ /* 9F M SERVICE ACTION OUT(16) */ { 0x9F, ALL, "SERVICE ACTION OUT(16)" }, /* A0 MMOOO OMMM OMO REPORT LUNS */ { 0xA0, ALL & ~(R | B), "REPORT LUNS" }, /* A1 O BLANK */ { 0xA1, R, "BLANK" }, /* A1 O O ATA COMMAND PASS THROUGH(12) */ { 0xA1, D | B, "ATA COMMAND PASS THROUGH(12)" }, /* A2 OO O O SECURITY PROTOCOL IN */ { 0xA2, D | T | R | V, "SECURITY PROTOCOL IN" }, /* A3 OOO O OOMOOOM MAINTENANCE (IN) */ { 0xA3, ALL & ~(P | R | F), "MAINTENANCE (IN)" }, /* A3 O SEND KEY */ { 0xA3, R, "SEND KEY" }, /* A4 OOO O OOOOOOO MAINTENANCE (OUT) */ { 0xA4, ALL & ~(P | R | F), "MAINTENANCE (OUT)" }, /* A4 O REPORT KEY */ { 0xA4, R, "REPORT KEY" }, /* A5 O O OM MOVE MEDIUM */ { 0xA5, T | W | O | M, "MOVE MEDIUM" }, /* A5 O PLAY AUDIO(12) */ { 0xA5, R, "PLAY AUDIO(12)" }, /* A6 O EXCHANGE MEDIUM */ { 0xA6, M, "EXCHANGE MEDIUM" }, /* A6 O LOAD/UNLOAD C/DVD */ { 0xA6, R, "LOAD/UNLOAD C/DVD" }, /* A7 ZZ O O MOVE MEDIUM ATTACHED */ { 0xA7, D | T | W | O, "MOVE MEDIUM ATTACHED" }, /* A7 O SET READ AHEAD */ { 0xA7, R, "SET READ AHEAD" }, /* A8 O OOO READ(12) */ { 0xA8, D | W | R | O, "READ(12)" }, /* A8 GET MESSAGE(12) */ { 0xA8, C, "GET MESSAGE(12)" }, /* A9 O SERVICE ACTION OUT(12) */ { 0xA9, V, "SERVICE ACTION OUT(12)" }, /* AA O OOO WRITE(12) */ { 0xAA, D | W | R | O, "WRITE(12)" }, /* AA SEND MESSAGE(12) */ { 0xAA, C, "SEND MESSAGE(12)" }, /* AB O O SERVICE ACTION IN(12) */ { 0xAB, R | V, "SERVICE ACTION IN(12)" }, /* AC O ERASE(12) */ { 0xAC, O, "ERASE(12)" }, /* AC O GET PERFORMANCE */ { 0xAC, R, "GET PERFORMANCE" }, /* AD O READ DVD STRUCTURE */ { 0xAD, R, "READ DVD STRUCTURE" }, /* AE O O O WRITE AND VERIFY(12) */ { 0xAE, D | W | O, "WRITE AND VERIFY(12)" }, /* AF O OZO VERIFY(12) */ { 0xAF, D | W | R | O, "VERIFY(12)" }, /* B0 ZZZ SEARCH DATA HIGH(12) */ { 0xB0, W | R | O, "SEARCH DATA HIGH(12)" }, /* B1 ZZZ SEARCH DATA EQUAL(12) */ { 0xB1, W | R | O, "SEARCH DATA EQUAL(12)" }, /* B2 ZZZ SEARCH DATA LOW(12) */ { 0xB2, W | R | O, "SEARCH DATA LOW(12)" }, /* B3 Z OZO SET LIMITS(12) */ { 0xB3, D | W | R | O, "SET LIMITS(12)" }, /* B4 ZZ OZO READ ELEMENT STATUS ATTACHED */ { 0xB4, D | T | W | R | O, "READ ELEMENT STATUS ATTACHED" }, /* B5 OO O O SECURITY PROTOCOL OUT */ { 0xB5, D | T | R | V, "SECURITY PROTOCOL OUT" }, /* B5 O REQUEST VOLUME ELEMENT ADDRESS */ { 0xB5, M, "REQUEST VOLUME ELEMENT ADDRESS" }, /* B6 O SEND VOLUME TAG */ { 0xB6, M, "SEND VOLUME TAG" }, /* B6 O SET STREAMING */ { 0xB6, R, "SET STREAMING" }, /* B7 O O READ DEFECT DATA(12) */ { 0xB7, D | O, "READ DEFECT DATA(12)" }, /* B8 O OZOM READ ELEMENT STATUS */ { 0xB8, T | W | R | O | M, "READ ELEMENT STATUS" }, /* B9 O READ CD MSF */ { 0xB9, R, "READ CD MSF" }, /* BA O O OOMO REDUNDANCY GROUP (IN) */ { 0xBA, D | W | O | M | A | E, "REDUNDANCY GROUP (IN)" }, /* BA O SCAN */ { 0xBA, R, "SCAN" }, /* BB O O OOOO REDUNDANCY GROUP (OUT) */ { 0xBB, D | W | O | M | A | E, "REDUNDANCY GROUP (OUT)" }, /* BB O SET CD SPEED */ { 0xBB, R, "SET CD SPEED" }, /* BC O O OOMO SPARE (IN) */ { 0xBC, D | W | O | M | A | E, "SPARE (IN)" }, /* BD O O OOOO SPARE (OUT) */ { 0xBD, D | W | O | M | A | E, "SPARE (OUT)" }, /* BD O MECHANISM STATUS */ { 0xBD, R, "MECHANISM STATUS" }, /* BE O O OOMO VOLUME SET (IN) */ { 0xBE, D | W | O | M | A | E, "VOLUME SET (IN)" }, /* BE O READ CD */ { 0xBE, R, "READ CD" }, /* BF O O OOOO VOLUME SET (OUT) */ { 0xBF, D | W | O | M | A | E, "VOLUME SET (OUT)" }, /* BF O SEND DVD STRUCTURE */ { 0xBF, R, "SEND DVD STRUCTURE" } }; const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { caddr_t match; int 
i, j; u_int32_t opmask; u_int16_t pd_type; int num_ops[2]; struct op_table_entry *table[2]; int num_tables; /* * If we've got inquiry data, use it to determine what type of * device we're dealing with here. Otherwise, assume direct * access. */ if (inq_data == NULL) { pd_type = T_DIRECT; match = NULL; } else { pd_type = SID_TYPE(inq_data); match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)scsi_op_quirk_table, nitems(scsi_op_quirk_table), sizeof(*scsi_op_quirk_table), scsi_inquiry_match); } if (match != NULL) { table[0] = ((struct scsi_op_quirk_entry *)match)->op_table; num_ops[0] = ((struct scsi_op_quirk_entry *)match)->num_ops; table[1] = scsi_op_codes; num_ops[1] = nitems(scsi_op_codes); num_tables = 2; } else { /* * If this is true, we have a vendor specific opcode that * wasn't covered in the quirk table. */ if ((opcode > 0xBF) || ((opcode > 0x5F) && (opcode < 0x80))) return("Vendor Specific Command"); table[0] = scsi_op_codes; num_ops[0] = nitems(scsi_op_codes); num_tables = 1; } /* RBC is 'Simplified' Direct Access Device */ if (pd_type == T_RBC) pd_type = T_DIRECT; + /* + * Host managed drives are direct access for the most part. + */ + if (pd_type == T_ZBC_HM) + pd_type = T_DIRECT; + /* Map NODEVICE to Direct Access Device to handle REPORT LUNS, etc. */ if (pd_type == T_NODEVICE) pd_type = T_DIRECT; opmask = 1 << pd_type; for (j = 0; j < num_tables; j++) { for (i = 0; i < num_ops[j] && table[j][i].opcode <= opcode; i++){ if ((table[j][i].opcode == opcode) && ((table[j][i].opmask & opmask) != 0)) return(table[j][i].desc); } } /* * If we can't find a match for the command in the table, we just * assume it's a vendor specific command. */ return("Vendor Specific Command"); } #else /* SCSI_NO_OP_STRINGS */ const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data) { return(""); } #endif #if !defined(SCSI_NO_SENSE_STRINGS) #define SST(asc, ascq, action, desc) \ asc, ascq, action, desc #else const char empty_string[] = ""; #define SST(asc, ascq, action, desc) \ asc, ascq, action, empty_string #endif const struct sense_key_table_entry sense_key_table[] = { { SSD_KEY_NO_SENSE, SS_NOP, "NO SENSE" }, { SSD_KEY_RECOVERED_ERROR, SS_NOP|SSQ_PRINT_SENSE, "RECOVERED ERROR" }, { SSD_KEY_NOT_READY, SS_RDEF, "NOT READY" }, { SSD_KEY_MEDIUM_ERROR, SS_RDEF, "MEDIUM ERROR" }, { SSD_KEY_HARDWARE_ERROR, SS_RDEF, "HARDWARE FAILURE" }, { SSD_KEY_ILLEGAL_REQUEST, SS_FATAL|EINVAL, "ILLEGAL REQUEST" }, { SSD_KEY_UNIT_ATTENTION, SS_FATAL|ENXIO, "UNIT ATTENTION" }, { SSD_KEY_DATA_PROTECT, SS_FATAL|EACCES, "DATA PROTECT" }, { SSD_KEY_BLANK_CHECK, SS_FATAL|ENOSPC, "BLANK CHECK" }, { SSD_KEY_Vendor_Specific, SS_FATAL|EIO, "Vendor Specific" }, { SSD_KEY_COPY_ABORTED, SS_FATAL|EIO, "COPY ABORTED" }, { SSD_KEY_ABORTED_COMMAND, SS_RDEF, "ABORTED COMMAND" }, { SSD_KEY_EQUAL, SS_NOP, "EQUAL" }, { SSD_KEY_VOLUME_OVERFLOW, SS_FATAL|EIO, "VOLUME OVERFLOW" }, { SSD_KEY_MISCOMPARE, SS_NOP, "MISCOMPARE" }, { SSD_KEY_COMPLETED, SS_NOP, "COMPLETED" } }; static struct asc_table_entry quantum_fireball_entries[] = { { SST(0x04, 0x0b, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing cmd.
required") } }; static struct asc_table_entry sony_mo_entries[] = { { SST(0x04, 0x00, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, cause not reportable") } }; static struct asc_table_entry hgst_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Vendor Unique - Logical Unit Not Ready") }, { SST(0x0A, 0x01, SS_RDEF, "Unrecovered Super Certification Log Write Error") }, { SST(0x0A, 0x02, SS_RDEF, "Unrecovered Super Certification Log Read Error") }, { SST(0x15, 0x03, SS_RDEF, "Unrecovered Sector Error") }, { SST(0x3E, 0x04, SS_RDEF, "Unrecovered Self-Test Hard-Cache Test Fail") }, { SST(0x3E, 0x05, SS_RDEF, "Unrecovered Self-Test OTF-Cache Fail") }, { SST(0x40, 0x00, SS_RDEF, "Unrecovered SAT No Buffer Overflow Error") }, { SST(0x40, 0x01, SS_RDEF, "Unrecovered SAT Buffer Overflow Error") }, { SST(0x40, 0x02, SS_RDEF, "Unrecovered SAT No Buffer Overflow With ECS Fault") }, { SST(0x40, 0x03, SS_RDEF, "Unrecovered SAT Buffer Overflow With ECS Fault") }, { SST(0x40, 0x81, SS_RDEF, "DRAM Failure") }, { SST(0x44, 0x0B, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF2, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF6, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xF9, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x44, 0xFA, SS_RDEF, "Vendor Unique - Internal Target Failure") }, { SST(0x5D, 0x22, SS_RDEF, "Extreme Over-Temperature Warning") }, { SST(0x5D, 0x50, SS_RDEF, "Load/Unload cycle Count Warning") }, { SST(0x81, 0x00, SS_RDEF, "Vendor Unique - Internal Logic Error") }, { SST(0x85, 0x00, SS_RDEF, "Vendor Unique - Internal Key Seed Error") }, }; static struct asc_table_entry seagate_entries[] = { { SST(0x04, 0xF0, SS_RDEF, "Logical Unit Not Ready, super certify in Progress") }, { SST(0x08, 0x86, SS_RDEF, "Write Fault Data Corruption") }, { SST(0x09, 0x0D, SS_RDEF, "Tracking Failure") }, { SST(0x09, 0x0E, SS_RDEF, "ETF Failure") }, { SST(0x0B, 0x5D, SS_RDEF, "Pre-SMART Warning") }, { SST(0x0B, 0x85, SS_RDEF, "5V Voltage Warning") }, { SST(0x0B, 0x8C, SS_RDEF, "12V Voltage Warning") }, { SST(0x0C, 0xFF, SS_RDEF, "Write Error - Too many error recovery revs") }, { SST(0x11, 0xFF, SS_RDEF, "Unrecovered Read Error - Too many error recovery revs") }, { SST(0x19, 0x0E, SS_RDEF, "Fewer than 1/2 defect list copies") }, { SST(0x20, 0xF3, SS_RDEF, "Illegal CDB linked to skip mask cmd") }, { SST(0x24, 0xF0, SS_RDEF, "Illegal byte in CDB, LBA not matching") }, { SST(0x24, 0xF1, SS_RDEF, "Illegal byte in CDB, LEN not matching") }, { SST(0x24, 0xF2, SS_RDEF, "Mask not matching transfer length") }, { SST(0x24, 0xF3, SS_RDEF, "Drive formatted without plist") }, { SST(0x26, 0x95, SS_RDEF, "Invalid Field Parameter - CAP File") }, { SST(0x26, 0x96, SS_RDEF, "Invalid Field Parameter - RAP File") }, { SST(0x26, 0x97, SS_RDEF, "Invalid Field Parameter - TMS Firmware Tag") }, { SST(0x26, 0x98, SS_RDEF, "Invalid Field Parameter - Check Sum") }, { SST(0x26, 0x99, SS_RDEF, "Invalid Field Parameter - Firmware Tag") }, { SST(0x29, 0x08, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x09, SS_RDEF, "Write Log Dump data") }, { SST(0x29, 0x0A, SS_RDEF, "Reserved disk space") }, { SST(0x29, 0x0B, SS_RDEF, "SDBP") }, { SST(0x29, 0x0C, SS_RDEF, "SDBP") }, { SST(0x31, 0x91, SS_RDEF, "Format Corrupted World Wide Name (WWN) is Invalid") }, { SST(0x32, 0x03, SS_RDEF, "Defect List - Length exceeds Command Allocated Length") }, { SST(0x33, 0x00, SS_RDEF, "Flash not ready for access") }, { SST(0x3F, 0x70, SS_RDEF, "Invalid RAP block") }, { 
SST(0x3F, 0x71, SS_RDEF, "RAP/ETF mismatch") }, { SST(0x3F, 0x90, SS_RDEF, "Invalid CAP block") }, { SST(0x3F, 0x91, SS_RDEF, "World Wide Name (WWN) Mismatch") }, { SST(0x40, 0x01, SS_RDEF, "DRAM Parity Error") }, { SST(0x40, 0x02, SS_RDEF, "DRAM Parity Error") }, { SST(0x42, 0x0A, SS_RDEF, "Loopback Test") }, { SST(0x42, 0x0B, SS_RDEF, "Loopback Test") }, { SST(0x44, 0xF2, SS_RDEF, "Compare error during data integrity check") }, { SST(0x44, 0xF6, SS_RDEF, "Unrecoverable error during data integrity check") }, { SST(0x47, 0x80, SS_RDEF, "Fibre Channel Sequence Error") }, { SST(0x4E, 0x01, SS_RDEF, "Information Unit Too Short") }, { SST(0x80, 0x00, SS_RDEF, "General Firmware Error / Command Timeout") }, { SST(0x80, 0x01, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x02, SS_RDEF, "Command Timeout") }, { SST(0x80, 0x80, SS_RDEF, "FC FIFO Error During Read Transfer") }, { SST(0x80, 0x81, SS_RDEF, "FC FIFO Error During Write Transfer") }, { SST(0x80, 0x82, SS_RDEF, "DISC FIFO Error During Read Transfer") }, { SST(0x80, 0x83, SS_RDEF, "DISC FIFO Error During Write Transfer") }, { SST(0x80, 0x84, SS_RDEF, "LBA Seeded LRC Error on Read") }, { SST(0x80, 0x85, SS_RDEF, "LBA Seeded LRC Error on Write") }, { SST(0x80, 0x86, SS_RDEF, "IOEDC Error on Read") }, { SST(0x80, 0x87, SS_RDEF, "IOEDC Error on Write") }, { SST(0x80, 0x88, SS_RDEF, "Host Parity Check Failed") }, { SST(0x80, 0x89, SS_RDEF, "IOEDC error on read detected by formatter") }, { SST(0x80, 0x8A, SS_RDEF, "Host Parity Errors / Host FIFO Initialization Failed") }, { SST(0x80, 0x8B, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8C, SS_RDEF, "Host Parity Errors") }, { SST(0x80, 0x8D, SS_RDEF, "Host Parity Errors") }, { SST(0x81, 0x00, SS_RDEF, "LA Check Failed") }, { SST(0x82, 0x00, SS_RDEF, "Internal client detected insufficient buffer") }, { SST(0x84, 0x00, SS_RDEF, "Scheduled Diagnostic And Repair") }, }; static struct scsi_sense_quirk_entry sense_quirk_table[] = { { /* * XXX The Quantum Fireball ST and SE like to return 0x04 0x0b * when they really should return 0x04 0x02. */ {T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "FIREBALL S*", "*"}, /*num_sense_keys*/0, nitems(quantum_fireball_entries), /*sense key entries*/NULL, quantum_fireball_entries }, { /* * This Sony MO drive likes to return 0x04, 0x00 when it * isn't spun up. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SONY", "SMO-*", "*"}, /*num_sense_keys*/0, nitems(sony_mo_entries), /*sense key entries*/NULL, sony_mo_entries }, { /* * HGST vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "HGST", "*", "*"}, /*num_sense_keys*/0, nitems(hgst_entries), /*sense key entries*/NULL, hgst_entries }, { /* * SEAGATE vendor-specific error codes */ {T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "*", "*"}, /*num_sense_keys*/0, nitems(seagate_entries), /*sense key entries*/NULL, seagate_entries } }; const u_int sense_quirk_table_size = nitems(sense_quirk_table); static struct asc_table_entry asc_table[] = { /* * From: http://www.t10.org/lists/asc-num.txt * Modifications by Jung-uk Kim (jkim@FreeBSD.org) */ /* * File: ASC-NUM.TXT * * SCSI ASC/ASCQ Assignments * Numeric Sorted Listing * as of 8/12/15 * * D - DIRECT ACCESS DEVICE (SBC-2) device column key * .T - SEQUENTIAL ACCESS DEVICE (SSC) ------------------- * . L - PRINTER DEVICE (SSC) blank = reserved * . P - PROCESSOR DEVICE (SPC) not blank = allowed * . .W - WRITE ONCE READ MULTIPLE DEVICE (SBC-2) * . . R - CD DEVICE (MMC) * . . O - OPTICAL MEMORY DEVICE (SBC-2) * . . .M - MEDIA CHANGER DEVICE (SMC) * . . . A - STORAGE ARRAY DEVICE (SCC) * . . . 
E - ENCLOSURE SERVICES DEVICE (SES) * . . . .B - SIMPLIFIED DIRECT-ACCESS DEVICE (RBC) * . . . . K - OPTICAL CARD READER/WRITER DEVICE (OCRW) * . . . . V - AUTOMATION/DRIVE INTERFACE (ADC) * . . . . .F - OBJECT-BASED STORAGE (OSD) * DTLPWROMAEBKVF * ASC ASCQ Action * Description */ /* DTLPWROMAEBKVF */ { SST(0x00, 0x00, SS_NOP, "No additional sense information") }, /* T */ { SST(0x00, 0x01, SS_RDEF, "Filemark detected") }, /* T */ { SST(0x00, 0x02, SS_RDEF, "End-of-partition/medium detected") }, /* T */ { SST(0x00, 0x03, SS_RDEF, "Setmark detected") }, /* T */ { SST(0x00, 0x04, SS_RDEF, "Beginning-of-partition/medium detected") }, /* TL */ { SST(0x00, 0x05, SS_RDEF, "End-of-data detected") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x06, SS_RDEF, "I/O process terminated") }, /* T */ { SST(0x00, 0x07, SS_RDEF, /* XXX TBD */ "Programmable early warning detected") }, /* R */ { SST(0x00, 0x11, SS_FATAL | EBUSY, "Audio play operation in progress") }, /* R */ { SST(0x00, 0x12, SS_NOP, "Audio play operation paused") }, /* R */ { SST(0x00, 0x13, SS_NOP, "Audio play operation successfully completed") }, /* R */ { SST(0x00, 0x14, SS_RDEF, "Audio play operation stopped due to error") }, /* R */ { SST(0x00, 0x15, SS_NOP, "No current audio status to return") }, /* DTLPWROMAEBKVF */ { SST(0x00, 0x16, SS_FATAL | EBUSY, "Operation in progress") }, /* DTL WROMAEBKVF */ { SST(0x00, 0x17, SS_RDEF, "Cleaning requested") }, /* T */ { SST(0x00, 0x18, SS_RDEF, /* XXX TBD */ "Erase operation in progress") }, /* T */ { SST(0x00, 0x19, SS_RDEF, /* XXX TBD */ "Locate operation in progress") }, /* T */ { SST(0x00, 0x1A, SS_RDEF, /* XXX TBD */ "Rewind operation in progress") }, /* T */ { SST(0x00, 0x1B, SS_RDEF, /* XXX TBD */ "Set capacity operation in progress") }, /* T */ { SST(0x00, 0x1C, SS_RDEF, /* XXX TBD */ "Verify operation in progress") }, /* DT B */ { SST(0x00, 0x1D, SS_RDEF, /* XXX TBD */ "ATA pass through information available") }, /* DT R MAEBKV */ { SST(0x00, 0x1E, SS_RDEF, /* XXX TBD */ "Conflicting SA creation request") }, /* DT B */ { SST(0x00, 0x1F, SS_RDEF, /* XXX TBD */ "Logical unit transitioning to another power condition") }, /* DT P B */ { SST(0x00, 0x20, SS_RDEF, /* XXX TBD */ "Extended copy information available") }, /* D */ { SST(0x00, 0x21, SS_RDEF, /* XXX TBD */ "Atomic command aborted due to ACA") }, /* D W O BK */ { SST(0x01, 0x00, SS_RDEF, "No index/sector signal") }, /* D WRO BK */ { SST(0x02, 0x00, SS_RDEF, "No seek complete") }, /* DTL W O BK */ { SST(0x03, 0x00, SS_RDEF, "Peripheral device write fault") }, /* T */ { SST(0x03, 0x01, SS_RDEF, "No write current") }, /* T */ { SST(0x03, 0x02, SS_RDEF, "Excessive write errors") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x00, SS_RDEF, "Logical unit not ready, cause not reportable") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x01, SS_WAIT | EBUSY, "Logical unit is in process of becoming ready") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x02, SS_START | SSQ_DECREMENT_COUNT | ENXIO, "Logical unit not ready, initializing command required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x03, SS_FATAL | ENXIO, "Logical unit not ready, manual intervention required") }, /* DTL RO B */ { SST(0x04, 0x04, SS_FATAL | EBUSY, "Logical unit not ready, format in progress") }, /* DT W O A BK F */ { SST(0x04, 0x05, SS_FATAL | EBUSY, "Logical unit not ready, rebuild in progress") }, /* DT W O A BK */ { SST(0x04, 0x06, SS_FATAL | EBUSY, "Logical unit not ready, recalculation in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x07, SS_FATAL | EBUSY, "Logical unit not ready, operation in 
progress") }, /* R */ { SST(0x04, 0x08, SS_FATAL | EBUSY, "Logical unit not ready, long write in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x09, SS_RDEF, /* XXX TBD */ "Logical unit not ready, self-test in progress") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0A, SS_WAIT | ENXIO, "Logical unit not accessible, asymmetric access state transition")}, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0B, SS_FATAL | ENXIO, "Logical unit not accessible, target port in standby state") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x0C, SS_FATAL | ENXIO, "Logical unit not accessible, target port in unavailable state") }, /* F */ { SST(0x04, 0x0D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, structure check required") }, /* DTL WR MAEBKVF */ { SST(0x04, 0x0E, SS_RDEF, /* XXX TBD */ "Logical unit not ready, security session in progress") }, /* DT WROM B */ { SST(0x04, 0x10, SS_RDEF, /* XXX TBD */ "Logical unit not ready, auxiliary memory not accessible") }, /* DT WRO AEB VF */ { SST(0x04, 0x11, SS_WAIT | EBUSY, "Logical unit not ready, notify (enable spinup) required") }, /* M V */ { SST(0x04, 0x12, SS_RDEF, /* XXX TBD */ "Logical unit not ready, offline") }, /* DT R MAEBKV */ { SST(0x04, 0x13, SS_RDEF, /* XXX TBD */ "Logical unit not ready, SA creation in progress") }, /* D B */ { SST(0x04, 0x14, SS_RDEF, /* XXX TBD */ "Logical unit not ready, space allocation in progress") }, /* M */ { SST(0x04, 0x15, SS_RDEF, /* XXX TBD */ "Logical unit not ready, robotics disabled") }, /* M */ { SST(0x04, 0x16, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration required") }, /* M */ { SST(0x04, 0x17, SS_RDEF, /* XXX TBD */ "Logical unit not ready, calibration required") }, /* M */ { SST(0x04, 0x18, SS_RDEF, /* XXX TBD */ "Logical unit not ready, a door is open") }, /* M */ { SST(0x04, 0x19, SS_RDEF, /* XXX TBD */ "Logical unit not ready, operating in sequential mode") }, /* DT B */ { SST(0x04, 0x1A, SS_RDEF, /* XXX TBD */ "Logical unit not ready, START/STOP UNIT command in progress") }, /* D B */ { SST(0x04, 0x1B, SS_RDEF, /* XXX TBD */ "Logical unit not ready, sanitize in progress") }, /* DT MAEB */ { SST(0x04, 0x1C, SS_RDEF, /* XXX TBD */ "Logical unit not ready, additional power use not yet granted") }, /* D */ { SST(0x04, 0x1D, SS_RDEF, /* XXX TBD */ "Logical unit not ready, configuration in progress") }, /* D */ { SST(0x04, 0x1E, SS_FATAL | ENXIO, "Logical unit not ready, microcode activation required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x1F, SS_FATAL | ENXIO, "Logical unit not ready, microcode download required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x20, SS_RDEF, /* XXX TBD */ "Logical unit not ready, logical unit reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x21, SS_RDEF, /* XXX TBD */ "Logical unit not ready, hard reset required") }, /* DTLPWROMAEBKVF */ { SST(0x04, 0x22, SS_RDEF, /* XXX TBD */ "Logical unit not ready, power cycle required") }, /* DTL WROMAEBKVF */ { SST(0x05, 0x00, SS_RDEF, "Logical unit does not respond to selection") }, /* D WROM BK */ { SST(0x06, 0x00, SS_RDEF, "No reference position found") }, /* DTL WROM BK */ { SST(0x07, 0x00, SS_RDEF, "Multiple peripheral devices selected") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x00, SS_RDEF, "Logical unit communication failure") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x01, SS_RDEF, "Logical unit communication time-out") }, /* DTL WROMAEBKVF */ { SST(0x08, 0x02, SS_RDEF, "Logical unit communication parity error") }, /* DT ROM BK */ { SST(0x08, 0x03, SS_RDEF, "Logical unit communication CRC error (Ultra-DMA/32)") }, /* DTLPWRO K */ { SST(0x08, 0x04, 
SS_RDEF, /* XXX TBD */ "Unreachable copy target") }, /* DT WRO B */ { SST(0x09, 0x00, SS_RDEF, "Track following error") }, /* WRO K */ { SST(0x09, 0x01, SS_RDEF, "Tracking servo failure") }, /* WRO K */ { SST(0x09, 0x02, SS_RDEF, "Focus servo failure") }, /* WRO */ { SST(0x09, 0x03, SS_RDEF, "Spindle servo failure") }, /* DT WRO B */ { SST(0x09, 0x04, SS_RDEF, "Head select fault") }, /* DT RO B */ { SST(0x09, 0x05, SS_RDEF, "Vibration induced tracking error") }, /* DTLPWROMAEBKVF */ { SST(0x0A, 0x00, SS_FATAL | ENOSPC, "Error log overflow") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x00, SS_RDEF, "Warning") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x01, SS_RDEF, "Warning - specified temperature exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x02, SS_RDEF, "Warning - enclosure degraded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x03, SS_RDEF, /* XXX TBD */ "Warning - background self-test failed") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x04, SS_RDEF, /* XXX TBD */ "Warning - background pre-scan detected medium error") }, /* DTLPWRO AEBKVF */ { SST(0x0B, 0x05, SS_RDEF, /* XXX TBD */ "Warning - background medium scan detected medium error") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x06, SS_RDEF, /* XXX TBD */ "Warning - non-volatile cache now volatile") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x07, SS_RDEF, /* XXX TBD */ "Warning - degraded power to non-volatile cache") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x08, SS_RDEF, /* XXX TBD */ "Warning - power loss expected") }, /* D */ { SST(0x0B, 0x09, SS_RDEF, /* XXX TBD */ "Warning - device statistics notification available") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0A, SS_RDEF, /* XXX TBD */ "Warning - High critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0B, SS_RDEF, /* XXX TBD */ "Warning - Low critical temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0C, SS_RDEF, /* XXX TBD */ "Warning - High operating temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0D, SS_RDEF, /* XXX TBD */ "Warning - Low operating temperature limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0E, SS_RDEF, /* XXX TBD */ "Warning - High citical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x0F, SS_RDEF, /* XXX TBD */ "Warning - Low citical humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x10, SS_RDEF, /* XXX TBD */ "Warning - High operating humidity limit exceeded") }, /* DTLPWROMAEBKVF */ { SST(0x0B, 0x11, SS_RDEF, /* XXX TBD */ "Warning - Low operating humidity limit exceeded") }, /* T R */ { SST(0x0C, 0x00, SS_RDEF, "Write error") }, /* K */ { SST(0x0C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Write error - recovered with auto reallocation") }, /* D W O BK */ { SST(0x0C, 0x02, SS_RDEF, "Write error - auto reallocation failed") }, /* D W O BK */ { SST(0x0C, 0x03, SS_RDEF, "Write error - recommend reassignment") }, /* DT W O B */ { SST(0x0C, 0x04, SS_RDEF, "Compression check miscompare error") }, /* DT W O B */ { SST(0x0C, 0x05, SS_RDEF, "Data expansion occurred during compression") }, /* DT W O B */ { SST(0x0C, 0x06, SS_RDEF, "Block not compressible") }, /* R */ { SST(0x0C, 0x07, SS_RDEF, "Write error - recovery needed") }, /* R */ { SST(0x0C, 0x08, SS_RDEF, "Write error - recovery failed") }, /* R */ { SST(0x0C, 0x09, SS_RDEF, "Write error - loss of streaming") }, /* R */ { SST(0x0C, 0x0A, SS_RDEF, "Write error - padding blocks added") }, /* DT WROM B */ { SST(0x0C, 0x0B, SS_RDEF, /* XXX TBD */ "Auxiliary memory write error") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0C, SS_RDEF, /* XXX TBD */ "Write error - unexpected 
unsolicited data") }, /* DTLPWRO AEBKVF */ { SST(0x0C, 0x0D, SS_RDEF, /* XXX TBD */ "Write error - not enough unsolicited data") }, /* DT W O BK */ { SST(0x0C, 0x0E, SS_RDEF, /* XXX TBD */ "Multiple write errors") }, /* R */ { SST(0x0C, 0x0F, SS_RDEF, /* XXX TBD */ "Defects in error window") }, /* D */ { SST(0x0C, 0x10, SS_RDEF, /* XXX TBD */ "Incomplete multiple atomic write operations") }, /* D */ { SST(0x0C, 0x11, SS_RDEF, /* XXX TBD */ "Write error - recovery scan needed") }, /* D */ { SST(0x0C, 0x12, SS_RDEF, /* XXX TBD */ "Write error - insufficient zone resources") }, /* DTLPWRO A K */ { SST(0x0D, 0x00, SS_RDEF, /* XXX TBD */ "Error detected by third party temporary initiator") }, /* DTLPWRO A K */ { SST(0x0D, 0x01, SS_RDEF, /* XXX TBD */ "Third party device failure") }, /* DTLPWRO A K */ { SST(0x0D, 0x02, SS_RDEF, /* XXX TBD */ "Copy target device not reachable") }, /* DTLPWRO A K */ { SST(0x0D, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect copy target device type") }, /* DTLPWRO A K */ { SST(0x0D, 0x04, SS_RDEF, /* XXX TBD */ "Copy target device data underrun") }, /* DTLPWRO A K */ { SST(0x0D, 0x05, SS_RDEF, /* XXX TBD */ "Copy target device data overrun") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x00, SS_RDEF, /* XXX TBD */ "Invalid information unit") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x01, SS_RDEF, /* XXX TBD */ "Information unit too short") }, /* DT PWROMAEBK F */ { SST(0x0E, 0x02, SS_RDEF, /* XXX TBD */ "Information unit too long") }, /* DT P R MAEBK F */ { SST(0x0E, 0x03, SS_RDEF, /* XXX TBD */ "Invalid field in command information unit") }, /* D W O BK */ { SST(0x10, 0x00, SS_RDEF, "ID CRC or ECC error") }, /* DT W O */ { SST(0x10, 0x01, SS_RDEF, /* XXX TBD */ "Logical block guard check failed") }, /* DT W O */ { SST(0x10, 0x02, SS_RDEF, /* XXX TBD */ "Logical block application tag check failed") }, /* DT W O */ { SST(0x10, 0x03, SS_RDEF, /* XXX TBD */ "Logical block reference tag check failed") }, /* T */ { SST(0x10, 0x04, SS_RDEF, /* XXX TBD */ "Logical block protection error on recovered buffer data") }, /* T */ { SST(0x10, 0x05, SS_RDEF, /* XXX TBD */ "Logical block protection method error") }, /* DT WRO BK */ { SST(0x11, 0x00, SS_FATAL|EIO, "Unrecovered read error") }, /* DT WRO BK */ { SST(0x11, 0x01, SS_FATAL|EIO, "Read retries exhausted") }, /* DT WRO BK */ { SST(0x11, 0x02, SS_FATAL|EIO, "Error too long to correct") }, /* DT W O BK */ { SST(0x11, 0x03, SS_FATAL|EIO, "Multiple read errors") }, /* D W O BK */ { SST(0x11, 0x04, SS_FATAL|EIO, "Unrecovered read error - auto reallocate failed") }, /* WRO B */ { SST(0x11, 0x05, SS_FATAL|EIO, "L-EC uncorrectable error") }, /* WRO B */ { SST(0x11, 0x06, SS_FATAL|EIO, "CIRC unrecovered error") }, /* W O B */ { SST(0x11, 0x07, SS_RDEF, "Data re-synchronization error") }, /* T */ { SST(0x11, 0x08, SS_RDEF, "Incomplete block read") }, /* T */ { SST(0x11, 0x09, SS_RDEF, "No gap found") }, /* DT O BK */ { SST(0x11, 0x0A, SS_RDEF, "Miscorrected error") }, /* D W O BK */ { SST(0x11, 0x0B, SS_FATAL|EIO, "Unrecovered read error - recommend reassignment") }, /* D W O BK */ { SST(0x11, 0x0C, SS_FATAL|EIO, "Unrecovered read error - recommend rewrite the data") }, /* DT WRO B */ { SST(0x11, 0x0D, SS_RDEF, "De-compression CRC error") }, /* DT WRO B */ { SST(0x11, 0x0E, SS_RDEF, "Cannot decompress using declared algorithm") }, /* R */ { SST(0x11, 0x0F, SS_RDEF, "Error reading UPC/EAN number") }, /* R */ { SST(0x11, 0x10, SS_RDEF, "Error reading ISRC number") }, /* R */ { SST(0x11, 0x11, SS_RDEF, "Read error - loss of streaming") }, /* DT WROM B */ { 
SST(0x11, 0x12, SS_RDEF, /* XXX TBD */ "Auxiliary memory read error") }, /* DTLPWRO AEBKVF */ { SST(0x11, 0x13, SS_RDEF, /* XXX TBD */ "Read error - failed retransmission request") }, /* D */ { SST(0x11, 0x14, SS_RDEF, /* XXX TBD */ "Read error - LBA marked bad by application client") }, /* D */ { SST(0x11, 0x15, SS_RDEF, /* XXX TBD */ "Write after sanitize required") }, /* D W O BK */ { SST(0x12, 0x00, SS_RDEF, "Address mark not found for ID field") }, /* D W O BK */ { SST(0x13, 0x00, SS_RDEF, "Address mark not found for data field") }, /* DTL WRO BK */ { SST(0x14, 0x00, SS_RDEF, "Recorded entity not found") }, /* DT WRO BK */ { SST(0x14, 0x01, SS_RDEF, "Record not found") }, /* T */ { SST(0x14, 0x02, SS_RDEF, "Filemark or setmark not found") }, /* T */ { SST(0x14, 0x03, SS_RDEF, "End-of-data not found") }, /* T */ { SST(0x14, 0x04, SS_RDEF, "Block sequence error") }, /* DT W O BK */ { SST(0x14, 0x05, SS_RDEF, "Record not found - recommend reassignment") }, /* DT W O BK */ { SST(0x14, 0x06, SS_RDEF, "Record not found - data auto-reallocated") }, /* T */ { SST(0x14, 0x07, SS_RDEF, /* XXX TBD */ "Locate operation failure") }, /* DTL WROM BK */ { SST(0x15, 0x00, SS_RDEF, "Random positioning error") }, /* DTL WROM BK */ { SST(0x15, 0x01, SS_RDEF, "Mechanical positioning error") }, /* DT WRO BK */ { SST(0x15, 0x02, SS_RDEF, "Positioning error detected by read of medium") }, /* D W O BK */ { SST(0x16, 0x00, SS_RDEF, "Data synchronization mark error") }, /* D W O BK */ { SST(0x16, 0x01, SS_RDEF, "Data sync error - data rewritten") }, /* D W O BK */ { SST(0x16, 0x02, SS_RDEF, "Data sync error - recommend rewrite") }, /* D W O BK */ { SST(0x16, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Data sync error - data auto-reallocated") }, /* D W O BK */ { SST(0x16, 0x04, SS_RDEF, "Data sync error - recommend reassignment") }, /* DT WRO BK */ { SST(0x17, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with no error correction applied") }, /* DT WRO BK */ { SST(0x17, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with retries") }, /* DT WRO BK */ { SST(0x17, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with positive head offset") }, /* DT WRO BK */ { SST(0x17, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with negative head offset") }, /* WRO B */ { SST(0x17, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with retries and/or CIRC applied") }, /* D WRO BK */ { SST(0x17, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data using previous sector ID") }, /* D W O BK */ { SST(0x17, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data auto-reallocated") }, /* D WRO BK */ { SST(0x17, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend reassignment") }, /* D WRO BK */ { SST(0x17, 0x08, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - recommend rewrite") }, /* D WRO BK */ { SST(0x17, 0x09, SS_NOP | SSQ_PRINT_SENSE, "Recovered data without ECC - data rewritten") }, /* DT WRO BK */ { SST(0x18, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error correction applied") }, /* D WRO BK */ { SST(0x18, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with error corr. 
& retries applied") }, /* D WRO BK */ { SST(0x18, 0x02, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - data auto-reallocated") }, /* R */ { SST(0x18, 0x03, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with CIRC") }, /* R */ { SST(0x18, 0x04, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with L-EC") }, /* D WRO BK */ { SST(0x18, 0x05, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend reassignment") }, /* D WRO BK */ { SST(0x18, 0x06, SS_NOP | SSQ_PRINT_SENSE, "Recovered data - recommend rewrite") }, /* D W O BK */ { SST(0x18, 0x07, SS_NOP | SSQ_PRINT_SENSE, "Recovered data with ECC - data rewritten") }, /* R */ { SST(0x18, 0x08, SS_RDEF, /* XXX TBD */ "Recovered data with linking") }, /* D O K */ { SST(0x19, 0x00, SS_RDEF, "Defect list error") }, /* D O K */ { SST(0x19, 0x01, SS_RDEF, "Defect list not available") }, /* D O K */ { SST(0x19, 0x02, SS_RDEF, "Defect list error in primary list") }, /* D O K */ { SST(0x19, 0x03, SS_RDEF, "Defect list error in grown list") }, /* DTLPWROMAEBKVF */ { SST(0x1A, 0x00, SS_RDEF, "Parameter list length error") }, /* DTLPWROMAEBKVF */ { SST(0x1B, 0x00, SS_RDEF, "Synchronous data transfer error") }, /* D O BK */ { SST(0x1C, 0x00, SS_RDEF, "Defect list not found") }, /* D O BK */ { SST(0x1C, 0x01, SS_RDEF, "Primary defect list not found") }, /* D O BK */ { SST(0x1C, 0x02, SS_RDEF, "Grown defect list not found") }, /* DT WRO BK */ { SST(0x1D, 0x00, SS_FATAL, "Miscompare during verify operation") }, /* D B */ { SST(0x1D, 0x01, SS_RDEF, /* XXX TBD */ "Miscomparable verify of unmapped LBA") }, /* D W O BK */ { SST(0x1E, 0x00, SS_NOP | SSQ_PRINT_SENSE, "Recovered ID with ECC correction") }, /* D O K */ { SST(0x1F, 0x00, SS_RDEF, "Partial defect list transfer") }, /* DTLPWROMAEBKVF */ { SST(0x20, 0x00, SS_FATAL | EINVAL, "Invalid command operation code") }, /* DT PWROMAEBK */ { SST(0x20, 0x01, SS_RDEF, /* XXX TBD */ "Access denied - initiator pending-enrolled") }, /* DT PWROMAEBK */ { SST(0x20, 0x02, SS_RDEF, /* XXX TBD */ "Access denied - no access rights") }, /* DT PWROMAEBK */ { SST(0x20, 0x03, SS_RDEF, /* XXX TBD */ "Access denied - invalid mgmt ID key") }, /* T */ { SST(0x20, 0x04, SS_RDEF, /* XXX TBD */ "Illegal command while in write capable state") }, /* T */ { SST(0x20, 0x05, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x20, 0x06, SS_RDEF, /* XXX TBD */ "Illegal command while in explicit address mode") }, /* T */ { SST(0x20, 0x07, SS_RDEF, /* XXX TBD */ "Illegal command while in implicit address mode") }, /* DT PWROMAEBK */ { SST(0x20, 0x08, SS_RDEF, /* XXX TBD */ "Access denied - enrollment conflict") }, /* DT PWROMAEBK */ { SST(0x20, 0x09, SS_RDEF, /* XXX TBD */ "Access denied - invalid LU identifier") }, /* DT PWROMAEBK */ { SST(0x20, 0x0A, SS_RDEF, /* XXX TBD */ "Access denied - invalid proxy token") }, /* DT PWROMAEBK */ { SST(0x20, 0x0B, SS_RDEF, /* XXX TBD */ "Access denied - ACL LUN conflict") }, /* T */ { SST(0x20, 0x0C, SS_FATAL | EINVAL, "Illegal command when not in append-only mode") }, /* DT WRO BK */ { SST(0x21, 0x00, SS_FATAL | EINVAL, "Logical block address out of range") }, /* DT WROM BK */ { SST(0x21, 0x01, SS_FATAL | EINVAL, "Invalid element address") }, /* R */ { SST(0x21, 0x02, SS_RDEF, /* XXX TBD */ "Invalid address for write") }, /* R */ { SST(0x21, 0x03, SS_RDEF, /* XXX TBD */ "Invalid write crossing layer jump") }, /* D */ { SST(0x21, 0x04, SS_RDEF, /* XXX TBD */ "Unaligned write command") }, /* D */ { SST(0x21, 0x05, SS_RDEF, /* XXX TBD */ "Write boundary violation") }, /* D */ { SST(0x21, 0x06, SS_RDEF, /* XXX TBD */ "Attempt 
to read invalid data") }, /* D */ { SST(0x21, 0x07, SS_RDEF, /* XXX TBD */ "Read boundary violation") }, /* D */ { SST(0x22, 0x00, SS_FATAL | EINVAL, "Illegal function (use 20 00, 24 00, or 26 00)") }, /* DT P B */ { SST(0x23, 0x00, SS_FATAL | EINVAL, "Invalid token operation, cause not reportable") }, /* DT P B */ { SST(0x23, 0x01, SS_FATAL | EINVAL, "Invalid token operation, unsupported token type") }, /* DT P B */ { SST(0x23, 0x02, SS_FATAL | EINVAL, "Invalid token operation, remote token usage not supported") }, /* DT P B */ { SST(0x23, 0x03, SS_FATAL | EINVAL, "Invalid token operation, remote ROD token creation not supported") }, /* DT P B */ { SST(0x23, 0x04, SS_FATAL | EINVAL, "Invalid token operation, token unknown") }, /* DT P B */ { SST(0x23, 0x05, SS_FATAL | EINVAL, "Invalid token operation, token corrupt") }, /* DT P B */ { SST(0x23, 0x06, SS_FATAL | EINVAL, "Invalid token operation, token revoked") }, /* DT P B */ { SST(0x23, 0x07, SS_FATAL | EINVAL, "Invalid token operation, token expired") }, /* DT P B */ { SST(0x23, 0x08, SS_FATAL | EINVAL, "Invalid token operation, token cancelled") }, /* DT P B */ { SST(0x23, 0x09, SS_FATAL | EINVAL, "Invalid token operation, token deleted") }, /* DT P B */ { SST(0x23, 0x0A, SS_FATAL | EINVAL, "Invalid token operation, invalid token length") }, /* DTLPWROMAEBKVF */ { SST(0x24, 0x00, SS_FATAL | EINVAL, "Invalid field in CDB") }, /* DTLPWRO AEBKVF */ { SST(0x24, 0x01, SS_RDEF, /* XXX TBD */ "CDB decryption error") }, /* T */ { SST(0x24, 0x02, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* T */ { SST(0x24, 0x03, SS_RDEF, /* XXX TBD */ "Obsolete") }, /* F */ { SST(0x24, 0x04, SS_RDEF, /* XXX TBD */ "Security audit value frozen") }, /* F */ { SST(0x24, 0x05, SS_RDEF, /* XXX TBD */ "Security working key frozen") }, /* F */ { SST(0x24, 0x06, SS_RDEF, /* XXX TBD */ "NONCE not unique") }, /* F */ { SST(0x24, 0x07, SS_RDEF, /* XXX TBD */ "NONCE timestamp out of range") }, /* DT R MAEBKV */ { SST(0x24, 0x08, SS_RDEF, /* XXX TBD */ "Invalid XCDB") }, /* DTLPWROMAEBKVF */ { SST(0x25, 0x00, SS_FATAL | ENXIO | SSQ_LOST, "Logical unit not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x00, SS_FATAL | EINVAL, "Invalid field in parameter list") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x01, SS_FATAL | EINVAL, "Parameter not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x02, SS_FATAL | EINVAL, "Parameter value invalid") }, /* DTLPWROMAE K */ { SST(0x26, 0x03, SS_FATAL | EINVAL, "Threshold parameters not supported") }, /* DTLPWROMAEBKVF */ { SST(0x26, 0x04, SS_FATAL | EINVAL, "Invalid release of persistent reservation") }, /* DTLPWRO A BK */ { SST(0x26, 0x05, SS_RDEF, /* XXX TBD */ "Data decryption error") }, /* DTLPWRO K */ { SST(0x26, 0x06, SS_FATAL | EINVAL, "Too many target descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x07, SS_FATAL | EINVAL, "Unsupported target descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x08, SS_FATAL | EINVAL, "Too many segment descriptors") }, /* DTLPWRO K */ { SST(0x26, 0x09, SS_FATAL | EINVAL, "Unsupported segment descriptor type code") }, /* DTLPWRO K */ { SST(0x26, 0x0A, SS_FATAL | EINVAL, "Unexpected inexact segment") }, /* DTLPWRO K */ { SST(0x26, 0x0B, SS_FATAL | EINVAL, "Inline data length exceeded") }, /* DTLPWRO K */ { SST(0x26, 0x0C, SS_FATAL | EINVAL, "Invalid operation for copy source or destination") }, /* DTLPWRO K */ { SST(0x26, 0x0D, SS_FATAL | EINVAL, "Copy segment granularity violation") }, /* DT PWROMAEBK */ { SST(0x26, 0x0E, SS_RDEF, /* XXX TBD */ "Invalid parameter while port is enabled") }, /* F */ { 
SST(0x26, 0x0F, SS_RDEF, /* XXX TBD */ "Invalid data-out buffer integrity check value") }, /* T */ { SST(0x26, 0x10, SS_RDEF, /* XXX TBD */ "Data decryption key fail limit reached") }, /* T */ { SST(0x26, 0x11, SS_RDEF, /* XXX TBD */ "Incomplete key-associated data set") }, /* T */ { SST(0x26, 0x12, SS_RDEF, /* XXX TBD */ "Vendor specific key reference not found") }, /* D */ { SST(0x26, 0x13, SS_RDEF, /* XXX TBD */ "Application tag mode page is invalid") }, /* DT WRO BK */ { SST(0x27, 0x00, SS_FATAL | EACCES, "Write protected") }, /* DT WRO BK */ { SST(0x27, 0x01, SS_FATAL | EACCES, "Hardware write protected") }, /* DT WRO BK */ { SST(0x27, 0x02, SS_FATAL | EACCES, "Logical unit software write protected") }, /* T R */ { SST(0x27, 0x03, SS_FATAL | EACCES, "Associated write protect") }, /* T R */ { SST(0x27, 0x04, SS_FATAL | EACCES, "Persistent write protect") }, /* T R */ { SST(0x27, 0x05, SS_FATAL | EACCES, "Permanent write protect") }, /* R F */ { SST(0x27, 0x06, SS_RDEF, /* XXX TBD */ "Conditional write protect") }, /* D B */ { SST(0x27, 0x07, SS_FATAL | ENOSPC, "Space allocation failed write protect") }, /* D */ { SST(0x27, 0x08, SS_FATAL | EACCES, "Zone is read only") }, /* DTLPWROMAEBKVF */ { SST(0x28, 0x00, SS_FATAL | ENXIO, "Not ready to ready change, medium may have changed") }, /* DT WROM B */ { SST(0x28, 0x01, SS_FATAL | ENXIO, "Import or export element accessed") }, /* R */ { SST(0x28, 0x02, SS_RDEF, /* XXX TBD */ "Format-layer may have changed") }, /* M */ { SST(0x28, 0x03, SS_RDEF, /* XXX TBD */ "Import/export element accessed, medium changed") }, /* * XXX JGibbs - All of these should use the same errno, but I don't * think ENXIO is the correct choice. Should we borrow from * the networking errnos? ECONNRESET anyone? */ /* DTLPWROMAEBKVF */ { SST(0x29, 0x00, SS_FATAL | ENXIO, "Power on, reset, or bus device reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x01, SS_RDEF, "Power on occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x02, SS_RDEF, "SCSI bus reset occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x03, SS_RDEF, "Bus device reset function occurred") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x04, SS_RDEF, "Device internal reset") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x05, SS_RDEF, "Transceiver mode changed to single-ended") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x06, SS_RDEF, "Transceiver mode changed to LVD") }, /* DTLPWROMAEBKVF */ { SST(0x29, 0x07, SS_RDEF, /* XXX TBD */ "I_T nexus loss occurred") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x00, SS_RDEF, "Parameters changed") }, /* DTL WROMAEBKVF */ { SST(0x2A, 0x01, SS_RDEF, "Mode parameters changed") }, /* DTL WROMAE K */ { SST(0x2A, 0x02, SS_RDEF, "Log parameters changed") }, /* DTLPWROMAE K */ { SST(0x2A, 0x03, SS_RDEF, "Reservations preempted") }, /* DTLPWROMAE */ { SST(0x2A, 0x04, SS_RDEF, /* XXX TBD */ "Reservations released") }, /* DTLPWROMAE */ { SST(0x2A, 0x05, SS_RDEF, /* XXX TBD */ "Registrations preempted") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x06, SS_RDEF, /* XXX TBD */ "Asymmetric access state changed") }, /* DTLPWROMAEBKVF */ { SST(0x2A, 0x07, SS_RDEF, /* XXX TBD */ "Implicit asymmetric access state transition failed") }, /* DT WROMAEBKVF */ { SST(0x2A, 0x08, SS_RDEF, /* XXX TBD */ "Priority changed") }, /* D */ { SST(0x2A, 0x09, SS_RDEF, /* XXX TBD */ "Capacity data has changed") }, /* DT */ { SST(0x2A, 0x0A, SS_RDEF, /* XXX TBD */ "Error history I_T nexus cleared") }, /* DT */ { SST(0x2A, 0x0B, SS_RDEF, /* XXX TBD */ "Error history snapshot released") }, /* F */ { SST(0x2A, 0x0C, SS_RDEF, /* XXX TBD */ "Error 
recovery attributes have changed") }, /* T */ { SST(0x2A, 0x0D, SS_RDEF, /* XXX TBD */ "Data encryption capabilities changed") }, /* DT M E V */ { SST(0x2A, 0x10, SS_RDEF, /* XXX TBD */ "Timestamp changed") }, /* T */ { SST(0x2A, 0x11, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by another I_T nexus") }, /* T */ { SST(0x2A, 0x12, SS_RDEF, /* XXX TBD */ "Data encryption parameters changed by vendor specific event") }, /* T */ { SST(0x2A, 0x13, SS_RDEF, /* XXX TBD */ "Data encryption key instance counter has changed") }, /* DT R MAEBKV */ { SST(0x2A, 0x14, SS_RDEF, /* XXX TBD */ "SA creation capabilities data has changed") }, /* T M V */ { SST(0x2A, 0x15, SS_RDEF, /* XXX TBD */ "Medium removal prevention preempted") }, /* DTLPWRO K */ { SST(0x2B, 0x00, SS_RDEF, "Copy cannot execute since host cannot disconnect") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x00, SS_RDEF, "Command sequence error") }, /* */ { SST(0x2C, 0x01, SS_RDEF, "Too many windows specified") }, /* */ { SST(0x2C, 0x02, SS_RDEF, "Invalid combination of windows specified") }, /* R */ { SST(0x2C, 0x03, SS_RDEF, "Current program area is not empty") }, /* R */ { SST(0x2C, 0x04, SS_RDEF, "Current program area is empty") }, /* B */ { SST(0x2C, 0x05, SS_RDEF, /* XXX TBD */ "Illegal power condition request") }, /* R */ { SST(0x2C, 0x06, SS_RDEF, /* XXX TBD */ "Persistent prevent conflict") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x07, SS_RDEF, /* XXX TBD */ "Previous busy status") }, /* DTLPWROMAEBKVF */ { SST(0x2C, 0x08, SS_RDEF, /* XXX TBD */ "Previous task set full status") }, /* DTLPWROM EBKVF */ { SST(0x2C, 0x09, SS_RDEF, /* XXX TBD */ "Previous reservation conflict status") }, /* F */ { SST(0x2C, 0x0A, SS_RDEF, /* XXX TBD */ "Partition or collection contains user objects") }, /* T */ { SST(0x2C, 0x0B, SS_RDEF, /* XXX TBD */ "Not reserved") }, /* D */ { SST(0x2C, 0x0C, SS_RDEF, /* XXX TBD */ "ORWRITE generation does not match") }, /* D */ { SST(0x2C, 0x0D, SS_RDEF, /* XXX TBD */ "Reset write pointer not allowed") }, /* D */ { SST(0x2C, 0x0E, SS_RDEF, /* XXX TBD */ "Zone is offline") }, /* D */ { SST(0x2C, 0x0F, SS_RDEF, /* XXX TBD */ "Stream not open") }, /* D */ { SST(0x2C, 0x10, SS_RDEF, /* XXX TBD */ "Unwritten data in zone") }, /* T */ { SST(0x2D, 0x00, SS_RDEF, "Overwrite error on update in place") }, /* R */ { SST(0x2E, 0x00, SS_RDEF, /* XXX TBD */ "Insufficient time for operation") }, /* D */ { SST(0x2E, 0x01, SS_RDEF, /* XXX TBD */ "Command timeout before processing") }, /* D */ { SST(0x2E, 0x02, SS_RDEF, /* XXX TBD */ "Command timeout during processing") }, /* D */ { SST(0x2E, 0x03, SS_RDEF, /* XXX TBD */ "Command timeout during processing due to error recovery") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x00, SS_RDEF, "Commands cleared by another initiator") }, /* D */ { SST(0x2F, 0x01, SS_RDEF, /* XXX TBD */ "Commands cleared by power loss notification") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x02, SS_RDEF, /* XXX TBD */ "Commands cleared by device server") }, /* DTLPWROMAEBKVF */ { SST(0x2F, 0x03, SS_RDEF, /* XXX TBD */ "Some commands cleared by queuing layer event") }, /* DT WROM BK */ { SST(0x30, 0x00, SS_RDEF, "Incompatible medium installed") }, /* DT WRO BK */ { SST(0x30, 0x01, SS_RDEF, "Cannot read medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x02, SS_RDEF, "Cannot read medium - incompatible format") }, /* DT R K */ { SST(0x30, 0x03, SS_RDEF, "Cleaning cartridge installed") }, /* DT WRO BK */ { SST(0x30, 0x04, SS_RDEF, "Cannot write medium - unknown format") }, /* DT WRO BK */ { SST(0x30, 0x05, SS_RDEF, 
"Cannot write medium - incompatible format") }, /* DT WRO B */ { SST(0x30, 0x06, SS_RDEF, "Cannot format medium - incompatible medium") }, /* DTL WROMAEBKVF */ { SST(0x30, 0x07, SS_RDEF, "Cleaning failure") }, /* R */ { SST(0x30, 0x08, SS_RDEF, "Cannot write - application code mismatch") }, /* R */ { SST(0x30, 0x09, SS_RDEF, "Current session not fixated for append") }, /* DT WRO AEBK */ { SST(0x30, 0x0A, SS_RDEF, /* XXX TBD */ "Cleaning request rejected") }, /* T */ { SST(0x30, 0x0C, SS_RDEF, /* XXX TBD */ "WORM medium - overwrite attempted") }, /* T */ { SST(0x30, 0x0D, SS_RDEF, /* XXX TBD */ "WORM medium - integrity check") }, /* R */ { SST(0x30, 0x10, SS_RDEF, /* XXX TBD */ "Medium not formatted") }, /* M */ { SST(0x30, 0x11, SS_RDEF, /* XXX TBD */ "Incompatible volume type") }, /* M */ { SST(0x30, 0x12, SS_RDEF, /* XXX TBD */ "Incompatible volume qualifier") }, /* M */ { SST(0x30, 0x13, SS_RDEF, /* XXX TBD */ "Cleaning volume expired") }, /* DT WRO BK */ { SST(0x31, 0x00, SS_RDEF, "Medium format corrupted") }, /* D L RO B */ { SST(0x31, 0x01, SS_RDEF, "Format command failed") }, /* R */ { SST(0x31, 0x02, SS_RDEF, /* XXX TBD */ "Zoned formatting failed due to spare linking") }, /* D B */ { SST(0x31, 0x03, SS_RDEF, /* XXX TBD */ "SANITIZE command failed") }, /* D W O BK */ { SST(0x32, 0x00, SS_RDEF, "No defect spare location available") }, /* D W O BK */ { SST(0x32, 0x01, SS_RDEF, "Defect list update failure") }, /* T */ { SST(0x33, 0x00, SS_RDEF, "Tape length error") }, /* DTLPWROMAEBKVF */ { SST(0x34, 0x00, SS_RDEF, "Enclosure failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x00, SS_RDEF, "Enclosure services failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x01, SS_RDEF, "Unsupported enclosure function") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x02, SS_RDEF, "Enclosure services unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x03, SS_RDEF, "Enclosure services transfer failure") }, /* DTLPWROMAEBKVF */ { SST(0x35, 0x04, SS_RDEF, "Enclosure services transfer refused") }, /* DTL WROMAEBKVF */ { SST(0x35, 0x05, SS_RDEF, /* XXX TBD */ "Enclosure services checksum error") }, /* L */ { SST(0x36, 0x00, SS_RDEF, "Ribbon, ink, or toner failure") }, /* DTL WROMAEBKVF */ { SST(0x37, 0x00, SS_RDEF, "Rounded parameter") }, /* B */ { SST(0x38, 0x00, SS_RDEF, /* XXX TBD */ "Event status notification") }, /* B */ { SST(0x38, 0x02, SS_RDEF, /* XXX TBD */ "ESN - power management class event") }, /* B */ { SST(0x38, 0x04, SS_RDEF, /* XXX TBD */ "ESN - media class event") }, /* B */ { SST(0x38, 0x06, SS_RDEF, /* XXX TBD */ "ESN - device busy class event") }, /* D */ { SST(0x38, 0x07, SS_RDEF, /* XXX TBD */ "Thin provisioning soft threshold reached") }, /* DTL WROMAE K */ { SST(0x39, 0x00, SS_RDEF, "Saving parameters not supported") }, /* DTL WROM BK */ { SST(0x3A, 0x00, SS_FATAL | ENXIO, "Medium not present") }, /* DT WROM BK */ { SST(0x3A, 0x01, SS_FATAL | ENXIO, "Medium not present - tray closed") }, /* DT WROM BK */ { SST(0x3A, 0x02, SS_FATAL | ENXIO, "Medium not present - tray open") }, /* DT WROM B */ { SST(0x3A, 0x03, SS_RDEF, /* XXX TBD */ "Medium not present - loadable") }, /* DT WRO B */ { SST(0x3A, 0x04, SS_RDEF, /* XXX TBD */ "Medium not present - medium auxiliary memory accessible") }, /* TL */ { SST(0x3B, 0x00, SS_RDEF, "Sequential positioning error") }, /* T */ { SST(0x3B, 0x01, SS_RDEF, "Tape position error at beginning-of-medium") }, /* T */ { SST(0x3B, 0x02, SS_RDEF, "Tape position error at end-of-medium") }, /* L */ { SST(0x3B, 0x03, SS_RDEF, "Tape or electronic vertical forms unit not ready") 
}, /* L */ { SST(0x3B, 0x04, SS_RDEF, "Slew failure") }, /* L */ { SST(0x3B, 0x05, SS_RDEF, "Paper jam") }, /* L */ { SST(0x3B, 0x06, SS_RDEF, "Failed to sense top-of-form") }, /* L */ { SST(0x3B, 0x07, SS_RDEF, "Failed to sense bottom-of-form") }, /* T */ { SST(0x3B, 0x08, SS_RDEF, "Reposition error") }, /* */ { SST(0x3B, 0x09, SS_RDEF, "Read past end of medium") }, /* */ { SST(0x3B, 0x0A, SS_RDEF, "Read past beginning of medium") }, /* */ { SST(0x3B, 0x0B, SS_RDEF, "Position past end of medium") }, /* T */ { SST(0x3B, 0x0C, SS_RDEF, "Position past beginning of medium") }, /* DT WROM BK */ { SST(0x3B, 0x0D, SS_FATAL | ENOSPC, "Medium destination element full") }, /* DT WROM BK */ { SST(0x3B, 0x0E, SS_RDEF, "Medium source element empty") }, /* R */ { SST(0x3B, 0x0F, SS_RDEF, "End of medium reached") }, /* DT WROM BK */ { SST(0x3B, 0x11, SS_RDEF, "Medium magazine not accessible") }, /* DT WROM BK */ { SST(0x3B, 0x12, SS_RDEF, "Medium magazine removed") }, /* DT WROM BK */ { SST(0x3B, 0x13, SS_RDEF, "Medium magazine inserted") }, /* DT WROM BK */ { SST(0x3B, 0x14, SS_RDEF, "Medium magazine locked") }, /* DT WROM BK */ { SST(0x3B, 0x15, SS_RDEF, "Medium magazine unlocked") }, /* R */ { SST(0x3B, 0x16, SS_RDEF, /* XXX TBD */ "Mechanical positioning or changer error") }, /* F */ { SST(0x3B, 0x17, SS_RDEF, /* XXX TBD */ "Read past end of user object") }, /* M */ { SST(0x3B, 0x18, SS_RDEF, /* XXX TBD */ "Element disabled") }, /* M */ { SST(0x3B, 0x19, SS_RDEF, /* XXX TBD */ "Element enabled") }, /* M */ { SST(0x3B, 0x1A, SS_RDEF, /* XXX TBD */ "Data transfer device removed") }, /* M */ { SST(0x3B, 0x1B, SS_RDEF, /* XXX TBD */ "Data transfer device inserted") }, /* T */ { SST(0x3B, 0x1C, SS_RDEF, /* XXX TBD */ "Too many logical objects on partition to support operation") }, /* DTLPWROMAE K */ { SST(0x3D, 0x00, SS_RDEF, "Invalid bits in IDENTIFY message") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x00, SS_RDEF, "Logical unit has not self-configured yet") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x01, SS_RDEF, "Logical unit failure") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x02, SS_RDEF, "Timeout on logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x03, SS_RDEF, /* XXX TBD */ "Logical unit failed self-test") }, /* DTLPWROMAEBKVF */ { SST(0x3E, 0x04, SS_RDEF, /* XXX TBD */ "Logical unit unable to update self-test log") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x00, SS_RDEF, "Target operating conditions have changed") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x01, SS_RDEF, "Microcode has been changed") }, /* DTLPWROM BK */ { SST(0x3F, 0x02, SS_RDEF, "Changed operating definition") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x03, SS_RDEF, "INQUIRY data has changed") }, /* DT WROMAEBK */ { SST(0x3F, 0x04, SS_RDEF, "Component device attached") }, /* DT WROMAEBK */ { SST(0x3F, 0x05, SS_RDEF, "Device identifier changed") }, /* DT WROMAEB */ { SST(0x3F, 0x06, SS_RDEF, "Redundancy group created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x07, SS_RDEF, "Redundancy group deleted") }, /* DT WROMAEB */ { SST(0x3F, 0x08, SS_RDEF, "Spare created or modified") }, /* DT WROMAEB */ { SST(0x3F, 0x09, SS_RDEF, "Spare deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0A, SS_RDEF, "Volume set created or modified") }, /* DT WROMAEBK */ { SST(0x3F, 0x0B, SS_RDEF, "Volume set deleted") }, /* DT WROMAEBK */ { SST(0x3F, 0x0C, SS_RDEF, "Volume set deassigned") }, /* DT WROMAEBK */ { SST(0x3F, 0x0D, SS_RDEF, "Volume set reassigned") }, /* DTLPWROMAE */ { SST(0x3F, 0x0E, SS_RDEF | SSQ_RESCAN , "Reported LUNs data has changed") }, /* DTLPWROMAEBKVF */ { 
SST(0x3F, 0x0F, SS_RDEF, /* XXX TBD */ "Echo buffer overwritten") }, /* DT WROM B */ { SST(0x3F, 0x10, SS_RDEF, /* XXX TBD */ "Medium loadable") }, /* DT WROM B */ { SST(0x3F, 0x11, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory accessible") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x12, SS_RDEF, /* XXX TBD */ "iSCSI IP address added") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x13, SS_RDEF, /* XXX TBD */ "iSCSI IP address removed") }, /* DTLPWR MAEBK F */ { SST(0x3F, 0x14, SS_RDEF, /* XXX TBD */ "iSCSI IP address changed") }, /* DTLPWR MAEBK */ { SST(0x3F, 0x15, SS_RDEF, /* XXX TBD */ "Inspect referrals sense descriptors") }, /* DTLPWROMAEBKVF */ { SST(0x3F, 0x16, SS_RDEF, /* XXX TBD */ "Microcode has been changed without reset") }, /* D */ { SST(0x3F, 0x17, SS_RDEF, /* XXX TBD */ "Zone transition to full") }, /* D */ { SST(0x40, 0x00, SS_RDEF, "RAM failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x40, 0x80, SS_RDEF, "Diagnostic failure: ASCQ = Component ID") }, /* DTLPWROMAEBKVF */ { SST(0x40, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x80->0xFF */ /* D */ { SST(0x41, 0x00, SS_RDEF, "Data path failure") }, /* deprecated - use 40 NN instead */ /* D */ { SST(0x42, 0x00, SS_RDEF, "Power-on or self-test failure") }, /* deprecated - use 40 NN instead */ /* DTLPWROMAEBKVF */ { SST(0x43, 0x00, SS_RDEF, "Message error") }, /* DTLPWROMAEBKVF */ { SST(0x44, 0x00, SS_RDEF, "Internal target failure") }, /* DT P MAEBKVF */ { SST(0x44, 0x01, SS_RDEF, /* XXX TBD */ "Persistent reservation information lost") }, /* DT B */ { SST(0x44, 0x71, SS_RDEF, /* XXX TBD */ "ATA device failed set features") }, /* DTLPWROMAEBKVF */ { SST(0x45, 0x00, SS_RDEF, "Select or reselect failure") }, /* DTLPWROM BK */ { SST(0x46, 0x00, SS_RDEF, "Unsuccessful soft reset") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x00, SS_RDEF, "SCSI parity error") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x01, SS_RDEF, /* XXX TBD */ "Data phase CRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x02, SS_RDEF, /* XXX TBD */ "SCSI parity error detected during ST data phase") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x03, SS_RDEF, /* XXX TBD */ "Information unit iuCRC error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x04, SS_RDEF, /* XXX TBD */ "Asynchronous information protection error detected") }, /* DTLPWROMAEBKVF */ { SST(0x47, 0x05, SS_RDEF, /* XXX TBD */ "Protocol service CRC error") }, /* DT MAEBKVF */ { SST(0x47, 0x06, SS_RDEF, /* XXX TBD */ "PHY test function in progress") }, /* DT PWROMAEBK */ { SST(0x47, 0x7F, SS_RDEF, /* XXX TBD */ "Some commands cleared by iSCSI protocol event") }, /* DTLPWROMAEBKVF */ { SST(0x48, 0x00, SS_RDEF, "Initiator detected error message received") }, /* DTLPWROMAEBKVF */ { SST(0x49, 0x00, SS_RDEF, "Invalid message error") }, /* DTLPWROMAEBKVF */ { SST(0x4A, 0x00, SS_RDEF, "Command phase error") }, /* DTLPWROMAEBKVF */ { SST(0x4B, 0x00, SS_RDEF, "Data phase error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x01, SS_RDEF, /* XXX TBD */ "Invalid target port transfer tag received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x02, SS_RDEF, /* XXX TBD */ "Too much write data") }, /* DT PWROMAEBK */ { SST(0x4B, 0x03, SS_RDEF, /* XXX TBD */ "ACK/NAK timeout") }, /* DT PWROMAEBK */ { SST(0x4B, 0x04, SS_RDEF, /* XXX TBD */ "NAK received") }, /* DT PWROMAEBK */ { SST(0x4B, 0x05, SS_RDEF, /* XXX TBD */ "Data offset error") }, /* DT PWROMAEBK */ { SST(0x4B, 0x06, SS_RDEF, /* XXX TBD */ "Initiator response timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x07, SS_RDEF, /* XXX TBD */ "Connection lost") }, /* DT PWROMAEBK F 
*/ { SST(0x4B, 0x08, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x09, SS_RDEF, /* XXX TBD */ "Data-in buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0A, SS_RDEF, /* XXX TBD */ "Data-in buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0B, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer size") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0C, SS_RDEF, /* XXX TBD */ "Data-out buffer overflow - data buffer descriptor area") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0D, SS_RDEF, /* XXX TBD */ "Data-out buffer error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0E, SS_RDEF, /* XXX TBD */ "PCIe fabric error") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x0F, SS_RDEF, /* XXX TBD */ "PCIe completion timeout") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x10, SS_RDEF, /* XXX TBD */ "PCIe completer abort") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x11, SS_RDEF, /* XXX TBD */ "PCIe poisoned TLP received") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x12, SS_RDEF, /* XXX TBD */ "PCIe ECRC check failed") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x13, SS_RDEF, /* XXX TBD */ "PCIe unsupported request") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x14, SS_RDEF, /* XXX TBD */ "PCIe ACS violation") }, /* DT PWROMAEBK F */ { SST(0x4B, 0x15, SS_RDEF, /* XXX TBD */ "PCIe TLP prefix blocked") }, /* DTLPWROMAEBKVF */ { SST(0x4C, 0x00, SS_RDEF, "Logical unit failed self-configuration") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0x00, SS_RDEF, "Tagged overlapped commands: ASCQ = Queue tag ID") }, /* DTLPWROMAEBKVF */ { SST(0x4D, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00->0xFF */ /* DTLPWROMAEBKVF */ { SST(0x4E, 0x00, SS_RDEF, "Overlapped commands attempted") }, /* T */ { SST(0x50, 0x00, SS_RDEF, "Write append error") }, /* T */ { SST(0x50, 0x01, SS_RDEF, "Write append position error") }, /* T */ { SST(0x50, 0x02, SS_RDEF, "Position error related to timing") }, /* T RO */ { SST(0x51, 0x00, SS_RDEF, "Erase failure") }, /* R */ { SST(0x51, 0x01, SS_RDEF, /* XXX TBD */ "Erase failure - incomplete erase operation detected") }, /* T */ { SST(0x52, 0x00, SS_RDEF, "Cartridge fault") }, /* DTL WROM BK */ { SST(0x53, 0x00, SS_RDEF, "Media load or eject failed") }, /* T */ { SST(0x53, 0x01, SS_RDEF, "Unload tape failure") }, /* DT WROM BK */ { SST(0x53, 0x02, SS_RDEF, "Medium removal prevented") }, /* M */ { SST(0x53, 0x03, SS_RDEF, /* XXX TBD */ "Medium removal prevented by data transfer element") }, /* T */ { SST(0x53, 0x04, SS_RDEF, /* XXX TBD */ "Medium thread or unthread failure") }, /* M */ { SST(0x53, 0x05, SS_RDEF, /* XXX TBD */ "Volume identifier invalid") }, /* T */ { SST(0x53, 0x06, SS_RDEF, /* XXX TBD */ "Volume identifier missing") }, /* M */ { SST(0x53, 0x07, SS_RDEF, /* XXX TBD */ "Duplicate volume identifier") }, /* M */ { SST(0x53, 0x08, SS_RDEF, /* XXX TBD */ "Element status unknown") }, /* M */ { SST(0x53, 0x09, SS_RDEF, /* XXX TBD */ "Data transfer device error - load failed") }, /* M */ { SST(0x53, 0x0A, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload failed") }, /* M */ { SST(0x53, 0x0B, SS_RDEF, /* XXX TBD */ "Data transfer device error - unload missing") }, /* M */ { SST(0x53, 0x0C, SS_RDEF, /* XXX TBD */ "Data transfer device error - eject failed") }, /* M */ { SST(0x53, 0x0D, SS_RDEF, /* XXX TBD */ "Data transfer device error - library communication failed") }, /* P */ { SST(0x54, 0x00, SS_RDEF, "SCSI to host system interface failure") }, /* P */ { SST(0x55, 0x00, SS_RDEF, "System resource failure") }, /* D O BK */ { SST(0x55,
0x01, SS_FATAL | ENOSPC, "System buffer full") }, /* DTLPWROMAE K */ { SST(0x55, 0x02, SS_RDEF, /* XXX TBD */ "Insufficient reservation resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x03, SS_RDEF, /* XXX TBD */ "Insufficient resources") }, /* DTLPWROMAE K */ { SST(0x55, 0x04, SS_RDEF, /* XXX TBD */ "Insufficient registration resources") }, /* DT PWROMAEBK */ { SST(0x55, 0x05, SS_RDEF, /* XXX TBD */ "Insufficient access control resources") }, /* DT WROM B */ { SST(0x55, 0x06, SS_RDEF, /* XXX TBD */ "Auxiliary memory out of space") }, /* F */ { SST(0x55, 0x07, SS_RDEF, /* XXX TBD */ "Quota error") }, /* T */ { SST(0x55, 0x08, SS_RDEF, /* XXX TBD */ "Maximum number of supplemental decryption keys exceeded") }, /* M */ { SST(0x55, 0x09, SS_RDEF, /* XXX TBD */ "Medium auxiliary memory not accessible") }, /* M */ { SST(0x55, 0x0A, SS_RDEF, /* XXX TBD */ "Data currently unavailable") }, /* DTLPWROMAEBKVF */ { SST(0x55, 0x0B, SS_RDEF, /* XXX TBD */ "Insufficient power for operation") }, /* DT P B */ { SST(0x55, 0x0C, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD") }, /* DT P B */ { SST(0x55, 0x0D, SS_RDEF, /* XXX TBD */ "Insufficient resources to create ROD token") }, /* D */ { SST(0x55, 0x0E, SS_RDEF, /* XXX TBD */ "Insufficient zone resources") }, /* D */ { SST(0x55, 0x0F, SS_RDEF, /* XXX TBD */ "Insufficient zone resources to complete write") }, /* D */ { SST(0x55, 0x10, SS_RDEF, /* XXX TBD */ "Maximum number of streams open") }, /* R */ { SST(0x57, 0x00, SS_RDEF, "Unable to recover table-of-contents") }, /* O */ { SST(0x58, 0x00, SS_RDEF, "Generation does not exist") }, /* O */ { SST(0x59, 0x00, SS_RDEF, "Updated block read") }, /* DTLPWRO BK */ { SST(0x5A, 0x00, SS_RDEF, "Operator request or state change input") }, /* DT WROM BK */ { SST(0x5A, 0x01, SS_RDEF, "Operator medium removal request") }, /* DT WRO A BK */ { SST(0x5A, 0x02, SS_RDEF, "Operator selected write protect") }, /* DT WRO A BK */ { SST(0x5A, 0x03, SS_RDEF, "Operator selected write permit") }, /* DTLPWROM K */ { SST(0x5B, 0x00, SS_RDEF, "Log exception") }, /* DTLPWROM K */ { SST(0x5B, 0x01, SS_RDEF, "Threshold condition met") }, /* DTLPWROM K */ { SST(0x5B, 0x02, SS_RDEF, "Log counter at maximum") }, /* DTLPWROM K */ { SST(0x5B, 0x03, SS_RDEF, "Log list codes exhausted") }, /* D O */ { SST(0x5C, 0x00, SS_RDEF, "RPL status change") }, /* D O */ { SST(0x5C, 0x01, SS_NOP | SSQ_PRINT_SENSE, "Spindles synchronized") }, /* D O */ { SST(0x5C, 0x02, SS_RDEF, "Spindles not synchronized") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0x00, SS_RDEF, "Failure prediction threshold exceeded") }, /* R B */ { SST(0x5D, 0x01, SS_RDEF, /* XXX TBD */ "Media failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x02, SS_RDEF, /* XXX TBD */ "Logical unit failure prediction threshold exceeded") }, /* R */ { SST(0x5D, 0x03, SS_RDEF, /* XXX TBD */ "Spare area exhaustion prediction threshold exceeded") }, /* D B */ { SST(0x5D, 0x10, SS_RDEF, /* XXX TBD */ "Hardware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x11, SS_RDEF, /* XXX TBD */ "Hardware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x12, SS_RDEF, /* XXX TBD */ "Hardware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x13, SS_RDEF, /* XXX TBD */ "Hardware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x14, SS_RDEF, /* XXX TBD */ "Hardware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x15, SS_RDEF, /* XXX TBD */ "Hardware impending failure access times too 
high") }, /* D B */ { SST(0x5D, 0x16, SS_RDEF, /* XXX TBD */ "Hardware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x17, SS_RDEF, /* XXX TBD */ "Hardware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x18, SS_RDEF, /* XXX TBD */ "Hardware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x19, SS_RDEF, /* XXX TBD */ "Hardware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x1A, SS_RDEF, /* XXX TBD */ "Hardware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x1B, SS_RDEF, /* XXX TBD */ "Hardware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x1C, SS_RDEF, /* XXX TBD */ "Hardware impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x20, SS_RDEF, /* XXX TBD */ "Controller impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x21, SS_RDEF, /* XXX TBD */ "Controller impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x22, SS_RDEF, /* XXX TBD */ "Controller impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x23, SS_RDEF, /* XXX TBD */ "Controller impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x24, SS_RDEF, /* XXX TBD */ "Controller impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x25, SS_RDEF, /* XXX TBD */ "Controller impending failure access times too high") }, /* D B */ { SST(0x5D, 0x26, SS_RDEF, /* XXX TBD */ "Controller impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x27, SS_RDEF, /* XXX TBD */ "Controller impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x28, SS_RDEF, /* XXX TBD */ "Controller impending failure controller detected") }, /* D B */ { SST(0x5D, 0x29, SS_RDEF, /* XXX TBD */ "Controller impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x2A, SS_RDEF, /* XXX TBD */ "Controller impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x2B, SS_RDEF, /* XXX TBD */ "Controller impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x2C, SS_RDEF, /* XXX TBD */ "Controller impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x30, SS_RDEF, /* XXX TBD */ "Data channel impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x31, SS_RDEF, /* XXX TBD */ "Data channel impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x32, SS_RDEF, /* XXX TBD */ "Data channel impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x33, SS_RDEF, /* XXX TBD */ "Data channel impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x34, SS_RDEF, /* XXX TBD */ "Data channel impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x35, SS_RDEF, /* XXX TBD */ "Data channel impending failure access times too high") }, /* D B */ { SST(0x5D, 0x36, SS_RDEF, /* XXX TBD */ "Data channel impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x37, SS_RDEF, /* XXX TBD */ "Data channel impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x38, SS_RDEF, /* XXX TBD */ "Data channel impending failure controller detected") }, /* D B */ { SST(0x5D, 0x39, SS_RDEF, /* XXX TBD */ "Data channel impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x3A, SS_RDEF, /* XXX TBD */ "Data channel impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x3B, SS_RDEF, /* XXX TBD */ "Data channel impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x3C, 
SS_RDEF, /* XXX TBD */ "Data channel impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x40, SS_RDEF, /* XXX TBD */ "Servo impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x41, SS_RDEF, /* XXX TBD */ "Servo impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x42, SS_RDEF, /* XXX TBD */ "Servo impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x43, SS_RDEF, /* XXX TBD */ "Servo impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x44, SS_RDEF, /* XXX TBD */ "Servo impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x45, SS_RDEF, /* XXX TBD */ "Servo impending failure access times too high") }, /* D B */ { SST(0x5D, 0x46, SS_RDEF, /* XXX TBD */ "Servo impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x47, SS_RDEF, /* XXX TBD */ "Servo impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x48, SS_RDEF, /* XXX TBD */ "Servo impending failure controller detected") }, /* D B */ { SST(0x5D, 0x49, SS_RDEF, /* XXX TBD */ "Servo impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x4A, SS_RDEF, /* XXX TBD */ "Servo impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x4B, SS_RDEF, /* XXX TBD */ "Servo impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x4C, SS_RDEF, /* XXX TBD */ "Servo impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x50, SS_RDEF, /* XXX TBD */ "Spindle impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x51, SS_RDEF, /* XXX TBD */ "Spindle impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x52, SS_RDEF, /* XXX TBD */ "Spindle impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x53, SS_RDEF, /* XXX TBD */ "Spindle impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x54, SS_RDEF, /* XXX TBD */ "Spindle impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x55, SS_RDEF, /* XXX TBD */ "Spindle impending failure access times too high") }, /* D B */ { SST(0x5D, 0x56, SS_RDEF, /* XXX TBD */ "Spindle impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x57, SS_RDEF, /* XXX TBD */ "Spindle impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x58, SS_RDEF, /* XXX TBD */ "Spindle impending failure controller detected") }, /* D B */ { SST(0x5D, 0x59, SS_RDEF, /* XXX TBD */ "Spindle impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x5A, SS_RDEF, /* XXX TBD */ "Spindle impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x5B, SS_RDEF, /* XXX TBD */ "Spindle impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x5C, SS_RDEF, /* XXX TBD */ "Spindle impending failure drive calibration retry count") }, /* D B */ { SST(0x5D, 0x60, SS_RDEF, /* XXX TBD */ "Firmware impending failure general hard drive failure") }, /* D B */ { SST(0x5D, 0x61, SS_RDEF, /* XXX TBD */ "Firmware impending failure drive error rate too high") }, /* D B */ { SST(0x5D, 0x62, SS_RDEF, /* XXX TBD */ "Firmware impending failure data error rate too high") }, /* D B */ { SST(0x5D, 0x63, SS_RDEF, /* XXX TBD */ "Firmware impending failure seek error rate too high") }, /* D B */ { SST(0x5D, 0x64, SS_RDEF, /* XXX TBD */ "Firmware impending failure too many block reassigns") }, /* D B */ { SST(0x5D, 0x65, SS_RDEF, /* XXX TBD */ "Firmware impending failure access times too high") }, /* D B */ { SST(0x5D, 0x66, SS_RDEF, /* XXX TBD */ 
"Firmware impending failure start unit times too high") }, /* D B */ { SST(0x5D, 0x67, SS_RDEF, /* XXX TBD */ "Firmware impending failure channel parametrics") }, /* D B */ { SST(0x5D, 0x68, SS_RDEF, /* XXX TBD */ "Firmware impending failure controller detected") }, /* D B */ { SST(0x5D, 0x69, SS_RDEF, /* XXX TBD */ "Firmware impending failure throughput performance") }, /* D B */ { SST(0x5D, 0x6A, SS_RDEF, /* XXX TBD */ "Firmware impending failure seek time performance") }, /* D B */ { SST(0x5D, 0x6B, SS_RDEF, /* XXX TBD */ "Firmware impending failure spin-up retry count") }, /* D B */ { SST(0x5D, 0x6C, SS_RDEF, /* XXX TBD */ "Firmware impending failure drive calibration retry count") }, /* DTLPWROMAEBKVF */ { SST(0x5D, 0xFF, SS_RDEF, "Failure prediction threshold exceeded (false)") }, /* DTLPWRO A K */ { SST(0x5E, 0x00, SS_RDEF, "Low power condition on") }, /* DTLPWRO A K */ { SST(0x5E, 0x01, SS_RDEF, "Idle condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x02, SS_RDEF, "Standby condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x03, SS_RDEF, "Idle condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x04, SS_RDEF, "Standby condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x05, SS_RDEF, "Idle-B condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x06, SS_RDEF, "Idle-B condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x07, SS_RDEF, "Idle-C condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x08, SS_RDEF, "Idle-C condition activated by command") }, /* DTLPWRO A K */ { SST(0x5E, 0x09, SS_RDEF, "Standby-Y condition activated by timer") }, /* DTLPWRO A K */ { SST(0x5E, 0x0A, SS_RDEF, "Standby-Y condition activated by command") }, /* B */ { SST(0x5E, 0x41, SS_RDEF, /* XXX TBD */ "Power state change to active") }, /* B */ { SST(0x5E, 0x42, SS_RDEF, /* XXX TBD */ "Power state change to idle") }, /* B */ { SST(0x5E, 0x43, SS_RDEF, /* XXX TBD */ "Power state change to standby") }, /* B */ { SST(0x5E, 0x45, SS_RDEF, /* XXX TBD */ "Power state change to sleep") }, /* BK */ { SST(0x5E, 0x47, SS_RDEF, /* XXX TBD */ "Power state change to device control") }, /* */ { SST(0x60, 0x00, SS_RDEF, "Lamp failure") }, /* */ { SST(0x61, 0x00, SS_RDEF, "Video acquisition error") }, /* */ { SST(0x61, 0x01, SS_RDEF, "Unable to acquire video") }, /* */ { SST(0x61, 0x02, SS_RDEF, "Out of focus") }, /* */ { SST(0x62, 0x00, SS_RDEF, "Scan head positioning error") }, /* R */ { SST(0x63, 0x00, SS_RDEF, "End of user area encountered on this track") }, /* R */ { SST(0x63, 0x01, SS_FATAL | ENOSPC, "Packet does not fit in available space") }, /* R */ { SST(0x64, 0x00, SS_FATAL | ENXIO, "Illegal mode for this track") }, /* R */ { SST(0x64, 0x01, SS_RDEF, "Invalid packet size") }, /* DTLPWROMAEBKVF */ { SST(0x65, 0x00, SS_RDEF, "Voltage fault") }, /* */ { SST(0x66, 0x00, SS_RDEF, "Automatic document feeder cover up") }, /* */ { SST(0x66, 0x01, SS_RDEF, "Automatic document feeder lift up") }, /* */ { SST(0x66, 0x02, SS_RDEF, "Document jam in automatic document feeder") }, /* */ { SST(0x66, 0x03, SS_RDEF, "Document miss feed automatic in document feeder") }, /* A */ { SST(0x67, 0x00, SS_RDEF, "Configuration failure") }, /* A */ { SST(0x67, 0x01, SS_RDEF, "Configuration of incapable logical units failed") }, /* A */ { SST(0x67, 0x02, SS_RDEF, "Add logical unit failed") }, /* A */ { SST(0x67, 0x03, SS_RDEF, "Modification of logical unit failed") }, /* A */ { SST(0x67, 0x04, SS_RDEF, "Exchange of logical unit failed") }, /* A */ { 
SST(0x67, 0x05, SS_RDEF, "Remove of logical unit failed") }, /* A */ { SST(0x67, 0x06, SS_RDEF, "Attachment of logical unit failed") }, /* A */ { SST(0x67, 0x07, SS_RDEF, "Creation of logical unit failed") }, /* A */ { SST(0x67, 0x08, SS_RDEF, /* XXX TBD */ "Assign failure occurred") }, /* A */ { SST(0x67, 0x09, SS_RDEF, /* XXX TBD */ "Multiply assigned logical unit") }, /* DTLPWROMAEBKVF */ { SST(0x67, 0x0A, SS_RDEF, /* XXX TBD */ "Set target port groups command failed") }, /* DT B */ { SST(0x67, 0x0B, SS_RDEF, /* XXX TBD */ "ATA device feature not enabled") }, /* A */ { SST(0x68, 0x00, SS_RDEF, "Logical unit not configured") }, /* D */ { SST(0x68, 0x01, SS_RDEF, "Subsidiary logical unit not configured") }, /* A */ { SST(0x69, 0x00, SS_RDEF, "Data loss on logical unit") }, /* A */ { SST(0x69, 0x01, SS_RDEF, "Multiple logical unit failures") }, /* A */ { SST(0x69, 0x02, SS_RDEF, "Parity/data mismatch") }, /* A */ { SST(0x6A, 0x00, SS_RDEF, "Informational, refer to log") }, /* A */ { SST(0x6B, 0x00, SS_RDEF, "State change has occurred") }, /* A */ { SST(0x6B, 0x01, SS_RDEF, "Redundancy level got better") }, /* A */ { SST(0x6B, 0x02, SS_RDEF, "Redundancy level got worse") }, /* A */ { SST(0x6C, 0x00, SS_RDEF, "Rebuild failure occurred") }, /* A */ { SST(0x6D, 0x00, SS_RDEF, "Recalculate failure occurred") }, /* A */ { SST(0x6E, 0x00, SS_RDEF, "Command to logical unit failed") }, /* R */ { SST(0x6F, 0x00, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - authentication failure") }, /* R */ { SST(0x6F, 0x01, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not present") }, /* R */ { SST(0x6F, 0x02, SS_RDEF, /* XXX TBD */ "Copy protection key exchange failure - key not established") }, /* R */ { SST(0x6F, 0x03, SS_RDEF, /* XXX TBD */ "Read of scrambled sector without authentication") }, /* R */ { SST(0x6F, 0x04, SS_RDEF, /* XXX TBD */ "Media region code is mismatched to logical unit region") }, /* R */ { SST(0x6F, 0x05, SS_RDEF, /* XXX TBD */ "Drive region must be permanent/region reset count error") }, /* R */ { SST(0x6F, 0x06, SS_RDEF, /* XXX TBD */ "Insufficient block count for binding NONCE recording") }, /* R */ { SST(0x6F, 0x07, SS_RDEF, /* XXX TBD */ "Conflict in binding NONCE recording") }, /* T */ { SST(0x70, 0x00, SS_RDEF, "Decompression exception short: ASCQ = Algorithm ID") }, /* T */ { SST(0x70, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* T */ { SST(0x71, 0x00, SS_RDEF, "Decompression exception long: ASCQ = Algorithm ID") }, /* T */ { SST(0x71, 0xFF, SS_RDEF | SSQ_RANGE, NULL) }, /* Range 0x00 -> 0xFF */ /* R */ { SST(0x72, 0x00, SS_RDEF, "Session fixation error") }, /* R */ { SST(0x72, 0x01, SS_RDEF, "Session fixation error writing lead-in") }, /* R */ { SST(0x72, 0x02, SS_RDEF, "Session fixation error writing lead-out") }, /* R */ { SST(0x72, 0x03, SS_RDEF, "Session fixation error - incomplete track in session") }, /* R */ { SST(0x72, 0x04, SS_RDEF, "Empty or partially written reserved track") }, /* R */ { SST(0x72, 0x05, SS_RDEF, /* XXX TBD */ "No more track reservations allowed") }, /* R */ { SST(0x72, 0x06, SS_RDEF, /* XXX TBD */ "RMZ extension is not allowed") }, /* R */ { SST(0x72, 0x07, SS_RDEF, /* XXX TBD */ "No more test zone extensions are allowed") }, /* R */ { SST(0x73, 0x00, SS_RDEF, "CD control error") }, /* R */ { SST(0x73, 0x01, SS_RDEF, "Power calibration area almost full") }, /* R */ { SST(0x73, 0x02, SS_FATAL | ENOSPC, "Power calibration area is full") }, /* R */ { SST(0x73, 0x03, SS_RDEF, "Power calibration 
area error") }, /* R */ { SST(0x73, 0x04, SS_RDEF, "Program memory area update failure") }, /* R */ { SST(0x73, 0x05, SS_RDEF, "Program memory area is full") }, /* R */ { SST(0x73, 0x06, SS_RDEF, /* XXX TBD */ "RMA/PMA is almost full") }, /* R */ { SST(0x73, 0x10, SS_RDEF, /* XXX TBD */ "Current power calibration area almost full") }, /* R */ { SST(0x73, 0x11, SS_RDEF, /* XXX TBD */ "Current power calibration area is full") }, /* R */ { SST(0x73, 0x17, SS_RDEF, /* XXX TBD */ "RDZ is full") }, /* T */ { SST(0x74, 0x00, SS_RDEF, /* XXX TBD */ "Security error") }, /* T */ { SST(0x74, 0x01, SS_RDEF, /* XXX TBD */ "Unable to decrypt data") }, /* T */ { SST(0x74, 0x02, SS_RDEF, /* XXX TBD */ "Unencrypted data encountered while decrypting") }, /* T */ { SST(0x74, 0x03, SS_RDEF, /* XXX TBD */ "Incorrect data encryption key") }, /* T */ { SST(0x74, 0x04, SS_RDEF, /* XXX TBD */ "Cryptographic integrity validation failed") }, /* T */ { SST(0x74, 0x05, SS_RDEF, /* XXX TBD */ "Error decrypting data") }, /* T */ { SST(0x74, 0x06, SS_RDEF, /* XXX TBD */ "Unknown signature verification key") }, /* T */ { SST(0x74, 0x07, SS_RDEF, /* XXX TBD */ "Encryption parameters not useable") }, /* DT R M E VF */ { SST(0x74, 0x08, SS_RDEF, /* XXX TBD */ "Digital signature validation failure") }, /* T */ { SST(0x74, 0x09, SS_RDEF, /* XXX TBD */ "Encryption mode mismatch on read") }, /* T */ { SST(0x74, 0x0A, SS_RDEF, /* XXX TBD */ "Encrypted block not raw read enabled") }, /* T */ { SST(0x74, 0x0B, SS_RDEF, /* XXX TBD */ "Incorrect encryption parameters") }, /* DT R MAEBKV */ { SST(0x74, 0x0C, SS_RDEF, /* XXX TBD */ "Unable to decrypt parameter list") }, /* T */ { SST(0x74, 0x0D, SS_RDEF, /* XXX TBD */ "Encryption algorithm disabled") }, /* DT R MAEBKV */ { SST(0x74, 0x10, SS_RDEF, /* XXX TBD */ "SA creation parameter value invalid") }, /* DT R MAEBKV */ { SST(0x74, 0x11, SS_RDEF, /* XXX TBD */ "SA creation parameter value rejected") }, /* DT R MAEBKV */ { SST(0x74, 0x12, SS_RDEF, /* XXX TBD */ "Invalid SA usage") }, /* T */ { SST(0x74, 0x21, SS_RDEF, /* XXX TBD */ "Data encryption configuration prevented") }, /* DT R MAEBKV */ { SST(0x74, 0x30, SS_RDEF, /* XXX TBD */ "SA creation parameter not supported") }, /* DT R MAEBKV */ { SST(0x74, 0x40, SS_RDEF, /* XXX TBD */ "Authentication failed") }, /* V */ { SST(0x74, 0x61, SS_RDEF, /* XXX TBD */ "External data encryption key manager access error") }, /* V */ { SST(0x74, 0x62, SS_RDEF, /* XXX TBD */ "External data encryption key manager error") }, /* V */ { SST(0x74, 0x63, SS_RDEF, /* XXX TBD */ "External data encryption key not found") }, /* V */ { SST(0x74, 0x64, SS_RDEF, /* XXX TBD */ "External data encryption request not authorized") }, /* T */ { SST(0x74, 0x6E, SS_RDEF, /* XXX TBD */ "External data encryption control timeout") }, /* T */ { SST(0x74, 0x6F, SS_RDEF, /* XXX TBD */ "External data encryption control error") }, /* DT R M E V */ { SST(0x74, 0x71, SS_RDEF, /* XXX TBD */ "Logical unit access not authorized") }, /* D */ { SST(0x74, 0x79, SS_RDEF, /* XXX TBD */ "Security conflict in translated device") } }; const u_int asc_table_size = nitems(asc_table); struct asc_key { int asc; int ascq; }; static int ascentrycomp(const void *key, const void *member) { int asc; int ascq; const struct asc_table_entry *table_entry; asc = ((const struct asc_key *)key)->asc; ascq = ((const struct asc_key *)key)->ascq; table_entry = (const struct asc_table_entry *)member; if (asc >= table_entry->asc) { if (asc > table_entry->asc) return (1); if (ascq <= table_entry->ascq) { /* Check 
for ranges */ if (ascq == table_entry->ascq || ((table_entry->action & SSQ_RANGE) != 0 && ascq >= (table_entry - 1)->ascq)) return (0); return (-1); } return (1); } return (-1); } static int senseentrycomp(const void *key, const void *member) { int sense_key; const struct sense_key_table_entry *table_entry; sense_key = *((const int *)key); table_entry = (const struct sense_key_table_entry *)member; if (sense_key >= table_entry->sense_key) { if (sense_key == table_entry->sense_key) return (0); return (1); } return (-1); } static void fetchtableentries(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const struct sense_key_table_entry **sense_entry, const struct asc_table_entry **asc_entry) { caddr_t match; const struct asc_table_entry *asc_tables[2]; const struct sense_key_table_entry *sense_tables[2]; struct asc_key asc_ascq; size_t asc_tables_size[2]; size_t sense_tables_size[2]; int num_asc_tables; int num_sense_tables; int i; /* Default to failure */ *sense_entry = NULL; *asc_entry = NULL; match = NULL; if (inq_data != NULL) match = cam_quirkmatch((caddr_t)inq_data, (caddr_t)sense_quirk_table, sense_quirk_table_size, sizeof(*sense_quirk_table), scsi_inquiry_match); if (match != NULL) { struct scsi_sense_quirk_entry *quirk; quirk = (struct scsi_sense_quirk_entry *)match; asc_tables[0] = quirk->asc_info; asc_tables_size[0] = quirk->num_ascs; asc_tables[1] = asc_table; asc_tables_size[1] = asc_table_size; num_asc_tables = 2; sense_tables[0] = quirk->sense_key_info; sense_tables_size[0] = quirk->num_sense_keys; sense_tables[1] = sense_key_table; sense_tables_size[1] = nitems(sense_key_table); num_sense_tables = 2; } else { asc_tables[0] = asc_table; asc_tables_size[0] = asc_table_size; num_asc_tables = 1; sense_tables[0] = sense_key_table; sense_tables_size[0] = nitems(sense_key_table); num_sense_tables = 1; } asc_ascq.asc = asc; asc_ascq.ascq = ascq; for (i = 0; i < num_asc_tables; i++) { void *found_entry; found_entry = bsearch(&asc_ascq, asc_tables[i], asc_tables_size[i], sizeof(**asc_tables), ascentrycomp); if (found_entry) { *asc_entry = (struct asc_table_entry *)found_entry; break; } } for (i = 0; i < num_sense_tables; i++) { void *found_entry; found_entry = bsearch(&sense_key, sense_tables[i], sense_tables_size[i], sizeof(**sense_tables), senseentrycomp); if (found_entry) { *sense_entry = (struct sense_key_table_entry *)found_entry; break; } } } void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); if (sense_entry != NULL) *sense_key_desc = sense_entry->desc; else *sense_key_desc = "Invalid Sense Key"; if (asc_entry != NULL) *asc_desc = asc_entry->desc; else if (asc >= 0x80 && asc <= 0xff) *asc_desc = "Vendor Specific ASC"; else if (ascq >= 0x80 && ascq <= 0xff) *asc_desc = "Vendor Specific ASCQ"; else *asc_desc = "Reserved ASC/ASCQ pair"; } /* * Given sense and device type information, return the appropriate action. * If we do not understand the specific error as identified by the ASC/ASCQ * pair, fall back on the more generic actions derived from the sense key. 
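 * For example, ASC/ASCQ 0x55,0x01 ("System buffer full") has its own
 * entry above mapping it to SS_FATAL | ENOSPC, while an unlisted pair
 * simply gets the default action for its sense key.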
*/ scsi_sense_action scsi_error_action(struct ccb_scsiio *csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags) { const struct asc_table_entry *asc_entry; const struct sense_key_table_entry *sense_entry; int error_code, sense_key, asc, ascq; scsi_sense_action action; if (!scsi_extract_sense_ccb((union ccb *)csio, &error_code, &sense_key, &asc, &ascq)) { action = SS_RETRY | SSQ_DECREMENT_COUNT | SSQ_PRINT_SENSE | EIO; } else if ((error_code == SSD_DEFERRED_ERROR) || (error_code == SSD_DESC_DEFERRED_ERROR)) { /* * XXX dufault@FreeBSD.org * This error doesn't relate to the command associated * with this request sense. A deferred error is an error * for a command that has already returned GOOD status * (see SCSI2 8.2.14.2). * * By my reading of that section, it looks like the current * command has been cancelled, we should now clean things up * (hopefully recovering any lost data) and then retry the * current command. There are two easy choices, both wrong: * * 1. Drop through (like we had been doing), thus treating * this as if the error were for the current command and * return and stop the current command. * * 2. Issue a retry (like I made it do) thus hopefully * recovering the current transfer, and ignoring the * fact that we've dropped a command. * * These should probably be handled in a device specific * sense handler or punted back up to a user mode daemon */ action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; } else { fetchtableentries(sense_key, asc, ascq, inq_data, &sense_entry, &asc_entry); /* * Override the 'No additional Sense' entry (0,0) * with the error action of the sense key. */ if (asc_entry != NULL && (asc != 0 || ascq != 0)) action = asc_entry->action; else if (sense_entry != NULL) action = sense_entry->action; else action = SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE; if (sense_key == SSD_KEY_RECOVERED_ERROR) { /* * The action succeeded but the device wants * the user to know that some recovery action * was required. */ action &= ~(SS_MASK|SSQ_MASK|SS_ERRMASK); action |= SS_NOP|SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_ILLEGAL_REQUEST) { if ((sense_flags & SF_QUIET_IR) != 0) action &= ~SSQ_PRINT_SENSE; } else if (sense_key == SSD_KEY_UNIT_ATTENTION) { if ((sense_flags & SF_RETRY_UA) != 0 && (action & SS_MASK) == SS_FAIL) { action &= ~(SS_MASK|SSQ_MASK); action |= SS_RETRY|SSQ_DECREMENT_COUNT| SSQ_PRINT_SENSE; } action |= SSQ_UA; } } if ((action & SS_MASK) >= SS_START && (sense_flags & SF_NO_RECOVERY)) { action &= ~SS_MASK; action |= SS_FAIL; } else if ((action & SS_MASK) == SS_RETRY && (sense_flags & SF_NO_RETRY)) { action &= ~SS_MASK; action |= SS_FAIL; } if ((sense_flags & SF_PRINT_ALWAYS) != 0) action |= SSQ_PRINT_SENSE; else if ((sense_flags & SF_NO_PRINT) != 0) action &= ~SSQ_PRINT_SENSE; return (action); } char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len) { struct sbuf sb; int error; if (len == 0) return (""); sbuf_new(&sb, cdb_string, len, SBUF_FIXEDLEN); scsi_cdb_sbuf(cdb_ptr, &sb); /* ENOMEM just means that the fixed buffer is full, OK to ignore */ error = sbuf_finish(&sb); if (error != 0 && error != ENOMEM) return (""); return(sbuf_data(&sb)); } void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb) { u_int8_t cdb_len; int i; if (cdb_ptr == NULL) return; /* * This is taken from the SCSI-3 draft spec. * (T10/1157D revision 0.3) * The top 3 bits of an opcode are the group code. The next 5 bits * are the command code. 
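 * The group code alone determines how many CDB bytes are printed;
 * e.g. READ(10) (opcode 0x28) has group code 1 and a ten byte CDB.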
* Group 0: six byte commands * Group 1: ten byte commands * Group 2: ten byte commands * Group 3: reserved * Group 4: sixteen byte commands * Group 5: twelve byte commands * Group 6: vendor specific * Group 7: vendor specific */ switch((*cdb_ptr >> 5) & 0x7) { case 0: cdb_len = 6; break; case 1: case 2: cdb_len = 10; break; case 3: case 6: case 7: /* in this case, just print out the opcode */ cdb_len = 1; break; case 4: cdb_len = 16; break; case 5: cdb_len = 12; break; } for (i = 0; i < cdb_len; i++) sbuf_printf(sb, "%02hhx ", cdb_ptr[i]); return; } const char * scsi_status_string(struct ccb_scsiio *csio) { switch(csio->scsi_status) { case SCSI_STATUS_OK: return("OK"); case SCSI_STATUS_CHECK_COND: return("Check Condition"); case SCSI_STATUS_BUSY: return("Busy"); case SCSI_STATUS_INTERMED: return("Intermediate"); case SCSI_STATUS_INTERMED_COND_MET: return("Intermediate-Condition Met"); case SCSI_STATUS_RESERV_CONFLICT: return("Reservation Conflict"); case SCSI_STATUS_CMD_TERMINATED: return("Command Terminated"); case SCSI_STATUS_QUEUE_FULL: return("Queue Full"); case SCSI_STATUS_ACA_ACTIVE: return("ACA Active"); case SCSI_STATUS_TASK_ABORTED: return("Task Aborted"); default: { static char unkstr[64]; snprintf(unkstr, sizeof(unkstr), "Unknown %#x", csio->scsi_status); return(unkstr); } } } /* * scsi_command_string() returns 0 for success and -1 for failure. */ #ifdef _KERNEL int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb) #else /* !_KERNEL */ int scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb) #endif /* _KERNEL/!_KERNEL */ { struct scsi_inquiry_data *inq_data; #ifdef _KERNEL struct ccb_getdev *cgd; #endif /* _KERNEL */ #ifdef _KERNEL if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL) return(-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); /* * If the device is unconfigured, just pretend that it is a hard * drive. scsi_op_desc() needs this. */ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE) cgd->inq_data.device = T_DIRECT; inq_data = &cgd->inq_data; #else /* !_KERNEL */ inq_data = &device->inq_data; #endif /* _KERNEL/!_KERNEL */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) != 0) { sbuf_printf(sb, "%s. CDB: ", scsi_op_desc(csio->cdb_io.cdb_ptr[0], inq_data)); scsi_cdb_sbuf(csio->cdb_io.cdb_ptr, sb); } else { sbuf_printf(sb, "%s. CDB: ", scsi_op_desc(csio->cdb_io.cdb_bytes[0], inq_data)); scsi_cdb_sbuf(csio->cdb_io.cdb_bytes, sb); } #ifdef _KERNEL xpt_free_ccb((union ccb *)cgd); #endif return(0); } /* * Iterate over sense descriptors. Each descriptor is passed into iter_func(). * If iter_func() returns 0, list traversal continues. If iter_func() * returns non-zero, list traversal is stopped. */ void scsi_desc_iterate(struct scsi_sense_data_desc *sense, u_int sense_len, int (*iter_func)(struct scsi_sense_data_desc *sense, u_int, struct scsi_sense_desc_header *, void *), void *arg) { int cur_pos; int desc_len; /* * First make sure the extra length field is present. */ if (SSD_DESC_IS_PRESENT(sense, sense_len, extra_len) == 0) return; /* * The length of data actually returned may be different than the * extra_len recorded in the structure. */ desc_len = sense_len -offsetof(struct scsi_sense_data_desc, sense_desc); /* * Limit this further by the extra length reported, and the maximum * allowed extra length. 
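 * The result is the smallest of the bytes actually returned, the
 * extra_len the device reported, and SSD_EXTRA_MAX.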
*/ desc_len = MIN(desc_len, MIN(sense->extra_len, SSD_EXTRA_MAX)); /* * Subtract the size of the header from the descriptor length. * This is to ensure that we have at least the header left, so we * don't have to check that inside the loop. This can wind up * being a negative value. */ desc_len -= sizeof(struct scsi_sense_desc_header); for (cur_pos = 0; cur_pos < desc_len;) { struct scsi_sense_desc_header *header; header = (struct scsi_sense_desc_header *) &sense->sense_desc[cur_pos]; /* * Check to make sure we have the entire descriptor. We * don't call iter_func() unless we do. * * Note that although cur_pos is at the beginning of the * descriptor, desc_len already has the header length * subtracted. So the comparison of the length in the * header (which does not include the header itself) to * desc_len - cur_pos is correct. */ if (header->length > (desc_len - cur_pos)) break; if (iter_func(sense, sense_len, header, arg) != 0) break; cur_pos += sizeof(*header) + header->length; } } struct scsi_find_desc_info { uint8_t desc_type; struct scsi_sense_desc_header *header; }; static int scsi_find_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len, struct scsi_sense_desc_header *header, void *arg) { struct scsi_find_desc_info *desc_info; desc_info = (struct scsi_find_desc_info *)arg; if (header->desc_type == desc_info->desc_type) { desc_info->header = header; /* We found the descriptor, tell the iterator to stop. */ return (1); } else return (0); } /* * Given a descriptor type, return a pointer to it if it is in the sense * data and not truncated. Avoiding truncating sense data will simplify * things significantly for the caller. */ uint8_t * scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len, uint8_t desc_type) { struct scsi_find_desc_info desc_info; desc_info.desc_type = desc_type; desc_info.header = NULL; scsi_desc_iterate(sense, sense_len, scsi_find_desc_func, &desc_info); return ((uint8_t *)desc_info.header); } /* * Fill in SCSI sense data with the specified parameters. This routine can * fill in either fixed or descriptor type sense data. */ void scsi_set_sense_data_va(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap) { int descriptor_sense; scsi_sense_elem_type elem_type; /* * Determine whether to return fixed or descriptor format sense * data. If the user specifies SSD_TYPE_NONE for some reason, * they'll just get fixed sense data. */ if (sense_format == SSD_TYPE_DESC) descriptor_sense = 1; else descriptor_sense = 0; /* * Zero the sense data, so that we don't pass back any garbage data * to the user. */ memset(sense_data, 0, sizeof(*sense_data)); if (descriptor_sense != 0) { struct scsi_sense_data_desc *sense; sense = (struct scsi_sense_data_desc *)sense_data; /* * The descriptor sense format eliminates the use of the * valid bit. */ if (current_error != 0) sense->error_code = SSD_DESC_CURRENT_ERROR; else sense->error_code = SSD_DESC_DEFERRED_ERROR; sense->sense_key = sense_key; sense->add_sense_code = asc; sense->add_sense_code_qual = ascq; /* * Start off with no extra length, since the above data * fits in the standard descriptor sense information. 
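 * Each descriptor appended below bumps extra_len, i.e. the
 * additional sense length that is reported with the data.
 */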
sense->extra_len = 0; while ((elem_type = (scsi_sense_elem_type)va_arg(ap, scsi_sense_elem_type)) != SSD_ELEM_NONE) { int sense_len, len_to_copy; uint8_t *data; if (elem_type >= SSD_ELEM_MAX) { printf("%s: invalid sense type %d\n", __func__, elem_type); break; } sense_len = (int)va_arg(ap, int); len_to_copy = MIN(sense_len, SSD_EXTRA_MAX - sense->extra_len); data = (uint8_t *)va_arg(ap, uint8_t *); /* * We've already consumed the arguments for this one. */ if (elem_type == SSD_ELEM_SKIP) continue; switch (elem_type) { case SSD_ELEM_DESC: { /* * This is a straight descriptor. All we * need to do is copy the data in. */ bcopy(data, &sense->sense_desc[ sense->extra_len], len_to_copy); sense->extra_len += len_to_copy; break; } case SSD_ELEM_SKS: { struct scsi_sense_sks sks; bzero(&sks, sizeof(sks)); /* * This is already-formatted sense key * specific data. We just need to fill out * the header and copy everything in. */ bcopy(data, &sks.sense_key_spec, MIN(len_to_copy, sizeof(sks.sense_key_spec))); sks.desc_type = SSD_DESC_SKS; sks.length = sizeof(sks) - offsetof(struct scsi_sense_sks, reserved1); bcopy(&sks, &sense->sense_desc[sense->extra_len], sizeof(sks)); sense->extra_len += sizeof(sks); break; } case SSD_ELEM_INFO: case SSD_ELEM_COMMAND: { struct scsi_sense_command cmd; struct scsi_sense_info info; uint8_t *data_dest; uint8_t *descriptor; int descriptor_size, i, copy_len; bzero(&cmd, sizeof(cmd)); bzero(&info, sizeof(info)); /* * Command or information data. These * operate in pretty much the same way. */ if (elem_type == SSD_ELEM_COMMAND) { len_to_copy = MIN(len_to_copy, sizeof(cmd.command_info)); descriptor = (uint8_t *)&cmd; descriptor_size = sizeof(cmd); data_dest = (uint8_t *)&cmd.command_info; cmd.desc_type = SSD_DESC_COMMAND; cmd.length = sizeof(cmd) - offsetof(struct scsi_sense_command, reserved); } else { len_to_copy = MIN(len_to_copy, sizeof(info.info)); descriptor = (uint8_t *)&info; descriptor_size = sizeof(info); data_dest = (uint8_t *)&info.info; info.desc_type = SSD_DESC_INFO; info.byte2 = SSD_INFO_VALID; info.length = sizeof(info) - offsetof(struct scsi_sense_info, byte2); } /* * Copy this in reverse because the spec * (SPC-4) says that when 4 byte quantities * are stored in this 8 byte field, the * first four bytes shall be 0. * * So we fill the bytes in from the end, and * if we have less than 8 bytes to copy, * the initial, most significant bytes will * be 0. */ for (i = sense_len - 1; i >= 0 && len_to_copy > 0; i--, len_to_copy--) data_dest[len_to_copy - 1] = data[i]; /* * This calculation looks much like the * initial len_to_copy calculation, but * we have to do it again here, because * we're looking at a larger amount that * may or may not fit. It's not only the * data the user passed in, but also the * rest of the descriptor.
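 * For example, four bytes of user data still consume a full
 * sizeof(struct scsi_sense_info) bytes of descriptor space.
 */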
*/ copy_len = MIN(descriptor_size, SSD_EXTRA_MAX - sense->extra_len); bcopy(descriptor, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } case SSD_ELEM_FRU: { struct scsi_sense_fru fru; int copy_len; bzero(&fru, sizeof(fru)); fru.desc_type = SSD_DESC_FRU; fru.length = sizeof(fru) - offsetof(struct scsi_sense_fru, reserved); fru.fru = *data; copy_len = MIN(sizeof(fru), SSD_EXTRA_MAX - sense->extra_len); bcopy(&fru, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } case SSD_ELEM_STREAM: { struct scsi_sense_stream stream_sense; int copy_len; bzero(&stream_sense, sizeof(stream_sense)); stream_sense.desc_type = SSD_DESC_STREAM; stream_sense.length = sizeof(stream_sense) - offsetof(struct scsi_sense_stream, reserved); stream_sense.byte3 = *data; copy_len = MIN(sizeof(stream_sense), SSD_EXTRA_MAX - sense->extra_len); bcopy(&stream_sense, &sense->sense_desc[ sense->extra_len], copy_len); sense->extra_len += copy_len; break; } default: /* * We shouldn't get here, but if we do, do * nothing. We've already consumed the * arguments above. */ break; } } } else { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (current_error != 0) sense->error_code = SSD_CURRENT_ERROR; else sense->error_code = SSD_DEFERRED_ERROR; sense->flags = sense_key; sense->add_sense_code = asc; sense->add_sense_code_qual = ascq; /* * We've set the ASC and ASCQ, so we have 6 more bytes of * valid data. If we wind up setting any of the other * fields, we'll bump this to 10 extra bytes. */ sense->extra_len = 6; while ((elem_type = (scsi_sense_elem_type)va_arg(ap, scsi_sense_elem_type)) != SSD_ELEM_NONE) { int sense_len, len_to_copy; uint8_t *data; if (elem_type >= SSD_ELEM_MAX) { printf("%s: invalid sense type %d\n", __func__, elem_type); break; } /* * If we get in here, just bump the extra length to * 10 bytes. That will encompass anything we're * going to set here. */ sense->extra_len = 10; sense_len = (int)va_arg(ap, int); data = (uint8_t *)va_arg(ap, uint8_t *); switch (elem_type) { case SSD_ELEM_SKS: /* * The user passed in pre-formatted sense * key specific data. */ bcopy(data, &sense->sense_key_spec[0], MIN(sizeof(sense->sense_key_spec), sense_len)); break; case SSD_ELEM_INFO: case SSD_ELEM_COMMAND: { uint8_t *data_dest; int i; if (elem_type == SSD_ELEM_COMMAND) { data_dest = &sense->cmd_spec_info[0]; len_to_copy = MIN(sense_len, sizeof(sense->cmd_spec_info)); } else { data_dest = &sense->info[0]; len_to_copy = MIN(sense_len, sizeof(sense->info)); /* * We're setting the info field, so * set the valid bit. */ sense->error_code |= SSD_ERRCODE_VALID; } /* * Copy this in reverse so that if we have * less than 4 bytes to fill, the least * significant bytes will be at the end. * If we have more than 4 bytes, only the * least significant bytes will be included. */ for (i = sense_len - 1; i >= 0 && len_to_copy > 0; i--, len_to_copy--) data_dest[len_to_copy - 1] = data[i]; break; } case SSD_ELEM_FRU: sense->fru = *data; break; case SSD_ELEM_STREAM: sense->flags |= *data; break; case SSD_ELEM_DESC: default: /* * If the user passes in descriptor sense, * we can't handle that in fixed format. * So just skip it, and any unknown argument * types. */ break; } } } } void scsi_set_sense_data(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, ...) 
{ va_list ap; va_start(ap, ascq); scsi_set_sense_data_va(sense_data, sense_format, current_error, sense_key, asc, ascq, ap); va_end(ap); } /* * Get sense information for three similar sense data types. */ int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t info_type, uint64_t *info, int64_t *signed_info) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; uint8_t *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = scsi_find_desc(sense, sense_len, info_type); if (desc == NULL) goto bailout; switch (info_type) { case SSD_DESC_INFO: { struct scsi_sense_info *info_desc; info_desc = (struct scsi_sense_info *)desc; *info = scsi_8btou64(info_desc->info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_COMMAND: { struct scsi_sense_command *cmd_desc; cmd_desc = (struct scsi_sense_command *)desc; *info = scsi_8btou64(cmd_desc->command_info); if (signed_info != NULL) *signed_info = *info; break; } case SSD_DESC_FRU: { struct scsi_sense_fru *fru_desc; fru_desc = (struct scsi_sense_fru *)desc; *info = fru_desc->fru; if (signed_info != NULL) *signed_info = (int8_t)fru_desc->fru; break; } default: goto bailout; break; } break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; switch (info_type) { case SSD_DESC_INFO: { uint32_t info_val; if ((sense->error_code & SSD_ERRCODE_VALID) == 0) goto bailout; if (SSD_FIXED_IS_PRESENT(sense, sense_len, info) == 0) goto bailout; info_val = scsi_4btoul(sense->info); *info = info_val; if (signed_info != NULL) *signed_info = (int32_t)info_val; break; } case SSD_DESC_COMMAND: { uint32_t cmd_val; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, cmd_spec_info) == 0) || (SSD_FIXED_IS_FILLED(sense, cmd_spec_info) == 0)) goto bailout; cmd_val = scsi_4btoul(sense->cmd_spec_info); if (cmd_val == 0) goto bailout; *info = cmd_val; if (signed_info != NULL) *signed_info = (int32_t)cmd_val; break; } case SSD_DESC_FRU: if ((SSD_FIXED_IS_PRESENT(sense, sense_len, fru) == 0) || (SSD_FIXED_IS_FILLED(sense, fru) == 0)) goto bailout; if (sense->fru == 0) goto bailout; *info = sense->fru; if (signed_info != NULL) *signed_info = (int8_t)sense->fru; break; default: goto bailout; break; } break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_sks(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t *sks) { scsi_sense_data_type sense_type; if (sense_len == 0) goto bailout; sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_sks *desc; sense = (struct scsi_sense_data_desc *)sense_data; desc = (struct scsi_sense_sks *)scsi_find_desc(sense, sense_len, SSD_DESC_SKS); if (desc == NULL) goto bailout; /* * No need to check the SKS valid bit for descriptor sense. * If the descriptor is present, it is valid. 
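 * (Fixed format sense, handled below, carries an SSD_SCS_VALID bit
 * that must be set before the bytes can be trusted.)
 */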
*/ bcopy(desc->sense_key_spec, sks, sizeof(desc->sense_key_spec)); break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, sense_key_spec)== 0) || (SSD_FIXED_IS_FILLED(sense, sense_key_spec) == 0)) goto bailout; if ((sense->sense_key_spec[0] & SSD_SCS_VALID) == 0) goto bailout; bcopy(sense->sense_key_spec, sks,sizeof(sense->sense_key_spec)); break; } default: goto bailout; break; } return (0); bailout: return (1); } /* * Provide a common interface for fixed and descriptor sense to detect * whether we have block-specific sense information. It is clear by the * presence of the block descriptor in descriptor mode, but we have to * infer from the inquiry data and ILI bit in fixed mode. */ int scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *block_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_DIRECT: case T_RBC: + case T_ZBC_HM: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_block *block; sense = (struct scsi_sense_data_desc *)sense_data; block = (struct scsi_sense_block *)scsi_find_desc(sense, sense_len, SSD_DESC_BLOCK); if (block == NULL) goto bailout; *block_bits = block->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & SSD_ILI) == 0) goto bailout; *block_bits = sense->flags & SSD_ILI; break; } default: goto bailout; break; } return (0); bailout: return (1); } int scsi_get_stream_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *stream_bits) { scsi_sense_data_type sense_type; if (inq_data != NULL) { switch (SID_TYPE(inq_data)) { case T_SEQUENTIAL: break; default: goto bailout; break; } } sense_type = scsi_sense_type(sense_data); switch (sense_type) { case SSD_TYPE_DESC: { struct scsi_sense_data_desc *sense; struct scsi_sense_stream *stream; sense = (struct scsi_sense_data_desc *)sense_data; stream = (struct scsi_sense_stream *)scsi_find_desc(sense, sense_len, SSD_DESC_STREAM); if (stream == NULL) goto bailout; *stream_bits = stream->byte3; break; } case SSD_TYPE_FIXED: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags) == 0) goto bailout; if ((sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK)) == 0) goto bailout; *stream_bits = sense->flags & (SSD_ILI|SSD_EOM|SSD_FILEMARK); break; } default: goto bailout; break; } return (0); bailout: return (1); } void scsi_info_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t info) { sbuf_printf(sb, "Info: %#jx", info); } void scsi_command_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t csi) { sbuf_printf(sb, "Command Specific Info: %#jx", csi); } void scsi_progress_sbuf(struct sbuf *sb, uint16_t progress) { sbuf_printf(sb, "Progress: %d%% (%d/%d) complete", (progress * 100) / SSD_SKS_PROGRESS_DENOM, progress, SSD_SKS_PROGRESS_DENOM); } /* * Returns 1 for failure (i.e. SKS isn't valid) and 0 for success. 
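 * The meaning of the three SKS bytes depends on the sense key: a field
 * pointer for Illegal Request, an actual retry count for Recovered,
 * Hardware, and Medium Error, a progress indication for No Sense and
 * Not Ready, and a segment pointer for Copy Aborted.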
*/ int scsi_sks_sbuf(struct sbuf *sb, int sense_key, uint8_t *sks) { if ((sks[0] & SSD_SKS_VALID) == 0) return (1); switch (sense_key) { case SSD_KEY_ILLEGAL_REQUEST: { struct scsi_sense_sks_field *field; int bad_command; char tmpstr[40]; /*Field Pointer*/ field = (struct scsi_sense_sks_field *)sks; if (field->byte0 & SSD_SKS_FIELD_CMD) bad_command = 1; else bad_command = 0; tmpstr[0] = '\0'; /* Bit pointer is valid */ if (field->byte0 & SSD_SKS_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", field->byte0 & SSD_SKS_BIT_VALUE); sbuf_printf(sb, "%s byte %d %sis invalid", bad_command ? "Command" : "Data", scsi_2btoul(field->field), tmpstr); break; } case SSD_KEY_UNIT_ATTENTION: { struct scsi_sense_sks_overflow *overflow; overflow = (struct scsi_sense_sks_overflow *)sks; /*UA Condition Queue Overflow*/ sbuf_printf(sb, "Unit Attention Condition Queue %s", (overflow->byte0 & SSD_SKS_OVERFLOW_SET) ? "Overflowed" : "Did Not Overflow??"); break; } case SSD_KEY_RECOVERED_ERROR: case SSD_KEY_HARDWARE_ERROR: case SSD_KEY_MEDIUM_ERROR: { struct scsi_sense_sks_retry *retry; /*Actual Retry Count*/ retry = (struct scsi_sense_sks_retry *)sks; sbuf_printf(sb, "Actual Retry Count: %d", scsi_2btoul(retry->actual_retry_count)); break; } case SSD_KEY_NO_SENSE: case SSD_KEY_NOT_READY: { struct scsi_sense_sks_progress *progress; int progress_val; /*Progress Indication*/ progress = (struct scsi_sense_sks_progress *)sks; progress_val = scsi_2btoul(progress->progress); scsi_progress_sbuf(sb, progress_val); break; } case SSD_KEY_COPY_ABORTED: { struct scsi_sense_sks_segment *segment; char tmpstr[40]; /*Segment Pointer*/ segment = (struct scsi_sense_sks_segment *)sks; tmpstr[0] = '\0'; if (segment->byte0 & SSD_SKS_SEGMENT_BPV) snprintf(tmpstr, sizeof(tmpstr), "bit %d ", segment->byte0 & SSD_SKS_SEGMENT_BITPTR); sbuf_printf(sb, "%s byte %d %sis invalid", (segment->byte0 & SSD_SKS_SEGMENT_SD) ? "Segment" : "Data", scsi_2btoul(segment->field), tmpstr); break; } default: sbuf_printf(sb, "Sense Key Specific: %#x,%#x", sks[0], scsi_2btoul(&sks[1])); break; } return (0); } void scsi_fru_sbuf(struct sbuf *sb, uint64_t fru) { sbuf_printf(sb, "Field Replaceable Unit: %d", (int)fru); } void scsi_stream_sbuf(struct sbuf *sb, uint8_t stream_bits, uint64_t info) { int need_comma; need_comma = 0; /* * XXX KDM this needs more descriptive decoding. */ if (stream_bits & SSD_DESC_STREAM_FM) { sbuf_printf(sb, "Filemark"); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_EOM) { sbuf_printf(sb, "%sEOM", (need_comma) ? "," : ""); need_comma = 1; } if (stream_bits & SSD_DESC_STREAM_ILI) sbuf_printf(sb, "%sILI", (need_comma) ? 
"," : ""); sbuf_printf(sb, ": Info: %#jx", (uintmax_t) info); } void scsi_block_sbuf(struct sbuf *sb, uint8_t block_bits, uint64_t info) { if (block_bits & SSD_DESC_BLOCK_ILI) sbuf_printf(sb, "ILI: residue %#jx", (uintmax_t) info); } void scsi_sense_info_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_info *info; info = (struct scsi_sense_info *)header; scsi_info_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(info->info)); } void scsi_sense_command_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_command *command; command = (struct scsi_sense_command *)header; scsi_command_sbuf(sb, cdb, cdb_len, inq_data, scsi_8btou64(command->command_info)); } void scsi_sense_sks_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_sks *sks; int error_code, sense_key, asc, ascq; sks = (struct scsi_sense_sks *)header; scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); scsi_sks_sbuf(sb, sense_key, sks->sense_key_spec); } void scsi_sense_fru_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_fru *fru; fru = (struct scsi_sense_fru *)header; scsi_fru_sbuf(sb, (uint64_t)fru->fru); } void scsi_sense_stream_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_stream *stream; uint64_t info; stream = (struct scsi_sense_stream *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_stream_sbuf(sb, stream->byte3, info); } void scsi_sense_block_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_block *block; uint64_t info; block = (struct scsi_sense_block *)header; info = 0; scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &info, NULL); scsi_block_sbuf(sb, block->byte3, info); } void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { struct scsi_sense_progress *progress; const char *sense_key_desc; const char *asc_desc; int progress_val; progress = (struct scsi_sense_progress *)header; /* * Get descriptions for the sense key, ASC, and ASCQ in the * progress descriptor. These could be different than the values * in the overall sense data. */ scsi_sense_desc(progress->sense_key, progress->add_sense_code, progress->add_sense_code_qual, inq_data, &sense_key_desc, &asc_desc); progress_val = scsi_2btoul(progress->progress); /* * The progress indicator is for the operation described by the * sense key, ASC, and ASCQ in the descriptor. */ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s): ", progress->add_sense_code, progress->add_sense_code_qual, asc_desc); scsi_progress_sbuf(sb, progress_val); } /* * Generic sense descriptor printing routine. 
This is used when we have * not yet implemented a specific printing routine for this descriptor. */ void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { int i; uint8_t *buf_ptr; sbuf_printf(sb, "Descriptor %#x:", header->desc_type); buf_ptr = (uint8_t *)&header[1]; for (i = 0; i < header->length; i++, buf_ptr++) sbuf_printf(sb, " %02x", *buf_ptr); } /* * Keep this list in numeric order. This speeds the array traversal. */ struct scsi_sense_desc_printer { uint8_t desc_type; /* * The function arguments here are the superset of what is needed * to print out various different descriptors. Command and * information descriptors need inquiry data and command type. * Sense key specific descriptors need the sense key. * * The sense, cdb, and inquiry data arguments may be NULL, but the * information printed may not be fully decoded as a result. */ void (*print_func)(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); } scsi_sense_printers[] = { {SSD_DESC_INFO, scsi_sense_info_sbuf}, {SSD_DESC_COMMAND, scsi_sense_command_sbuf}, {SSD_DESC_SKS, scsi_sense_sks_sbuf}, {SSD_DESC_FRU, scsi_sense_fru_sbuf}, {SSD_DESC_STREAM, scsi_sense_stream_sbuf}, {SSD_DESC_BLOCK, scsi_sense_block_sbuf}, {SSD_DESC_PROGRESS, scsi_sense_progress_sbuf} }; void scsi_sense_desc_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header) { u_int i; for (i = 0; i < nitems(scsi_sense_printers); i++) { struct scsi_sense_desc_printer *printer; printer = &scsi_sense_printers[i]; /* * The list is sorted, so quit if we've passed our * descriptor number. */ if (printer->desc_type > header->desc_type) break; if (printer->desc_type != header->desc_type) continue; printer->print_func(sb, sense, sense_len, cdb, cdb_len, inq_data, header); return; } /* * No specific printing routine, so use the generic routine. */ scsi_sense_generic_sbuf(sb, sense, sense_len, cdb, cdb_len, inq_data, header); } scsi_sense_data_type scsi_sense_type(struct scsi_sense_data *sense_data) { switch (sense_data->error_code & SSD_ERRCODE) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: return (SSD_TYPE_DESC); break; case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: return (SSD_TYPE_FIXED); break; default: break; } return (SSD_TYPE_NONE); } struct scsi_print_sense_info { struct sbuf *sb; char *path_str; uint8_t *cdb; int cdb_len; struct scsi_inquiry_data *inq_data; }; static int scsi_print_desc_func(struct scsi_sense_data_desc *sense, u_int sense_len, struct scsi_sense_desc_header *header, void *arg) { struct scsi_print_sense_info *print_info; print_info = (struct scsi_print_sense_info *)arg; switch (header->desc_type) { case SSD_DESC_INFO: case SSD_DESC_FRU: case SSD_DESC_COMMAND: case SSD_DESC_SKS: case SSD_DESC_BLOCK: case SSD_DESC_STREAM: /* * We have already printed these descriptors, if they are * present. */ break; default: { sbuf_printf(print_info->sb, "%s", print_info->path_str); scsi_sense_desc_sbuf(print_info->sb, (struct scsi_sense_data *)sense, sense_len, print_info->cdb, print_info->cdb_len, print_info->inq_data, header); sbuf_printf(print_info->sb, "\n"); break; } } /* * Tell the iterator that we want to see more descriptors if they * are present. 
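 * (A non-zero return here would make scsi_desc_iterate() stop early,
 * as scsi_find_desc_func() does once it has a match.)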
*/ return (0); } void scsi_sense_only_sbuf(struct scsi_sense_data *sense, u_int sense_len, struct sbuf *sb, char *path_str, struct scsi_inquiry_data *inq_data, uint8_t *cdb, int cdb_len) { int error_code, sense_key, asc, ascq; sbuf_cat(sb, path_str); scsi_extract_sense_len(sense, sense_len, &error_code, &sense_key, &asc, &ascq, /*show_errors*/ 1); sbuf_printf(sb, "SCSI sense: "); switch (error_code) { case SSD_DEFERRED_ERROR: case SSD_DESC_DEFERRED_ERROR: sbuf_printf(sb, "Deferred error: "); /* FALLTHROUGH */ case SSD_CURRENT_ERROR: case SSD_DESC_CURRENT_ERROR: { struct scsi_sense_data_desc *desc_sense; struct scsi_print_sense_info print_info; const char *sense_key_desc; const char *asc_desc; uint8_t sks[3]; uint64_t val; int info_valid; /* * Get descriptions for the sense key, ASC, and ASCQ. If * these aren't present in the sense data (i.e. the sense * data isn't long enough), the -1 values that * scsi_extract_sense_len() returns will yield default * or error descriptions. */ scsi_sense_desc(sense_key, asc, ascq, inq_data, &sense_key_desc, &asc_desc); /* * We first print the sense key and ASC/ASCQ. */ sbuf_cat(sb, sense_key_desc); sbuf_printf(sb, " asc:%x,%x (%s)\n", asc, ascq, asc_desc); /* * Get the info field if it is valid. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_INFO, &val, NULL) == 0) info_valid = 1; else info_valid = 0; if (info_valid != 0) { uint8_t bits; /* * Determine whether we have any block or stream * device-specific information. */ if (scsi_get_block_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_block_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (scsi_get_stream_info(sense, sense_len, inq_data, &bits) == 0) { sbuf_cat(sb, path_str); scsi_stream_sbuf(sb, bits, val); sbuf_printf(sb, "\n"); } else if (val != 0) { /* * The information field can be valid but 0. * If the block or stream bits aren't set, * and this is 0, it isn't terribly useful * to print it out. */ sbuf_cat(sb, path_str); scsi_info_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } } /* * Print the FRU. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_FRU, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_fru_sbuf(sb, val); sbuf_printf(sb, "\n"); } /* * Print any command-specific information. */ if (scsi_get_sense_info(sense, sense_len, SSD_DESC_COMMAND, &val, NULL) == 0) { sbuf_cat(sb, path_str); scsi_command_sbuf(sb, cdb, cdb_len, inq_data, val); sbuf_printf(sb, "\n"); } /* * Print out any sense-key-specific information. */ if (scsi_get_sks(sense, sense_len, sks) == 0) { sbuf_cat(sb, path_str); scsi_sks_sbuf(sb, sense_key, sks); sbuf_printf(sb, "\n"); } /* * If this is fixed sense, we're done. If we have * descriptor sense, we might have more information * available. */ if (scsi_sense_type(sense) != SSD_TYPE_DESC) break; desc_sense = (struct scsi_sense_data_desc *)sense; print_info.sb = sb; print_info.path_str = path_str; print_info.cdb = cdb; print_info.cdb_len = cdb_len; print_info.inq_data = inq_data; /* * Print any sense descriptors that we have not already printed. */ scsi_desc_iterate(desc_sense, sense_len, scsi_print_desc_func, &print_info); break; } case -1: /* * scsi_extract_sense_len() sets values to -1 if the * show_errors flag is set and they aren't present in the * sense data. This means that sense_len is 0. 
*/ sbuf_printf(sb, "No sense data present\n"); break; default: { sbuf_printf(sb, "Error code 0x%x", error_code); if (sense->error_code & SSD_ERRCODE_VALID) { struct scsi_sense_data_fixed *fixed_sense; fixed_sense = (struct scsi_sense_data_fixed *)sense; if (SSD_FIXED_IS_PRESENT(fixed_sense, sense_len, info)){ uint32_t info; info = scsi_4btoul(fixed_sense->info); sbuf_printf(sb, " at block no. %d (decimal)", info); } } sbuf_printf(sb, "\n"); break; } } } /* * scsi_sense_sbuf() returns 0 for success and -1 for failure. */ #ifdef _KERNEL int scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags) #else /* !_KERNEL */ int scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags) #endif /* _KERNEL/!_KERNEL */ { struct scsi_sense_data *sense; struct scsi_inquiry_data *inq_data; #ifdef _KERNEL struct ccb_getdev *cgd; #endif /* _KERNEL */ char path_str[64]; uint8_t *cdb; #ifndef _KERNEL if (device == NULL) return(-1); #endif /* !_KERNEL */ if ((csio == NULL) || (sb == NULL)) return(-1); /* * If the CDB is a physical address, we can't deal with it.. */ if ((csio->ccb_h.flags & CAM_CDB_PHYS) != 0) flags &= ~SSS_FLAG_PRINT_COMMAND; #ifdef _KERNEL xpt_path_string(csio->ccb_h.path, path_str, sizeof(path_str)); #else /* !_KERNEL */ cam_path_string(device, path_str, sizeof(path_str)); #endif /* _KERNEL/!_KERNEL */ #ifdef _KERNEL if ((cgd = (struct ccb_getdev*)xpt_alloc_ccb_nowait()) == NULL) return(-1); /* * Get the device information. */ xpt_setup_ccb(&cgd->ccb_h, csio->ccb_h.path, CAM_PRIORITY_NORMAL); cgd->ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)cgd); /* * If the device is unconfigured, just pretend that it is a hard * drive. scsi_op_desc() needs this. */ if (cgd->ccb_h.status == CAM_DEV_NOT_THERE) cgd->inq_data.device = T_DIRECT; inq_data = &cgd->inq_data; #else /* !_KERNEL */ inq_data = &device->inq_data; #endif /* _KERNEL/!_KERNEL */ sense = NULL; if (flags & SSS_FLAG_PRINT_COMMAND) { sbuf_cat(sb, path_str); #ifdef _KERNEL scsi_command_string(csio, sb); #else /* !_KERNEL */ scsi_command_string(device, csio, sb); #endif /* _KERNEL/!_KERNEL */ sbuf_printf(sb, "\n"); } /* * If the sense data is a physical pointer, forget it. */ if (csio->ccb_h.flags & CAM_SENSE_PTR) { if (csio->ccb_h.flags & CAM_SENSE_PHYS) { #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(-1); } else { /* * bcopy the pointer to avoid unaligned access * errors on finicky architectures. We don't * ensure that the sense data is pointer aligned. */ bcopy(&csio->sense_data, &sense, sizeof(struct scsi_sense_data *)); } } else { /* * If the physical sense flag is set, but the sense pointer * is not also set, we assume that the user is an idiot and * return. (Well, okay, it could be that somehow, the * entire csio is physical, but we would have probably core * dumped on one of the bogus pointer deferences above * already.) 
*/ if (csio->ccb_h.flags & CAM_SENSE_PHYS) { #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(-1); } else sense = &csio->sense_data; } if (csio->ccb_h.flags & CAM_CDB_POINTER) cdb = csio->cdb_io.cdb_ptr; else cdb = csio->cdb_io.cdb_bytes; scsi_sense_only_sbuf(sense, csio->sense_len - csio->sense_resid, sb, path_str, inq_data, cdb, csio->cdb_len); #ifdef _KERNEL xpt_free_ccb((union ccb*)cgd); #endif /* _KERNEL/!_KERNEL */ return(0); } #ifdef _KERNEL char * scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len) #else /* !_KERNEL */ char * scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio, char *str, int str_len) #endif /* _KERNEL/!_KERNEL */ { struct sbuf sb; sbuf_new(&sb, str, str_len, 0); #ifdef _KERNEL scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); #else /* !_KERNEL */ scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); #endif /* _KERNEL/!_KERNEL */ sbuf_finish(&sb); return(sbuf_data(&sb)); } #ifdef _KERNEL void scsi_sense_print(struct ccb_scsiio *csio) { struct sbuf sb; char str[512]; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); printf("%s", sbuf_data(&sb)); } #else /* !_KERNEL */ void scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio, FILE *ofile) { struct sbuf sb; char str[512]; if ((device == NULL) || (csio == NULL) || (ofile == NULL)) return; sbuf_new(&sb, str, sizeof(str), 0); scsi_sense_sbuf(device, csio, &sb, SSS_FLAG_PRINT_COMMAND); sbuf_finish(&sb); fprintf(ofile, "%s", sbuf_data(&sb)); } #endif /* _KERNEL/!_KERNEL */ /* * Extract basic sense information. This is backward-compatible with the * previous implementation. For new implementations, * scsi_extract_sense_len() is recommended. */ void scsi_extract_sense(struct scsi_sense_data *sense_data, int *error_code, int *sense_key, int *asc, int *ascq) { scsi_extract_sense_len(sense_data, sizeof(*sense_data), error_code, sense_key, asc, ascq, /*show_errors*/ 0); } /* * Extract basic sense information from SCSI I/O CCB structure. */ int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq) { struct scsi_sense_data *sense_data; /* Make sure there are some sense data we can access. */ if (ccb->ccb_h.func_code != XPT_SCSI_IO || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_SCSI_STATUS_ERROR || (ccb->csio.scsi_status != SCSI_STATUS_CHECK_COND) || (ccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0 || (ccb->ccb_h.flags & CAM_SENSE_PHYS)) return (0); if (ccb->ccb_h.flags & CAM_SENSE_PTR) bcopy(&ccb->csio.sense_data, &sense_data, sizeof(struct scsi_sense_data *)); else sense_data = &ccb->csio.sense_data; scsi_extract_sense_len(sense_data, ccb->csio.sense_len - ccb->csio.sense_resid, error_code, sense_key, asc, ascq, 1); if (*error_code == -1) return (0); return (1); } /* * Extract basic sense information. If show_errors is set, sense values * will be set to -1 if they are not present. */ void scsi_extract_sense_len(struct scsi_sense_data *sense_data, u_int sense_len, int *error_code, int *sense_key, int *asc, int *ascq, int show_errors) { /* * If we have no length, we have no sense. 
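 *
 * Illustrative caller sketch (not part of the original source): with
 * show_errors set, a caller can tell "field not returned by the
 * device" apart from a legitimate zero value:
 *
 *	int error_code, sense_key, asc, ascq;
 *
 *	scsi_extract_sense_len(sense, sense_len, &error_code,
 *	    &sense_key, &asc, &ascq, 1);
 *	if (asc == -1) {
 *		<- the ASC byte was not present in the sense data
 *	}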
*/ if (sense_len == 0) { if (show_errors == 0) { *error_code = 0; *sense_key = 0; *asc = 0; *ascq = 0; } else { *error_code = -1; *sense_key = -1; *asc = -1; *ascq = -1; } return; } *error_code = sense_data->error_code & SSD_ERRCODE; switch (*error_code) { case SSD_DESC_CURRENT_ERROR: case SSD_DESC_DEFERRED_ERROR: { struct scsi_sense_data_desc *sense; sense = (struct scsi_sense_data_desc *)sense_data; if (SSD_DESC_IS_PRESENT(sense, sense_len, sense_key)) *sense_key = sense->sense_key & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code)) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if (SSD_DESC_IS_PRESENT(sense, sense_len, add_sense_code_qual)) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } case SSD_CURRENT_ERROR: case SSD_DEFERRED_ERROR: default: { struct scsi_sense_data_fixed *sense; sense = (struct scsi_sense_data_fixed *)sense_data; if (SSD_FIXED_IS_PRESENT(sense, sense_len, flags)) *sense_key = sense->flags & SSD_KEY; else *sense_key = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len, add_sense_code)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code))) *asc = sense->add_sense_code; else *asc = (show_errors) ? -1 : 0; if ((SSD_FIXED_IS_PRESENT(sense, sense_len,add_sense_code_qual)) && (SSD_FIXED_IS_FILLED(sense, add_sense_code_qual))) *ascq = sense->add_sense_code_qual; else *ascq = (show_errors) ? -1 : 0; break; } } } int scsi_get_sense_key(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (sense_key); } int scsi_get_asc(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (asc); } int scsi_get_ascq(struct scsi_sense_data *sense_data, u_int sense_len, int show_errors) { int error_code, sense_key, asc, ascq; scsi_extract_sense_len(sense_data, sense_len, &error_code, &sense_key, &asc, &ascq, show_errors); return (ascq); } /* * This function currently requires at least 36 bytes, or * SHORT_INQUIRY_LENGTH, worth of data to function properly. If this * function needs more or less data in the future, another length should be * defined in scsi_all.h to indicate the minimum amount of data necessary * for this routine to function properly. */ void scsi_print_inquiry(struct scsi_inquiry_data *inq_data) { u_int8_t type; char *dtype, *qtype; char vendor[16], product[48], revision[16], rstr[12]; type = SID_TYPE(inq_data); /* * Figure out basic device type and qualifier. 
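 *
 * The qualifier and device type strings chosen below feed the final
 * printf() in this function.  A hypothetical example of the resulting
 * line (vendor, product, and revision invented for illustration):
 *
 *	<WDC WD2003FYYS 01.0D02> Fixed Direct Access SPC-3 SCSI device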
*/ if (SID_QUAL_IS_VENDOR_UNIQUE(inq_data)) { qtype = " (vendor-unique qualifier)"; } else { switch (SID_QUAL(inq_data)) { case SID_QUAL_LU_CONNECTED: qtype = ""; break; case SID_QUAL_LU_OFFLINE: qtype = " (offline)"; break; case SID_QUAL_RSVD: qtype = " (reserved qualifier)"; break; default: case SID_QUAL_BAD_LU: qtype = " (LUN not supported)"; break; } } switch (type) { case T_DIRECT: dtype = "Direct Access"; break; case T_SEQUENTIAL: dtype = "Sequential Access"; break; case T_PRINTER: dtype = "Printer"; break; case T_PROCESSOR: dtype = "Processor"; break; case T_WORM: dtype = "WORM"; break; case T_CDROM: dtype = "CD-ROM"; break; case T_SCANNER: dtype = "Scanner"; break; case T_OPTICAL: dtype = "Optical"; break; case T_CHANGER: dtype = "Changer"; break; case T_COMM: dtype = "Communication"; break; case T_STORARRAY: dtype = "Storage Array"; break; case T_ENCLOSURE: dtype = "Enclosure Services"; break; case T_RBC: dtype = "Simplified Direct Access"; break; case T_OCRW: dtype = "Optical Card Read/Write"; break; case T_OSD: dtype = "Object-Based Storage"; break; case T_ADC: dtype = "Automation/Drive Interface"; break; + case T_ZBC_HM: + dtype = "Host Managed Zoned Block"; + break; case T_NODEVICE: dtype = "Uninstalled"; break; default: dtype = "unknown"; break; } cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor), sizeof(vendor)); cam_strvis(product, inq_data->product, sizeof(inq_data->product), sizeof(product)); cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision), sizeof(revision)); if (SID_ANSI_REV(inq_data) == SCSI_REV_0) snprintf(rstr, sizeof(rstr), "SCSI"); else if (SID_ANSI_REV(inq_data) <= SCSI_REV_SPC) { snprintf(rstr, sizeof(rstr), "SCSI-%d", SID_ANSI_REV(inq_data)); } else { snprintf(rstr, sizeof(rstr), "SPC-%d SCSI", SID_ANSI_REV(inq_data) - 2); } printf("<%s %s %s> %s %s %s device%s\n", vendor, product, revision, SID_IS_REMOVABLE(inq_data) ? "Removable" : "Fixed", dtype, rstr, qtype); } void scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data) { char vendor[16], product[48], revision[16]; cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor), sizeof(vendor)); cam_strvis(product, inq_data->product, sizeof(inq_data->product), sizeof(product)); cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision), sizeof(revision)); printf("<%s %s %s>", vendor, product, revision); } /* * Table of syncrates that don't follow the "divisible by 4" * rule. This table will be expanded in future SCSI specs. */ static struct { u_int period_factor; u_int period; /* in 100ths of ns */ } scsi_syncrates[] = { { 0x08, 625 }, /* FAST-160 */ { 0x09, 1250 }, /* FAST-80 */ { 0x0a, 2500 }, /* FAST-40 40MHz */ { 0x0b, 3030 }, /* FAST-40 33MHz */ { 0x0c, 5000 } /* FAST-20 */ }; /* * Return the frequency in kHz corresponding to the given * sync period factor. */ u_int scsi_calc_syncsrate(u_int period_factor) { u_int i; u_int num_syncrates; /* * It's a bug if period is zero, but if it is anyway, don't * die with a divide fault- instead return something which * 'approximates' async */ if (period_factor == 0) { return (3300); } num_syncrates = nitems(scsi_syncrates); /* See if the period is in the "exception" table */ for (i = 0; i < num_syncrates; i++) { if (period_factor == scsi_syncrates[i].period_factor) { /* Period in kHz */ return (100000000 / scsi_syncrates[i].period); } } /* * Wasn't in the table, so use the standard * 4 times conversion. 
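 *
 * Worked examples (arithmetic only, derived from this function):
 *
 *	factor 0x0c: in the exception table with period 5000 (100ths of
 *	    ns), so 100000000 / 5000 = 20000 kHz (FAST-20).
 *	factor 0x19 (25): not in the table, so the standard conversion
 *	    below gives 10000000 / (25 * 4 * 10) = 10000 kHz (10 MHz).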
*/ return (10000000 / (period_factor * 4 * 10)); } /* * Return the SCSI sync parameter that corresponds to * the passed in period in 10ths of ns. */ u_int scsi_calc_syncparam(u_int period) { u_int i; u_int num_syncrates; if (period == 0) return (~0); /* Async */ /* Adjust for exception table being in 100ths. */ period *= 10; num_syncrates = nitems(scsi_syncrates); /* See if the period is in the "exception" table */ for (i = 0; i < num_syncrates; i++) { if (period <= scsi_syncrates[i].period) { /* Period in 100ths of ns */ return (scsi_syncrates[i].period_factor); } } /* * Wasn't in the table, so use the standard * 1/4 period in ns conversion. */ return (period/400); } int scsi_devid_is_naa_ieee_reg(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; struct scsi_vpd_id_naa_basic *naa; descr = (struct scsi_vpd_id_descriptor *)bufp; naa = (struct scsi_vpd_id_naa_basic *)descr->identifier; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; if (descr->length < sizeof(struct scsi_vpd_id_naa_ieee_reg)) return 0; if ((naa->naa >> SVPD_ID_NAA_NAA_SHIFT) != SVPD_ID_NAA_IEEE_REG) return 0; return 1; } int scsi_devid_is_sas_target(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if (!scsi_devid_is_naa_ieee_reg(bufp)) return 0; if ((descr->id_type & SVPD_ID_PIV) == 0) /* proto field reserved */ return 0; if ((descr->proto_codeset >> SVPD_ID_PROTO_SHIFT) != SCSI_PROTO_SAS) return 0; return 1; } int scsi_devid_is_lun_eui64(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_EUI64) return 0; return 1; } int scsi_devid_is_lun_naa(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; return 1; } int scsi_devid_is_lun_t10(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_T10) return 0; return 1; } int scsi_devid_is_lun_name(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_LUN) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_SCSI_NAME) return 0; return 1; } int scsi_devid_is_port_naa(uint8_t *bufp) { struct scsi_vpd_id_descriptor *descr; descr = (struct scsi_vpd_id_descriptor *)bufp; if ((descr->id_type & SVPD_ID_ASSOC_MASK) != SVPD_ID_ASSOC_PORT) return 0; if ((descr->id_type & SVPD_ID_TYPE_MASK) != SVPD_ID_TYPE_NAA) return 0; return 1; } struct scsi_vpd_id_descriptor * scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len, scsi_devid_checkfn_t ck_fn) { uint8_t *desc_buf_end; desc_buf_end = (uint8_t *)desc + len; for (; desc->identifier <= desc_buf_end && desc->identifier + desc->length <= desc_buf_end; desc = (struct scsi_vpd_id_descriptor *)(desc->identifier + desc->length)) { if (ck_fn == NULL || ck_fn((uint8_t *)desc) != 0) return (desc); } return (NULL); } struct scsi_vpd_id_descriptor * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t page_len, scsi_devid_checkfn_t ck_fn) { uint32_t len; if (page_len < sizeof(*id)) return (NULL); 
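	/*
	 * Clamp the descriptor list length to what the caller actually
	 * received, then hand the walk off to scsi_get_devid_desc().
	 * Illustrative caller sketch (not part of the original source);
	 * "vpd_buf" and "vpd_len" are hypothetical names for a fetched
	 * Device ID VPD page and its valid length:
	 *
	 *	struct scsi_vpd_id_descriptor *desc;
	 *
	 *	desc = scsi_get_devid((struct scsi_vpd_device_id *)vpd_buf,
	 *	    vpd_len, scsi_devid_is_lun_naa);
	 *	if (desc != NULL)
	 *		... desc->identifier holds the LUN's NAA identifier ...
	 */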
len = MIN(scsi_2btoul(id->length), page_len - sizeof(*id)); return (scsi_get_devid_desc((struct scsi_vpd_id_descriptor *) id->desc_list, len, ck_fn)); } int scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr, uint32_t valid_len) { switch (hdr->format_protocol & SCSI_TRN_PROTO_MASK) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; uint64_t n_port_name; fcp = (struct scsi_transportid_fcp *)hdr; n_port_name = scsi_8btou64(fcp->n_port_name); sbuf_printf(sb, "FCP address: 0x%.16jx",(uintmax_t)n_port_name); break; } case SCSI_PROTO_SPI: { struct scsi_transportid_spi *spi; spi = (struct scsi_transportid_spi *)hdr; sbuf_printf(sb, "SPI address: %u,%u", scsi_2btoul(spi->scsi_addr), scsi_2btoul(spi->rel_trgt_port_id)); break; } case SCSI_PROTO_SSA: /* * XXX KDM there is no transport ID defined in SPC-4 for * SSA. */ break; case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; uint64_t eui64; sbp = (struct scsi_transportid_1394 *)hdr; eui64 = scsi_8btou64(sbp->eui64); sbuf_printf(sb, "SBP address: 0x%.16jx", (uintmax_t)eui64); break; } case SCSI_PROTO_RDMA: { struct scsi_transportid_rdma *rdma; unsigned int i; rdma = (struct scsi_transportid_rdma *)hdr; sbuf_printf(sb, "RDMA address: 0x"); for (i = 0; i < sizeof(rdma->initiator_port_id); i++) sbuf_printf(sb, "%02x", rdma->initiator_port_id[i]); break; } case SCSI_PROTO_ISCSI: { uint32_t add_len, i; uint8_t *iscsi_name = NULL; int nul_found = 0; sbuf_printf(sb, "iSCSI address: "); if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_DEVICE) { struct scsi_transportid_iscsi_device *dev; dev = (struct scsi_transportid_iscsi_device *)hdr; /* * Verify how much additional data we really have. */ add_len = scsi_2btoul(dev->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_device, iscsi_name)); iscsi_name = &dev->iscsi_name[0]; } else if ((hdr->format_protocol & SCSI_TRN_FORMAT_MASK) == SCSI_TRN_ISCSI_FORMAT_PORT) { struct scsi_transportid_iscsi_port *port; port = (struct scsi_transportid_iscsi_port *)hdr; add_len = scsi_2btoul(port->additional_length); add_len = MIN(add_len, valid_len - __offsetof(struct scsi_transportid_iscsi_port, iscsi_name)); iscsi_name = &port->iscsi_name[0]; } else { sbuf_printf(sb, "unknown format %x", (hdr->format_protocol & SCSI_TRN_FORMAT_MASK) >> SCSI_TRN_FORMAT_SHIFT); break; } if (add_len == 0) { sbuf_printf(sb, "not enough data"); break; } /* * This is supposed to be a NUL-terminated ASCII * string, but you never know. So we're going to * check. We need to do this because there is no * sbuf equivalent of strncat(). */ for (i = 0; i < add_len; i++) { if (iscsi_name[i] == '\0') { nul_found = 1; break; } } /* * If there is a NUL in the name, we can just use * sbuf_cat(). Otherwise we need to use sbuf_bcat(). */ if (nul_found != 0) sbuf_cat(sb, iscsi_name); else sbuf_bcat(sb, iscsi_name, add_len); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; uint64_t sas_addr; sas = (struct scsi_transportid_sas *)hdr; sas_addr = scsi_8btou64(sas->sas_address); sbuf_printf(sb, "SAS address: 0x%.16jx", (uintmax_t)sas_addr); break; } case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: /* * No Transport ID format for ADI, ATA or USB is defined in * SPC-4. 
*/ sbuf_printf(sb, "No known Transport ID format for protocol " "%#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; case SCSI_PROTO_SOP: { struct scsi_transportid_sop *sop; struct scsi_sop_routing_id_norm *rid; sop = (struct scsi_transportid_sop *)hdr; rid = (struct scsi_sop_routing_id_norm *)sop->routing_id; /* * Note that there is no alternate format specified in SPC-4 * for the PCIe routing ID, so we don't really have a way * to know whether the second byte of the routing ID is * a device and function or just a function. So we just * assume bus,device,function. */ sbuf_printf(sb, "SOP Routing ID: %u,%u,%u", rid->bus, rid->devfunc >> SCSI_TRN_SOP_DEV_SHIFT, rid->devfunc & SCSI_TRN_SOP_FUNC_NORM_MAX); break; } case SCSI_PROTO_NONE: default: sbuf_printf(sb, "Unknown protocol %#x", hdr->format_protocol & SCSI_TRN_PROTO_MASK); break; } return (0); } struct scsi_nv scsi_proto_map[] = { { "fcp", SCSI_PROTO_FC }, { "spi", SCSI_PROTO_SPI }, { "ssa", SCSI_PROTO_SSA }, { "sbp", SCSI_PROTO_1394 }, { "1394", SCSI_PROTO_1394 }, { "srp", SCSI_PROTO_RDMA }, { "rdma", SCSI_PROTO_RDMA }, { "iscsi", SCSI_PROTO_ISCSI }, { "iqn", SCSI_PROTO_ISCSI }, { "sas", SCSI_PROTO_SAS }, { "aditp", SCSI_PROTO_ADITP }, { "ata", SCSI_PROTO_ATA }, { "uas", SCSI_PROTO_UAS }, { "usb", SCSI_PROTO_UAS }, { "sop", SCSI_PROTO_SOP } }; const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value) { int i; for (i = 0; i < num_table_entries; i++) { if (table[i].value == value) return (table[i].name); } return (NULL); } /* * Given a name/value table, find a value matching the given name. * Return values: * SCSI_NV_FOUND - match found * SCSI_NV_AMBIGUOUS - more than one match, none of them exact * SCSI_NV_NOT_FOUND - no match found */ scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries, char *name, int *table_entry, scsi_nv_flags flags) { int i, num_matches = 0; for (i = 0; i < num_table_entries; i++) { size_t table_len, name_len; table_len = strlen(table[i].name); name_len = strlen(name); if ((((flags & SCSI_NV_FLAG_IG_CASE) != 0) && (strncasecmp(table[i].name, name, name_len) == 0)) || (((flags & SCSI_NV_FLAG_IG_CASE) == 0) && (strncmp(table[i].name, name, name_len) == 0))) { *table_entry = i; /* * Check for an exact match. If we have the same * number of characters in the table as the argument, * and we already know they're the same, we have * an exact match. */ if (table_len == name_len) return (SCSI_NV_FOUND); /* * Otherwise, bump up the number of matches. We'll * see later how many we have. */ num_matches++; } } if (num_matches > 1) return (SCSI_NV_AMBIGUOUS); else if (num_matches == 1) return (SCSI_NV_FOUND); else return (SCSI_NV_NOT_FOUND); } /* * Parse transport IDs for Fibre Channel, 1394 and SAS. Since these are * all 64-bit numbers, the code is similar. 
*/ int scsi_parse_transportid_64bit(int proto_id, char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { uint64_t value; char *endptr; int retval; size_t alloc_size; retval = 0; value = strtouq(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing ID %s, 64-bit number required", __func__, id_str); } retval = 1; goto bailout; } switch (proto_id) { case SCSI_PROTO_FC: alloc_size = sizeof(struct scsi_transportid_fcp); break; case SCSI_PROTO_1394: alloc_size = sizeof(struct scsi_transportid_1394); break; case SCSI_PROTO_SAS: alloc_size = sizeof(struct scsi_transportid_sas); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unsupported " "protocol %d", __func__, proto_id); } retval = 1; goto bailout; break; /* NOTREACHED */ } #ifdef _KERNEL *hdr = malloc(alloc_size, type, flags); #else /* _KERNEL */ *hdr = malloc(alloc_size); #endif /*_KERNEL */ if (*hdr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, alloc_size); } retval = 1; goto bailout; } *alloc_len = alloc_size; bzero(*hdr, alloc_size); switch (proto_id) { case SCSI_PROTO_FC: { struct scsi_transportid_fcp *fcp; fcp = (struct scsi_transportid_fcp *)(*hdr); fcp->format_protocol = SCSI_PROTO_FC | SCSI_TRN_FCP_FORMAT_DEFAULT; scsi_u64to8b(value, fcp->n_port_name); break; } case SCSI_PROTO_1394: { struct scsi_transportid_1394 *sbp; sbp = (struct scsi_transportid_1394 *)(*hdr); sbp->format_protocol = SCSI_PROTO_1394 | SCSI_TRN_1394_FORMAT_DEFAULT; scsi_u64to8b(value, sbp->eui64); break; } case SCSI_PROTO_SAS: { struct scsi_transportid_sas *sas; sas = (struct scsi_transportid_sas *)(*hdr); sas->format_protocol = SCSI_PROTO_SAS | SCSI_TRN_SAS_FORMAT_DEFAULT; scsi_u64to8b(value, sas->sas_address); break; } default: break; } bailout: return (retval); } /* * Parse a SPI (Parallel SCSI) address of the form: id,rel_tgt_port */ int scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { unsigned long scsi_addr, target_port; struct scsi_transportid_spi *spi; char *tmpstr, *endptr; int retval; retval = 0; tmpstr = strsep(&id_str, ","); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } scsi_addr = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing SCSI ID %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if (id_str == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no relative " "target port found", __func__); } retval = 1; goto bailout; } target_port = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing relative target port %s, number " "required", __func__, id_str); } retval = 1; goto bailout; } #ifdef _KERNEL spi = malloc(sizeof(*spi), type, flags); #else spi = malloc(sizeof(*spi)); #endif if (spi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*spi)); } retval = 1; goto bailout; } *alloc_len = sizeof(*spi); bzero(spi, sizeof(*spi)); 
spi->format_protocol = SCSI_PROTO_SPI | SCSI_TRN_SPI_FORMAT_DEFAULT; scsi_ulto2b(scsi_addr, spi->scsi_addr); scsi_ulto2b(target_port, spi->rel_trgt_port_id); *hdr = (struct scsi_transportid_header *)spi; bailout: return (retval); } /* * Parse an RDMA/SRP Initiator Port ID string. This is 32 hexadecimal digits, * optionally prefixed by "0x" or "0X". */ int scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_rdma *rdma; int retval; size_t id_len, rdma_id_size; uint8_t rdma_id[SCSI_TRN_RDMA_PORT_LEN]; char *tmpstr; unsigned int i, j; retval = 0; id_len = strlen(id_str); rdma_id_size = SCSI_TRN_RDMA_PORT_LEN; /* * Check the size. It needs to be either 32 or 34 characters long. */ if ((id_len != (rdma_id_size * 2)) && (id_len != ((rdma_id_size * 2) + 2))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA ID " "must be 32 hex digits (0x prefix " "optional), only %zu seen", __func__, id_len); } retval = 1; goto bailout; } tmpstr = id_str; /* * If the user gave us 34 characters, the string needs to start * with '0x'. */ if (id_len == ((rdma_id_size * 2) + 2)) { if ((tmpstr[0] == '0') && ((tmpstr[1] == 'x') || (tmpstr[1] == 'X'))) { tmpstr += 2; } else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: RDMA " "ID prefix, if used, must be \"0x\", " "got %s", __func__, tmpstr); } retval = 1; goto bailout; } } bzero(rdma_id, sizeof(rdma_id)); /* * Convert ASCII hex into binary bytes. There is no standard * 128-bit integer type, and so no strtou128t() routine to convert * from hex into a large integer. In the end, we're not going to * an integer, but rather to a byte array, so that and the fact * that we require the user to give us 32 hex digits simplifies the * logic. */ for (i = 0; i < (rdma_id_size * 2); i++) { int cur_shift; unsigned char c; /* Increment the byte array one for every 2 hex digits */ j = i >> 1; /* * The first digit in every pair is the most significant * 4 bits. The second is the least significant 4 bits. */ if ((i % 2) == 0) cur_shift = 4; else cur_shift = 0; c = tmpstr[i]; /* Convert the ASCII hex character into a number */ if (isdigit(c)) c -= '0'; else if (isalpha(c)) c -= isupper(c) ? 'A' - 10 : 'a' - 10; else { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } /* * The converted number can't be less than 0; the type is * unsigned, and the subtraction logic will not give us * a negative number. So we only need to make sure that * the value is not greater than 0xf. (i.e. make sure the * user didn't give us a value like "0x12jklmno"). 
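 *
 * Worked example (derived from the loop above): the digit pair "a7"
 * converts as c='a' -> 0xa shifted left 4 bits (0xa0), then c='7' ->
 * 0x7 OR'd in, leaving rdma_id[j] == 0xa7.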
*/ if (c > 0xf) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "RDMA ID must be hex digits, got " "invalid character %c", __func__, tmpstr[i]); } retval = 1; goto bailout; } rdma_id[j] |= c << cur_shift; } #ifdef _KERNEL rdma = malloc(sizeof(*rdma), type, flags); #else rdma = malloc(sizeof(*rdma)); #endif if (rdma == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*rdma)); } retval = 1; goto bailout; } *alloc_len = sizeof(*rdma); bzero(rdma, *alloc_len); rdma->format_protocol = SCSI_PROTO_RDMA | SCSI_TRN_RDMA_FORMAT_DEFAULT; bcopy(rdma_id, rdma->initiator_port_id, SCSI_TRN_RDMA_PORT_LEN); *hdr = (struct scsi_transportid_header *)rdma; bailout: return (retval); } /* * Parse an iSCSI name. The format is either just the name: * * iqn.2012-06.com.example:target0 * or the name, separator and initiator session ID: * * iqn.2012-06.com.example:target0,i,0x123 * * The separator format is exact. */ int scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { size_t id_len, sep_len, id_size, name_len; int retval; unsigned int i, sep_pos, sep_found; const char *sep_template = ",i,0x"; const char *iqn_prefix = "iqn."; struct scsi_transportid_iscsi_device *iscsi; retval = 0; sep_found = 0; id_len = strlen(id_str); sep_len = strlen(sep_template); /* * The separator is defined as exactly ',i,0x'. Any other commas, * or any other form, is an error. So look for a comma, and once * we find that, the next few characters must match the separator * exactly. Once we get through the separator, there should be at * least one character. */ for (i = 0, sep_pos = 0; i < id_len; i++) { if (sep_pos == 0) { if (id_str[i] == sep_template[sep_pos]) sep_pos++; continue; } if (sep_pos < sep_len) { if (id_str[i] == sep_template[sep_pos]) { sep_pos++; continue; } if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "invalid separator in iSCSI name " "\"%s\"", __func__, id_str); } retval = 1; goto bailout; } else { sep_found = 1; break; } } /* * Check to see whether we have a separator but no digits after it. */ if ((sep_pos != 0) && (sep_found == 0)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no digits " "found after separator in iSCSI name \"%s\"", __func__, id_str); } retval = 1; goto bailout; } /* * The incoming ID string has the "iqn." prefix stripped off. We * need enough space for the base structure (the structures are the * same for the two iSCSI forms), the prefix, the ID string and a * terminating NUL. */ id_size = sizeof(*iscsi) + strlen(iqn_prefix) + id_len + 1; #ifdef _KERNEL iscsi = malloc(id_size, type, flags); #else iscsi = malloc(id_size); #endif if (iscsi == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, id_size); } retval = 1; goto bailout; } *alloc_len = id_size; bzero(iscsi, id_size); iscsi->format_protocol = SCSI_PROTO_ISCSI; if (sep_found == 0) iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_DEVICE; else iscsi->format_protocol |= SCSI_TRN_ISCSI_FORMAT_PORT; name_len = id_size - sizeof(*iscsi); scsi_ulto2b(name_len, iscsi->additional_length); snprintf(iscsi->iscsi_name, name_len, "%s%s", iqn_prefix, id_str); *hdr = (struct scsi_transportid_header *)iscsi; bailout: return (retval); } /* * Parse a SCSI over PCIe (SOP) identifier. 
The Routing ID can either be * of the form 'bus,device,function' or 'bus,function'. */ int scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { struct scsi_transportid_sop *sop; unsigned long bus, device, function; char *tmpstr, *endptr; int retval, device_spec; retval = 0; device_spec = 0; device = 0; tmpstr = strsep(&id_str, ","); if ((tmpstr == NULL) || (*tmpstr == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no ID found", __func__); } retval = 1; goto bailout; } bus = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe bus %s, number required", __func__, tmpstr); } retval = 1; goto bailout; } if ((id_str == NULL) || (*id_str == '\0')) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no PCIe " "device or function found", __func__); } retval = 1; goto bailout; } tmpstr = strsep(&id_str, ","); function = strtoul(tmpstr, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: error " "parsing PCIe device/function %s, number " "required", __func__, tmpstr); } retval = 1; goto bailout; } /* * Check to see whether the user specified a third value. If so, * the second is the device. */ if (id_str != NULL) { if (*id_str == '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "no PCIe function found", __func__); } retval = 1; goto bailout; } device = function; device_spec = 1; function = strtoul(id_str, &endptr, 0); if (*endptr != '\0') { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: " "error parsing PCIe function %s, " "number required", __func__, id_str); } retval = 1; goto bailout; } } if (bus > SCSI_TRN_SOP_BUS_MAX) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: bus value " "%lu greater than maximum %u", __func__, bus, SCSI_TRN_SOP_BUS_MAX); } retval = 1; goto bailout; } if ((device_spec != 0) && (device > SCSI_TRN_SOP_DEV_MASK)) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: device value " "%lu greater than maximum %u", __func__, device, SCSI_TRN_SOP_DEV_MAX); } retval = 1; goto bailout; } if (((device_spec != 0) && (function > SCSI_TRN_SOP_FUNC_NORM_MAX)) || ((device_spec == 0) && (function > SCSI_TRN_SOP_FUNC_ALT_MAX))) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: function value " "%lu greater than maximum %u", __func__, function, (device_spec == 0) ? 
SCSI_TRN_SOP_FUNC_ALT_MAX : SCSI_TRN_SOP_FUNC_NORM_MAX); } retval = 1; goto bailout; } #ifdef _KERNEL sop = malloc(sizeof(*sop), type, flags); #else sop = malloc(sizeof(*sop)); #endif if (sop == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: unable to " "allocate %zu bytes", __func__, sizeof(*sop)); } retval = 1; goto bailout; } *alloc_len = sizeof(*sop); bzero(sop, sizeof(*sop)); sop->format_protocol = SCSI_PROTO_SOP | SCSI_TRN_SOP_FORMAT_DEFAULT; if (device_spec != 0) { struct scsi_sop_routing_id_norm rid; rid.bus = bus; rid.devfunc = (device << SCSI_TRN_SOP_DEV_SHIFT) | function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } else { struct scsi_sop_routing_id_alt rid; rid.bus = bus; rid.function = function; bcopy(&rid, sop->routing_id, MIN(sizeof(rid), sizeof(sop->routing_id))); } *hdr = (struct scsi_transportid_header *)sop; bailout: return (retval); } /* * transportid_str: NUL-terminated string with format: protocol,id * The ID is protocol specific. * hdr: Storage will be allocated for the transport ID. * alloc_len: The amount of memory allocated is returned here. * type: Malloc bucket (kernel only). * flags: Malloc flags (kernel only). * error_str: If non-NULL, it will contain error information (without * a terminating newline) if an error is returned. * error_str_len: Allocated length of the error string. * * Returns 0 for success, non-zero for failure. */ int scsi_parse_transportid(char *transportid_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len) { char *tmpstr; scsi_nv_status status; u_int num_proto_entries; int retval, table_entry; retval = 0; table_entry = 0; /* * We do allow a period as well as a comma to separate the protocol * from the ID string. This is to accommodate iSCSI names, which * start with "iqn.". */ tmpstr = strsep(&transportid_str, ",."); if (tmpstr == NULL) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: transportid_str is NULL", __func__); } retval = 1; goto bailout; } num_proto_entries = nitems(scsi_proto_map); status = scsi_get_nv(scsi_proto_map, num_proto_entries, tmpstr, &table_entry, SCSI_NV_FLAG_IG_CASE); if (status != SCSI_NV_FOUND) { if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: %s protocol " "name %s", __func__, (status == SCSI_NV_AMBIGUOUS) ?
"ambiguous" : "invalid", tmpstr); } retval = 1; goto bailout; } switch (scsi_proto_map[table_entry].value) { case SCSI_PROTO_FC: case SCSI_PROTO_1394: case SCSI_PROTO_SAS: retval = scsi_parse_transportid_64bit( scsi_proto_map[table_entry].value, transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SPI: retval = scsi_parse_transportid_spi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_RDMA: retval = scsi_parse_transportid_rdma(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_ISCSI: retval = scsi_parse_transportid_iscsi(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SOP: retval = scsi_parse_transportid_sop(transportid_str, hdr, alloc_len, #ifdef _KERNEL type, flags, #endif error_str, error_str_len); break; case SCSI_PROTO_SSA: case SCSI_PROTO_ADITP: case SCSI_PROTO_ATA: case SCSI_PROTO_UAS: case SCSI_PROTO_NONE: default: /* * There is no format defined for a Transport ID for these * protocols. So even if the user gives us something, we * have no way to turn it into a standard SCSI Transport ID. */ retval = 1; if (error_str != NULL) { snprintf(error_str, error_str_len, "%s: no Transport " "ID format exists for protocol %s", __func__, tmpstr); } goto bailout; break; /* NOTREACHED */ } bailout: return (retval); } struct scsi_attrib_table_entry scsi_mam_attr_table[] = { { SMA_ATTR_REM_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Remaining Capacity in Partition", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf,/*parse_str*/ NULL }, { SMA_ATTR_MAX_CAP_PARTITION, SCSI_ATTR_FLAG_NONE, "Maximum Capacity in Partition", /*suffix*/"MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TAPEALERT_FLAGS, SCSI_ATTR_FLAG_HEX, "TapeAlert Flags", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_COUNT, SCSI_ATTR_FLAG_NONE, "Load Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_SPACE_REMAINING, SCSI_ATTR_FLAG_NONE, "MAM Space Remaining", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FORMAT_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Format Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_INITIALIZATION_COUNT, SCSI_ATTR_FLAG_NONE, "Initialization Count", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_ID, SCSI_ATTR_FLAG_NONE, "Volume Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOLUME_CHANGE_REF, SCSI_ATTR_FLAG_HEX, "Volume Change Reference", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_1, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 1", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_DEV_SERIAL_LAST_LOAD_2, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 2", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { 
SMA_ATTR_DEV_SERIAL_LAST_LOAD_3, SCSI_ATTR_FLAG_NONE, "Device Vendor/Serial at Last Load - 3", /*suffix*/NULL, /*to_str*/ scsi_attrib_vendser_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_LT, SCSI_ATTR_FLAG_NONE, "Total MB Written in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_LT, SCSI_ATTR_FLAG_NONE, "Total MB Read in Medium Life", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_WRITTEN_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Written in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TOTAL_MB_READ_CUR, SCSI_ATTR_FLAG_NONE, "Total MB Read in Current/Last Load", /*suffix*/ "MB", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_FIRST_ENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_NEXT_UNENC_BLOCK, SCSI_ATTR_FLAG_NONE, "Logical Position of First Unencrypted Block after First " "Encrypted Block", /*suffix*/ NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIUM_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Medium Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_PART_USAGE_HIST, SCSI_ATTR_FLAG_NONE, "Partition Usage History", /*suffix*/ NULL, /*to_str*/ NULL, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF, SCSI_ATTR_FLAG_NONE, "Medium Manufacturer", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_LENGTH, SCSI_ATTR_FLAG_NONE, "Medium Length", /*suffix*/"m", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_WIDTH, SCSI_ATTR_FLAG_FP | SCSI_ATTR_FLAG_DIV_10 | SCSI_ATTR_FLAG_FP_1DIGIT, "Medium Width", /*suffix*/"mm", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_ASSIGNING_ORG, SCSI_ATTR_FLAG_NONE, "Assigning Organization", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_DENSITY_CODE, SCSI_ATTR_FLAG_HEX, "Medium Density Code", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_MANUF_DATE, SCSI_ATTR_FLAG_NONE, "Medium Manufacture Date", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MAM_CAPACITY, SCSI_ATTR_FLAG_NONE, "MAM Capacity", /*suffix*/"bytes", /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE, SCSI_ATTR_FLAG_HEX, "Medium Type", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_TYPE_INFO, SCSI_ATTR_FLAG_HEX, "Medium Type Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MED_SERIAL_NUM, SCSI_ATTR_FLAG_NONE, "Medium Serial Number", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VENDOR, SCSI_ATTR_FLAG_NONE, "Application Vendor", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_NAME, SCSI_ATTR_FLAG_NONE, "Application Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_VERSION, SCSI_ATTR_FLAG_NONE, "Application Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_USER_MED_TEXT_LABEL, SCSI_ATTR_FLAG_NONE, "User Medium Text Label", /*suffix*/NULL, /*to_str*/ 
scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LAST_WRITTEN_TIME, SCSI_ATTR_FLAG_NONE, "Date and Time Last Written", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_TEXT_LOCAL_ID, SCSI_ATTR_FLAG_HEX, "Text Localization Identifier", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_BARCODE, SCSI_ATTR_FLAG_NONE, "Barcode", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_HOST_OWNER_NAME, SCSI_ATTR_FLAG_NONE, "Owning Host Textual Name", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_MEDIA_POOL, SCSI_ATTR_FLAG_NONE, "Media Pool", /*suffix*/NULL, /*to_str*/ scsi_attrib_text_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_PART_USER_LABEL, SCSI_ATTR_FLAG_NONE, "Partition User Text Label", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_LOAD_UNLOAD_AT_PART, SCSI_ATTR_FLAG_NONE, "Load/Unload at Partition", /*suffix*/NULL, /*to_str*/ scsi_attrib_int_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_APP_FORMAT_VERSION, SCSI_ATTR_FLAG_NONE, "Application Format Version", /*suffix*/NULL, /*to_str*/ scsi_attrib_ascii_sbuf, /*parse_str*/ NULL }, { SMA_ATTR_VOL_COHERENCY_INFO, SCSI_ATTR_FLAG_NONE, "Volume Coherency Information", /*suffix*/NULL, /*to_str*/ scsi_attrib_volcoh_sbuf, /*parse_str*/ NULL }, { 0x0ff1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ff7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x0ffe, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f1, SCSI_ATTR_FLAG_NONE, "Spectra MLM Creation", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f2, SCSI_ATTR_FLAG_NONE, "Spectra MLM C3", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f3, SCSI_ATTR_FLAG_NONE, "Spectra MLM RW", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f4, SCSI_ATTR_FLAG_NONE, "Spectra MLM SDC List", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17f7, SCSI_ATTR_FLAG_NONE, "Spectra MLM Post Scan", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, { 0x17ff, SCSI_ATTR_FLAG_NONE, "Spectra MLM Checksum", /*suffix*/NULL, /*to_str*/ scsi_attrib_hexdump_sbuf, /*parse_str*/ NULL }, }; /* * Print out Volume Coherency Information (Attribute 0x080c). * This field has two variable length members, including one at the * beginning, so it isn't practical to have a fixed structure definition. * This is current as of SSC4r03 (see section 4.2.21.3), dated March 25, * 2013. 
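 *
 * As decoded below, the attribute value has this rough layout (a
 * sketch derived from this code, not a restatement of the standard):
 *
 *	byte 0:		Volume Change Reference Value length (n)
 *	bytes 1..n:	Volume Change Reference Value
 *	next 8 bytes:	Volume Coherency Count
 *	next 8 bytes:	Volume Coherency Set Identifier
 *	next 2 bytes:	Application Client Specific Information length (m)
 *	next m bytes:	Application Client Specific Information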
*/ int scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; uint64_t tmp_val; uint8_t *cur_ptr; int retval; int vcr_len, as_len; retval = 0; tmp_val = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * It isn't clear from the spec whether a field length of * 0 is invalid here. It probably is, but be lenient here * to avoid inconveniencing the user. */ goto bailout; } cur_ptr = hdr->attribute; vcr_len = *cur_ptr; cur_ptr++; sbuf_printf(sb, "\n\tVolume Change Reference Value:"); switch (vcr_len) { case 0: if (error_str != NULL) { snprintf(error_str, error_str_len, "Volume Change " "Reference value has length of 0"); } retval = 1; goto bailout; break; /*NOTREACHED*/ case 1: tmp_val = *cur_ptr; break; case 2: tmp_val = scsi_2btoul(cur_ptr); break; case 3: tmp_val = scsi_3btoul(cur_ptr); break; case 4: tmp_val = scsi_4btoul(cur_ptr); break; case 8: tmp_val = scsi_8btou64(cur_ptr); break; default: sbuf_printf(sb, "\n"); sbuf_hexdump(sb, cur_ptr, vcr_len, NULL, 0); break; } if (vcr_len <= 8) sbuf_printf(sb, " 0x%jx\n", (uintmax_t)tmp_val); cur_ptr += vcr_len; tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Count: %ju\n", (uintmax_t)tmp_val); cur_ptr += sizeof(tmp_val); tmp_val = scsi_8btou64(cur_ptr); sbuf_printf(sb, "\tVolume Coherency Set Identifier: 0x%jx\n", (uintmax_t)tmp_val); /* * Figure out how long the Application Client Specific Information * is and produce a hexdump. */ cur_ptr += sizeof(tmp_val); as_len = scsi_2btoul(cur_ptr); cur_ptr += sizeof(uint16_t); sbuf_printf(sb, "\tApplication Client Specific Information: "); if (((as_len == SCSI_LTFS_VER0_LEN) || (as_len == SCSI_LTFS_VER1_LEN)) && (strncmp(cur_ptr, SCSI_LTFS_STR_NAME, SCSI_LTFS_STR_LEN) == 0)) { sbuf_printf(sb, "LTFS\n"); cur_ptr += SCSI_LTFS_STR_LEN + 1; if (cur_ptr[SCSI_LTFS_UUID_LEN] != '\0') cur_ptr[SCSI_LTFS_UUID_LEN] = '\0'; sbuf_printf(sb, "\tLTFS UUID: %s\n", cur_ptr); cur_ptr += SCSI_LTFS_UUID_LEN + 1; /* XXX KDM check the length */ sbuf_printf(sb, "\tLTFS Version: %d\n", *cur_ptr); } else { sbuf_printf(sb, "Unknown\n"); sbuf_hexdump(sb, cur_ptr, as_len, NULL, 0); } bailout: return (retval); } int scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size; struct scsi_attrib_vendser *vendser; cam_strvis_flags strvis_flags; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (field_size > avail_len) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; goto bailout; } else if (field_size == 0) { /* * A field size of 0 doesn't make sense here. The device * can at least give you the vendor ID, even if it can't * give you the serial number. 
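 *
 * When the field is present, the code below prints the fixed-size
 * vendor field, a space, and the serial number field, e.g. (values
 * invented for illustration): "SEAGATE 9WK1ZHMV".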
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "The length of " "attribute ID 0x%.4x is 0", scsi_2btoul(hdr->id)); } retval = 1; goto bailout; } vendser = (struct scsi_attrib_vendser *)hdr->attribute; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, vendser->vendor, sizeof(vendser->vendor), strvis_flags); sbuf_putc(sb, ' '); cam_strvis_sbuf(sb, vendser->serial_num, sizeof(vendser->serial_num), strvis_flags); bailout: return (retval); } int scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint32_t field_size; ssize_t avail_len; uint32_t print_len; uint8_t *num_ptr; int retval = 0; field_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); print_len = MIN(avail_len, field_size); num_ptr = hdr->attribute; if (print_len > 0) { sbuf_printf(sb, "\n"); sbuf_hexdump(sb, num_ptr, print_len, NULL, 0); } return (retval); } int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { uint64_t print_number; size_t avail_len; uint32_t number_size; int retval = 0; number_size = scsi_2btoul(hdr->length); avail_len = valid_len - sizeof(*hdr); if (avail_len < number_size) { if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, number_size); } retval = 1; goto bailout; } switch (number_size) { case 0: /* * We don't treat this as an error, since there may be * scenarios where a device reports a field but then gives * a length of 0. See the note in scsi_attrib_ascii_sbuf(). */ goto bailout; break; /*NOTREACHED*/ case 1: print_number = hdr->attribute[0]; break; case 2: print_number = scsi_2btoul(hdr->attribute); break; case 3: print_number = scsi_3btoul(hdr->attribute); break; case 4: print_number = scsi_4btoul(hdr->attribute); break; case 8: print_number = scsi_8btou64(hdr->attribute); break; default: /* * If we wind up here, the number is too big to print * normally, so just do a hexdump. */ retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, flags, output_flags, error_str, error_str_len); goto bailout; break; } if (flags & SCSI_ATTR_FLAG_FP) { #ifndef _KERNEL long double num_float; num_float = (long double)print_number; if (flags & SCSI_ATTR_FLAG_DIV_10) num_float /= 10; sbuf_printf(sb, "%.*Lf", (flags & SCSI_ATTR_FLAG_FP_1DIGIT) ? 1 : 0, num_float); #else /* _KERNEL */ sbuf_printf(sb, "%ju", (flags & SCSI_ATTR_FLAG_DIV_10) ?
(print_number / 10) : print_number); #endif /* _KERNEL */ } else if (flags & SCSI_ATTR_FLAG_HEX) { sbuf_printf(sb, "0x%jx", (uintmax_t)print_number); } else sbuf_printf(sb, "%ju", (uintmax_t)print_number); bailout: return (retval); } int scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if (print_size > 0) { cam_strvis_flags strvis_flags; switch (output_flags & SCSI_ATTR_OUTPUT_NONASCII_MASK) { case SCSI_ATTR_OUTPUT_NONASCII_TRIM: strvis_flags = CAM_STRVIS_FLAG_NONASCII_TRIM; break; case SCSI_ATTR_OUTPUT_NONASCII_RAW: strvis_flags = CAM_STRVIS_FLAG_NONASCII_RAW; break; case SCSI_ATTR_OUTPUT_NONASCII_ESC: default: strvis_flags = CAM_STRVIS_FLAG_NONASCII_ESC; break; } cam_strvis_sbuf(sb, hdr->attribute, print_size, strvis_flags); } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. If * the field length is 0, that is allowed by the spec. * e.g. in SPC-4r37, section 7.4.2.2.5, VOLUME IDENTIFIER * "This attribute indicates the current volume identifier * (see SMC-3) of the medium. If the device server supports * this attribute but does not have access to the volume * identifier, the device server shall report this attribute * with an attribute length value of zero." */ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } int scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len) { size_t avail_len; uint32_t field_size, print_size; int retval = 0; int esc_text = 1; avail_len = valid_len - sizeof(*hdr); field_size = scsi_2btoul(hdr->length); print_size = MIN(avail_len, field_size); if ((output_flags & SCSI_ATTR_OUTPUT_TEXT_MASK) == SCSI_ATTR_OUTPUT_TEXT_RAW) esc_text = 0; if (print_size > 0) { uint32_t i; for (i = 0; i < print_size; i++) { if (hdr->attribute[i] == '\0') continue; else if (((unsigned char)hdr->attribute[i] < 0x80) || (esc_text == 0)) sbuf_putc(sb, hdr->attribute[i]); else sbuf_printf(sb, "%%%02x", (unsigned char)hdr->attribute[i]); } } else if (avail_len < field_size) { /* * We only report an error if the user didn't allocate * enough space to hold the full value of this field. 
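 *
 * (For the printable path above: bytes at or above 0x80 are emitted
 * as percent-escapes when escaping is enabled, so a raw byte 0xe9
 * would appear in the output as "%e9".)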
*/ if (error_str != NULL) { snprintf(error_str, error_str_len, "Available " "length of attribute ID 0x%.4x %zu < field " "length %u", scsi_2btoul(hdr->id), avail_len, field_size); } retval = 1; } return (retval); } struct scsi_attrib_table_entry * scsi_find_attrib_entry(struct scsi_attrib_table_entry *table, size_t num_table_entries, uint32_t id) { uint32_t i; for (i = 0; i < num_table_entries; i++) { if (table[i].id == id) return (&table[i]); } return (NULL); } struct scsi_attrib_table_entry * scsi_get_attrib_entry(uint32_t id) { return (scsi_find_attrib_entry(scsi_mam_attr_table, nitems(scsi_mam_attr_table), id)); } int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len, struct scsi_mam_attribute_header *hdr, uint32_t output_flags, char *error_str, size_t error_str_len) { int retval; switch (hdr->byte2 & SMA_FORMAT_MASK) { case SMA_FORMAT_ASCII: retval = scsi_attrib_ascii_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str,error_str_len); break; case SMA_FORMAT_BINARY: if (scsi_2btoul(hdr->length) <= 8) retval = scsi_attrib_int_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); else retval = scsi_attrib_hexdump_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; case SMA_FORMAT_TEXT: retval = scsi_attrib_text_sbuf(sb, hdr, valid_len, SCSI_ATTR_FLAG_NONE, output_flags, error_str, error_str_len); break; default: if (error_str != NULL) { snprintf(error_str, error_str_len, "Unknown attribute " "format 0x%x", hdr->byte2 & SMA_FORMAT_MASK); } retval = 1; goto bailout; break; /*NOTREACHED*/ } sbuf_trim(sb); bailout: return (retval); } void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, const char *desc) { int need_space = 0; uint32_t len; uint32_t id; /* * We can't do anything if we don't have enough valid data for the * header. */ if (valid_len < sizeof(*hdr)) return; id = scsi_2btoul(hdr->id); /* * Note that we print out the value of the attribute listed in the * header, regardless of whether we actually got that many bytes * back from the device through the controller. A truncated result * could be the result of a failure to ask for enough data; the * header indicates how many bytes are allocated for this attribute * in the MAM. */ len = scsi_2btoul(hdr->length); if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_MASK) == SCSI_ATTR_OUTPUT_FIELD_NONE) return; if ((output_flags & SCSI_ATTR_OUTPUT_FIELD_DESC) && (desc != NULL)) { sbuf_printf(sb, "%s", desc); need_space = 1; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_NUM) { sbuf_printf(sb, "%s(0x%.4x)", (need_space) ? " " : "", id); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_SIZE) { sbuf_printf(sb, "%s[%d]", (need_space) ? " " : "", len); need_space = 0; } if (output_flags & SCSI_ATTR_OUTPUT_FIELD_RW) { sbuf_printf(sb, "%s(%s)", (need_space) ? " " : "", (hdr->byte2 & SMA_READ_ONLY) ? 
"RO" : "RW"); } sbuf_printf(sb, ": "); } int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, struct scsi_attrib_table_entry *user_table, size_t num_user_entries, int prefer_user_table, uint32_t output_flags, char *error_str, int error_str_len) { int retval; struct scsi_attrib_table_entry *table1 = NULL, *table2 = NULL; struct scsi_attrib_table_entry *entry = NULL; size_t table1_size = 0, table2_size = 0; uint32_t id; retval = 0; if (valid_len < sizeof(*hdr)) { retval = 1; goto bailout; } id = scsi_2btoul(hdr->id); if (user_table != NULL) { if (prefer_user_table != 0) { table1 = user_table; table1_size = num_user_entries; table2 = scsi_mam_attr_table; table2_size = nitems(scsi_mam_attr_table); } else { table1 = scsi_mam_attr_table; table1_size = nitems(scsi_mam_attr_table); table2 = user_table; table2_size = num_user_entries; } } else { table1 = scsi_mam_attr_table; table1_size = nitems(scsi_mam_attr_table); } entry = scsi_find_attrib_entry(table1, table1_size, id); if (entry != NULL) { scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); if (entry->to_str == NULL) goto print_default; retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } if (table2 != NULL) { entry = scsi_find_attrib_entry(table2, table2_size, id); if (entry != NULL) { if (entry->to_str == NULL) goto print_default; scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, entry->desc); retval = entry->to_str(sb, hdr, valid_len, entry->flags, output_flags, error_str, error_str_len); goto bailout; } } scsi_attrib_prefix_sbuf(sb, output_flags, hdr, valid_len, NULL); print_default: retval = scsi_attrib_value_sbuf(sb, valid_len, hdr, output_flags, error_str, error_str_len); bailout: if (retval == 0) { if ((entry != NULL) && (entry->suffix != NULL)) sbuf_printf(sb, " %s", entry->suffix); sbuf_trim(sb); sbuf_printf(sb, "\n"); } return (retval); } void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_test_unit_ready *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_test_unit_ready *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = TEST_UNIT_READY; } void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_request_sense *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_request_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REQUEST_SENSE; scsi_cmd->length = dxfer_len; } void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout) { struct scsi_inquiry *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/inq_buf, /*dxfer_len*/inq_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_inquiry *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = INQUIRY; if 
(evpd) { scsi_cmd->byte2 |= SI_EVPD; scsi_cmd->page_code = page_code; } scsi_ulto2b(inq_len, scsi_cmd->length); } void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { scsi_mode_sense_len(csio, retries, cbfcnp, tag_action, dbd, page_code, page, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_sense_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_6; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = page_code | page; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. */ struct scsi_mode_sense_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_sense_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SENSE_10; if (dbd != 0) scsi_cmd->byte2 |= SMS_DBD; scsi_cmd->page = page_code | page; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_IN, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { scsi_mode_select_len(csio, retries, cbfcnp, tag_action, scsi_page_fmt, save_pages, param_buf, param_len, 0, sense_len, timeout); } void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; /* * Use the smallest possible command to perform the operation. */ if ((param_len < 256) && (minimum_cmd_size < 10)) { /* * We can fit in a 6 byte cdb. */ struct scsi_mode_select_6 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_6 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_6; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_cmd->length = param_len; cdb_len = sizeof(*scsi_cmd); } else { /* * Need a 10 byte cdb. 
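 * (We land here when the parameter list is 256 bytes or longer, since
 * MODE SELECT(6) only has a one-byte parameter list length field, or
 * when the caller forces a larger command via minimum_cmd_size.)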
*/ struct scsi_mode_select_10 *scsi_cmd; scsi_cmd = (struct scsi_mode_select_10 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MODE_SELECT_10; if (scsi_page_fmt != 0) scsi_cmd->byte2 |= SMS_PF; if (save_pages != 0) scsi_cmd->byte2 |= SMS_SP; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, CAM_DIR_OUT, tag_action, param_buf, param_len, sense_len, cdb_len, timeout); } void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, u_int8_t page, int save_pages, int ppc, u_int32_t paramptr, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_sense *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_sense *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SENSE; scsi_cmd->page = page_code | page; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (ppc != 0) scsi_cmd->byte2 |= SLS_PPC; scsi_ulto2b(paramptr, scsi_cmd->paramptr); scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, int save_pages, int pc_reset, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_log_select *scsi_cmd; u_int8_t cdb_len; scsi_cmd = (struct scsi_log_select *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = LOG_SELECT; scsi_cmd->page = page_code & SLS_PAGE_CODE; if (save_pages != 0) scsi_cmd->byte2 |= SLS_SP; if (pc_reset != 0) scsi_cmd->byte2 |= SLS_PCR; scsi_ulto2b(param_len, scsi_cmd->length); cdb_len = sizeof(*scsi_cmd); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/param_buf, /*dxfer_len*/param_len, sense_len, cdb_len, timeout); } /* * Prevent or allow the user to remove the media */ void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout) { struct scsi_prevent *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_prevent *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PREVENT_ALLOW; scsi_cmd->how = action; } /* XXX allow specification of address and PMI bit and LBA */ void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *rcap_buf, u_int8_t sense_len, u_int32_t timeout) { struct scsi_read_capacity *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rcap_buf, /*dxfer_len*/sizeof(*rcap_buf), sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_capacity *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_CAPACITY; } void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int 
rcap_buf_len, uint8_t sense_len, uint32_t timeout) { struct scsi_read_capacity_16 *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rcap_buf, /*dxfer_len*/rcap_buf_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SERVICE_ACTION_IN; scsi_cmd->service_action = SRC16_SERVICE_ACTION; scsi_u64to8b(lba, scsi_cmd->addr); scsi_ulto4b(rcap_buf_len, scsi_cmd->alloc_len); if (pmi) scsi_cmd->reladr |= SRC16_PMI; if (reladr) scsi_cmd->reladr |= SRC16_RELADR; } void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t select_report, struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_report_luns *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)rpl_buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_report_luns *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = REPORT_LUNS; scsi_cmd->select_report = select_report; scsi_ulto4b(alloc_len, scsi_cmd->length); } void scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t pdf, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_target_group *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/(u_int8_t *)buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MAINTENANCE_IN; scsi_cmd->service_action = REPORT_TARGET_PORT_GROUPS | pdf; scsi_ulto4b(alloc_len, scsi_cmd->length); } void scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_target_group *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/(u_int8_t *)buf, /*dxfer_len*/alloc_len, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_target_group *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MAINTENANCE_OUT; scsi_cmd->service_action = SET_TARGET_PORT_GROUPS; scsi_ulto4b(alloc_len, scsi_cmd->length); } /* * Synchronize the media to the contents of the cache for * the given lba/count pair. Specifying 0/0 means sync * the whole cache.
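 *
 * For example, a userland caller could flush a drive's entire cache
 * roughly as follows (a hypothetical sketch: "dev" is assumed to come
 * from cam_open_device(), headers and error recovery are omitted):
 *
 *	union ccb *ccb = cam_getccb(dev);
 *
 *	scsi_synchronize_cache(&ccb->csio, 1, NULL, MSG_SIMPLE_Q_TAG,
 *	    0, 0, SSD_FULL_SIZE, 5000);
 *	if (cam_send_ccb(dev, ccb) < 0
 *	 || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		warnx("SYNCHRONIZE CACHE failed");
 *	cam_freeccb(ccb);
 *
 * The two zeros are begin_lba and lb_count (the whole-cache case above);
 * the retry count is 1 and the timeout is in milliseconds.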
*/ void scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t begin_lba, u_int16_t lb_count, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sync_cache *scsi_cmd; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); scsi_cmd = (struct scsi_sync_cache *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SYNCHRONIZE_CACHE; scsi_ulto4b(begin_lba, scsi_cmd->begin_lba); scsi_ulto2b(lb_count, scsi_cmd->lb_count); } void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { int read; u_int8_t cdb_len; read = (readop & SCSI_RW_DIRMASK) == SCSI_RW_READ; /* * Use the smallest possible command to perform the operation * as some legacy hardware does not support the 10 byte commands. * If any of the bits in byte2 is set, we have to go with a larger * command. */ if ((minimum_cmd_size < 10) && ((lba & 0x1fffff) == lba) && ((block_count & 0xff) == block_count) && (byte2 == 0)) { /* * We can fit in a 6 byte cdb. */ struct scsi_rw_6 *scsi_cmd; scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_6 : WRITE_6; scsi_ulto3b(lba, scsi_cmd->addr); scsi_cmd->length = block_count & 0xff; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->length, dxfer_len)); } else if ((minimum_cmd_size < 12) && ((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * Need a 10 byte cdb. */ struct scsi_rw_10 *scsi_cmd; scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_10 : WRITE_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], dxfer_len)); } else if ((minimum_cmd_size < 16) && ((block_count & 0xffffffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * The block count is too big for a 10 byte CDB, use a 12 * byte CDB. */ struct scsi_rw_12 *scsi_cmd; scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? READ_12 : WRITE_12; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], scsi_cmd->length[2], scsi_cmd->length[3], dxfer_len)); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32, or if the user asks for a 16 byte command. */ struct scsi_rw_16 *scsi_cmd; scsi_cmd = (struct scsi_rw_16 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = read ? 
READ_16 : WRITE_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_cmd->reserved = 0; scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); } cam_fill_csio(csio, retries, cbfcnp, (read ? CAM_DIR_IN : CAM_DIR_OUT) | ((readop & SCSI_RW_BIO) != 0 ? CAM_DATA_BIO : 0), tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { u_int8_t cdb_len; if ((minimum_cmd_size < 16) && ((block_count & 0xffff) == block_count) && ((lba & 0xffffffff) == lba)) { /* * Need a 10 byte cdb. */ struct scsi_write_same_10 *scsi_cmd; scsi_cmd = (struct scsi_write_same_10 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = WRITE_SAME_10; scsi_cmd->byte2 = byte2; scsi_ulto4b(lba, scsi_cmd->addr); scsi_cmd->group = 0; scsi_ulto2b(block_count, scsi_cmd->length); scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->length[0], scsi_cmd->length[1], dxfer_len)); } else { /* * 16 byte CDB. We'll only get here if the LBA is larger * than 2^32, or if the user asks for a 16 byte command. */ struct scsi_write_same_16 *scsi_cmd; scsi_cmd = (struct scsi_write_same_16 *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = WRITE_SAME_16; scsi_cmd->byte2 = byte2; scsi_u64to8b(lba, scsi_cmd->addr); scsi_ulto4b(block_count, scsi_cmd->length); scsi_cmd->group = 0; scsi_cmd->control = 0; cdb_len = sizeof(*scsi_cmd); CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE, ("16byte: %x%x%x%x%x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0], scsi_cmd->addr[1], scsi_cmd->addr[2], scsi_cmd->addr[3], scsi_cmd->addr[4], scsi_cmd->addr[5], scsi_cmd->addr[6], scsi_cmd->addr[7], scsi_cmd->length[0], scsi_cmd->length[1], scsi_cmd->length[2], scsi_cmd->length[3], dxfer_len)); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { - scsi_ata_pass_16(csio, - retries, - cbfcnp, - /*flags*/CAM_DIR_IN, - tag_action, - /*protocol*/AP_PROTO_PIO_IN, - /*ata_flags*/AP_FLAG_TDIR_FROM_DEV| - AP_FLAG_BYT_BLOK_BYTES|AP_FLAG_TLEN_SECT_CNT, - /*features*/0, - /*sector_count*/dxfer_len, - /*lba*/0, - /*command*/ATA_ATA_IDENTIFY, - /*control*/0, - data_ptr, - dxfer_len, - sense_len, - timeout); + scsi_ata_pass(csio, + retries, + cbfcnp, + /*flags*/CAM_DIR_IN, + tag_action, + /*protocol*/AP_PROTO_PIO_IN, + /*ata_flags*/AP_FLAG_TDIR_FROM_DEV | + AP_FLAG_BYT_BLOK_BYTES | + AP_FLAG_TLEN_SECT_CNT, + /*features*/0, + /*sector_count*/dxfer_len, + /*lba*/0, + /*command*/ATA_ATA_IDENTIFY, + /*device*/ 0, + /*icc*/ 0, + /*auxiliary*/ 0, + /*control*/0, + data_ptr, + dxfer_len, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*minimum_cmd_size*/ 0, + sense_len, + timeout); } void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t block_count, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t 
timeout) { scsi_ata_pass_16(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*protocol*/AP_EXTEND|AP_PROTO_DMA, /*ata_flags*/AP_FLAG_TLEN_SECT_CNT|AP_FLAG_BYT_BLOK_BLOCKS, /*features*/ATA_DSM_TRIM, /*sector_count*/block_count, /*lba*/0, /*command*/ATA_DATA_SET_MANAGEMENT, /*control*/0, data_ptr, dxfer_len, sense_len, timeout); +} + +int +scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint32_t log_address, + uint32_t page_number, uint16_t block_count, + uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout) +{ + uint8_t command, protocol_out; + uint16_t count_out; + uint64_t lba; + int retval; + + retval = 0; + + switch (protocol) { + case AP_PROTO_DMA: + count_out = block_count; + command = ATA_READ_LOG_DMA_EXT; + protocol_out = AP_PROTO_DMA; + break; + case AP_PROTO_PIO_IN: + default: + count_out = block_count; + command = ATA_READ_LOG_EXT; + protocol_out = AP_PROTO_PIO_IN; + break; + } + + lba = (((uint64_t)page_number & 0xff00) << 32) | + ((page_number & 0x00ff) << 8) | + (log_address & 0xff); + + protocol_out |= AP_EXTEND; + + retval = scsi_ata_pass(csio, + retries, + cbfcnp, + /*flags*/CAM_DIR_IN, + tag_action, + /*protocol*/ protocol_out, + /*ata_flags*/AP_FLAG_TLEN_SECT_CNT | + AP_FLAG_BYT_BLOK_BLOCKS | + AP_FLAG_TDIR_FROM_DEV, + /*feature*/ 0, + /*sector_count*/ count_out, + /*lba*/ lba, + /*command*/ command, + /*device*/ 0, + /*icc*/ 0, + /*auxiliary*/ 0, + /*control*/0, + data_ptr, + dxfer_len, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*minimum_cmd_size*/ 0, + sense_len, + timeout); + + return (retval); +} + +/* + * Note! This is an unusual CDB building function because it can return + * an error in the event that the command in question requires a variable + * length CDB, but the caller has not given storage space for one or has not + * given enough storage space. If there is enough space available in the + * standard SCSI CCB CDB bytes, we'll prefer that over passed in storage. + */ +int +scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint32_t flags, uint8_t tag_action, + uint8_t protocol, uint8_t ata_flags, uint16_t features, + uint16_t sector_count, uint64_t lba, uint8_t command, + uint8_t device, uint8_t icc, uint32_t auxiliary, + uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len, + uint8_t *cdb_storage, size_t cdb_storage_len, + int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout) +{ + uint32_t cam_flags; + uint8_t *cdb_ptr; + int cmd_size; + int retval; + uint8_t cdb_len; + + retval = 0; + cam_flags = flags; + + /* + * Round the user's request to the nearest command size that is at + * least as big as what he requested. + */ + if (minimum_cmd_size <= 12) + cmd_size = 12; + else if (minimum_cmd_size > 16) + cmd_size = 32; + else + cmd_size = 16; + + /* + * If we have parameters that require a 48-bit ATA command, we have to + * use the 16 byte ATA PASS-THROUGH command at least. + */ + if (((lba > ATA_MAX_28BIT_LBA) + || (sector_count > 255) + || (features > 255) + || (protocol & AP_EXTEND)) + && ((cmd_size < 16) + || ((protocol & AP_EXTEND) == 0))) { + if (cmd_size < 16) + cmd_size = 16; + protocol |= AP_EXTEND; + } + + /* + * The icc and auxiliary ATA registers are only supported in the + * 32-byte version of the ATA PASS-THROUGH command. 
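+ *
+ * Since a 32-byte CDB does not fit in the CCB's built-in CDB array, a
+ * caller that needs icc or auxiliary must also pass external CDB storage
+ * and check the return value.  A hypothetical sketch (buf, lba and
+ * auxval are assumed to be set up by the caller; headers and error
+ * recovery are omitted):
+ *
+ *	uint8_t cdb[32];
+ *
+ *	if (scsi_ata_pass(&ccb->csio, 1, NULL, CAM_DIR_IN,
+ *	    MSG_SIMPLE_Q_TAG, AP_PROTO_DMA | AP_EXTEND,
+ *	    AP_FLAG_TLEN_SECT_CNT | AP_FLAG_BYT_BLOK_BLOCKS |
+ *	    AP_FLAG_TDIR_FROM_DEV, 0, 1, lba, ATA_READ_LOG_DMA_EXT,
+ *	    0, 0, auxval, 0, buf, 512, cdb, sizeof(cdb), 32,
+ *	    SSD_FULL_SIZE, 5000) != 0)
+ *		errx(1, "not enough CDB storage");
+ *
+ * In that case the function sets CAM_CDB_POINTER and builds the CDB in
+ * cdb[], so that storage must remain valid until the command completes.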
+ */ + if ((icc != 0) + || (auxiliary != 0)) { + cmd_size = 32; + protocol |= AP_EXTEND; + } + + if ((cmd_size > sizeof(csio->cdb_io.cdb_bytes)) + && ((cdb_storage == NULL) + || (cdb_storage_len < cmd_size))) { + retval = 1; + goto bailout; + } + + /* + * At this point we know we have enough space to store the command + * in one place or another. We prefer the built-in array, but use + * the passed in storage if necessary. + */ + if (cmd_size <= sizeof(csio->cdb_io.cdb_bytes)) + cdb_ptr = csio->cdb_io.cdb_bytes; + else { + cdb_ptr = cdb_storage; + cam_flags |= CAM_CDB_POINTER; + } + + if (cmd_size <= 12) { + struct ata_pass_12 *cdb; + + cdb = (struct ata_pass_12 *)cdb_ptr; + cdb_len = sizeof(*cdb); + bzero(cdb, cdb_len); + + cdb->opcode = ATA_PASS_12; + cdb->protocol = protocol; + cdb->flags = ata_flags; + cdb->features = features; + cdb->sector_count = sector_count; + cdb->lba_low = lba & 0xff; + cdb->lba_mid = (lba >> 8) & 0xff; + cdb->lba_high = (lba >> 16) & 0xff; + cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA; + cdb->command = command; + cdb->control = control; + } else if (cmd_size <= 16) { + struct ata_pass_16 *cdb; + + cdb = (struct ata_pass_16 *)cdb_ptr; + cdb_len = sizeof(*cdb); + bzero(cdb, cdb_len); + + cdb->opcode = ATA_PASS_16; + cdb->protocol = protocol; + cdb->flags = ata_flags; + cdb->features = features & 0xff; + cdb->sector_count = sector_count & 0xff; + cdb->lba_low = lba & 0xff; + cdb->lba_mid = (lba >> 8) & 0xff; + cdb->lba_high = (lba >> 16) & 0xff; + /* + * If AP_EXTEND is set, we're sending a 48-bit command. + * Otherwise it's a 28-bit command. + */ + if (protocol & AP_EXTEND) { + cdb->lba_low_ext = (lba >> 24) & 0xff; + cdb->lba_mid_ext = (lba >> 32) & 0xff; + cdb->lba_high_ext = (lba >> 40) & 0xff; + cdb->features_ext = (features >> 8) & 0xff; + cdb->sector_count_ext = (sector_count >> 8) & 0xff; + cdb->device = device | ATA_DEV_LBA; + } else { + cdb->lba_low_ext = (lba >> 24) & 0xf; + cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA; + } + cdb->command = command; + cdb->control = control; + } else { + struct ata_pass_32 *cdb; + uint8_t tmp_lba[8]; + + cdb = (struct ata_pass_32 *)cdb_ptr; + cdb_len = sizeof(*cdb); + bzero(cdb, cdb_len); + cdb->opcode = VARIABLE_LEN_CDB; + cdb->control = control; + cdb->length = sizeof(*cdb) - __offsetof(struct ata_pass_32, + service_action); + scsi_ulto2b(ATA_PASS_32_SA, cdb->service_action); + cdb->protocol = protocol; + cdb->flags = ata_flags; + + if ((protocol & AP_EXTEND) == 0) { + lba &= 0x0fffffff; + cdb->device = ((lba >> 24) & 0xf) | ATA_DEV_LBA; + features &= 0xff; + sector_count &= 0xff; + } else { + cdb->device = device | ATA_DEV_LBA; + } + scsi_u64to8b(lba, tmp_lba); + bcopy(&tmp_lba[2], cdb->lba, sizeof(cdb->lba)); + scsi_ulto2b(features, cdb->features); + scsi_ulto2b(sector_count, cdb->count); + cdb->command = command; + cdb->icc = icc; + scsi_ulto4b(auxiliary, cdb->auxiliary); + } + + cam_fill_csio(csio, + retries, + cbfcnp, + cam_flags, + tag_action, + data_ptr, + dxfer_len, + sense_len, + cmd_size, + timeout); +bailout: + return (retval); } void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t protocol, u_int8_t ata_flags, u_int16_t features, u_int16_t sector_count, uint64_t lba, u_int8_t command, u_int8_t control, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct ata_pass_16 *ata_cmd; ata_cmd = (struct ata_pass_16 *)&csio->cdb_io.cdb_bytes; ata_cmd->opcode =
ATA_PASS_16; ata_cmd->protocol = protocol; ata_cmd->flags = ata_flags; ata_cmd->features_ext = features >> 8; ata_cmd->features = features; ata_cmd->sector_count_ext = sector_count >> 8; ata_cmd->sector_count = sector_count; ata_cmd->lba_low = lba; ata_cmd->lba_mid = lba >> 8; ata_cmd->lba_high = lba >> 16; ata_cmd->device = ATA_DEV_LBA; if (protocol & AP_EXTEND) { ata_cmd->lba_low_ext = lba >> 24; ata_cmd->lba_mid_ext = lba >> 32; ata_cmd->lba_high_ext = lba >> 40; } else ata_cmd->device |= (lba >> 24) & 0x0f; ata_cmd->command = command; ata_cmd->control = control; cam_fill_csio(csio, retries, cbfcnp, flags, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*ata_cmd), timeout); } void scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_unmap *scsi_cmd; scsi_cmd = (struct scsi_unmap *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = UNMAP; scsi_cmd->byte2 = byte2; scsi_ulto4b(0, scsi_cmd->reserved); scsi_cmd->group = 0; scsi_ulto2b(dxfer_len, scsi_cmd->length); scsi_cmd->control = 0; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint8_t sense_len, uint32_t timeout) { struct scsi_receive_diag *scsi_cmd; scsi_cmd = (struct scsi_receive_diag *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = RECEIVE_DIAGNOSTIC; if (pcv) { scsi_cmd->byte2 |= SRD_PCV; scsi_cmd->page_code = page_code; } scsi_ulto2b(allocation_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, allocation_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int unit_offline, int device_offline, int self_test, int page_format, int self_test_code, uint8_t *data_ptr, uint16_t param_list_length, uint8_t sense_len, uint32_t timeout) { struct scsi_send_diag *scsi_cmd; scsi_cmd = (struct scsi_send_diag *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = SEND_DIAGNOSTIC; /* * The default self-test mode control and specific test * control are mutually exclusive. */ if (self_test) self_test_code = SSD_SELF_TEST_CODE_NONE; scsi_cmd->byte2 = ((self_test_code << SSD_SELF_TEST_CODE_SHIFT) & SSD_SELF_TEST_CODE_MASK) | (unit_offline ? SSD_UNITOFFL : 0) | (device_offline ? SSD_DEVOFFL : 0) | (self_test ? SSD_SELFTEST : 0) | (page_format ? SSD_PF : 0); scsi_ulto2b(param_list_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/param_list_length ? 
CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, param_list_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t allocation_length, uint8_t sense_len, uint32_t timeout) { struct scsi_read_buffer *scsi_cmd; scsi_cmd = (struct scsi_read_buffer *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_BUFFER; scsi_cmd->byte2 = mode; scsi_cmd->buffer_id = buffer_id; scsi_ulto3b(offset, scsi_cmd->offset); scsi_ulto3b(allocation_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, allocation_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t param_list_length, uint8_t sense_len, uint32_t timeout) { struct scsi_write_buffer *scsi_cmd; scsi_cmd = (struct scsi_write_buffer *)&csio->cdb_io.cdb_bytes; memset(scsi_cmd, 0, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_BUFFER; scsi_cmd->byte2 = mode; scsi_cmd->buffer_id = buffer_id; scsi_ulto3b(offset, scsi_cmd->offset); scsi_ulto3b(param_list_length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/param_list_length ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, param_list_length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int start, int load_eject, int immediate, u_int8_t sense_len, u_int32_t timeout) { struct scsi_start_stop_unit *scsi_cmd; int extra_flags = 0; scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = START_STOP_UNIT; if (start != 0) { scsi_cmd->how |= SSS_START; /* it takes a lot of power to start a drive */ extra_flags |= CAM_HIGH_POWER; } if (load_eject != 0) scsi_cmd->how |= SSS_LOEJ; if (immediate != 0) scsi_cmd->byte2 |= SSS_IMMED; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_NONE | extra_flags, tag_action, /*data_ptr*/NULL, /*dxfer_len*/0, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t service_action, uint32_t element, u_int8_t elem_type, int logical_volume, int partition, u_int32_t first_attribute, int cache, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout) { struct scsi_read_attribute *scsi_cmd; scsi_cmd = (struct scsi_read_attribute *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = READ_ATTRIBUTE; scsi_cmd->service_action = service_action, scsi_ulto2b(element, scsi_cmd->element); scsi_cmd->elem_type = elem_type; scsi_cmd->logical_volume = logical_volume; scsi_cmd->partition = partition; scsi_ulto2b(first_attribute, scsi_cmd->first_attribute); scsi_ulto4b(length, scsi_cmd->length); if (cache != 0) scsi_cmd->cache |= SRA_CACHE; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t 
tag_action, uint32_t element, int logical_volume, int partition, int wtc, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout) { struct scsi_write_attribute *scsi_cmd; scsi_cmd = (struct scsi_write_attribute *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = WRITE_ATTRIBUTE; if (wtc != 0) scsi_cmd->byte2 = SWA_WTC; scsi_ulto3b(element, scsi_cmd->element); scsi_cmd->logical_volume = logical_volume; scsi_cmd->partition = partition; scsi_ulto4b(length, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/length, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_per_res_in *scsi_cmd; scsi_cmd = (struct scsi_per_res_in *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PERSISTENT_RES_IN; scsi_cmd->action = service_action; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, int scope, int res_type, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_per_res_out *scsi_cmd; scsi_cmd = (struct scsi_per_res_out *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = PERSISTENT_RES_OUT; scsi_cmd->action = service_action; scsi_cmd->scope_type = scope | res_type; cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, /*data_ptr*/data_ptr, /*dxfer_len*/dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_security_protocol_in *scsi_cmd; scsi_cmd = (struct scsi_security_protocol_in *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SECURITY_PROTOCOL_IN; scsi_cmd->security_protocol = security_protocol; scsi_ulto2b(security_protocol_specific, scsi_cmd->security_protocol_specific); scsi_cmd->byte4 = byte4; scsi_ulto4b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_security_protocol_out *scsi_cmd; scsi_cmd = (struct scsi_security_protocol_out *)&csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = SECURITY_PROTOCOL_OUT; scsi_cmd->security_protocol = security_protocol; scsi_ulto2b(security_protocol_specific, scsi_cmd->security_protocol_specific); scsi_cmd->byte4 = byte4; scsi_ulto4b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_OUT, tag_action, data_ptr, dxfer_len, sense_len, 
sizeof(*scsi_cmd), timeout); } void scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int options, int req_opcode, int req_service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout) { struct scsi_report_supported_opcodes *scsi_cmd; scsi_cmd = (struct scsi_report_supported_opcodes *) &csio->cdb_io.cdb_bytes; bzero(scsi_cmd, sizeof(*scsi_cmd)); scsi_cmd->opcode = MAINTENANCE_IN; scsi_cmd->service_action = REPORT_SUPPORTED_OPERATION_CODES; scsi_cmd->options = options; scsi_cmd->requested_opcode = req_opcode; scsi_ulto2b(req_service_action, scsi_cmd->requested_service_action); scsi_ulto4b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } /* * Try to make as good a match as possible with * available sub drivers */ int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry) { struct scsi_inquiry_pattern *entry; struct scsi_inquiry_data *inq; entry = (struct scsi_inquiry_pattern *)table_entry; inq = (struct scsi_inquiry_data *)inqbuffer; if (((SID_TYPE(inq) == entry->type) || (entry->type == T_ANY)) && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE : entry->media_type & SIP_MEDIA_FIXED) && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0) && (cam_strmatch(inq->product, entry->product, sizeof(inq->product)) == 0) && (cam_strmatch(inq->revision, entry->revision, sizeof(inq->revision)) == 0)) { return (0); } return (-1); } /* * Try to make as good a match as possible with * available sub drivers */ int scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry) { struct scsi_static_inquiry_pattern *entry; struct scsi_inquiry_data *inq; entry = (struct scsi_static_inquiry_pattern *)table_entry; inq = (struct scsi_inquiry_data *)inqbuffer; if (((SID_TYPE(inq) == entry->type) || (entry->type == T_ANY)) && (SID_IS_REMOVABLE(inq) ? entry->media_type & SIP_MEDIA_REMOVABLE : entry->media_type & SIP_MEDIA_FIXED) && (cam_strmatch(inq->vendor, entry->vendor, sizeof(inq->vendor)) == 0) && (cam_strmatch(inq->product, entry->product, sizeof(inq->product)) == 0) && (cam_strmatch(inq->revision, entry->revision, sizeof(inq->revision)) == 0)) { return (0); } return (-1); } /** * Compare two buffers of vpd device descriptors for a match. * * \param lhs Pointer to first buffer of descriptors to compare. * \param lhs_len The length of the first buffer. * \param rhs Pointer to second buffer of descriptors to compare. * \param rhs_len The length of the second buffer. * * \return 0 on a match, -1 otherwise. * * Treat rhs and lhs as arrays of vpd device id descriptors. Walk lhs matching * against each element in rhs until all data are exhausted or we have found * a match. */ int scsi_devid_match(uint8_t *lhs, size_t lhs_len, uint8_t *rhs, size_t rhs_len) { struct scsi_vpd_id_descriptor *lhs_id; struct scsi_vpd_id_descriptor *lhs_last; struct scsi_vpd_id_descriptor *rhs_last; uint8_t *lhs_end; uint8_t *rhs_end; lhs_end = lhs + lhs_len; rhs_end = rhs + rhs_len; /* * rhs_last and lhs_last are the last possible position of a valid * descriptor assuming it had a zero length identifier. We use * these variables to ensure we can safely dereference the length * field in our loop termination tests.
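 *
 * For example (illustrative numbers only): with lhs_len == 16 and
 * the 4-byte descriptor header used here, lhs_last is lhs + 12, so
 * the length field of a descriptor starting 12 bytes in can still
 * be dereferenced safely even when its identifier is empty.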
*/ lhs_last = (struct scsi_vpd_id_descriptor *) (lhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); rhs_last = (struct scsi_vpd_id_descriptor *) (rhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier)); lhs_id = (struct scsi_vpd_id_descriptor *)lhs; while (lhs_id <= lhs_last && (lhs_id->identifier + lhs_id->length) <= lhs_end) { struct scsi_vpd_id_descriptor *rhs_id; rhs_id = (struct scsi_vpd_id_descriptor *)rhs; while (rhs_id <= rhs_last && (rhs_id->identifier + rhs_id->length) <= rhs_end) { if ((rhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) == (lhs_id->id_type & (SVPD_ID_ASSOC_MASK | SVPD_ID_TYPE_MASK)) && rhs_id->length == lhs_id->length && memcmp(rhs_id->identifier, lhs_id->identifier, rhs_id->length) == 0) return (0); rhs_id = (struct scsi_vpd_id_descriptor *) (rhs_id->identifier + rhs_id->length); } lhs_id = (struct scsi_vpd_id_descriptor *) (lhs_id->identifier + lhs_id->length); } return (-1); } #ifdef _KERNEL int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id) { struct cam_ed *device; struct scsi_vpd_supported_pages *vpds; int i, num_pages; device = periph->path->device; vpds = (struct scsi_vpd_supported_pages *)device->supported_vpds; if (vpds != NULL) { num_pages = device->supported_vpds_len - SVPD_SUPPORTED_PAGES_HDR_LEN; for (i = 0; i < num_pages; i++) { if (vpds->page_list[i] == page_id) return (1); } } return (0); } static void init_scsi_delay(void) { int delay; delay = SCSI_DELAY; TUNABLE_INT_FETCH("kern.cam.scsi_delay", &delay); if (set_scsi_delay(delay) != 0) { printf("cam: invalid value for tunable kern.cam.scsi_delay\n"); set_scsi_delay(SCSI_DELAY); } } SYSINIT(scsi_delay, SI_SUB_TUNABLES, SI_ORDER_ANY, init_scsi_delay, NULL); static int sysctl_scsi_delay(SYSCTL_HANDLER_ARGS) { int error, delay; delay = scsi_delay; error = sysctl_handle_int(oidp, &delay, 0, req); if (error != 0 || req->newptr == NULL) return (error); return (set_scsi_delay(delay)); } SYSCTL_PROC(_kern_cam, OID_AUTO, scsi_delay, CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_scsi_delay, "I", "Delay to allow devices to settle after a SCSI bus reset (ms)"); static int set_scsi_delay(int delay) { /* * If someone sets this to 0, we assume that they want the * minimum allowable bus settle delay. */ if (delay == 0) { printf("cam: using minimum scsi_delay (%dms)\n", SCSI_MIN_DELAY); delay = SCSI_MIN_DELAY; } if (delay < SCSI_MIN_DELAY) return (EINVAL); scsi_delay = delay; return (0); } #endif /* _KERNEL */ Index: head/sys/cam/scsi/scsi_all.h =================================================================== --- head/sys/cam/scsi/scsi_all.h (revision 300206) +++ head/sys/cam/scsi/scsi_all.h (revision 300207) @@ -1,4190 +1,4284 @@ /*- * Largely written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. 
* * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * $FreeBSD$ */ /* * SCSI general interface description */ #ifndef _SCSI_SCSI_ALL_H #define _SCSI_SCSI_ALL_H 1 #include #include #ifdef _KERNEL /* * This is the number of milliseconds we wait for devices to settle after a * SCSI bus reset. */ extern int scsi_delay; #endif /* _KERNEL */ /* * SCSI command format */ /* * Define some bits that are in ALL (or a lot of) scsi commands */ #define SCSI_CTL_LINK 0x01 #define SCSI_CTL_FLAG 0x02 #define SCSI_CTL_VENDOR 0xC0 #define SCSI_CMD_LUN 0xA0 /* these two should not be needed */ #define SCSI_CMD_LUN_SHIFT 5 /* LUN in the cmd is no longer SCSI */ #define SCSI_MAX_CDBLEN 16 /* * 16 byte commands are in the * SCSI-3 spec */ #if defined(CAM_MAX_CDBLEN) && (CAM_MAX_CDBLEN < SCSI_MAX_CDBLEN) #error "CAM_MAX_CDBLEN cannot be less than SCSI_MAX_CDBLEN" #endif /* 6byte CDBs special case 0 length to be 256 */ #define SCSI_CDB6_LEN(len) ((len) == 0 ? 256 : len) /* * This type defines actions to be taken when a particular sense code is * received. Right now, these flags are only defined to take up 16 bits, * but can be expanded in the future if necessary. */ typedef enum { SS_NOP = 0x000000, /* Do nothing */ SS_RETRY = 0x010000, /* Retry the command */ SS_FAIL = 0x020000, /* Bail out */ SS_START = 0x030000, /* Send a Start Unit command to the device, * then retry the original command. */ SS_TUR = 0x040000, /* Send a Test Unit Ready command to the * device, then retry the original command. */ SS_MASK = 0xff0000 } scsi_sense_action; typedef enum { SSQ_NONE = 0x0000, SSQ_DECREMENT_COUNT = 0x0100, /* Decrement the retry count */ SSQ_MANY = 0x0200, /* send lots of recovery commands */ SSQ_RANGE = 0x0400, /* * This table entry represents the * end of a range of ASCQs that * have identical error actions * and text. */ SSQ_PRINT_SENSE = 0x0800, SSQ_UA = 0x1000, /* Broadcast UA. */ SSQ_RESCAN = 0x2000, /* Rescan target for LUNs. */ SSQ_LOST = 0x4000, /* Destroy the LUNs.
*/ SSQ_MASK = 0xff00 } scsi_sense_action_qualifier; /* Mask for error status values */ #define SS_ERRMASK 0xff /* The default, retryable, error action */ #define SS_RDEF SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE|EIO /* The retryable, error action, with table specified error code */ #define SS_RET SS_RETRY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE /* Wait for transient error status to change */ #define SS_WAIT SS_TUR|SSQ_MANY|SSQ_DECREMENT_COUNT|SSQ_PRINT_SENSE /* Fatal error action, with table specified error code */ #define SS_FATAL SS_FAIL|SSQ_PRINT_SENSE struct scsi_generic { u_int8_t opcode; u_int8_t bytes[11]; }; struct scsi_request_sense { u_int8_t opcode; u_int8_t byte2; #define SRS_DESC 0x01 u_int8_t unused[2]; u_int8_t length; u_int8_t control; }; struct scsi_test_unit_ready { u_int8_t opcode; u_int8_t byte2; u_int8_t unused[3]; u_int8_t control; }; struct scsi_receive_diag { uint8_t opcode; uint8_t byte2; #define SRD_PCV 0x01 uint8_t page_code; uint8_t length[2]; uint8_t control; }; struct scsi_send_diag { uint8_t opcode; uint8_t byte2; #define SSD_UNITOFFL 0x01 #define SSD_DEVOFFL 0x02 #define SSD_SELFTEST 0x04 #define SSD_PF 0x10 #define SSD_SELF_TEST_CODE_MASK 0xE0 #define SSD_SELF_TEST_CODE_SHIFT 5 #define SSD_SELF_TEST_CODE_NONE 0x00 #define SSD_SELF_TEST_CODE_BG_SHORT 0x01 #define SSD_SELF_TEST_CODE_BG_EXTENDED 0x02 #define SSD_SELF_TEST_CODE_BG_ABORT 0x04 #define SSD_SELF_TEST_CODE_FG_SHORT 0x05 #define SSD_SELF_TEST_CODE_FG_EXTENDED 0x06 uint8_t reserved; uint8_t length[2]; uint8_t control; }; struct scsi_sense { u_int8_t opcode; u_int8_t byte2; u_int8_t unused[2]; u_int8_t length; u_int8_t control; }; struct scsi_inquiry { u_int8_t opcode; u_int8_t byte2; #define SI_EVPD 0x01 #define SI_CMDDT 0x02 u_int8_t page_code; u_int8_t length[2]; u_int8_t control; }; struct scsi_mode_sense_6 { u_int8_t opcode; u_int8_t byte2; #define SMS_DBD 0x08 u_int8_t page; #define SMS_PAGE_CODE 0x3F #define SMS_VENDOR_SPECIFIC_PAGE 0x00 #define SMS_DISCONNECT_RECONNECT_PAGE 0x02 #define SMS_FORMAT_DEVICE_PAGE 0x03 #define SMS_GEOMETRY_PAGE 0x04 #define SMS_CACHE_PAGE 0x08 #define SMS_PERIPHERAL_DEVICE_PAGE 0x09 #define SMS_CONTROL_MODE_PAGE 0x0A #define SMS_PROTO_SPECIFIC_PAGE 0x19 #define SMS_INFO_EXCEPTIONS_PAGE 0x1C #define SMS_ALL_PAGES_PAGE 0x3F #define SMS_PAGE_CTRL_MASK 0xC0 #define SMS_PAGE_CTRL_CURRENT 0x00 #define SMS_PAGE_CTRL_CHANGEABLE 0x40 #define SMS_PAGE_CTRL_DEFAULT 0x80 #define SMS_PAGE_CTRL_SAVED 0xC0 u_int8_t subpage; #define SMS_SUBPAGE_PAGE_0 0x00 #define SMS_SUBPAGE_ALL 0xff u_int8_t length; u_int8_t control; }; struct scsi_mode_sense_10 { u_int8_t opcode; u_int8_t byte2; /* same bits as small version */ #define SMS10_LLBAA 0x10 u_int8_t page; /* same bits as small version */ u_int8_t subpage; u_int8_t unused[3]; u_int8_t length[2]; u_int8_t control; }; struct scsi_mode_select_6 { u_int8_t opcode; u_int8_t byte2; #define SMS_SP 0x01 #define SMS_PF 0x10 u_int8_t unused[2]; u_int8_t length; u_int8_t control; }; struct scsi_mode_select_10 { u_int8_t opcode; u_int8_t byte2; /* same bits as small version */ u_int8_t unused[5]; u_int8_t length[2]; u_int8_t control; }; /* * When sending a mode select to a tape drive, the medium type must be 0.
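 *
 * For example, a caller building a MODE SELECT(6) parameter list for a
 * tape drive might start with (a sketch; "buf" is the parameter buffer):
 *
 *	struct scsi_mode_hdr_6 *hdr = (struct scsi_mode_hdr_6 *)buf;
 *
 *	bzero(hdr, sizeof(*hdr));
 *
 * which leaves medium_type (and the other header fields) zeroed; block
 * descriptors and mode page data then follow the header.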
*/ struct scsi_mode_hdr_6 { u_int8_t datalen; u_int8_t medium_type; u_int8_t dev_specific; u_int8_t block_descr_len; }; struct scsi_mode_hdr_10 { u_int8_t datalen[2]; u_int8_t medium_type; u_int8_t dev_specific; u_int8_t reserved[2]; u_int8_t block_descr_len[2]; }; struct scsi_mode_block_descr { u_int8_t density_code; u_int8_t num_blocks[3]; u_int8_t reserved; u_int8_t block_len[3]; }; struct scsi_per_res_in { u_int8_t opcode; u_int8_t action; #define SPRI_RK 0x00 #define SPRI_RR 0x01 #define SPRI_RC 0x02 #define SPRI_RS 0x03 u_int8_t reserved[5]; u_int8_t length[2]; #define SPRI_MAX_LEN 0xffff u_int8_t control; }; struct scsi_per_res_in_header { u_int8_t generation[4]; u_int8_t length[4]; }; struct scsi_per_res_key { u_int8_t key[8]; }; struct scsi_per_res_in_keys { struct scsi_per_res_in_header header; struct scsi_per_res_key keys[0]; }; struct scsi_per_res_cap { uint8_t length[2]; uint8_t flags1; #define SPRI_RLR_C 0x80 #define SPRI_CRH 0x10 #define SPRI_SIP_C 0x08 #define SPRI_ATP_C 0x04 #define SPRI_PTPL_C 0x01 uint8_t flags2; #define SPRI_TMV 0x80 #define SPRI_ALLOW_CMD_MASK 0x70 #define SPRI_ALLOW_CMD_SHIFT 4 #define SPRI_ALLOW_NA 0x00 #define SPRI_ALLOW_1 0x10 #define SPRI_ALLOW_2 0x20 #define SPRI_ALLOW_3 0x30 #define SPRI_ALLOW_4 0x40 #define SPRI_ALLOW_5 0x50 #define SPRI_PTPL_A 0x01 uint8_t type_mask[2]; #define SPRI_TM_WR_EX_AR 0x8000 #define SPRI_TM_EX_AC_RO 0x4000 #define SPRI_TM_WR_EX_RO 0x2000 #define SPRI_TM_EX_AC 0x0800 #define SPRI_TM_WR_EX 0x0200 #define SPRI_TM_EX_AC_AR 0x0001 uint8_t reserved[2]; }; struct scsi_per_res_in_rsrv_data { uint8_t reservation[8]; uint8_t scope_addr[4]; uint8_t reserved; uint8_t scopetype; #define SPRT_WE 0x01 #define SPRT_EA 0x03 #define SPRT_WERO 0x05 #define SPRT_EARO 0x06 #define SPRT_WEAR 0x07 #define SPRT_EAAR 0x08 uint8_t extent_length[2]; }; struct scsi_per_res_in_rsrv { struct scsi_per_res_in_header header; struct scsi_per_res_in_rsrv_data data; }; struct scsi_per_res_in_full_desc { struct scsi_per_res_key res_key; uint8_t reserved1[4]; uint8_t flags; #define SPRI_FULL_ALL_TG_PT 0x02 #define SPRI_FULL_R_HOLDER 0x01 uint8_t scopetype; uint8_t reserved2[4]; uint8_t rel_trgt_port_id[2]; uint8_t additional_length[4]; uint8_t transport_id[]; }; struct scsi_per_res_in_full { struct scsi_per_res_in_header header; struct scsi_per_res_in_full_desc desc[]; }; struct scsi_per_res_out { u_int8_t opcode; u_int8_t action; #define SPRO_REGISTER 0x00 #define SPRO_RESERVE 0x01 #define SPRO_RELEASE 0x02 #define SPRO_CLEAR 0x03 #define SPRO_PREEMPT 0x04 #define SPRO_PRE_ABO 0x05 #define SPRO_REG_IGNO 0x06 #define SPRO_REG_MOVE 0x07 #define SPRO_REPL_LOST_RES 0x08 #define SPRO_ACTION_MASK 0x1f u_int8_t scope_type; #define SPR_SCOPE_MASK 0xf0 #define SPR_SCOPE_SHIFT 4 #define SPR_LU_SCOPE 0x00 #define SPR_EXTENT_SCOPE 0x10 #define SPR_ELEMENT_SCOPE 0x20 #define SPR_TYPE_MASK 0x0f #define SPR_TYPE_RD_SHARED 0x00 #define SPR_TYPE_WR_EX 0x01 #define SPR_TYPE_RD_EX 0x02 #define SPR_TYPE_EX_AC 0x03 #define SPR_TYPE_SHARED 0x04 #define SPR_TYPE_WR_EX_RO 0x05 #define SPR_TYPE_EX_AC_RO 0x06 #define SPR_TYPE_WR_EX_AR 0x07 #define SPR_TYPE_EX_AC_AR 0x08 u_int8_t reserved[2]; u_int8_t length[4]; u_int8_t control; }; struct scsi_per_res_out_parms { struct scsi_per_res_key res_key; u_int8_t serv_act_res_key[8]; u_int8_t scope_spec_address[4]; u_int8_t flags; #define SPR_SPEC_I_PT 0x08 #define SPR_ALL_TG_PT 0x04 #define SPR_APTPL 0x01 u_int8_t reserved1; u_int8_t extent_length[2]; u_int8_t transport_id_list[]; }; struct scsi_per_res_out_trans_ids { u_int8_t 
additional_length[4]; u_int8_t transport_ids[]; }; /* * Used with REGISTER AND MOVE service action of the PERSISTENT RESERVE OUT * command. */ struct scsi_per_res_reg_move { struct scsi_per_res_key res_key; u_int8_t serv_act_res_key[8]; u_int8_t reserved; u_int8_t flags; #define SPR_REG_MOVE_UNREG 0x02 #define SPR_REG_MOVE_APTPL 0x01 u_int8_t rel_trgt_port_id[2]; u_int8_t transport_id_length[4]; u_int8_t transport_id[]; }; struct scsi_transportid_header { uint8_t format_protocol; #define SCSI_TRN_FORMAT_MASK 0xc0 #define SCSI_TRN_FORMAT_SHIFT 6 #define SCSI_TRN_PROTO_MASK 0x0f }; struct scsi_transportid_fcp { uint8_t format_protocol; #define SCSI_TRN_FCP_FORMAT_DEFAULT 0x00 uint8_t reserved1[7]; uint8_t n_port_name[8]; uint8_t reserved2[8]; }; struct scsi_transportid_spi { uint8_t format_protocol; #define SCSI_TRN_SPI_FORMAT_DEFAULT 0x00 uint8_t reserved1; uint8_t scsi_addr[2]; uint8_t obsolete[2]; uint8_t rel_trgt_port_id[2]; uint8_t reserved2[16]; }; struct scsi_transportid_1394 { uint8_t format_protocol; #define SCSI_TRN_1394_FORMAT_DEFAULT 0x00 uint8_t reserved1[7]; uint8_t eui64[8]; uint8_t reserved2[8]; }; struct scsi_transportid_rdma { uint8_t format_protocol; #define SCSI_TRN_RDMA_FORMAT_DEFAULT 0x00 uint8_t reserved[7]; #define SCSI_TRN_RDMA_PORT_LEN 16 uint8_t initiator_port_id[SCSI_TRN_RDMA_PORT_LEN]; }; struct scsi_transportid_iscsi_device { uint8_t format_protocol; #define SCSI_TRN_ISCSI_FORMAT_DEVICE 0x00 uint8_t reserved; uint8_t additional_length[2]; uint8_t iscsi_name[]; }; struct scsi_transportid_iscsi_port { uint8_t format_protocol; #define SCSI_TRN_ISCSI_FORMAT_PORT 0x40 uint8_t reserved; uint8_t additional_length[2]; uint8_t iscsi_name[]; /* * Followed by a separator and iSCSI initiator session ID */ }; struct scsi_transportid_sas { uint8_t format_protocol; #define SCSI_TRN_SAS_FORMAT_DEFAULT 0x00 uint8_t reserved1[3]; uint8_t sas_address[8]; uint8_t reserved2[12]; }; struct scsi_sop_routing_id_norm { uint8_t bus; uint8_t devfunc; #define SCSI_TRN_SOP_BUS_MAX 0xff #define SCSI_TRN_SOP_DEV_MAX 0x1f #define SCSI_TRN_SOP_DEV_MASK 0xf8 #define SCSI_TRN_SOP_DEV_SHIFT 3 #define SCSI_TRN_SOP_FUNC_NORM_MASK 0x07 #define SCSI_TRN_SOP_FUNC_NORM_MAX 0x07 }; struct scsi_sop_routing_id_alt { uint8_t bus; uint8_t function; #define SCSI_TRN_SOP_FUNC_ALT_MAX 0xff }; struct scsi_transportid_sop { uint8_t format_protocol; #define SCSI_TRN_SOP_FORMAT_DEFAULT 0x00 uint8_t reserved1; uint8_t routing_id[2]; uint8_t reserved2[20]; }; struct scsi_log_sense { u_int8_t opcode; u_int8_t byte2; #define SLS_SP 0x01 #define SLS_PPC 0x02 u_int8_t page; #define SLS_PAGE_CODE 0x3F #define SLS_SUPPORTED_PAGES_PAGE 0x00 #define SLS_OVERRUN_PAGE 0x01 #define SLS_ERROR_WRITE_PAGE 0x02 #define SLS_ERROR_READ_PAGE 0x03 #define SLS_ERROR_READREVERSE_PAGE 0x04 #define SLS_ERROR_VERIFY_PAGE 0x05 #define SLS_ERROR_NONMEDIUM_PAGE 0x06 #define SLS_ERROR_LASTN_PAGE 0x07 #define SLS_LOGICAL_BLOCK_PROVISIONING 0x0c #define SLS_SELF_TEST_PAGE 0x10 #define SLS_STAT_AND_PERF 0x19 #define SLS_IE_PAGE 0x2f #define SLS_PAGE_CTRL_MASK 0xC0 #define SLS_PAGE_CTRL_THRESHOLD 0x00 #define SLS_PAGE_CTRL_CUMULATIVE 0x40 #define SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 #define SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 u_int8_t subpage; #define SLS_SUPPORTED_SUBPAGES_SUBPAGE 0xff u_int8_t reserved; u_int8_t paramptr[2]; u_int8_t length[2]; u_int8_t control; }; struct scsi_log_select { u_int8_t opcode; u_int8_t byte2; /* SLS_SP 0x01 */ #define SLS_PCR 0x02 u_int8_t page; /* SLS_PAGE_CTRL_MASK 0xC0 */ /* SLS_PAGE_CTRL_THRESHOLD 0x00 */ /*
SLS_PAGE_CTRL_CUMULATIVE 0x40 */ /* SLS_PAGE_CTRL_THRESH_DEFAULT 0x80 */ /* SLS_PAGE_CTRL_CUMUL_DEFAULT 0xC0 */ u_int8_t reserved[4]; u_int8_t length[2]; u_int8_t control; }; struct scsi_log_header { u_int8_t page; #define SL_PAGE_CODE 0x3F #define SL_SPF 0x40 #define SL_DS 0x80 u_int8_t subpage; u_int8_t datalen[2]; }; struct scsi_log_param_header { u_int8_t param_code[2]; u_int8_t param_control; #define SLP_LP 0x01 #define SLP_LBIN 0x02 #define SLP_TMC_MASK 0x0C #define SLP_TMC_ALWAYS 0x00 #define SLP_TMC_EQUAL 0x04 #define SLP_TMC_NOTEQUAL 0x08 #define SLP_TMC_GREATER 0x0C #define SLP_ETC 0x10 #define SLP_TSD 0x20 #define SLP_DS 0x40 #define SLP_DU 0x80 u_int8_t param_len; }; struct scsi_log_stat_and_perf { struct scsi_log_param_header hdr; #define SLP_SAP 0x0001 uint8_t read_num[8]; uint8_t write_num[8]; uint8_t recvieved_lba[8]; uint8_t transmitted_lba[8]; uint8_t read_int[8]; uint8_t write_int[8]; uint8_t weighted_num[8]; uint8_t weighted_int[8]; }; struct scsi_log_idle_time { struct scsi_log_param_header hdr; #define SLP_IT 0x0002 uint8_t idle_int[8]; }; struct scsi_log_time_interval { struct scsi_log_param_header hdr; #define SLP_TI 0x0003 uint8_t exponent[4]; uint8_t integer[4]; }; struct scsi_log_fua_stat_and_perf { struct scsi_log_param_header hdr; #define SLP_FUA_SAP 0x0004 uint8_t fua_read_num[8]; uint8_t fua_write_num[8]; uint8_t fuanv_read_num[8]; uint8_t fuanv_write_num[8]; uint8_t fua_read_int[8]; uint8_t fua_write_int[8]; uint8_t fuanv_read_int[8]; uint8_t fuanv_write_int[8]; }; struct scsi_control_page { u_int8_t page_code; u_int8_t page_length; u_int8_t rlec; #define SCP_RLEC 0x01 /*Report Log Exception Cond*/ #define SCP_GLTSD 0x02 /*Global Logging target save disable */ #define SCP_DSENSE 0x04 /*Descriptor Sense */ #define SCP_DPICZ 0x08 /*Disable Prot. Info Check if Prot. 
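/*
 * A minimal sketch (hypothetical helpers, not part of this header) for
 * walking the parameters of a returned LOG SENSE page: the page header
 * above is followed by a sequence of parameter headers, each param_len
 * payload bytes apart.  Callers should bound the walk by
 * scsi_2btoul(hdr->datalen).
 */
static __inline struct scsi_log_param_header *
scsi_log_param_first(struct scsi_log_header *hdr)
{
	return ((struct scsi_log_param_header *)(hdr + 1));
}

static __inline struct scsi_log_param_header *
scsi_log_param_next(struct scsi_log_param_header *lph)
{
	/* param_len counts the bytes that follow the 4-byte header. */
	return ((struct scsi_log_param_header *)
	    ((uint8_t *)(lph + 1) + lph->param_len));
}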
Field is Zero */ #define SCP_TMF_ONLY 0x10 /*TM Functions Only*/ #define SCP_TST_MASK 0xE0 /*Task Set Type Mask*/ #define SCP_TST_ONE 0x00 /*One Task Set*/ #define SCP_TST_SEPARATE 0x20 /*Separate Task Sets*/ u_int8_t queue_flags; #define SCP_QUEUE_ALG_MASK 0xF0 #define SCP_QUEUE_ALG_RESTRICTED 0x00 #define SCP_QUEUE_ALG_UNRESTRICTED 0x10 #define SCP_NUAR 0x08 /*No UA on release*/ #define SCP_QUEUE_ERR 0x02 /*Queued I/O aborted for CACs*/ #define SCP_QUEUE_DQUE 0x01 /*Queued I/O disabled*/ u_int8_t eca_and_aen; #define SCP_EECA 0x80 /*Enable Extended CA*/ #define SCP_RAC 0x40 /*Report a check*/ #define SCP_SWP 0x08 /*Software Write Protect*/ #define SCP_RAENP 0x04 /*Ready AEN Permission*/ #define SCP_UAAENP 0x02 /*UA AEN Permission*/ #define SCP_EAENP 0x01 /*Error AEN Permission*/ u_int8_t flags4; #define SCP_ATO 0x80 /*Application tag owner*/ #define SCP_TAS 0x40 /*Task aborted status*/ #define SCP_ATMPE 0x20 /*Application tag mode page*/ #define SCP_RWWP 0x10 /*Reject write without prot*/ u_int8_t aen_holdoff_period[2]; u_int8_t busy_timeout_period[2]; u_int8_t extended_selftest_completion_time[2]; }; struct scsi_control_ext_page { uint8_t page_code; uint8_t subpage_code; uint8_t page_length[2]; uint8_t flags; #define SCEP_TCMOS 0x04 /* Timestamp Changeable by */ #define SCEP_SCSIP 0x02 /* SCSI Precedence (clock) */ #define SCEP_IALUAE 0x01 /* Implicit ALUA Enabled */ uint8_t prio; uint8_t max_sense; uint8_t reserve[25]; }; struct scsi_cache_page { u_int8_t page_code; #define SCHP_PAGE_SAVABLE 0x80 /* Page is savable */ u_int8_t page_length; u_int8_t cache_flags; #define SCHP_FLAGS_WCE 0x04 /* Write Cache Enable */ #define SCHP_FLAGS_MF 0x02 /* Multiplication factor */ #define SCHP_FLAGS_RCD 0x01 /* Read Cache Disable */ u_int8_t rw_cache_policy; u_int8_t dis_prefetch[2]; u_int8_t min_prefetch[2]; u_int8_t max_prefetch[2]; u_int8_t max_prefetch_ceil[2]; }; /* * XXX KDM * Updated version of the cache page, as of SBC. Update this to SBC-3 and * rationalize the two. */ struct scsi_caching_page { uint8_t page_code; #define SMS_CACHING_PAGE 0x08 uint8_t page_length; uint8_t flags1; #define SCP_IC 0x80 #define SCP_ABPF 0x40 #define SCP_CAP 0x20 #define SCP_DISC 0x10 #define SCP_SIZE 0x08 #define SCP_WCE 0x04 #define SCP_MF 0x02 #define SCP_RCD 0x01 uint8_t ret_priority; uint8_t disable_pf_transfer_len[2]; uint8_t min_prefetch[2]; uint8_t max_prefetch[2]; uint8_t max_pf_ceiling[2]; uint8_t flags2; #define SCP_FSW 0x80 #define SCP_LBCSS 0x40 #define SCP_DRA 0x20 #define SCP_VS1 0x10 #define SCP_VS2 0x08 uint8_t cache_segments; uint8_t cache_seg_size[2]; uint8_t reserved; uint8_t non_cache_seg_size[3]; }; /* * XXX KDM move this off to a vendor shim. 
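/*
 * A small sketch (a hypothetical helper, not part of this header):
 * deciding whether the volatile write cache is enabled from a fetched
 * caching mode page, using the SCP_WCE flag defined above.
 */
static __inline int
scsi_caching_wce_enabled(struct scsi_caching_page *pg)
{
	return ((pg->flags1 & SCP_WCE) != 0);
}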
*/ struct copan_debugconf_subpage { uint8_t page_code; #define DBGCNF_PAGE_CODE 0x00 uint8_t subpage; #define DBGCNF_SUBPAGE_CODE 0xF0 uint8_t page_length[2]; uint8_t page_version; #define DBGCNF_VERSION 0x00 uint8_t ctl_time_io_secs[2]; }; struct scsi_info_exceptions_page { u_int8_t page_code; #define SIEP_PAGE_SAVABLE 0x80 /* Page is savable */ u_int8_t page_length; u_int8_t info_flags; #define SIEP_FLAGS_PERF 0x80 #define SIEP_FLAGS_EBF 0x20 #define SIEP_FLAGS_EWASC 0x10 #define SIEP_FLAGS_DEXCPT 0x08 #define SIEP_FLAGS_TEST 0x04 #define SIEP_FLAGS_EBACKERR 0x02 #define SIEP_FLAGS_LOGERR 0x01 u_int8_t mrie; u_int8_t interval_timer[4]; u_int8_t report_count[4]; }; struct scsi_logical_block_provisioning_page_descr { uint8_t flags; #define SLBPPD_ENABLED 0x80 #define SLBPPD_TYPE_MASK 0x38 #define SLBPPD_ARMING_MASK 0x07 #define SLBPPD_ARMING_DEC 0x02 #define SLBPPD_ARMING_INC 0x01 uint8_t resource; uint8_t reserved[2]; uint8_t count[4]; }; struct scsi_logical_block_provisioning_page { uint8_t page_code; uint8_t subpage_code; uint8_t page_length[2]; uint8_t flags; #define SLBPP_SITUA 0x01 uint8_t reserved[11]; struct scsi_logical_block_provisioning_page_descr descr[0]; }; /* * SCSI protocol identifier values, current as of SPC4r36l. */ #define SCSI_PROTO_FC 0x00 /* Fibre Channel */ #define SCSI_PROTO_SPI 0x01 /* Parallel SCSI */ #define SCSI_PROTO_SSA 0x02 /* Serial Storage Arch. */ #define SCSI_PROTO_1394 0x03 /* IEEE 1394 (Firewire) */ #define SCSI_PROTO_RDMA 0x04 /* SCSI RDMA Protocol */ #define SCSI_PROTO_ISCSI 0x05 /* Internet SCSI */ #define SCSI_PROTO_iSCSI 0x05 /* Internet SCSI */ #define SCSI_PROTO_SAS 0x06 /* SAS Serial SCSI Protocol */ #define SCSI_PROTO_ADT 0x07 /* Automation/Drive Int. Trans. Prot.*/ #define SCSI_PROTO_ADITP 0x07 /* Automation/Drive Int. Trans. 
Prot.*/ #define SCSI_PROTO_ATA 0x08 /* AT Attachment Interface */ #define SCSI_PROTO_UAS 0x09 /* USB Attached SCSI */ #define SCSI_PROTO_SOP 0x0a /* SCSI over PCI Express */ #define SCSI_PROTO_NONE 0x0f /* No specific protocol */ struct scsi_proto_specific_page { u_int8_t page_code; #define SPSP_PAGE_SAVABLE 0x80 /* Page is savable */ u_int8_t page_length; u_int8_t protocol; #define SPSP_PROTO_FC SCSI_PROTO_FC #define SPSP_PROTO_SPI SCSI_PROTO_SPI #define SPSP_PROTO_SSA SCSI_PROTO_SSA #define SPSP_PROTO_1394 SCSI_PROTO_1394 #define SPSP_PROTO_RDMA SCSI_PROTO_RDMA #define SPSP_PROTO_ISCSI SCSI_PROTO_ISCSI #define SPSP_PROTO_SAS SCSI_PROTO_SAS #define SPSP_PROTO_ADT SCSI_PROTO_ADITP #define SPSP_PROTO_ATA SCSI_PROTO_ATA #define SPSP_PROTO_UAS SCSI_PROTO_UAS #define SPSP_PROTO_SOP SCSI_PROTO_SOP #define SPSP_PROTO_NONE SCSI_PROTO_NONE }; struct scsi_reserve { u_int8_t opcode; u_int8_t byte2; #define SR_EXTENT 0x01 #define SR_ID_MASK 0x0e #define SR_3RDPTY 0x10 #define SR_LUN_MASK 0xe0 u_int8_t resv_id; u_int8_t length[2]; u_int8_t control; }; struct scsi_reserve_10 { uint8_t opcode; uint8_t byte2; #define SR10_3RDPTY 0x10 #define SR10_LONGID 0x02 #define SR10_EXTENT 0x01 uint8_t resv_id; uint8_t thirdparty_id; uint8_t reserved[3]; uint8_t length[2]; uint8_t control; }; struct scsi_release { u_int8_t opcode; u_int8_t byte2; u_int8_t resv_id; u_int8_t unused[1]; u_int8_t length; u_int8_t control; }; struct scsi_release_10 { uint8_t opcode; uint8_t byte2; uint8_t resv_id; uint8_t thirdparty_id; uint8_t reserved[3]; uint8_t length[2]; uint8_t control; }; struct scsi_prevent { u_int8_t opcode; u_int8_t byte2; u_int8_t unused[2]; u_int8_t how; u_int8_t control; }; #define PR_PREVENT 0x01 #define PR_ALLOW 0x00 struct scsi_sync_cache { u_int8_t opcode; u_int8_t byte2; #define SSC_IMMED 0x02 #define SSC_RELADR 0x01 u_int8_t begin_lba[4]; u_int8_t reserved; u_int8_t lb_count[2]; u_int8_t control; }; struct scsi_sync_cache_16 { uint8_t opcode; uint8_t byte2; uint8_t begin_lba[8]; uint8_t lb_count[4]; uint8_t reserved; uint8_t control; }; struct scsi_format { uint8_t opcode; uint8_t byte2; #define SF_LONGLIST 0x20 #define SF_FMTDATA 0x10 #define SF_CMPLIST 0x08 #define SF_FORMAT_MASK 0x07 #define SF_FORMAT_BLOCK 0x00 #define SF_FORMAT_LONG_BLOCK 0x03 #define SF_FORMAT_BFI 0x04 #define SF_FORMAT_PHYS 0x05 uint8_t vendor; uint8_t interleave[2]; uint8_t control; }; struct scsi_format_header_short { uint8_t reserved; #define SF_DATA_FOV 0x80 #define SF_DATA_DPRY 0x40 #define SF_DATA_DCRT 0x20 #define SF_DATA_STPF 0x10 #define SF_DATA_IP 0x08 #define SF_DATA_DSP 0x04 #define SF_DATA_IMMED 0x02 #define SF_DATA_VS 0x01 uint8_t byte2; uint8_t defect_list_len[2]; }; struct scsi_format_header_long { uint8_t reserved; uint8_t byte2; uint8_t reserved2[2]; uint8_t defect_list_len[4]; }; struct scsi_changedef { u_int8_t opcode; u_int8_t byte2; u_int8_t unused1; u_int8_t how; u_int8_t unused[4]; u_int8_t datalen; u_int8_t control; }; struct scsi_read_buffer { u_int8_t opcode; u_int8_t byte2; #define RWB_MODE 0x1F #define RWB_MODE_HDR_DATA 0x00 #define RWB_MODE_VENDOR 0x01 #define RWB_MODE_DATA 0x02 #define RWB_MODE_DESCR 0x03 #define RWB_MODE_DOWNLOAD 0x04 #define RWB_MODE_DOWNLOAD_SAVE 0x05 #define RWB_MODE_ECHO 0x0A #define RWB_MODE_ECHO_DESCR 0x0B #define RWB_MODE_ERROR_HISTORY 0x1C u_int8_t buffer_id; u_int8_t offset[3]; u_int8_t length[3]; u_int8_t control; }; struct scsi_read_buffer_16 { uint8_t opcode; uint8_t byte2; uint8_t offset[8]; uint8_t length[4]; uint8_t buffer_id; uint8_t control; }; struct
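/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * filling a SYNCHRONIZE CACHE(10) CDB from the structure above.
 * Assumes the scsi_ulto2b()/scsi_ulto4b() helpers declared elsewhere in
 * this header and memset() from the usual environment.
 */
static __inline void
scsi_sync_cache_cdb(struct scsi_sync_cache *cdb, uint32_t begin_lba,
    uint16_t lb_count, int immed)
{
	memset(cdb, 0, sizeof(*cdb));
	cdb->opcode = SYNCHRONIZE_CACHE;	/* 0x35, from the opcode list below */
	if (immed)
		cdb->byte2 |= SSC_IMMED;
	scsi_ulto4b(begin_lba, cdb->begin_lba);
	scsi_ulto2b(lb_count, cdb->lb_count);
}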
scsi_write_buffer { u_int8_t opcode; u_int8_t byte2; u_int8_t buffer_id; u_int8_t offset[3]; u_int8_t length[3]; u_int8_t control; }; struct scsi_read_attribute { u_int8_t opcode; u_int8_t service_action; #define SRA_SA_ATTR_VALUES 0x00 #define SRA_SA_ATTR_LIST 0x01 #define SRA_SA_LOG_VOL_LIST 0x02 #define SRA_SA_PART_LIST 0x03 #define SRA_SA_RESTRICTED 0x04 #define SRA_SA_SUPPORTED_ATTRS 0x05 #define SRA_SA_MASK 0x1f u_int8_t element[2]; u_int8_t elem_type; u_int8_t logical_volume; u_int8_t reserved1; u_int8_t partition; u_int8_t first_attribute[2]; u_int8_t length[4]; u_int8_t cache; #define SRA_CACHE 0x01 u_int8_t control; }; struct scsi_write_attribute { u_int8_t opcode; u_int8_t byte2; #define SWA_WTC 0x01 u_int8_t element[3]; u_int8_t logical_volume; u_int8_t reserved1; u_int8_t partition; u_int8_t reserved2[2]; u_int8_t length[4]; u_int8_t reserved3; u_int8_t control; }; struct scsi_read_attribute_values { u_int8_t length[4]; u_int8_t attribute_0[0]; }; struct scsi_mam_attribute_header { u_int8_t id[2]; /* * Attributes obtained from SPC-4r36g (section 7.4.2.2) and * SSC-4r03 (section 4.2.21). */ #define SMA_ATTR_ID_DEVICE_MIN 0x0000 #define SMA_ATTR_REM_CAP_PARTITION 0x0000 #define SMA_ATTR_MAX_CAP_PARTITION 0x0001 #define SMA_ATTR_TAPEALERT_FLAGS 0x0002 #define SMA_ATTR_LOAD_COUNT 0x0003 #define SMA_ATTR_MAM_SPACE_REMAINING 0x0004 #define SMA_ATTR_DEV_ASSIGNING_ORG 0x0005 #define SMA_ATTR_FORMAT_DENSITY_CODE 0x0006 #define SMA_ATTR_INITIALIZATION_COUNT 0x0007 #define SMA_ATTR_VOLUME_ID 0x0008 #define SMA_ATTR_VOLUME_CHANGE_REF 0x0009 #define SMA_ATTR_DEV_SERIAL_LAST_LOAD 0x020a #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_1 0x020b #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_2 0x020c #define SMA_ATTR_DEV_SERIAL_LAST_LOAD_3 0x020d #define SMA_ATTR_TOTAL_MB_WRITTEN_LT 0x0220 #define SMA_ATTR_TOTAL_MB_READ_LT 0x0221 #define SMA_ATTR_TOTAL_MB_WRITTEN_CUR 0x0222 #define SMA_ATTR_TOTAL_MB_READ_CUR 0x0223 #define SMA_ATTR_FIRST_ENC_BLOCK 0x0224 #define SMA_ATTR_NEXT_UNENC_BLOCK 0x0225 #define SMA_ATTR_MEDIUM_USAGE_HIST 0x0340 #define SMA_ATTR_PART_USAGE_HIST 0x0341 #define SMA_ATTR_ID_DEVICE_MAX 0x03ff #define SMA_ATTR_ID_MEDIUM_MIN 0x0400 #define SMA_ATTR_MED_MANUF 0x0400 #define SMA_ATTR_MED_SERIAL 0x0401 #define SMA_ATTR_MED_LENGTH 0x0402 #define SMA_ATTR_MED_WIDTH 0x0403 #define SMA_ATTR_MED_ASSIGNING_ORG 0x0404 #define SMA_ATTR_MED_DENSITY_CODE 0x0405 #define SMA_ATTR_MED_MANUF_DATE 0x0406 #define SMA_ATTR_MAM_CAPACITY 0x0407 #define SMA_ATTR_MED_TYPE 0x0408 #define SMA_ATTR_MED_TYPE_INFO 0x0409 #define SMA_ATTR_MED_SERIAL_NUM 0x040a #define SMA_ATTR_ID_MEDIUM_MAX 0x07ff #define SMA_ATTR_ID_HOST_MIN 0x0800 #define SMA_ATTR_APP_VENDOR 0x0800 #define SMA_ATTR_APP_NAME 0x0801 #define SMA_ATTR_APP_VERSION 0x0802 #define SMA_ATTR_USER_MED_TEXT_LABEL 0x0803 #define SMA_ATTR_LAST_WRITTEN_TIME 0x0804 #define SMA_ATTR_TEXT_LOCAL_ID 0x0805 #define SMA_ATTR_BARCODE 0x0806 #define SMA_ATTR_HOST_OWNER_NAME 0x0807 #define SMA_ATTR_MEDIA_POOL 0x0808 #define SMA_ATTR_PART_USER_LABEL 0x0809 #define SMA_ATTR_LOAD_UNLOAD_AT_PART 0x080a #define SMA_ATTR_APP_FORMAT_VERSION 0x080b #define SMA_ATTR_VOL_COHERENCY_INFO 0x080c #define SMA_ATTR_ID_HOST_MAX 0x0bff #define SMA_ATTR_VENDOR_DEVICE_MIN 0x0c00 #define SMA_ATTR_VENDOR_DEVICE_MAX 0x0fff #define SMA_ATTR_VENDOR_MEDIUM_MIN 0x1000 #define SMA_ATTR_VENDOR_MEDIUM_MAX 0x13ff #define SMA_ATTR_VENDOR_HOST_MIN 0x1400 #define SMA_ATTR_VENDOR_HOST_MAX 0x17ff u_int8_t byte2; #define SMA_FORMAT_BINARY 0x00 #define SMA_FORMAT_ASCII 0x01 #define SMA_FORMAT_TEXT 0x02 #define 
SMA_FORMAT_MASK 0x03 #define SMA_READ_ONLY 0x80 u_int8_t length[2]; u_int8_t attribute[0]; }; struct scsi_attrib_list_header { u_int8_t length[4]; u_int8_t first_attr_0[0]; }; struct scsi_attrib_lv_list { u_int8_t length[2]; u_int8_t first_lv_number; u_int8_t num_logical_volumes; }; struct scsi_attrib_vendser { uint8_t vendor[8]; uint8_t serial_num[32]; }; /* * These values are used to decode the Volume Coherency Information * Attribute (0x080c) for LTFS-format coherency information. * Although the Application Client Specific lengths are different for * Version 0 and Version 1, the data is in fact the same. The length * difference was due to a code bug. */ #define SCSI_LTFS_VER0_LEN 42 #define SCSI_LTFS_VER1_LEN 43 #define SCSI_LTFS_UUID_LEN 36 #define SCSI_LTFS_STR_NAME "LTFS" #define SCSI_LTFS_STR_LEN 4 typedef enum { SCSI_ATTR_FLAG_NONE = 0x00, SCSI_ATTR_FLAG_HEX = 0x01, SCSI_ATTR_FLAG_FP = 0x02, SCSI_ATTR_FLAG_DIV_10 = 0x04, SCSI_ATTR_FLAG_FP_1DIGIT = 0x08 } scsi_attrib_flags; typedef enum { SCSI_ATTR_OUTPUT_NONE = 0x00, SCSI_ATTR_OUTPUT_TEXT_MASK = 0x03, SCSI_ATTR_OUTPUT_TEXT_RAW = 0x00, SCSI_ATTR_OUTPUT_TEXT_ESC = 0x01, SCSI_ATTR_OUTPUT_TEXT_RSV1 = 0x02, SCSI_ATTR_OUTPUT_TEXT_RSV2 = 0x03, SCSI_ATTR_OUTPUT_NONASCII_MASK = 0x0c, SCSI_ATTR_OUTPUT_NONASCII_TRIM = 0x00, SCSI_ATTR_OUTPUT_NONASCII_ESC = 0x04, SCSI_ATTR_OUTPUT_NONASCII_RAW = 0x08, SCSI_ATTR_OUTPUT_NONASCII_RSV1 = 0x0c, SCSI_ATTR_OUTPUT_FIELD_MASK = 0xf0, SCSI_ATTR_OUTPUT_FIELD_ALL = 0xf0, SCSI_ATTR_OUTPUT_FIELD_NONE = 0x00, SCSI_ATTR_OUTPUT_FIELD_DESC = 0x10, SCSI_ATTR_OUTPUT_FIELD_NUM = 0x20, SCSI_ATTR_OUTPUT_FIELD_SIZE = 0x40, SCSI_ATTR_OUTPUT_FIELD_RW = 0x80 } scsi_attrib_output_flags; struct sbuf; struct scsi_attrib_table_entry { u_int32_t id; u_int32_t flags; const char *desc; const char *suffix; int (*to_str)(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int (*parse_str)(char *str, struct scsi_mam_attribute_header *hdr, uint32_t alloc_len, uint32_t flags, char *error_str, int error_str_len); }; struct scsi_rw_6 { u_int8_t opcode; u_int8_t addr[3]; /* only 5 bits are valid in the MSB address byte */ #define SRW_TOPADDR 0x1F u_int8_t length; u_int8_t control; }; struct scsi_rw_10 { u_int8_t opcode; #define SRW10_RELADDR 0x01 /* EBP defined for WRITE(10) only */ #define SRW10_EBP 0x04 #define SRW10_FUA 0x08 #define SRW10_DPO 0x10 u_int8_t byte2; u_int8_t addr[4]; u_int8_t reserved; u_int8_t length[2]; u_int8_t control; }; struct scsi_rw_12 { u_int8_t opcode; #define SRW12_RELADDR 0x01 #define SRW12_FUA 0x08 #define SRW12_DPO 0x10 u_int8_t byte2; u_int8_t addr[4]; u_int8_t length[4]; u_int8_t reserved; u_int8_t control; }; struct scsi_rw_16 { u_int8_t opcode; #define SRW16_RELADDR 0x01 #define SRW16_FUA 0x08 #define SRW16_DPO 0x10 u_int8_t byte2; u_int8_t addr[8]; u_int8_t length[4]; u_int8_t reserved; u_int8_t control; }; struct scsi_write_atomic_16 { uint8_t opcode; uint8_t byte2; uint8_t addr[8]; uint8_t boundary[2]; uint8_t length[2]; uint8_t group; uint8_t control; }; struct scsi_write_same_10 { uint8_t opcode; uint8_t byte2; #define SWS_LBDATA 0x02 #define SWS_PBDATA 0x04 #define SWS_UNMAP 0x08 #define SWS_ANCHOR 0x10 uint8_t addr[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_write_same_16 { uint8_t opcode; uint8_t byte2; #define SWS_NDOB 0x01 uint8_t addr[8]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_unmap { uint8_t opcode; uint8_t byte2; #define SU_ANCHOR 0x01 
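/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * stepping through the attributes returned by READ ATTRIBUTE (ATTRIBUTE
 * VALUES service action).  Each entry is the fixed scsi_mam_attribute_header
 * above plus scsi_2btoul(hdr->length) bytes of value; the caller bounds the
 * walk with scsi_4btoul() of the scsi_read_attribute_values length field.
 */
static __inline struct scsi_mam_attribute_header *
scsi_mam_attr_next(struct scsi_mam_attribute_header *hdr)
{
	return ((struct scsi_mam_attribute_header *)
	    (hdr->attribute + scsi_2btoul(hdr->length)));
}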
uint8_t reserved[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_unmap_header { uint8_t length[2]; uint8_t desc_length[2]; uint8_t reserved[4]; }; struct scsi_unmap_desc { uint8_t lba[8]; uint8_t length[4]; uint8_t reserved[4]; }; struct scsi_write_verify_10 { uint8_t opcode; uint8_t byte2; #define SWV_BYTCHK 0x02 #define SWV_DPO 0x10 #define SWV_WRPROECT_MASK 0xe0 uint8_t addr[4]; uint8_t group; uint8_t length[2]; uint8_t control; }; struct scsi_write_verify_12 { uint8_t opcode; uint8_t byte2; uint8_t addr[4]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_write_verify_16 { uint8_t opcode; uint8_t byte2; uint8_t addr[8]; uint8_t length[4]; uint8_t group; uint8_t control; }; struct scsi_start_stop_unit { u_int8_t opcode; u_int8_t byte2; #define SSS_IMMED 0x01 u_int8_t reserved[2]; u_int8_t how; #define SSS_START 0x01 #define SSS_LOEJ 0x02 #define SSS_PC_MASK 0xf0 #define SSS_PC_START_VALID 0x00 #define SSS_PC_ACTIVE 0x10 #define SSS_PC_IDLE 0x20 #define SSS_PC_STANDBY 0x30 #define SSS_PC_LU_CONTROL 0x70 #define SSS_PC_FORCE_IDLE_0 0xa0 #define SSS_PC_FORCE_STANDBY_0 0xb0 u_int8_t control; }; struct ata_pass_12 { u_int8_t opcode; u_int8_t protocol; #define AP_PROTO_HARD_RESET (0x00 << 1) #define AP_PROTO_SRST (0x01 << 1) #define AP_PROTO_NON_DATA (0x03 << 1) #define AP_PROTO_PIO_IN (0x04 << 1) #define AP_PROTO_PIO_OUT (0x05 << 1) #define AP_PROTO_DMA (0x06 << 1) #define AP_PROTO_DMA_QUEUED (0x07 << 1) #define AP_PROTO_DEVICE_DIAG (0x08 << 1) #define AP_PROTO_DEVICE_RESET (0x09 << 1) #define AP_PROTO_UDMA_IN (0x0a << 1) #define AP_PROTO_UDMA_OUT (0x0b << 1) #define AP_PROTO_FPDMA (0x0c << 1) #define AP_PROTO_RESP_INFO (0x0f << 1) +#define AP_PROTO_MASK 0x1e #define AP_MULTI 0xe0 u_int8_t flags; #define AP_T_LEN 0x03 #define AP_BB 0x04 #define AP_T_DIR 0x08 #define AP_CK_COND 0x20 #define AP_OFFLINE 0x60 u_int8_t features; u_int8_t sector_count; u_int8_t lba_low; u_int8_t lba_mid; u_int8_t lba_high; u_int8_t device; u_int8_t command; u_int8_t reserved; u_int8_t control; }; struct scsi_maintenance_in { uint8_t opcode; uint8_t byte2; #define SERVICE_ACTION_MASK 0x1f #define SA_RPRT_TRGT_GRP 0x0a uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_opcodes { uint8_t opcode; uint8_t service_action; uint8_t options; #define RSO_RCTD 0x80 #define RSO_OPTIONS_MASK 0x07 #define RSO_OPTIONS_ALL 0x00 #define RSO_OPTIONS_OC 0x01 #define RSO_OPTIONS_OC_SA 0x02 uint8_t requested_opcode; uint8_t requested_service_action[2]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_opcodes_timeout { uint8_t length[2]; uint8_t reserved; uint8_t cmd_specific; uint8_t nominal_time[4]; uint8_t recommended_time[4]; }; struct scsi_report_supported_opcodes_descr { uint8_t opcode; uint8_t reserved; uint8_t service_action[2]; uint8_t reserved2; uint8_t flags; #define RSO_SERVACTV 0x01 #define RSO_CTDP 0x02 uint8_t cdb_length[2]; struct scsi_report_supported_opcodes_timeout timeout[0]; }; struct scsi_report_supported_opcodes_all { uint8_t length[4]; struct scsi_report_supported_opcodes_descr descr[0]; }; struct scsi_report_supported_opcodes_one { uint8_t reserved; uint8_t support; #define RSO_ONE_CTDP 0x80 #define RSO_ONE_SUP_MASK 0x07 #define RSO_ONE_SUP_UNAVAIL 0x00 #define RSO_ONE_SUP_NOT_SUP 0x01 #define RSO_ONE_SUP_AVAIL 0x03 #define RSO_ONE_SUP_VENDOR 0x05 uint8_t cdb_length[2]; uint8_t cdb_usage[]; }; struct scsi_report_supported_tmf { uint8_t opcode; uint8_t service_action; uint8_t 
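/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * building a single-range UNMAP parameter list from the two structures
 * above, with the header and descriptor laid out contiguously in the
 * data-out buffer.  The data length field counts the bytes that follow
 * it (6 remaining header bytes plus one 16-byte descriptor here).
 * Assumes scsi_ulto2b()/scsi_ulto4b()/scsi_u64to8b() from this header
 * and memset() from the usual environment.
 */
static __inline void
scsi_unmap_one_range(struct scsi_unmap_header *hdr,
    struct scsi_unmap_desc *desc, uint64_t lba, uint32_t nblocks)
{
	scsi_ulto2b(sizeof(*hdr) - 2 + sizeof(*desc), hdr->length);
	scsi_ulto2b(sizeof(*desc), hdr->desc_length);
	memset(hdr->reserved, 0, sizeof(hdr->reserved));
	scsi_u64to8b(lba, desc->lba);
	scsi_ulto4b(nblocks, desc->length);
	memset(desc->reserved, 0, sizeof(desc->reserved));
}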
reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_supported_tmf_data { uint8_t byte1; #define RST_WAKES 0x01 #define RST_TRS 0x02 #define RST_QTS 0x04 #define RST_LURS 0x08 #define RST_CTSS 0x10 #define RST_CACAS 0x20 #define RST_ATSS 0x40 #define RST_ATS 0x80 uint8_t byte2; #define RST_ITNRS 0x01 #define RST_QTSS 0x02 #define RST_QAES 0x04 uint8_t reserved[2]; }; struct scsi_report_timestamp { uint8_t opcode; uint8_t service_action; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_report_timestamp_data { uint8_t length[2]; uint8_t origin; #define RTS_ORIG_MASK 0x00 #define RTS_ORIG_ZERO 0x00 #define RTS_ORIG_SET 0x02 #define RTS_ORIG_OUTSIDE 0x03 uint8_t reserved; uint8_t timestamp[6]; uint8_t reserve2[2]; }; struct scsi_receive_copy_status_lid1 { uint8_t opcode; uint8_t service_action; #define RCS_RCS_LID1 0x00 uint8_t list_identifier; uint8_t reserved[7]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_status_lid1_data { uint8_t available_data[4]; uint8_t copy_command_status; #define RCS_CCS_INPROG 0x00 #define RCS_CCS_COMPLETED 0x01 #define RCS_CCS_ERROR 0x02 uint8_t segments_processed[2]; uint8_t transfer_count_units; #define RCS_TC_BYTES 0x00 #define RCS_TC_KBYTES 0x01 #define RCS_TC_MBYTES 0x02 #define RCS_TC_GBYTES 0x03 #define RCS_TC_TBYTES 0x04 #define RCS_TC_PBYTES 0x05 #define RCS_TC_EBYTES 0x06 #define RCS_TC_LBAS 0xf1 uint8_t transfer_count[4]; }; struct scsi_receive_copy_failure_details { uint8_t opcode; uint8_t service_action; #define RCS_RCFD 0x04 uint8_t list_identifier; uint8_t reserved[7]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_failure_details_data { uint8_t available_data[4]; uint8_t reserved[52]; uint8_t copy_command_status; uint8_t reserved2; uint8_t sense_data_length[2]; uint8_t sense_data[]; }; struct scsi_receive_copy_status_lid4 { uint8_t opcode; uint8_t service_action; #define RCS_RCS_LID4 0x05 uint8_t list_identifier[4]; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_status_lid4_data { uint8_t available_data[4]; uint8_t response_to_service_action; uint8_t copy_command_status; #define RCS_CCS_COMPLETED_PROD 0x03 #define RCS_CCS_COMPLETED_RESID 0x04 #define RCS_CCS_INPROG_FGBG 0x10 #define RCS_CCS_INPROG_FG 0x11 #define RCS_CCS_INPROG_BG 0x12 #define RCS_CCS_ABORTED 0x60 uint8_t operation_counter[2]; uint8_t estimated_status_update_delay[4]; uint8_t extended_copy_completion_status; uint8_t length_of_the_sense_data_field; uint8_t sense_data_length; uint8_t transfer_count_units; uint8_t transfer_count[8]; uint8_t segments_processed[2]; uint8_t reserved[6]; uint8_t sense_data[]; }; struct scsi_receive_copy_operating_parameters { uint8_t opcode; uint8_t service_action; #define RCS_RCOP 0x03 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_receive_copy_operating_parameters_data { uint8_t length[4]; uint8_t snlid; #define RCOP_SNLID 0x01 uint8_t reserved[3]; uint8_t maximum_cscd_descriptor_count[2]; uint8_t maximum_segment_descriptor_count[2]; uint8_t maximum_descriptor_list_length[4]; uint8_t maximum_segment_length[4]; uint8_t maximum_inline_data_length[4]; uint8_t held_data_limit[4]; uint8_t maximum_stream_device_transfer_size[4]; uint8_t reserved2[2]; uint8_t total_concurrent_copies[2]; uint8_t maximum_concurrent_copies; uint8_t data_segment_granularity; uint8_t inline_data_granularity; uint8_t 
held_data_granularity; uint8_t reserved3[3]; uint8_t implemented_descriptor_list_length; uint8_t list_of_implemented_descriptor_type_codes[0]; }; struct scsi_extended_copy { uint8_t opcode; uint8_t service_action; #define EC_EC_LID1 0x00 #define EC_EC_LID4 0x01 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved1; uint8_t control; }; struct scsi_ec_cscd_dtsp { uint8_t flags; #define EC_CSCD_FIXED 0x01 #define EC_CSCD_PAD 0x04 uint8_t block_length[3]; }; struct scsi_ec_cscd { uint8_t type_code; #define EC_CSCD_EXT 0xff uint8_t luidt_pdt; #define EC_NUL 0x20 #define EC_LUIDT_MASK 0xc0 #define EC_LUIDT_LUN 0x00 #define EC_LUIDT_PROXY_TOKEN 0x40 uint8_t relative_initiator_port[2]; uint8_t cscd_params[24]; struct scsi_ec_cscd_dtsp dtsp; }; struct scsi_ec_cscd_id { uint8_t type_code; #define EC_CSCD_ID 0xe4 uint8_t luidt_pdt; uint8_t relative_initiator_port[2]; uint8_t codeset; uint8_t id_type; uint8_t reserved; uint8_t length; uint8_t designator[20]; struct scsi_ec_cscd_dtsp dtsp; }; struct scsi_ec_segment { uint8_t type_code; uint8_t flags; #define EC_SEG_DC 0x02 #define EC_SEG_CAT 0x01 uint8_t descr_length[2]; uint8_t params[]; }; struct scsi_ec_segment_b2b { uint8_t type_code; #define EC_SEG_B2B 0x02 uint8_t flags; uint8_t descr_length[2]; uint8_t src_cscd[2]; uint8_t dst_cscd[2]; uint8_t reserved[2]; uint8_t number_of_blocks[2]; uint8_t src_lba[8]; uint8_t dst_lba[8]; }; struct scsi_ec_segment_verify { uint8_t type_code; #define EC_SEG_VERIFY 0x07 uint8_t reserved; uint8_t descr_length[2]; uint8_t src_cscd[2]; uint8_t reserved2[2]; uint8_t tur; uint8_t reserved3[3]; }; struct scsi_ec_segment_register_key { uint8_t type_code; #define EC_SEG_REGISTER_KEY 0x14 uint8_t reserved; uint8_t descr_length[2]; uint8_t reserved2[2]; uint8_t dst_cscd[2]; uint8_t res_key[8]; uint8_t sa_res_key[8]; uint8_t reserved3[4]; }; struct scsi_extended_copy_lid1_data { uint8_t list_identifier; uint8_t flags; #define EC_PRIORITY 0x07 #define EC_LIST_ID_USAGE_MASK 0x18 #define EC_LIST_ID_USAGE_FULL 0x08 #define EC_LIST_ID_USAGE_NOHOLD 0x10 #define EC_LIST_ID_USAGE_NONE 0x18 #define EC_STR 0x20 uint8_t cscd_list_length[2]; uint8_t reserved[4]; uint8_t segment_list_length[4]; uint8_t inline_data_length[4]; uint8_t data[]; }; struct scsi_extended_copy_lid4_data { uint8_t list_format; #define EC_LIST_FORMAT 0x01 uint8_t flags; uint8_t header_cscd_list_length[2]; uint8_t reserved[11]; uint8_t flags2; #define EC_IMMED 0x01 #define EC_G_SENSE 0x02 uint8_t header_cscd_type_code; uint8_t reserved2[3]; uint8_t list_identifier[4]; uint8_t reserved3[18]; uint8_t cscd_list_length[2]; uint8_t segment_list_length[2]; uint8_t inline_data_length[2]; uint8_t data[]; }; struct scsi_copy_operation_abort { uint8_t opcode; uint8_t service_action; #define EC_COA 0x1c uint8_t list_identifier[4]; uint8_t reserved[9]; uint8_t control; }; struct scsi_populate_token { uint8_t opcode; uint8_t service_action; #define EC_PT 0x10 uint8_t reserved[4]; uint8_t list_identifier[4]; uint8_t length[4]; uint8_t group_number; uint8_t control; }; struct scsi_range_desc { uint8_t lba[8]; uint8_t length[4]; uint8_t reserved[4]; }; struct scsi_populate_token_data { uint8_t length[2]; uint8_t flags; #define EC_PT_IMMED 0x01 #define EC_PT_RTV 0x02 uint8_t reserved; uint8_t inactivity_timeout[4]; uint8_t rod_type[4]; uint8_t reserved2[2]; uint8_t range_descriptor_length[2]; struct scsi_range_desc desc[]; }; struct scsi_write_using_token { uint8_t opcode; uint8_t service_action; #define EC_WUT 0x11 uint8_t reserved[4]; uint8_t list_identifier[4]; uint8_t 
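/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * filling the block-to-block segment descriptor above for an EXTENDED
 * COPY parameter list.  The descriptor length field counts the bytes
 * following the first four (0x18 for this descriptor).  Assumes
 * scsi_ulto2b()/scsi_u64to8b() from this header and memset() from the
 * usual environment.
 */
static __inline void
scsi_ec_b2b_fill(struct scsi_ec_segment_b2b *seg, uint16_t src_cscd,
    uint16_t dst_cscd, uint16_t nblocks, uint64_t src_lba, uint64_t dst_lba)
{
	memset(seg, 0, sizeof(*seg));
	seg->type_code = EC_SEG_B2B;
	scsi_ulto2b(sizeof(*seg) - 4, seg->descr_length);
	scsi_ulto2b(src_cscd, seg->src_cscd);
	scsi_ulto2b(dst_cscd, seg->dst_cscd);
	scsi_ulto2b(nblocks, seg->number_of_blocks);
	scsi_u64to8b(src_lba, seg->src_lba);
	scsi_u64to8b(dst_lba, seg->dst_lba);
}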
length[4]; uint8_t group_number; uint8_t control; }; struct scsi_write_using_token_data { uint8_t length[2]; uint8_t flags; #define EC_WUT_IMMED 0x01 #define EC_WUT_DEL_TKN 0x02 uint8_t reserved[5]; uint8_t offset_into_rod[8]; uint8_t rod_token[512]; uint8_t reserved2[6]; uint8_t range_descriptor_length[2]; struct scsi_range_desc desc[]; }; struct scsi_receive_rod_token_information { uint8_t opcode; uint8_t service_action; #define RCS_RRTI 0x07 uint8_t list_identifier[4]; uint8_t reserved[4]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_token { uint8_t type[4]; #define ROD_TYPE_INTERNAL 0x00000000 #define ROD_TYPE_AUR 0x00010000 #define ROD_TYPE_PIT_DEF 0x00800000 #define ROD_TYPE_PIT_VULN 0x00800001 #define ROD_TYPE_PIT_PERS 0x00800002 #define ROD_TYPE_PIT_ANY 0x0080FFFF #define ROD_TYPE_BLOCK_ZERO 0xFFFF0001 uint8_t reserved[2]; uint8_t length[2]; uint8_t body[0]; }; struct scsi_report_all_rod_tokens { uint8_t opcode; uint8_t service_action; #define RCS_RART 0x08 uint8_t reserved[8]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_report_all_rod_tokens_data { uint8_t available_data[4]; uint8_t reserved[4]; uint8_t rod_management_token_list[]; }; struct ata_pass_16 { u_int8_t opcode; u_int8_t protocol; #define AP_EXTEND 0x01 u_int8_t flags; #define AP_FLAG_TLEN_NO_DATA (0 << 0) #define AP_FLAG_TLEN_FEAT (1 << 0) #define AP_FLAG_TLEN_SECT_CNT (2 << 0) #define AP_FLAG_TLEN_STPSIU (3 << 0) #define AP_FLAG_BYT_BLOK_BYTES (0 << 2) #define AP_FLAG_BYT_BLOK_BLOCKS (1 << 2) #define AP_FLAG_TDIR_TO_DEV (0 << 3) #define AP_FLAG_TDIR_FROM_DEV (1 << 3) #define AP_FLAG_CHK_COND (1 << 5) u_int8_t features_ext; u_int8_t features; u_int8_t sector_count_ext; u_int8_t sector_count; u_int8_t lba_low_ext; u_int8_t lba_low; u_int8_t lba_mid_ext; u_int8_t lba_mid; u_int8_t lba_high_ext; u_int8_t lba_high; u_int8_t device; u_int8_t command; u_int8_t control; }; +struct ata_pass_32 { + uint8_t opcode; + uint8_t control; + uint8_t reserved1[5]; + uint8_t length; + uint8_t service_action[2]; +#define ATA_PASS_32_SA 0x1ff0 + uint8_t protocol; + uint8_t flags; + uint8_t reserved2[2]; + uint8_t lba[6]; + uint8_t features[2]; + uint8_t count[2]; + uint8_t device; + uint8_t command; + uint8_t reserved3; + uint8_t icc; + uint8_t auxiliary[4]; +}; + + #define SC_SCSI_1 0x01 #define SC_SCSI_2 0x03 /* * Opcodes */ #define TEST_UNIT_READY 0x00 #define REQUEST_SENSE 0x03 #define READ_6 0x08 #define WRITE_6 0x0A #define INQUIRY 0x12 #define MODE_SELECT_6 0x15 #define MODE_SENSE_6 0x1A #define START_STOP_UNIT 0x1B #define START_STOP 0x1B #define RESERVE 0x16 #define RELEASE 0x17 #define RECEIVE_DIAGNOSTIC 0x1C #define SEND_DIAGNOSTIC 0x1D #define PREVENT_ALLOW 0x1E #define READ_CAPACITY 0x25 #define READ_10 0x28 #define WRITE_10 0x2A #define POSITION_TO_ELEMENT 0x2B #define WRITE_VERIFY_10 0x2E #define VERIFY_10 0x2F #define SYNCHRONIZE_CACHE 0x35 #define READ_DEFECT_DATA_10 0x37 #define WRITE_BUFFER 0x3B #define READ_BUFFER 0x3C #define CHANGE_DEFINITION 0x40 #define WRITE_SAME_10 0x41 #define UNMAP 0x42 #define LOG_SELECT 0x4C #define LOG_SENSE 0x4D #define MODE_SELECT_10 0x55 #define RESERVE_10 0x56 #define RELEASE_10 0x57 #define MODE_SENSE_10 0x5A #define PERSISTENT_RES_IN 0x5E #define PERSISTENT_RES_OUT 0x5F +#define EXTENDED_CDB 0x7E +#define VARIABLE_LEN_CDB 0x7F #define EXTENDED_COPY 0x83 #define RECEIVE_COPY_STATUS 0x84 #define ATA_PASS_16 0x85 #define READ_16 0x88 #define COMPARE_AND_WRITE 0x89 #define WRITE_16 0x8A #define READ_ATTRIBUTE 0x8C #define WRITE_ATTRIBUTE 
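/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * issuing a non-data ATA command through the ATA PASS-THROUGH(16) CDB
 * above.  The ATA command byte itself comes from <sys/ata.h>, not from
 * this header; AP_PROTO_NON_DATA is defined with ata_pass_12 earlier.
 * Assumes memset() from the usual environment.
 */
static __inline void
scsi_ata_pass_16_nodata(struct ata_pass_16 *cdb, uint8_t ata_cmd)
{
	memset(cdb, 0, sizeof(*cdb));
	cdb->opcode = ATA_PASS_16;		/* 0x85, from the opcode list below */
	cdb->protocol = AP_PROTO_NON_DATA;
	cdb->flags = AP_FLAG_TLEN_NO_DATA | AP_FLAG_CHK_COND;
	cdb->command = ata_cmd;
}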
0x8D #define WRITE_VERIFY_16 0x8E #define VERIFY_16 0x8F #define SYNCHRONIZE_CACHE_16 0x91 #define WRITE_SAME_16 0x93 #define READ_BUFFER_16 0x9B #define WRITE_ATOMIC_16 0x9C #define SERVICE_ACTION_IN 0x9E #define REPORT_LUNS 0xA0 #define ATA_PASS_12 0xA1 #define SECURITY_PROTOCOL_IN 0xA2 #define MAINTENANCE_IN 0xA3 #define MAINTENANCE_OUT 0xA4 #define MOVE_MEDIUM 0xA5 #define READ_12 0xA8 #define WRITE_12 0xAA #define WRITE_VERIFY_12 0xAE #define VERIFY_12 0xAF #define SECURITY_PROTOCOL_OUT 0xB5 #define READ_ELEMENT_STATUS 0xB8 #define READ_CD 0xBE /* Maintenance In Service Action Codes */ #define REPORT_IDENTIFYING_INFRMATION 0x05 #define REPORT_TARGET_PORT_GROUPS 0x0A #define REPORT_ALIASES 0x0B #define REPORT_SUPPORTED_OPERATION_CODES 0x0C #define REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0D #define REPORT_PRIORITY 0x0E #define REPORT_TIMESTAMP 0x0F #define MANAGEMENT_PROTOCOL_IN 0x10 /* Maintenance Out Service Action Codes */ #define SET_IDENTIFY_INFORMATION 0x06 #define SET_TARGET_PORT_GROUPS 0x0A #define CHANGE_ALIASES 0x0B #define SET_PRIORITY 0x0E #define SET_TIMESTAMP 0x0F #define MANGAEMENT_PROTOCOL_OUT 0x10 /* * Device Types */ #define T_DIRECT 0x00 #define T_SEQUENTIAL 0x01 #define T_PRINTER 0x02 #define T_PROCESSOR 0x03 #define T_WORM 0x04 #define T_CDROM 0x05 #define T_SCANNER 0x06 #define T_OPTICAL 0x07 #define T_CHANGER 0x08 #define T_COMM 0x09 #define T_ASC0 0x0a #define T_ASC1 0x0b #define T_STORARRAY 0x0c #define T_ENCLOSURE 0x0d #define T_RBC 0x0e #define T_OCRW 0x0f #define T_OSD 0x11 #define T_ADC 0x12 +#define T_ZBC_HM 0x14 #define T_NODEVICE 0x1f #define T_ANY 0xff /* Used in Quirk table matches */ #define T_REMOV 1 #define T_FIXED 0 /* * This length is the initial inquiry length used by the probe code, as * well as the length necessary for scsi_print_inquiry() to function * correctly. If either use requires a different length in the future, * the two values should be de-coupled. */ #define SHORT_INQUIRY_LENGTH 36 struct scsi_inquiry_data { u_int8_t device; #define SID_TYPE(inq_data) ((inq_data)->device & 0x1f) #define SID_QUAL(inq_data) (((inq_data)->device & 0xE0) >> 5) #define SID_QUAL_LU_CONNECTED 0x00 /* * The specified peripheral device * type is currently connected to * logical unit. If the target cannot * determine whether or not a physical * device is currently connected, it * shall also use this peripheral * qualifier when returning the INQUIRY * data. This peripheral qualifier * does not mean that the device is * ready for access by the initiator. */ #define SID_QUAL_LU_OFFLINE 0x01 /* * The target is capable of supporting * the specified peripheral device type * on this logical unit; however, the * physical device is not currently * connected to this logical unit. */ #define SID_QUAL_RSVD 0x02 #define SID_QUAL_BAD_LU 0x03 /* * The target is not capable of * supporting a physical device on * this logical unit. For this * peripheral qualifier the peripheral * device type shall be set to 1Fh to * provide compatibility with previous * versions of SCSI. All other * peripheral device type values are * reserved for this peripheral * qualifier. 
*/ #define SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ((SID_QUAL(inq_data) & 0x04) != 0) u_int8_t dev_qual2; #define SID_QUAL2 0x7F #define SID_LU_CONG 0x40 #define SID_RMB 0x80 #define SID_IS_REMOVABLE(inq_data) (((inq_data)->dev_qual2 & SID_RMB) != 0) u_int8_t version; #define SID_ANSI_REV(inq_data) ((inq_data)->version & 0x07) #define SCSI_REV_0 0 #define SCSI_REV_CCS 1 #define SCSI_REV_2 2 #define SCSI_REV_SPC 3 #define SCSI_REV_SPC2 4 #define SCSI_REV_SPC3 5 #define SCSI_REV_SPC4 6 #define SID_ECMA 0x38 #define SID_ISO 0xC0 u_int8_t response_format; #define SID_AENC 0x80 #define SID_TrmIOP 0x40 #define SID_NormACA 0x20 #define SID_HiSup 0x10 u_int8_t additional_length; #define SID_ADDITIONAL_LENGTH(iqd) \ ((iqd)->additional_length + \ __offsetof(struct scsi_inquiry_data, additional_length) + 1) u_int8_t spc3_flags; #define SPC3_SID_PROTECT 0x01 #define SPC3_SID_3PC 0x08 #define SPC3_SID_TPGS_MASK 0x30 #define SPC3_SID_TPGS_IMPLICIT 0x10 #define SPC3_SID_TPGS_EXPLICIT 0x20 #define SPC3_SID_ACC 0x40 #define SPC3_SID_SCCS 0x80 u_int8_t spc2_flags; #define SPC2_SID_ADDR16 0x01 #define SPC2_SID_MChngr 0x08 #define SPC2_SID_MultiP 0x10 #define SPC2_SID_EncServ 0x40 #define SPC2_SID_BQueue 0x80 #define INQ_DATA_TQ_ENABLED(iqd) \ ((SID_ANSI_REV(iqd) < SCSI_REV_SPC2)? ((iqd)->flags & SID_CmdQue) : \ (((iqd)->flags & SID_CmdQue) && !((iqd)->spc2_flags & SPC2_SID_BQueue)) || \ (!((iqd)->flags & SID_CmdQue) && ((iqd)->spc2_flags & SPC2_SID_BQueue))) u_int8_t flags; #define SID_SftRe 0x01 #define SID_CmdQue 0x02 #define SID_Linked 0x08 #define SID_Sync 0x10 #define SID_WBus16 0x20 #define SID_WBus32 0x40 #define SID_RelAdr 0x80 #define SID_VENDOR_SIZE 8 char vendor[SID_VENDOR_SIZE]; #define SID_PRODUCT_SIZE 16 char product[SID_PRODUCT_SIZE]; #define SID_REVISION_SIZE 4 char revision[SID_REVISION_SIZE]; /* * The following fields were taken from SCSI Primary Commands - 2 * (SPC-2) Revision 14, Dated 11 November 1999 */ #define SID_VENDOR_SPECIFIC_0_SIZE 20 u_int8_t vendor_specific0[SID_VENDOR_SPECIFIC_0_SIZE]; /* * An extension of SCSI Parallel Specific Values */ #define SID_SPI_IUS 0x01 #define SID_SPI_QAS 0x02 #define SID_SPI_CLOCK_ST 0x00 #define SID_SPI_CLOCK_DT 0x04 #define SID_SPI_CLOCK_DT_ST 0x0C #define SID_SPI_MASK 0x0F u_int8_t spi3data; u_int8_t reserved2; /* * Version Descriptors, stored 2 byte values. */ u_int8_t version1[2]; u_int8_t version2[2]; u_int8_t version3[2]; u_int8_t version4[2]; u_int8_t version5[2]; u_int8_t version6[2]; u_int8_t version7[2]; u_int8_t version8[2]; u_int8_t reserved3[22]; #define SID_VENDOR_SPECIFIC_1_SIZE 160 u_int8_t vendor_specific1[SID_VENDOR_SPECIFIC_1_SIZE]; }; /* * This structure is more suited to initiator operation, because the * maximum number of supported pages is already allocated. */ struct scsi_vpd_supported_page_list { u_int8_t device; u_int8_t page_code; #define SVPD_SUPPORTED_PAGE_LIST 0x00 #define SVPD_SUPPORTED_PAGES_HDR_LEN 4 u_int8_t reserved; u_int8_t length; /* number of VPD entries */ #define SVPD_SUPPORTED_PAGES_SIZE 251 u_int8_t list[SVPD_SUPPORTED_PAGES_SIZE]; }; /* * This structure is more suited to target operation, because the * number of supported pages is left to the user to allocate. 
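/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * classifying a device from the INQUIRY data above using the
 * SID_QUAL()/SID_TYPE() accessors and the T_* device type codes.
 */
static __inline int
scsi_inq_is_direct_access(struct scsi_inquiry_data *inq)
{
	return (SID_QUAL(inq) == SID_QUAL_LU_CONNECTED &&
	    SID_TYPE(inq) == T_DIRECT);
}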
*/ struct scsi_vpd_supported_pages { u_int8_t device; u_int8_t page_code; u_int8_t reserved; #define SVPD_SUPPORTED_PAGES 0x00 u_int8_t length; u_int8_t page_list[0]; }; struct scsi_vpd_unit_serial_number { u_int8_t device; u_int8_t page_code; #define SVPD_UNIT_SERIAL_NUMBER 0x80 u_int8_t reserved; u_int8_t length; /* serial number length */ #define SVPD_SERIAL_NUM_SIZE 251 u_int8_t serial_num[SVPD_SERIAL_NUM_SIZE]; }; struct scsi_vpd_device_id { u_int8_t device; u_int8_t page_code; #define SVPD_DEVICE_ID 0x83 #define SVPD_DEVICE_ID_MAX_SIZE 252 #define SVPD_DEVICE_ID_HDR_LEN \ __offsetof(struct scsi_vpd_device_id, desc_list) u_int8_t length[2]; u_int8_t desc_list[]; }; struct scsi_vpd_id_descriptor { u_int8_t proto_codeset; /* * See the SCSI_PROTO definitions above for the protocols. */ #define SVPD_ID_PROTO_SHIFT 4 #define SVPD_ID_CODESET_BINARY 0x01 #define SVPD_ID_CODESET_ASCII 0x02 #define SVPD_ID_CODESET_UTF8 0x03 #define SVPD_ID_CODESET_MASK 0x0f u_int8_t id_type; #define SVPD_ID_PIV 0x80 #define SVPD_ID_ASSOC_LUN 0x00 #define SVPD_ID_ASSOC_PORT 0x10 #define SVPD_ID_ASSOC_TARGET 0x20 #define SVPD_ID_ASSOC_MASK 0x30 #define SVPD_ID_TYPE_VENDOR 0x00 #define SVPD_ID_TYPE_T10 0x01 #define SVPD_ID_TYPE_EUI64 0x02 #define SVPD_ID_TYPE_NAA 0x03 #define SVPD_ID_TYPE_RELTARG 0x04 #define SVPD_ID_TYPE_TPORTGRP 0x05 #define SVPD_ID_TYPE_LUNGRP 0x06 #define SVPD_ID_TYPE_MD5_LUN_ID 0x07 #define SVPD_ID_TYPE_SCSI_NAME 0x08 #define SVPD_ID_TYPE_PROTO 0x09 #define SVPD_ID_TYPE_UUID 0x0a #define SVPD_ID_TYPE_MASK 0x0f u_int8_t reserved; u_int8_t length; #define SVPD_DEVICE_ID_DESC_HDR_LEN \ __offsetof(struct scsi_vpd_id_descriptor, identifier) u_int8_t identifier[]; }; struct scsi_vpd_id_t10 { u_int8_t vendor[8]; u_int8_t vendor_spec_id[0]; }; struct scsi_vpd_id_eui64 { u_int8_t ieee_company_id[3]; u_int8_t extension_id[5]; }; struct scsi_vpd_id_naa_basic { uint8_t naa; /* big endian, packed: uint8_t naa : 4; uint8_t naa_desig : 4; */ #define SVPD_ID_NAA_NAA_SHIFT 4 #define SVPD_ID_NAA_IEEE_EXT 0x02 #define SVPD_ID_NAA_LOCAL_REG 0x03 #define SVPD_ID_NAA_IEEE_REG 0x05 #define SVPD_ID_NAA_IEEE_REG_EXT 0x06 uint8_t naa_data[]; }; struct scsi_vpd_id_naa_ieee_extended_id { uint8_t naa; uint8_t vendor_specific_id_a; uint8_t ieee_company_id[3]; uint8_t vendor_specific_id_b[4]; }; struct scsi_vpd_id_naa_local_reg { uint8_t naa; uint8_t local_value[7]; }; struct scsi_vpd_id_naa_ieee_reg { uint8_t naa; uint8_t reg_value[7]; /* big endian, packed: uint8_t naa_basic : 4; uint8_t ieee_company_id_0 : 4; uint8_t ieee_company_id_1[2]; uint8_t ieee_company_id_2 : 4; uint8_t vendor_specific_id_0 : 4; uint8_t vendor_specific_id_1[4]; */ }; struct scsi_vpd_id_naa_ieee_reg_extended { uint8_t naa; uint8_t reg_value[15]; /* big endian, packed: uint8_t naa_basic : 4; uint8_t ieee_company_id_0 : 4; uint8_t ieee_company_id_1[2]; uint8_t ieee_company_id_2 : 4; uint8_t vendor_specific_id_0 : 4; uint8_t vendor_specific_id_1[4]; uint8_t vendor_specific_id_ext[8]; */ }; struct scsi_vpd_id_rel_trgt_port_id { uint8_t obsolete[2]; uint8_t rel_trgt_port_id[2]; }; struct scsi_vpd_id_trgt_port_grp_id { uint8_t reserved[2]; uint8_t trgt_port_grp[2]; }; struct scsi_vpd_id_lun_grp_id { uint8_t reserved[2]; uint8_t log_unit_grp[2]; }; struct scsi_vpd_id_md5_lun_id { uint8_t lun_id[16]; }; struct scsi_vpd_id_scsi_name { uint8_t name_string[256]; }; struct scsi_service_action_in { uint8_t opcode; uint8_t service_action; uint8_t action_dependent[13]; uint8_t control; }; struct scsi_vpd_extended_inquiry_data { uint8_t device; uint8_t 
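/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * stepping through the designation descriptors of a device
 * identification VPD page.  Each descriptor is the 4-byte
 * scsi_vpd_id_descriptor header above plus length identifier bytes;
 * callers bound the walk with scsi_2btoul() of the page length.
 */
static __inline struct scsi_vpd_id_descriptor *
scsi_vpd_id_desc_next(struct scsi_vpd_id_descriptor *desc)
{
	return ((struct scsi_vpd_id_descriptor *)
	    (desc->identifier + desc->length));
}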
page_code; #define SVPD_EXTENDED_INQUIRY_DATA 0x86 uint8_t page_length[2]; uint8_t flags1; /* These values are for direct access devices */ #define SVPD_EID_AM_MASK 0xC0 #define SVPD_EID_AM_DEFER 0x80 #define SVPD_EID_AM_IMMED 0x40 #define SVPD_EID_AM_UNDEFINED 0x00 #define SVPD_EID_AM_RESERVED 0xc0 #define SVPD_EID_SPT 0x38 #define SVPD_EID_SPT_1 0x00 #define SVPD_EID_SPT_12 0x08 #define SVPD_EID_SPT_2 0x10 #define SVPD_EID_SPT_13 0x18 #define SVPD_EID_SPT_3 0x20 #define SVPD_EID_SPT_23 0x28 #define SVPD_EID_SPT_123 0x38 /* These values are for sequential access devices */ #define SVPD_EID_SA_SPT_LBP 0x08 #define SVPD_EID_GRD_CHK 0x04 #define SVPD_EID_APP_CHK 0x02 #define SVPD_EID_REF_CHK 0x01 uint8_t flags2; #define SVPD_EID_UASK_SUP 0x20 #define SVPD_EID_GROUP_SUP 0x10 #define SVPD_EID_PRIOR_SUP 0x08 #define SVPD_EID_HEADSUP 0x04 #define SVPD_EID_ORDSUP 0x02 #define SVPD_EID_SIMPSUP 0x01 uint8_t flags3; #define SVPD_EID_WU_SUP 0x08 #define SVPD_EID_CRD_SUP 0x04 #define SVPD_EID_NV_SUP 0x02 #define SVPD_EID_V_SUP 0x01 uint8_t flags4; #define SVPD_EID_P_I_I_SUP 0x10 #define SVPD_EID_LUICLT 0x01 uint8_t flags5; #define SVPD_EID_R_SUP 0x10 #define SVPD_EID_CBCS 0x01 uint8_t flags6; #define SVPD_EID_MULTI_I_T_FW 0x0F #define SVPD_EID_MC_VENDOR_SPEC 0x00 #define SVPD_EID_MC_MODE_1 0x01 #define SVPD_EID_MC_MODE_2 0x02 #define SVPD_EID_MC_MODE_3 0x03 uint8_t est[2]; uint8_t flags7; #define SVPD_EID_POA_SUP 0x80 #define SVPD_EID_HRA_SUP 0x80 #define SVPD_EID_VSA_SUP 0x80 uint8_t max_sense_length; uint8_t reserved2[50]; }; struct scsi_vpd_mode_page_policy_descr { uint8_t page_code; uint8_t subpage_code; uint8_t policy; #define SVPD_MPP_SHARED 0x00 #define SVPD_MPP_PORT 0x01 #define SVPD_MPP_I_T 0x03 #define SVPD_MPP_MLUS 0x80 uint8_t reserved; }; struct scsi_vpd_mode_page_policy { uint8_t device; uint8_t page_code; #define SVPD_MODE_PAGE_POLICY 0x87 uint8_t page_length[2]; struct scsi_vpd_mode_page_policy_descr descr[0]; }; struct scsi_diag_page { uint8_t page_code; uint8_t page_specific_flags; uint8_t length[2]; uint8_t params[0]; }; struct scsi_vpd_port_designation { uint8_t reserved[2]; uint8_t relative_port_id[2]; uint8_t reserved2[2]; uint8_t initiator_transportid_length[2]; uint8_t initiator_transportid[0]; }; struct scsi_vpd_port_designation_cont { uint8_t reserved[2]; uint8_t target_port_descriptors_length[2]; struct scsi_vpd_id_descriptor target_port_descriptors[0]; }; struct scsi_vpd_scsi_ports { u_int8_t device; u_int8_t page_code; #define SVPD_SCSI_PORTS 0x88 u_int8_t page_length[2]; struct scsi_vpd_port_designation design[]; }; /* * ATA Information VPD Page based on * T10/2126-D Revision 04 */ #define SVPD_ATA_INFORMATION 0x89 struct scsi_vpd_tpc_descriptor { uint8_t desc_type[2]; uint8_t desc_length[2]; uint8_t parameters[]; }; struct scsi_vpd_tpc_descriptor_bdrl { uint8_t desc_type[2]; #define SVPD_TPC_BDRL 0x0000 uint8_t desc_length[2]; uint8_t vendor_specific[6]; uint8_t maximum_ranges[2]; uint8_t maximum_inactivity_timeout[4]; uint8_t default_inactivity_timeout[4]; uint8_t maximum_token_transfer_size[8]; uint8_t optimal_transfer_count[8]; }; struct scsi_vpd_tpc_descriptor_sc_descr { uint8_t opcode; uint8_t sa_length; uint8_t supported_service_actions[0]; }; struct scsi_vpd_tpc_descriptor_sc { uint8_t desc_type[2]; #define SVPD_TPC_SC 0x0001 uint8_t desc_length[2]; uint8_t list_length; struct scsi_vpd_tpc_descriptor_sc_descr descr[]; }; struct scsi_vpd_tpc_descriptor_pd { uint8_t desc_type[2]; #define SVPD_TPC_PD 0x0004 uint8_t desc_length[2]; uint8_t reserved[4]; uint8_t 
maximum_cscd_descriptor_count[2]; uint8_t maximum_segment_descriptor_count[2]; uint8_t maximum_descriptor_list_length[4]; uint8_t maximum_inline_data_length[4]; uint8_t reserved2[12]; }; struct scsi_vpd_tpc_descriptor_sd { uint8_t desc_type[2]; #define SVPD_TPC_SD 0x0008 uint8_t desc_length[2]; uint8_t list_length; uint8_t supported_descriptor_codes[]; }; struct scsi_vpd_tpc_descriptor_sdid { uint8_t desc_type[2]; #define SVPD_TPC_SDID 0x000C uint8_t desc_length[2]; uint8_t list_length[2]; uint8_t supported_descriptor_ids[]; }; struct scsi_vpd_tpc_descriptor_rtf_block { uint8_t type_format; #define SVPD_TPC_RTF_BLOCK 0x00 uint8_t reserved; uint8_t desc_length[2]; uint8_t reserved2[2]; uint8_t optimal_length_granularity[2]; uint8_t maximum_bytes[8]; uint8_t optimal_bytes[8]; uint8_t optimal_bytes_to_token_per_segment[8]; uint8_t optimal_bytes_from_token_per_segment[8]; uint8_t reserved3[8]; }; struct scsi_vpd_tpc_descriptor_rtf { uint8_t desc_type[2]; #define SVPD_TPC_RTF 0x0106 uint8_t desc_length[2]; uint8_t remote_tokens; uint8_t reserved[11]; uint8_t minimum_token_lifetime[4]; uint8_t maximum_token_lifetime[4]; uint8_t maximum_token_inactivity_timeout[4]; uint8_t reserved2[18]; uint8_t type_specific_features_length[2]; uint8_t type_specific_features[0]; }; struct scsi_vpd_tpc_descriptor_srtd { uint8_t rod_type[4]; uint8_t flags; #define SVPD_TPC_SRTD_TOUT 0x01 #define SVPD_TPC_SRTD_TIN 0x02 #define SVPD_TPC_SRTD_ECPY 0x80 uint8_t reserved; uint8_t preference_indicator[2]; uint8_t reserved2[56]; }; struct scsi_vpd_tpc_descriptor_srt { uint8_t desc_type[2]; #define SVPD_TPC_SRT 0x0108 uint8_t desc_length[2]; uint8_t reserved[2]; uint8_t rod_type_descriptors_length[2]; uint8_t rod_type_descriptors[0]; }; struct scsi_vpd_tpc_descriptor_gco { uint8_t desc_type[2]; #define SVPD_TPC_GCO 0x8001 uint8_t desc_length[2]; uint8_t total_concurrent_copies[4]; uint8_t maximum_identified_concurrent_copies[4]; uint8_t maximum_segment_length[4]; uint8_t data_segment_granularity; uint8_t inline_data_granularity; uint8_t reserved[18]; }; struct scsi_vpd_tpc { uint8_t device; uint8_t page_code; #define SVPD_SCSI_TPC 0x8F uint8_t page_length[2]; struct scsi_vpd_tpc_descriptor descr[]; }; /* * Block Device Characteristics VPD Page based on * T10/1799-D Revision 31 */ struct scsi_vpd_block_characteristics { u_int8_t device; u_int8_t page_code; #define SVPD_BDC 0xB1 u_int8_t page_length[2]; u_int8_t medium_rotation_rate[2]; #define SVPD_BDC_RATE_NOT_REPORTED 0x00 #define SVPD_BDC_RATE_NON_ROTATING 0x01 u_int8_t reserved1; u_int8_t nominal_form_factor; #define SVPD_BDC_FORM_NOT_REPORTED 0x00 #define SVPD_BDC_FORM_5_25INCH 0x01 #define SVPD_BDC_FORM_3_5INCH 0x02 #define SVPD_BDC_FORM_2_5INCH 0x03 #define SVPD_BDC_FORM_1_5INCH 0x04 #define SVPD_BDC_FORM_LESSTHAN_1_5INCH 0x05 u_int8_t reserved2[56]; }; /* * Block Device Characteristics VPD Page */ struct scsi_vpd_block_device_characteristics { uint8_t device; uint8_t page_code; #define SVPD_BDC 0xB1 uint8_t page_length[2]; uint8_t medium_rotation_rate[2]; #define SVPD_NOT_REPORTED 0x0000 #define SVPD_NON_ROTATING 0x0001 uint8_t product_type; uint8_t wab_wac_ff; uint8_t flags; #define SVPD_VBULS 0x01 #define SVPD_FUAB 0x02 -#define SVPD_HAW_ZBC 0x10 +#define SVPD_ZBC_NR 0x00 /* Not Reported */ +#define SVPD_HAW_ZBC 0x10 /* Host Aware */ +#define SVPD_DM_ZBC 0x20 /* Drive Managed */ +#define SVPD_ZBC_MASK 0x30 /* Zoned mask */ uint8_t reserved[55]; }; +#define SBDC_IS_PRESENT(bdc, length, field) \ + ((length >= offsetof(struct scsi_vpd_block_device_characteristics, \ 
+ field) + sizeof(bdc->field)) ? 1 : 0) + /* * Logical Block Provisioning VPD Page based on * T10/1799-D Revision 31 */ struct scsi_vpd_logical_block_prov { u_int8_t device; u_int8_t page_code; #define SVPD_LBP 0xB2 u_int8_t page_length[2]; #define SVPD_LBP_PL_BASIC 0x04 u_int8_t threshold_exponent; u_int8_t flags; #define SVPD_LBP_UNMAP 0x80 #define SVPD_LBP_WS16 0x40 #define SVPD_LBP_WS10 0x20 #define SVPD_LBP_RZ 0x04 #define SVPD_LBP_ANC_SUP 0x02 #define SVPD_LBP_DP 0x01 u_int8_t prov_type; #define SVPD_LBP_RESOURCE 0x01 #define SVPD_LBP_THIN 0x02 u_int8_t reserved; /* * Provisioning Group Descriptor can be here if SVPD_LBP_DP is set * Its size can be determined from page_length - 4 */ }; /* * Block Limits VPD Page based on SBC-4 Revision 2 */ struct scsi_vpd_block_limits { u_int8_t device; u_int8_t page_code; #define SVPD_BLOCK_LIMITS 0xB0 u_int8_t page_length[2]; #define SVPD_BL_PL_BASIC 0x10 #define SVPD_BL_PL_TP 0x3C u_int8_t reserved1; u_int8_t max_cmp_write_len; u_int8_t opt_txfer_len_grain[2]; u_int8_t max_txfer_len[4]; u_int8_t opt_txfer_len[4]; u_int8_t max_prefetch[4]; u_int8_t max_unmap_lba_cnt[4]; u_int8_t max_unmap_blk_cnt[4]; u_int8_t opt_unmap_grain[4]; u_int8_t unmap_grain_align[4]; u_int8_t max_write_same_length[8]; u_int8_t max_atomic_transfer_length[4]; u_int8_t atomic_alignment[4]; u_int8_t atomic_transfer_length_granularity[4]; u_int8_t max_atomic_transfer_length_with_atomic_boundary[4]; u_int8_t max_atomic_boundary_size[4]; }; +/* + * Zoned Block Device Characteristics VPD page. + * From ZBC-r04, dated August 12, 2015. + */ +struct scsi_vpd_zoned_bdc { + uint8_t device; + uint8_t page_code; +#define SVPD_ZONED_BDC 0xB6 + uint8_t page_length[2]; +#define SVPD_ZBDC_PL 0x3C + uint8_t flags; +#define SVPD_ZBDC_URSWRZ 0x01 + uint8_t reserved1[3]; + uint8_t optimal_seq_zones[4]; +#define SVPD_ZBDC_OPT_SEQ_NR 0xffffffff + uint8_t optimal_nonseq_zones[4]; +#define SVPD_ZBDC_OPT_NONSEQ_NR 0xffffffff + uint8_t max_seq_req_zones[4]; +#define SVPD_ZBDC_MAX_SEQ_UNLIMITED 0xffffffff + uint8_t reserved2[44]; +}; + struct scsi_read_capacity { u_int8_t opcode; u_int8_t byte2; #define SRC_RELADR 0x01 u_int8_t addr[4]; u_int8_t unused[2]; u_int8_t pmi; #define SRC_PMI 0x01 u_int8_t control; }; struct scsi_read_capacity_16 { uint8_t opcode; #define SRC16_SERVICE_ACTION 0x10 uint8_t service_action; uint8_t addr[8]; uint8_t alloc_len[4]; #define SRC16_PMI 0x01 #define SRC16_RELADR 0x02 uint8_t reladr; uint8_t control; }; struct scsi_read_capacity_data { u_int8_t addr[4]; u_int8_t length[4]; }; struct scsi_read_capacity_data_long { uint8_t addr[8]; uint8_t length[4]; #define SRC16_PROT_EN 0x01 #define SRC16_P_TYPE 0x0e #define SRC16_PTYPE_1 0x00 #define SRC16_PTYPE_2 0x02 #define SRC16_PTYPE_3 0x04 uint8_t prot; #define SRC16_LBPPBE 0x0f #define SRC16_PI_EXPONENT 0xf0 #define SRC16_PI_EXPONENT_SHIFT 4 uint8_t prot_lbppbe; #define SRC16_LALBA 0x3f #define SRC16_LBPRZ 0x40 #define SRC16_LBPME 0x80 /* * Alternate versions of these macros that are intended for use on a 16-bit * version of the lalba_lbp field instead of the array of 2 8 bit numbers.
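/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * reading the zoned model out of the block device characteristics VPD
 * page, guarding short returns with the SBDC_IS_PRESENT() macro above.
 * Returns one of SVPD_ZBC_NR, SVPD_HAW_ZBC, or SVPD_DM_ZBC.
 */
static __inline int
scsi_bdc_zoned_model(struct scsi_vpd_block_device_characteristics *bdc,
    uint32_t valid_len)
{
	if (SBDC_IS_PRESENT(bdc, valid_len, flags) == 0)
		return (SVPD_ZBC_NR);
	return (bdc->flags & SVPD_ZBC_MASK);
}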
*/ #define SRC16_LALBA_A 0x3fff #define SRC16_LBPRZ_A 0x4000 #define SRC16_LBPME_A 0x8000 uint8_t lalba_lbp[2]; uint8_t reserved[16]; }; struct scsi_get_lba_status { uint8_t opcode; #define SGLS_SERVICE_ACTION 0x12 uint8_t service_action; uint8_t addr[8]; uint8_t alloc_len[4]; uint8_t reserved; uint8_t control; }; struct scsi_get_lba_status_data_descr { uint8_t addr[8]; uint8_t length[4]; uint8_t status; uint8_t reserved[3]; }; struct scsi_get_lba_status_data { uint8_t length[4]; uint8_t reserved[4]; struct scsi_get_lba_status_data_descr descr[]; }; struct scsi_report_luns { uint8_t opcode; uint8_t reserved1; #define RPL_REPORT_DEFAULT 0x00 #define RPL_REPORT_WELLKNOWN 0x01 #define RPL_REPORT_ALL 0x02 #define RPL_REPORT_ADMIN 0x10 #define RPL_REPORT_NONSUBSID 0x11 #define RPL_REPORT_CONGLOM 0x12 uint8_t select_report; uint8_t reserved2[3]; uint8_t length[4]; uint8_t reserved3; uint8_t control; }; struct scsi_report_luns_lundata { uint8_t lundata[8]; #define RPL_LUNDATA_PERIPH_BUS_MASK 0x3f #define RPL_LUNDATA_FLAT_LUN_MASK 0x3f #define RPL_LUNDATA_FLAT_LUN_BITS 0x06 #define RPL_LUNDATA_LUN_TARG_MASK 0x3f #define RPL_LUNDATA_LUN_BUS_MASK 0xe0 #define RPL_LUNDATA_LUN_LUN_MASK 0x1f #define RPL_LUNDATA_EXT_LEN_MASK 0x30 #define RPL_LUNDATA_EXT_EAM_MASK 0x0f #define RPL_LUNDATA_EXT_EAM_WK 0x01 #define RPL_LUNDATA_EXT_EAM_NOT_SPEC 0x0f #define RPL_LUNDATA_ATYP_MASK 0xc0 /* MBZ for type 0 lun */ #define RPL_LUNDATA_ATYP_PERIPH 0x00 #define RPL_LUNDATA_ATYP_FLAT 0x40 #define RPL_LUNDATA_ATYP_LUN 0x80 #define RPL_LUNDATA_ATYP_EXTLUN 0xc0 }; struct scsi_report_luns_data { u_int8_t length[4]; /* length of LUN inventory, in bytes */ u_int8_t reserved[4]; /* unused */ /* * LUN inventory- we only support the type zero form for now. */ struct scsi_report_luns_lundata luns[0]; }; struct scsi_target_group { uint8_t opcode; uint8_t service_action; #define STG_PDF_MASK 0xe0 #define STG_PDF_LENGTH 0x00 #define STG_PDF_EXTENDED 0x20 uint8_t reserved1[4]; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_target_port_descriptor { uint8_t reserved[2]; uint8_t relative_target_port_identifier[2]; uint8_t desc_list[]; }; struct scsi_target_port_group_descriptor { uint8_t pref_state; #define TPG_PRIMARY 0x80 #define TPG_ASYMMETRIC_ACCESS_STATE_MASK 0xf #define TPG_ASYMMETRIC_ACCESS_OPTIMIZED 0x0 #define TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED 0x1 #define TPG_ASYMMETRIC_ACCESS_STANDBY 0x2 #define TPG_ASYMMETRIC_ACCESS_UNAVAILABLE 0x3 #define TPG_ASYMMETRIC_ACCESS_LBA_DEPENDENT 0x4 #define TPG_ASYMMETRIC_ACCESS_OFFLINE 0xE #define TPG_ASYMMETRIC_ACCESS_TRANSITIONING 0xF uint8_t support; #define TPG_AO_SUP 0x01 #define TPG_AN_SUP 0x02 #define TPG_S_SUP 0x04 #define TPG_U_SUP 0x08 #define TPG_LBD_SUP 0x10 #define TPG_O_SUP 0x40 #define TPG_T_SUP 0x80 uint8_t target_port_group[2]; uint8_t reserved; uint8_t status; #define TPG_UNAVLBL 0 #define TPG_SET_BY_STPG 0x01 #define TPG_IMPLICIT 0x02 uint8_t vendor_specific; uint8_t target_port_count; struct scsi_target_port_descriptor descriptors[]; }; struct scsi_target_group_data { uint8_t length[4]; /* length of returned data, in bytes */ struct scsi_target_port_group_descriptor groups[]; }; struct scsi_target_group_data_extended { uint8_t length[4]; /* length of returned data, in bytes */ uint8_t format_type; /* STG_PDF_LENGTH or STG_PDF_EXTENDED */ uint8_t implicit_transition_time; uint8_t reserved[2]; struct scsi_target_port_group_descriptor groups[]; }; struct scsi_security_protocol_in { uint8_t opcode; uint8_t security_protocol; #define SPI_PROT_INFORMATION 
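/*
 * A minimal sketch (a hypothetical helper, not part of this header):
 * the REPORT LUNS inventory length above is in bytes, so the number of
 * reported LUNs falls out by dividing by the 8-byte entry size.
 * Assumes scsi_4btoul() from this header.
 */
static __inline uint32_t
scsi_report_luns_count(struct scsi_report_luns_data *data)
{
	return (scsi_4btoul(data->length) /
	    sizeof(struct scsi_report_luns_lundata));
}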
0x00 #define SPI_PROT_CBCS 0x07 #define SPI_PROT_TAPE_DATA_ENC 0x20 #define SPI_PROT_DATA_ENC_CONFIG 0x21 #define SPI_PROT_SA_CREATE_CAP 0x40 #define SPI_PROT_IKEV2_SCSI 0x41 #define SPI_PROT_JEDEC_UFS 0xEC #define SPI_PROT_SDCARD_TFSSS 0xED #define SPI_PROT_AUTH_HOST_TRANSIENT 0xEE #define SPI_PROT_ATA_DEVICE_PASSWORD 0xEF uint8_t security_protocol_specific[2]; uint8_t byte4; #define SPI_INC_512 0x80 uint8_t reserved1; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; struct scsi_security_protocol_out { uint8_t opcode; uint8_t security_protocol; uint8_t security_protocol_specific[2]; uint8_t byte4; #define SPO_INC_512 0x80 uint8_t reserved1; uint8_t length[4]; uint8_t reserved2; uint8_t control; }; typedef enum { SSD_TYPE_NONE, SSD_TYPE_FIXED, SSD_TYPE_DESC } scsi_sense_data_type; typedef enum { SSD_ELEM_NONE, SSD_ELEM_SKIP, SSD_ELEM_DESC, SSD_ELEM_SKS, SSD_ELEM_COMMAND, SSD_ELEM_INFO, SSD_ELEM_FRU, SSD_ELEM_STREAM, SSD_ELEM_MAX } scsi_sense_elem_type; struct scsi_sense_data { uint8_t error_code; /* * SPC-4 says that the maximum length of sense data is 252 bytes. * So this structure is exactly 252 bytes long. */ #define SSD_FULL_SIZE 252 uint8_t sense_buf[SSD_FULL_SIZE - 1]; /* * XXX KDM is this still a reasonable minimum size? */ #define SSD_MIN_SIZE 18 /* * Maximum value for the extra_len field in the sense data. */ #define SSD_EXTRA_MAX 244 }; /* * Fixed format sense data. */ struct scsi_sense_data_fixed { u_int8_t error_code; #define SSD_ERRCODE 0x7F #define SSD_CURRENT_ERROR 0x70 #define SSD_DEFERRED_ERROR 0x71 #define SSD_ERRCODE_VALID 0x80 u_int8_t segment; u_int8_t flags; #define SSD_KEY 0x0F #define SSD_KEY_NO_SENSE 0x00 #define SSD_KEY_RECOVERED_ERROR 0x01 #define SSD_KEY_NOT_READY 0x02 #define SSD_KEY_MEDIUM_ERROR 0x03 #define SSD_KEY_HARDWARE_ERROR 0x04 #define SSD_KEY_ILLEGAL_REQUEST 0x05 #define SSD_KEY_UNIT_ATTENTION 0x06 #define SSD_KEY_DATA_PROTECT 0x07 #define SSD_KEY_BLANK_CHECK 0x08 #define SSD_KEY_Vendor_Specific 0x09 #define SSD_KEY_COPY_ABORTED 0x0a #define SSD_KEY_ABORTED_COMMAND 0x0b #define SSD_KEY_EQUAL 0x0c #define SSD_KEY_VOLUME_OVERFLOW 0x0d #define SSD_KEY_MISCOMPARE 0x0e #define SSD_KEY_COMPLETED 0x0f #define SSD_ILI 0x20 #define SSD_EOM 0x40 #define SSD_FILEMARK 0x80 u_int8_t info[4]; u_int8_t extra_len; u_int8_t cmd_spec_info[4]; u_int8_t add_sense_code; u_int8_t add_sense_code_qual; u_int8_t fru; u_int8_t sense_key_spec[3]; #define SSD_SCS_VALID 0x80 #define SSD_FIELDPTR_CMD 0x40 #define SSD_BITPTR_VALID 0x08 #define SSD_BITPTR_VALUE 0x07 u_int8_t extra_bytes[14]; #define SSD_FIXED_IS_PRESENT(sense, length, field) \ ((length >= (offsetof(struct scsi_sense_data_fixed, field) + \ sizeof(sense->field))) ? 1 :0) #define SSD_FIXED_IS_FILLED(sense, field) \ ((((offsetof(struct scsi_sense_data_fixed, field) + \ sizeof(sense->field)) - \ (offsetof(struct scsi_sense_data_fixed, extra_len) + \ sizeof(sense->extra_len))) <= sense->extra_len) ? 1 : 0) }; /* * Descriptor format sense data definitions. * Introduced in SPC-3. */ struct scsi_sense_data_desc { uint8_t error_code; #define SSD_DESC_CURRENT_ERROR 0x72 #define SSD_DESC_DEFERRED_ERROR 0x73 uint8_t sense_key; uint8_t add_sense_code; uint8_t add_sense_code_qual; uint8_t reserved[3]; /* * Note that SPC-4, section 4.5.2.1 says that the extra_len field * must be less than or equal to 244. */ uint8_t extra_len; uint8_t sense_desc[0]; #define SSD_DESC_IS_PRESENT(sense, length, field) \ ((length >= (offsetof(struct scsi_sense_data_desc, field) + \ sizeof(sense->field))) ?
1 :0) };
struct scsi_sense_desc_header { uint8_t desc_type; uint8_t length; };
/* * The information provided in the Information descriptor is device type or * command specific information, and is defined in a command standard. * * Note that any changes to the field names or positions in this structure, * even reserved fields, should be accompanied by an examination of the * code in ctl_set_sense() that uses them. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_info { uint8_t desc_type; #define SSD_DESC_INFO 0x00 uint8_t length; uint8_t byte2; #define SSD_INFO_VALID 0x80 uint8_t reserved; uint8_t info[8]; };
/* * Command-specific information depends on the command for which the * reported condition occurred. * * Note that any changes to the field names or positions in this structure, * even reserved fields, should be accompanied by an examination of the * code in ctl_set_sense() that uses them. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_command { uint8_t desc_type; #define SSD_DESC_COMMAND 0x01 uint8_t length; uint8_t reserved[2]; uint8_t command_info[8]; };
/* * Sense key specific descriptor. The sense key specific data format * depends on the sense key in question. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_sks { uint8_t desc_type; #define SSD_DESC_SKS 0x02 uint8_t length; uint8_t reserved1[2]; uint8_t sense_key_spec[3]; #define SSD_SKS_VALID 0x80 uint8_t reserved2; };
/* * This is used for the Illegal Request sense key (0x05) only. */ struct scsi_sense_sks_field { uint8_t byte0; #define SSD_SKS_FIELD_VALID 0x80 #define SSD_SKS_FIELD_CMD 0x40 #define SSD_SKS_BPV 0x08 #define SSD_SKS_BIT_VALUE 0x07 uint8_t field[2]; };
/* * This is used for the Hardware Error (0x04), Medium Error (0x03) and * Recovered Error (0x01) sense keys. */ struct scsi_sense_sks_retry { uint8_t byte0; #define SSD_SKS_RETRY_VALID 0x80 uint8_t actual_retry_count[2]; };
/* * Used with the No Sense (0x00) or Not Ready (0x02) sense keys. */ struct scsi_sense_sks_progress { uint8_t byte0; #define SSD_SKS_PROGRESS_VALID 0x80 uint8_t progress[2]; #define SSD_SKS_PROGRESS_DENOM 0x10000 };
/* * Used with the Copy Aborted (0x0a) sense key. */ struct scsi_sense_sks_segment { uint8_t byte0; #define SSD_SKS_SEGMENT_VALID 0x80 #define SSD_SKS_SEGMENT_SD 0x20 #define SSD_SKS_SEGMENT_BPV 0x08 #define SSD_SKS_SEGMENT_BITPTR 0x07 uint8_t field[2]; };
/* * Used with the Unit Attention (0x06) sense key. * * This is currently used to indicate that the unit attention condition * queue has overflowed (when the overflow bit is set). */ struct scsi_sense_sks_overflow { uint8_t byte0; #define SSD_SKS_OVERFLOW_VALID 0x80 #define SSD_SKS_OVERFLOW_SET 0x01 uint8_t reserved[2]; };
/* * This specifies which component is associated with the sense data. There * is no standard meaning for the fru value. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_fru { uint8_t desc_type; #define SSD_DESC_FRU 0x03 uint8_t length; uint8_t reserved; uint8_t fru; };
/* * Used for Stream commands, defined in SSC-4. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_stream { uint8_t desc_type; #define SSD_DESC_STREAM 0x04 uint8_t length; uint8_t reserved; uint8_t byte3; #define SSD_DESC_STREAM_FM 0x80 #define SSD_DESC_STREAM_EOM 0x40 #define SSD_DESC_STREAM_ILI 0x20 };
/* * Used for Block commands, defined in SBC-3.
* * This is currently (as of SBC-3) only used for the Incorrect Length * Indication (ILI) bit, which says that the data length requested in the * READ LONG or WRITE LONG command did not match the length of the logical * block. * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_block { uint8_t desc_type; #define SSD_DESC_BLOCK 0x05 uint8_t length; uint8_t reserved; uint8_t byte3; #define SSD_DESC_BLOCK_ILI 0x20 }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_objid { uint8_t desc_type; #define SSD_DESC_OSD_OBJID 0x06 uint8_t length; uint8_t reserved[6]; /* * XXX KDM provide the bit definitions here? There are a lot of * them, and we don't have an OSD driver yet. */ uint8_t not_init_cmds[4]; uint8_t completed_cmds[4]; uint8_t partition_id[8]; uint8_t object_id[8]; }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_integrity { uint8_t desc_type; #define SSD_DESC_OSD_INTEGRITY 0x07 uint8_t length; uint8_t integ_check_val[32]; }; /* * Used for Object-Based Storage Devices (OSD-3). * * Maximum descriptors allowed: 1 (as of SPC-4) */ struct scsi_sense_osd_attr_id { uint8_t desc_type; #define SSD_DESC_OSD_ATTR_ID 0x08 uint8_t length; uint8_t reserved[2]; uint8_t attr_desc[0]; }; /* + * ATA Return descriptor, used for the SCSI ATA PASS-THROUGH(12), (16) and + * (32) commands. Described in SAT-4r05. + */ +struct scsi_sense_ata_ret_desc +{ + uint8_t desc_type; +#define SSD_DESC_ATA 0x09 + uint8_t length; + uint8_t flags; +#define SSD_DESC_ATA_FLAG_EXTEND 0x01 + uint8_t error; + uint8_t count_15_8; + uint8_t count_7_0; + uint8_t lba_31_24; + uint8_t lba_7_0; + uint8_t lba_39_32; + uint8_t lba_15_8; + uint8_t lba_47_40; + uint8_t lba_23_16; + uint8_t device; + uint8_t status; +}; +/* * Used with Sense keys No Sense (0x00) and Not Ready (0x02). * * Maximum descriptors allowed: 32 (as of SPC-4) */ struct scsi_sense_progress { uint8_t desc_type; #define SSD_DESC_PROGRESS 0x0a uint8_t length; uint8_t sense_key; uint8_t add_sense_code; uint8_t add_sense_code_qual; uint8_t reserved; uint8_t progress[2]; }; /* * This is typically forwarded as the result of an EXTENDED COPY command. * * Maximum descriptors allowed: 2 (as of SPC-4) */ struct scsi_sense_forwarded { uint8_t desc_type; #define SSD_DESC_FORWARDED 0x0c uint8_t length; uint8_t byte2; #define SSD_FORWARDED_FSDT 0x80 #define SSD_FORWARDED_SDS_MASK 0x0f #define SSD_FORWARDED_SDS_UNK 0x00 #define SSD_FORWARDED_SDS_EXSRC 0x01 #define SSD_FORWARDED_SDS_EXDST 0x02 }; /* * Vendor-specific sense descriptor. The desc_type field will be in the * range between MIN and MAX inclusive. 
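 *
 * As an illustration (editor's sketch, not original text): since desc_type
 * is a uint8_t and SSD_DESC_VENDOR_MAX is 0xff, a consumer only needs the
 * lower-bound comparison, e.g.
 *
 *	if (hdr->desc_type >= SSD_DESC_VENDOR_MIN)
 *		handle_vendor_desc(hdr);
 *
 * where "hdr" points to a struct scsi_sense_desc_header within the
 * descriptor list and handle_vendor_desc() is a hypothetical handler.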
*/ struct scsi_sense_vendor { uint8_t desc_type; #define SSD_DESC_VENDOR_MIN 0x80 #define SSD_DESC_VENDOR_MAX 0xff uint8_t length; uint8_t data[0]; }; struct scsi_mode_header_6 { u_int8_t data_length; /* Sense data length */ u_int8_t medium_type; u_int8_t dev_spec; u_int8_t blk_desc_len; }; struct scsi_mode_header_10 { u_int8_t data_length[2];/* Sense data length */ u_int8_t medium_type; u_int8_t dev_spec; u_int8_t unused[2]; u_int8_t blk_desc_len[2]; }; struct scsi_mode_page_header { u_int8_t page_code; #define SMPH_PS 0x80 #define SMPH_SPF 0x40 #define SMPH_PC_MASK 0x3f u_int8_t page_length; }; struct scsi_mode_page_header_sp { uint8_t page_code; uint8_t subpage; uint8_t page_length[2]; }; struct scsi_mode_blk_desc { u_int8_t density; u_int8_t nblocks[3]; u_int8_t reserved; u_int8_t blklen[3]; }; #define SCSI_DEFAULT_DENSITY 0x00 /* use 'default' density */ #define SCSI_SAME_DENSITY 0x7f /* use 'same' density- >= SCSI-2 only */ /* * Status Byte */ #define SCSI_STATUS_OK 0x00 #define SCSI_STATUS_CHECK_COND 0x02 #define SCSI_STATUS_COND_MET 0x04 #define SCSI_STATUS_BUSY 0x08 #define SCSI_STATUS_INTERMED 0x10 #define SCSI_STATUS_INTERMED_COND_MET 0x14 #define SCSI_STATUS_RESERV_CONFLICT 0x18 #define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */ #define SCSI_STATUS_QUEUE_FULL 0x28 #define SCSI_STATUS_ACA_ACTIVE 0x30 #define SCSI_STATUS_TASK_ABORTED 0x40 struct scsi_inquiry_pattern { u_int8_t type; u_int8_t media_type; #define SIP_MEDIA_REMOVABLE 0x01 #define SIP_MEDIA_FIXED 0x02 const char *vendor; const char *product; const char *revision; }; struct scsi_static_inquiry_pattern { u_int8_t type; u_int8_t media_type; char vendor[SID_VENDOR_SIZE+1]; char product[SID_PRODUCT_SIZE+1]; char revision[SID_REVISION_SIZE+1]; }; struct scsi_sense_quirk_entry { struct scsi_inquiry_pattern inq_pat; int num_sense_keys; int num_ascs; struct sense_key_table_entry *sense_key_info; struct asc_table_entry *asc_info; }; struct sense_key_table_entry { u_int8_t sense_key; u_int32_t action; const char *desc; }; struct asc_table_entry { u_int8_t asc; u_int8_t ascq; u_int32_t action; const char *desc; }; struct op_table_entry { u_int8_t opcode; u_int32_t opmask; const char *desc; }; struct scsi_op_quirk_entry { struct scsi_inquiry_pattern inq_pat; int num_ops; struct op_table_entry *op_table; }; typedef enum { SSS_FLAG_NONE = 0x00, SSS_FLAG_PRINT_COMMAND = 0x01 } scsi_sense_string_flags; struct scsi_nv { const char *name; uint64_t value; }; typedef enum { SCSI_NV_FOUND, SCSI_NV_AMBIGUOUS, SCSI_NV_NOT_FOUND } scsi_nv_status; typedef enum { SCSI_NV_FLAG_NONE = 0x00, SCSI_NV_FLAG_IG_CASE = 0x01 /* Case insensitive comparison */ } scsi_nv_flags; struct ccb_scsiio; struct cam_periph; union ccb; #ifndef _KERNEL struct cam_device; #endif extern const char *scsi_sense_key_text[]; __BEGIN_DECLS void scsi_sense_desc(int sense_key, int asc, int ascq, struct scsi_inquiry_data *inq_data, const char **sense_key_desc, const char **asc_desc); scsi_sense_action scsi_error_action(struct ccb_scsiio* csio, struct scsi_inquiry_data *inq_data, u_int32_t sense_flags); const char * scsi_status_string(struct ccb_scsiio *csio); void scsi_desc_iterate(struct scsi_sense_data_desc *sense, u_int sense_len, int (*iter_func)(struct scsi_sense_data_desc *sense, u_int, struct scsi_sense_desc_header *, void *), void *arg); uint8_t *scsi_find_desc(struct scsi_sense_data_desc *sense, u_int sense_len, uint8_t desc_type); void scsi_set_sense_data(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int 
sense_key, int asc, int ascq, ...) ; void scsi_set_sense_data_va(struct scsi_sense_data *sense_data, scsi_sense_data_type sense_format, int current_error, int sense_key, int asc, int ascq, va_list ap); int scsi_get_sense_info(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t info_type, uint64_t *info, int64_t *signed_info); int scsi_get_sks(struct scsi_sense_data *sense_data, u_int sense_len, uint8_t *sks); int scsi_get_block_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *block_bits); int scsi_get_stream_info(struct scsi_sense_data *sense_data, u_int sense_len, struct scsi_inquiry_data *inq_data, uint8_t *stream_bits); void scsi_info_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t info); void scsi_command_sbuf(struct sbuf *sb, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, uint64_t csi); void scsi_progress_sbuf(struct sbuf *sb, uint16_t progress); int scsi_sks_sbuf(struct sbuf *sb, int sense_key, uint8_t *sks); void scsi_fru_sbuf(struct sbuf *sb, uint64_t fru); void scsi_stream_sbuf(struct sbuf *sb, uint8_t stream_bits, uint64_t info); void scsi_block_sbuf(struct sbuf *sb, uint8_t block_bits, uint64_t info); void scsi_sense_info_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_command_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_sks_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_fru_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_stream_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_block_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_progress_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_generic_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); void scsi_sense_desc_sbuf(struct sbuf *sb, struct scsi_sense_data *sense, u_int sense_len, uint8_t *cdb, int cdb_len, struct scsi_inquiry_data *inq_data, struct scsi_sense_desc_header *header); scsi_sense_data_type scsi_sense_type(struct scsi_sense_data *sense_data); void scsi_sense_only_sbuf(struct scsi_sense_data *sense, u_int sense_len, struct sbuf *sb, char *path_str, struct scsi_inquiry_data *inq_data, uint8_t *cdb, int cdb_len); #ifdef _KERNEL int scsi_command_string(struct ccb_scsiio *csio, struct sbuf *sb); int scsi_sense_sbuf(struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags); char * scsi_sense_string(struct ccb_scsiio *csio, char *str, int str_len); void scsi_sense_print(struct ccb_scsiio 
*csio); int scsi_vpd_supported_page(struct cam_periph *periph, uint8_t page_id); #else /* _KERNEL */ int scsi_command_string(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb); int scsi_sense_sbuf(struct cam_device *device, struct ccb_scsiio *csio, struct sbuf *sb, scsi_sense_string_flags flags); char * scsi_sense_string(struct cam_device *device, struct ccb_scsiio *csio, char *str, int str_len); void scsi_sense_print(struct cam_device *device, struct ccb_scsiio *csio, FILE *ofile); #endif /* _KERNEL */ const char * scsi_op_desc(u_int16_t opcode, struct scsi_inquiry_data *inq_data); char * scsi_cdb_string(u_int8_t *cdb_ptr, char *cdb_string, size_t len); void scsi_cdb_sbuf(u_int8_t *cdb_ptr, struct sbuf *sb); void scsi_print_inquiry(struct scsi_inquiry_data *inq_data); void scsi_print_inquiry_short(struct scsi_inquiry_data *inq_data); u_int scsi_calc_syncsrate(u_int period_factor); u_int scsi_calc_syncparam(u_int period); typedef int (*scsi_devid_checkfn_t)(uint8_t *); int scsi_devid_is_naa_ieee_reg(uint8_t *bufp); int scsi_devid_is_sas_target(uint8_t *bufp); int scsi_devid_is_lun_eui64(uint8_t *bufp); int scsi_devid_is_lun_naa(uint8_t *bufp); int scsi_devid_is_lun_name(uint8_t *bufp); int scsi_devid_is_lun_t10(uint8_t *bufp); int scsi_devid_is_port_naa(uint8_t *bufp); struct scsi_vpd_id_descriptor * scsi_get_devid(struct scsi_vpd_device_id *id, uint32_t len, scsi_devid_checkfn_t ck_fn); struct scsi_vpd_id_descriptor * scsi_get_devid_desc(struct scsi_vpd_id_descriptor *desc, uint32_t len, scsi_devid_checkfn_t ck_fn); int scsi_transportid_sbuf(struct sbuf *sb, struct scsi_transportid_header *hdr, uint32_t valid_len); const char * scsi_nv_to_str(struct scsi_nv *table, int num_table_entries, uint64_t value); scsi_nv_status scsi_get_nv(struct scsi_nv *table, int num_table_entries, char *name, int *table_entry, scsi_nv_flags flags); int scsi_parse_transportid_64bit(int proto_id, char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_spi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_rdma(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_parse_transportid_iscsi(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str,int error_str_len); int scsi_parse_transportid_sop(char *id_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str,int error_str_len); int scsi_parse_transportid(char *transportid_str, struct scsi_transportid_header **hdr, unsigned int *alloc_len, #ifdef _KERNEL struct malloc_type *type, int flags, #endif char *error_str, int error_str_len); int scsi_attrib_volcoh_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_vendser_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_hexdump_sbuf(struct sbuf *sb, struct 
scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_int_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_ascii_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); int scsi_attrib_text_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, uint32_t flags, uint32_t output_flags, char *error_str, int error_str_len); struct scsi_attrib_table_entry *scsi_find_attrib_entry( struct scsi_attrib_table_entry *table, size_t num_table_entries, uint32_t id); struct scsi_attrib_table_entry *scsi_get_attrib_entry(uint32_t id); int scsi_attrib_value_sbuf(struct sbuf *sb, uint32_t valid_len, struct scsi_mam_attribute_header *hdr, uint32_t output_flags, char *error_str, size_t error_str_len); void scsi_attrib_prefix_sbuf(struct sbuf *sb, uint32_t output_flags, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, const char *desc); int scsi_attrib_sbuf(struct sbuf *sb, struct scsi_mam_attribute_header *hdr, uint32_t valid_len, struct scsi_attrib_table_entry *user_table, size_t num_user_entries, int prefer_user_table, uint32_t output_flags, char *error_str, int error_str_len); void scsi_test_unit_ready(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout); void scsi_request_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), void *data_ptr, u_int8_t dxfer_len, u_int8_t tag_action, u_int8_t sense_len, u_int32_t timeout); void scsi_inquiry(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *inq_buf, u_int32_t inq_len, int evpd, u_int8_t page_code, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_sense_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int dbd, u_int8_t page_code, u_int8_t page, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_mode_select_len(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int scsi_page_fmt, int save_pages, u_int8_t *param_buf, u_int32_t param_len, int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout); void scsi_log_sense(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t page_code, u_int8_t page, int save_pages, int ppc, u_int32_t paramptr, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_log_select(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union 
ccb *), u_int8_t tag_action, u_int8_t page_code, int save_pages, int pc_reset, u_int8_t *param_buf, u_int32_t param_len, u_int8_t sense_len, u_int32_t timeout); void scsi_prevent(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t action, u_int8_t sense_len, u_int32_t timeout); void scsi_read_capacity(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, struct scsi_read_capacity_data *, u_int8_t sense_len, u_int32_t timeout); void scsi_read_capacity_16(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint64_t lba, int reladr, int pmi, uint8_t *rcap_buf, int rcap_buf_len, uint8_t sense_len, uint32_t timeout); void scsi_report_luns(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t select_report, struct scsi_report_luns_data *rpl_buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_report_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t pdf, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_set_target_group(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, void *buf, u_int32_t alloc_len, u_int8_t sense_len, u_int32_t timeout); void scsi_synchronize_cache(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int32_t begin_lba, u_int16_t lb_count, u_int8_t sense_len, u_int32_t timeout); void scsi_receive_diagnostic_results(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int pcv, uint8_t page_code, uint8_t *data_ptr, uint16_t allocation_length, uint8_t sense_len, uint32_t timeout); void scsi_send_diagnostic(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int unit_offline, int device_offline, int self_test, int page_format, int self_test_code, uint8_t *data_ptr, uint16_t param_list_length, uint8_t sense_len, uint32_t timeout); void scsi_read_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb*), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t allocation_length, uint8_t sense_len, uint32_t timeout); void scsi_write_buffer(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int mode, uint8_t buffer_id, u_int32_t offset, uint8_t *data_ptr, uint32_t param_list_length, uint8_t sense_len, uint32_t timeout); #define SCSI_RW_READ 0x0001 #define SCSI_RW_WRITE 0x0002 #define SCSI_RW_DIRMASK 0x0003 #define SCSI_RW_BIO 0x1000 void scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int readop, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_write_same(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, int minimum_cmd_size, u_int64_t lba, u_int32_t block_count, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, 
u_int32_t timeout); void scsi_ata_identify(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_ata_trim(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int16_t block_count, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); + +int scsi_ata_read_log(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint32_t log_address, + uint32_t page_number, uint16_t block_count, + uint8_t protocol, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout); + +int scsi_ata_pass(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint32_t flags, uint8_t tag_action, + uint8_t protocol, uint8_t ata_flags, uint16_t features, + uint16_t sector_count, uint64_t lba, uint8_t command, + uint8_t device, uint8_t icc, uint32_t auxiliary, + uint8_t control, u_int8_t *data_ptr, uint32_t dxfer_len, + uint8_t *cdb_storage, size_t cdb_storage_len, + int minimum_cmd_size, u_int8_t sense_len, u_int32_t timeout); void scsi_ata_pass_16(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int32_t flags, u_int8_t tag_action, u_int8_t protocol, u_int8_t ata_flags, u_int16_t features, u_int16_t sector_count, uint64_t lba, u_int8_t command, u_int8_t control, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_unmap(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int8_t *data_ptr, u_int16_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, int start, int load_eject, int immediate, u_int8_t sense_len, u_int32_t timeout); void scsi_read_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t service_action, uint32_t element, u_int8_t elem_type, int logical_volume, int partition, u_int32_t first_attribute, int cache, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout); void scsi_write_attribute(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, uint32_t element, int logical_volume, int partition, int wtc, u_int8_t *data_ptr, u_int32_t length, int sense_len, u_int32_t timeout); void scsi_security_protocol_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_security_protocol_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *,union ccb *), uint8_t tag_action, uint32_t security_protocol, uint32_t security_protocol_specific, int byte4, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void scsi_persistent_reserve_in(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *,union ccb *), uint8_t tag_action, int service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout); void 
scsi_persistent_reserve_out(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int service_action, int scope, int res_type, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout);
void scsi_report_supported_opcodes(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, int options, int req_opcode, int req_service_action, uint8_t *data_ptr, uint32_t dxfer_len, int sense_len, int timeout);
int scsi_inquiry_match(caddr_t inqbuffer, caddr_t table_entry); int scsi_static_inquiry_match(caddr_t inqbuffer, caddr_t table_entry); int scsi_devid_match(uint8_t *rhs, size_t rhs_len, uint8_t *lhs, size_t lhs_len);
void scsi_extract_sense(struct scsi_sense_data *sense, int *error_code, int *sense_key, int *asc, int *ascq); int scsi_extract_sense_ccb(union ccb *ccb, int *error_code, int *sense_key, int *asc, int *ascq); void scsi_extract_sense_len(struct scsi_sense_data *sense, u_int sense_len, int *error_code, int *sense_key, int *asc, int *ascq, int show_errors); int scsi_get_sense_key(struct scsi_sense_data *sense, u_int sense_len, int show_errors); int scsi_get_asc(struct scsi_sense_data *sense, u_int sense_len, int show_errors); int scsi_get_ascq(struct scsi_sense_data *sense, u_int sense_len, int show_errors);
static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes); static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes); static __inline uint32_t scsi_2btoul(const uint8_t *bytes); static __inline uint32_t scsi_3btoul(const uint8_t *bytes); static __inline int32_t scsi_3btol(const uint8_t *bytes); static __inline uint32_t scsi_4btoul(const uint8_t *bytes); static __inline uint64_t scsi_8btou64(const uint8_t *bytes); static __inline void *find_mode_page_6(struct scsi_mode_header_6 *mode_header); static __inline void *find_mode_page_10(struct scsi_mode_header_10 *mode_header);
static __inline void scsi_ulto2b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 8) & 0xff; bytes[1] = val & 0xff; }
static __inline void scsi_ulto3b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 16) & 0xff; bytes[1] = (val >> 8) & 0xff; bytes[2] = val & 0xff; }
static __inline void scsi_ulto4b(u_int32_t val, u_int8_t *bytes) { bytes[0] = (val >> 24) & 0xff; bytes[1] = (val >> 16) & 0xff; bytes[2] = (val >> 8) & 0xff; bytes[3] = val & 0xff; }
static __inline void scsi_u64to8b(u_int64_t val, u_int8_t *bytes) { bytes[0] = (val >> 56) & 0xff; bytes[1] = (val >> 48) & 0xff; bytes[2] = (val >> 40) & 0xff; bytes[3] = (val >> 32) & 0xff; bytes[4] = (val >> 24) & 0xff; bytes[5] = (val >> 16) & 0xff; bytes[6] = (val >> 8) & 0xff; bytes[7] = val & 0xff; }
static __inline uint32_t scsi_2btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 8) | bytes[1]; return (rv); }
static __inline uint32_t scsi_3btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 16) | (bytes[1] << 8) | bytes[2]; return (rv); }
static __inline int32_t scsi_3btol(const uint8_t *bytes) { uint32_t rc = scsi_3btoul(bytes); if (rc & 0x00800000) rc |= 0xff000000; return (int32_t) rc; }
static __inline uint32_t scsi_4btoul(const uint8_t *bytes) { uint32_t rv; rv = (bytes[0] << 24) | (bytes[1] << 16) | (bytes[2] << 8) | bytes[3]; return (rv); }
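/*
 * Example (editor's sketch): the scsi_ulto*b()/scsi_*btoul() helpers
 * defined here convert between host-order integers and the big-endian
 * byte arrays used in CDBs and parameter data.  Filling in the LBA and
 * transfer length of a READ(10) CDB might look like the following, where
 * "cdb", "lba" and "count" are hypothetical locals and struct scsi_rw_10
 * is assumed to provide addr[4] and length[2] fields:
 *
 *	scsi_ulto4b(lba, cdb->addr);
 *	scsi_ulto2b(count, cdb->length);
 *
 * scsi_2btoul() and scsi_4btoul() perform the inverse conversion when
 * decoding data returned by the device.
 */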
static __inline uint64_t scsi_8btou64(const uint8_t *bytes) { uint64_t rv; rv = (((uint64_t)bytes[0]) << 56) | (((uint64_t)bytes[1]) << 48) | (((uint64_t)bytes[2]) << 40) | (((uint64_t)bytes[3]) << 32) | (((uint64_t)bytes[4]) << 24) | (((uint64_t)bytes[5]) << 16) | (((uint64_t)bytes[6]) << 8) | bytes[7]; return (rv); }
/* * Given the pointer to a returned mode sense buffer, return a pointer to * the start of the first mode page. */ static __inline void * find_mode_page_6(struct scsi_mode_header_6 *mode_header) { void *page_start; page_start = (void *)((u_int8_t *)&mode_header[1] + mode_header->blk_desc_len); return(page_start); }
static __inline void * find_mode_page_10(struct scsi_mode_header_10 *mode_header) { void *page_start; page_start = (void *)((u_int8_t *)&mode_header[1] + scsi_2btoul(mode_header->blk_desc_len)); return(page_start); }
__END_DECLS #endif /*_SCSI_SCSI_ALL_H*/
Index: head/sys/cam/scsi/scsi_da.c =================================================================== --- head/sys/cam/scsi/scsi_da.c (revision 300206) +++ head/sys/cam/scsi/scsi_da.c (revision 300207) @@ -1,4169 +1,5895 @@ /*- * Implementation of SCSI Direct Access Peripheral driver for CAM. * * Copyright (c) 1997 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #ifdef _KERNEL #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #endif /* _KERNEL */ #ifndef _KERNEL #include #include #endif /* _KERNEL */ #include #include #include #include #include #include #include - -#ifndef _KERNEL #include -#endif /* !_KERNEL */ #ifdef _KERNEL +/* + * Note that there are probe ordering dependencies here. The order isn't + * controlled by this enumeration, but by explicit state transitions in + * dastart() and dadone(). Here are some of the dependencies: + * + * 1. RC should come first, before RC16, unless there is evidence that RC16 + * is supported. + * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
+ * 3. The ATA probes should go in this order: + * ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE + */ typedef enum { DA_STATE_PROBE_RC, DA_STATE_PROBE_RC16, DA_STATE_PROBE_LBP, DA_STATE_PROBE_BLK_LIMITS, DA_STATE_PROBE_BDC, DA_STATE_PROBE_ATA, + DA_STATE_PROBE_ATA_LOGDIR, + DA_STATE_PROBE_ATA_IDDIR, + DA_STATE_PROBE_ATA_SUP, + DA_STATE_PROBE_ATA_ZONE, + DA_STATE_PROBE_ZONE, DA_STATE_NORMAL } da_state;
typedef enum { - DA_FLAG_PACK_INVALID = 0x001, - DA_FLAG_NEW_PACK = 0x002, - DA_FLAG_PACK_LOCKED = 0x004, - DA_FLAG_PACK_REMOVABLE = 0x008, - DA_FLAG_NEED_OTAG = 0x020, - DA_FLAG_WAS_OTAG = 0x040, - DA_FLAG_RETRY_UA = 0x080, - DA_FLAG_OPEN = 0x100, - DA_FLAG_SCTX_INIT = 0x200, - DA_FLAG_CAN_RC16 = 0x400, - DA_FLAG_PROBED = 0x800, - DA_FLAG_DIRTY = 0x1000, - DA_FLAG_ANNOUNCED = 0x2000 + DA_FLAG_PACK_INVALID = 0x000001, + DA_FLAG_NEW_PACK = 0x000002, + DA_FLAG_PACK_LOCKED = 0x000004, + DA_FLAG_PACK_REMOVABLE = 0x000008, + DA_FLAG_NEED_OTAG = 0x000020, + DA_FLAG_WAS_OTAG = 0x000040, + DA_FLAG_RETRY_UA = 0x000080, + DA_FLAG_OPEN = 0x000100, + DA_FLAG_SCTX_INIT = 0x000200, + DA_FLAG_CAN_RC16 = 0x000400, + DA_FLAG_PROBED = 0x000800, + DA_FLAG_DIRTY = 0x001000, + DA_FLAG_ANNOUNCED = 0x002000, + DA_FLAG_CAN_ATA_DMA = 0x004000, + DA_FLAG_CAN_ATA_LOG = 0x008000, + DA_FLAG_CAN_ATA_IDLOG = 0x010000, + DA_FLAG_CAN_ATA_SUPCAP = 0x020000, + DA_FLAG_CAN_ATA_ZONE = 0x040000 } da_flags;
typedef enum { DA_Q_NONE = 0x00, DA_Q_NO_SYNC_CACHE = 0x01, DA_Q_NO_6_BYTE = 0x02, DA_Q_NO_PREVENT = 0x04, DA_Q_4K = 0x08, DA_Q_NO_RC16 = 0x10, DA_Q_NO_UNMAP = 0x20, - DA_Q_RETRY_BUSY = 0x40 + DA_Q_RETRY_BUSY = 0x40, + DA_Q_SMR_DM = 0x80 } da_quirks;
#define DA_Q_BIT_STRING \ "\020" \ "\001NO_SYNC_CACHE" \ "\002NO_6_BYTE" \ "\003NO_PREVENT" \ "\0044K" \ "\005NO_RC16" \ "\006NO_UNMAP" \ - "\007RETRY_BUSY" + "\007RETRY_BUSY" \ + "\010SMR_DM"
typedef enum { DA_CCB_PROBE_RC = 0x01, DA_CCB_PROBE_RC16 = 0x02, DA_CCB_PROBE_LBP = 0x03, DA_CCB_PROBE_BLK_LIMITS = 0x04, DA_CCB_PROBE_BDC = 0x05, DA_CCB_PROBE_ATA = 0x06, DA_CCB_BUFFER_IO = 0x07, DA_CCB_DUMP = 0x0A, DA_CCB_DELETE = 0x0B, DA_CCB_TUR = 0x0C, - DA_CCB_TYPE_MASK = 0x0F, - DA_CCB_RETRY_UA = 0x10 + DA_CCB_PROBE_ZONE = 0x0D, + DA_CCB_PROBE_ATA_LOGDIR = 0x0E, + DA_CCB_PROBE_ATA_IDDIR = 0x0F, + DA_CCB_PROBE_ATA_SUP = 0x10, + DA_CCB_PROBE_ATA_ZONE = 0x11, + DA_CCB_TYPE_MASK = 0x1F, + DA_CCB_RETRY_UA = 0x20 } da_ccb_state;
/* * Order here is important for method choice * * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted in 20% quicker deletes * using ATA_TRIM than the corresponding UNMAP results for a real-world mysql * import taking 5 minutes. * */ typedef enum { DA_DELETE_NONE, DA_DELETE_DISABLE, DA_DELETE_ATA_TRIM, DA_DELETE_UNMAP, DA_DELETE_WS16, DA_DELETE_WS10, DA_DELETE_ZERO, DA_DELETE_MIN = DA_DELETE_ATA_TRIM, DA_DELETE_MAX = DA_DELETE_ZERO } da_delete_methods;
+/* + * For SCSI, host managed drives show up as a separate device type. For + * ATA, host managed drives also have a different device signature. + * XXX KDM figure out the ATA host managed signature. + */ +typedef enum { + DA_ZONE_NONE = 0x00, + DA_ZONE_DRIVE_MANAGED = 0x01, + DA_ZONE_HOST_AWARE = 0x02, + DA_ZONE_HOST_MANAGED = 0x03 +} da_zone_mode; + +/* + * We distinguish between these interface cases in addition to the drive type: + * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC + * o ATA drive behind a SCSI translation layer that does not know about + * ZBC/ZAC, and so needs to be managed via ATA passthrough.
In this + * case, we would need to share the ATA code with the ada(4) driver. + * o SCSI drive. + */ +typedef enum { + DA_ZONE_IF_SCSI, + DA_ZONE_IF_ATA_PASS, + DA_ZONE_IF_ATA_SAT, +} da_zone_interface; + +typedef enum { + DA_ZONE_FLAG_RZ_SUP = 0x0001, + DA_ZONE_FLAG_OPEN_SUP = 0x0002, + DA_ZONE_FLAG_CLOSE_SUP = 0x0004, + DA_ZONE_FLAG_FINISH_SUP = 0x0008, + DA_ZONE_FLAG_RWP_SUP = 0x0010, + DA_ZONE_FLAG_SUP_MASK = (DA_ZONE_FLAG_RZ_SUP | + DA_ZONE_FLAG_OPEN_SUP | + DA_ZONE_FLAG_CLOSE_SUP | + DA_ZONE_FLAG_FINISH_SUP | + DA_ZONE_FLAG_RWP_SUP), + DA_ZONE_FLAG_URSWRZ = 0x0020, + DA_ZONE_FLAG_OPT_SEQ_SET = 0x0040, + DA_ZONE_FLAG_OPT_NONSEQ_SET = 0x0080, + DA_ZONE_FLAG_MAX_SEQ_SET = 0x0100, + DA_ZONE_FLAG_SET_MASK = (DA_ZONE_FLAG_OPT_SEQ_SET | + DA_ZONE_FLAG_OPT_NONSEQ_SET | + DA_ZONE_FLAG_MAX_SEQ_SET) +} da_zone_flags; + +static struct da_zone_desc { + da_zone_flags value; + const char *desc; +} da_zone_desc_table[] = { + {DA_ZONE_FLAG_RZ_SUP, "Report Zones" }, + {DA_ZONE_FLAG_OPEN_SUP, "Open" }, + {DA_ZONE_FLAG_CLOSE_SUP, "Close" }, + {DA_ZONE_FLAG_FINISH_SUP, "Finish" }, + {DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" }, +}; + typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb, struct bio *bp); static da_delete_func_t da_delete_trim; static da_delete_func_t da_delete_unmap; static da_delete_func_t da_delete_ws; static const void * da_delete_functions[] = { NULL, NULL, da_delete_trim, da_delete_unmap, da_delete_ws, da_delete_ws, da_delete_ws }; static const char *da_delete_method_names[] = { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" }; static const char *da_delete_method_desc[] = { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP", "WRITE SAME(10) with UNMAP", "ZERO" }; /* Offsets into our private area for storing information */ #define ccb_state ppriv_field0 #define ccb_bp ppriv_ptr1 struct disk_params { u_int8_t heads; u_int32_t cylinders; u_int8_t secs_per_track; u_int32_t secsize; /* Number of bytes/sector */ u_int64_t sectors; /* total number sectors */ u_int stripesize; u_int stripeoffset; }; #define UNMAP_RANGE_MAX 0xffffffff #define UNMAP_HEAD_SIZE 8 #define UNMAP_RANGE_SIZE 16 #define UNMAP_MAX_RANGES 2048 /* Protocol Max is 4095 */ #define UNMAP_BUF_SIZE ((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \ UNMAP_HEAD_SIZE) #define WS10_MAX_BLKS 0xffff #define WS16_MAX_BLKS 0xffffffff #define ATA_TRIM_MAX_RANGES ((UNMAP_BUF_SIZE / \ (ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE) #define DA_WORK_TUR (1 << 16) struct da_softc { struct cam_iosched_softc *cam_iosched; struct bio_queue_head delete_run_queue; LIST_HEAD(, ccb_hdr) pending_ccbs; int refcount; /* Active xpt_action() calls */ da_state state; da_flags flags; da_quirks quirks; int minimum_cmd_size; int error_inject; int trim_max_ranges; int delete_available; /* Delete methods possibly available */ - u_int maxio; + da_zone_mode zone_mode; + da_zone_interface zone_interface; + da_zone_flags zone_flags; + struct ata_gp_log_dir ata_logdir; + int valid_logdir_len; + struct ata_identify_log_pages ata_iddir; + int valid_iddir_len; + uint64_t optimal_seq_zones; + uint64_t optimal_nonseq_zones; + uint64_t max_seq_zones; + u_int maxio; uint32_t unmap_max_ranges; uint32_t unmap_max_lba; /* Max LBAs in UNMAP req */ uint64_t ws_max_blks; da_delete_methods delete_method_pref; da_delete_methods delete_method; da_delete_func_t *delete_func; int unmappedio; int rotating; struct disk_params params; struct disk *disk; union ccb saved_ccb; struct task sysctl_task; struct sysctl_ctx_list 
sysctl_ctx; struct sysctl_oid *sysctl_tree; struct callout sendordered_c; uint64_t wwpn; uint8_t unmap_buf[UNMAP_BUF_SIZE]; struct scsi_read_capacity_data_long rcaplong; struct callout mediapoll_c; #ifdef CAM_IO_STATS struct sysctl_ctx_list sysctl_stats_ctx; struct sysctl_oid *sysctl_stats_tree; u_int errors; u_int timeouts; u_int invalidations; #endif };
#define dadeleteflag(softc, delete_method, enable) \ if (enable) { \ softc->delete_available |= (1 << delete_method); \ } else { \ softc->delete_available &= ~(1 << delete_method); \ }
struct da_quirk_entry { struct scsi_inquiry_pattern inq_pat; da_quirks quirks; };
static const char quantum[] = "QUANTUM"; static const char microp[] = "MICROP";
static struct da_quirk_entry da_quirk_table[] = { /* SPI, FC devices */ { /* * Fujitsu M2513A MO drives. * Tested devices: M2513A2 firmware versions 1200 & 1300. * (dip switch selects whether T_DIRECT or T_OPTICAL device) * Reported by: W.Scholten */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* See above. */ {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This particular Fujitsu drive doesn't like the * synchronize cache command. * Reported by: Tom Jackson */ {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Matthew Jacob * in NetBSD PR kern/6027, August 24, 1998. */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * This drive doesn't like the synchronize cache command * either. Reported by: Hellmuth Michaelis (hm@kts.org) * (PR 8882). */ {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: Blaz Zupan */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Doesn't work correctly with 6 byte reads/writes. * Returns illegal request, and points to byte 9 of the * 6-byte CDB. * Reported by: Adam McDougall */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* See above. */ {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, /*quirks*/ DA_Q_NO_6_BYTE }, { /* * Doesn't like the synchronize cache command. * Reported by: walter@pelissero.de */ {T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The CISS RAID controllers do not support SYNC_CACHE */ {T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * The STEC SSDs sometimes hang on UNMAP. */ {T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"}, /*quirks*/ DA_Q_NO_UNMAP }, { /* * VMware returns BUSY status when storage has transient * connectivity problems, so it is better to wait. */ {T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"}, /*quirks*/ DA_Q_RETRY_BUSY },
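	/*
	 * Editor's note (sketch, not an original table entry): the inq_pat
	 * vendor/product/revision fields are shell-style glob patterns that
	 * are matched against the strings the device returns in its INQUIRY
	 * data, so a hypothetical drive would be quirked with an entry like:
	 *
	 *	{ {T_DIRECT, SIP_MEDIA_FIXED, "ACME", "TURBODISK*", "*"},
	 *	  DA_Q_NO_SYNC_CACHE | DA_Q_4K },
	 */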
/* USB mass storage devices supported by umass(4) */ { /* * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player * PR: kern/51675 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Power Quotient Int. (PQI) USB flash key * PR: kern/53067 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative Nomad MUVO mp3 player (USB) * PR: kern/53094 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Jungsoft NEXDISK USB flash key * PR: kern/54737 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * FreeDik USB Mini Data Drive * PR: kern/54786 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sigmatel USB Flash MP3 Player * PR: kern/57046 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * Neuros USB Digital Audio Computer * PR: kern/63645 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SEAGRAND NP-900 MP3 Player * PR: kern/64563 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * iRiver iFP MP3 player (with UMS Firmware) * PR: kern/54881, i386/63941, kern/66124 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01 * PR: kern/70158 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ZICPlay USB MP3 Player with FM * PR: kern/75057 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TEAC USB floppy mechanisms */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler II+ USB Pen-Drive. * Reported by: Pawel Jakub Dawidek */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * USB DISK Pro PMAP * Reported by: jhs * PR: usb/96381 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Motorola E398 Mobile Phone (TransFlash memory card). * Reported by: Wojciech A. Koszek * PR: usb/89889 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Qware BeatZkey! Pro * PR: usb/79164 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Time DPA20B 1GB MP3 Player * PR: usb/81846 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung USB key 128Mb * PR: usb/90081 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Kingston DataTraveler 2.0 USB Flash memory.
* PR: usb/89196 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Creative MUVO Slim mp3 player (USB) * PR: usb/86131 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT }, { /* * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3) * PR: usb/80487 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SanDisk Micro Cruzer 128MB * PR: usb/75970 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * TOSHIBA TransMemory USB sticks * PR: kern/94660 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * PNY USB 3.0 Flash Drives */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16 }, { /* * PNY USB Flash keys * PR: usb/75578, usb/72344, usb/65436 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Genesys 6-in-1 Card Reader * PR: usb/94647 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Rekam Digital CAMERA * PR: usb/98713 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver H10 MP3 player * PR: usb/102547 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * iRiver U10 MP3 player * PR: usb/92306 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * X-Micro Flash Disk * PR: usb/96901 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * EasyMP3 EM732X USB 2.0 Flash MP3 Player * PR: usb/96546 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*", "1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Denver MP3 player * PR: usb/107101 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Philips USB Key Audio KEY013 * PR: usb/68412 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { /* * JNC MP3 Player * PR: usb/94439 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * SAMSUNG MP0402H * PR: usb/108427 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * I/O Magic USB flash - Giga Bank * PR: usb/108810 */ {T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * JoyFly 128mb USB Flash Drive * PR: 96133 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * ChipsBnk usb stick * PR: 103702 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A * PR: 129858 */ {T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Samsung YP-U3 mp3-player * PR: 125398 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { {T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*", "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Sony Cyber-Shot DSC cameras * PR: usb/137035 */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT }, { {T_DIRECT, 
SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3", "1.00"}, /*quirks*/ DA_Q_NO_PREVENT }, { /* At least several Transcent USB sticks lie on RC16. */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*", "*"}, /*quirks*/ DA_Q_NO_RC16 }, /* ATA/SATA devices over SAS/USB/... */ { /* Hitachi Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Samsung Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Barracuda Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" }, 
/*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" }, /*quirks*/DA_Q_4K }, { /* Seagate Momentus Thin Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Caviar Green Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Black Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* WDC Scorpio Blue Advanced Format (4k) drives */ { T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Olympus FE-210 camera */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LG UP3S MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * Laser MP3-2GA13 MP3 player */ {T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, { /* * LaCie external 250GB Hard drive designed by Porsche * Submitted by: Ben Stuyts * PR: 121474 */ {T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE }, /* SATA SSDs */ { /* * Corsair Force 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Force 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair Neutron GTX SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" }, /*quirks*/DA_Q_4K }, { /* * Corsair
Force GT & GS SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial M4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" }, /*quirks*/DA_Q_4K }, { /* * Crucial RealSSD C300 SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 320 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 330 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 510 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel 520 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" }, /*quirks*/DA_Q_4K }, { /* * Intel X25-M Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston E100 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Kingston HyperX 3k SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" }, /*quirks*/DA_Q_4K }, { /* * Marvell SSDs (entry taken from OpenSolaris) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 2 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Agility 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Deneva R Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 2 SSDs (inc pro series) * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 3 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" }, /*quirks*/DA_Q_4K }, { /* * OCZ Vertex 4 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 830 Series SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 840 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 850 SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" }, /*quirks*/DA_Q_4K }, { /* * Samsung 843T Series SSDs (MZ7WD*) * Samsung PM851 Series 
SSDs (MZ7TE*) * Samsung PM853T Series SSDs (MZ7GE*) * Samsung SM863 Series SSDs (MZ7KM*) * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" }, /*quirks*/DA_Q_4K }, { /* * SuperTalent TeraDrive CT SSDs * 4k optimised & trim only works in 4k requests + 4k aligned */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" }, /*quirks*/DA_Q_4K }, { /* * XceedIOPS SATA SSDs * 4k optimised */ { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" }, /*quirks*/DA_Q_4K }, { /* * Hama Innostor USB-Stick */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" }, /*quirks*/DA_Q_NO_RC16 }, { /* + * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR) + * Drive Managed SATA hard drive. This drive doesn't report + * in firmware that it is a drive managed SMR drive. + */ + { T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS0002*", "*" }, + /*quirks*/DA_Q_SMR_DM + }, + { + /* * MX-ES USB Drive by Mach Xtreme */ { T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"}, /*quirks*/DA_Q_NO_RC16 }, }; static disk_strategy_t dastrategy; static dumper_t dadump; static periph_init_t dainit; static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void dasysctlinit(void *context, int pending); static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS); static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS); static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS); +static int dazonemodesysctl(SYSCTL_HANDLER_ARGS); +static int dazonesupsysctl(SYSCTL_HANDLER_ARGS); static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS); static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method); static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method); static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method); static void daprobedone(struct cam_periph *periph, union ccb *ccb); static periph_ctor_t daregister; static periph_dtor_t dacleanup; static periph_start_t dastart; static periph_oninv_t daoninvalidate; +static void dazonedone(struct cam_periph *periph, union ccb *ccb); static void dadone(struct cam_periph *periph, union ccb *done_ccb); static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags); static void daprevent(struct cam_periph *periph, int action); static void dareprobe(struct cam_periph *periph); static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_size); static timeout_t dasendorderedtag; static void dashutdown(void *arg, int howto); static timeout_t damediapoll; #ifndef DA_DEFAULT_POLL_PERIOD #define DA_DEFAULT_POLL_PERIOD 3 #endif #ifndef DA_DEFAULT_TIMEOUT #define DA_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ #endif #ifndef DA_DEFAULT_SOFTTIMEOUT #define DA_DEFAULT_SOFTTIMEOUT 0 #endif #ifndef DA_DEFAULT_RETRY #define DA_DEFAULT_RETRY 4 #endif #ifndef DA_DEFAULT_SEND_ORDERED #define DA_DEFAULT_SEND_ORDERED 1 #endif static int da_poll_period = DA_DEFAULT_POLL_PERIOD; static int da_retry_count = DA_DEFAULT_RETRY; static int da_default_timeout = DA_DEFAULT_TIMEOUT; static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT; static int da_send_ordered = DA_DEFAULT_SEND_ORDERED; static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0, "CAM Direct Access Disk driver"); SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN, &da_poll_period, 0, "Media polling period in seconds"); SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN, &da_retry_count, 0, 
"Normal I/O retry count"); SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN, &da_default_timeout, 0, "Normal I/O timeout (in seconds)"); SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN, &da_send_ordered, 0, "Send Ordered Tags"); SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout, CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I", "Soft I/O timeout (ms)"); TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout); /* * DA_ORDEREDTAG_INTERVAL determines how often, relative * to the default timeout, we check to see whether an ordered * tagged transaction is appropriate to prevent simple tag * starvation. Since we'd like to ensure that there is at least * 1/2 of the timeout length left for a starved transaction to * complete after we've sent an ordered tag, we must poll at least * four times in every timeout period. This takes care of the worst * case where a starved transaction starts during an interval that * meets the requirement "don't send an ordered tag" test so it takes * us two intervals to determine that a tag must be sent. */ #ifndef DA_ORDEREDTAG_INTERVAL #define DA_ORDEREDTAG_INTERVAL 4 #endif static struct periph_driver dadriver = { dainit, "da", TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 }; PERIPHDRIVER_DECLARE(da, dadriver); static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers"); static int daopen(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; int error; periph = (struct cam_periph *)dp->d_drv1; if (cam_periph_acquire(periph) != CAM_REQ_CMP) { return (ENXIO); } cam_periph_lock(periph); if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) { cam_periph_unlock(periph); cam_periph_release(periph); return (error); } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daopen\n")); softc = (struct da_softc *)periph->softc; dareprobe(periph); /* Wait for the disk size update. */ error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO, "dareprobe", 0); if (error != 0) xpt_print(periph->path, "unable to retrieve capacity data\n"); if (periph->flags & CAM_PERIPH_INVALID) error = ENXIO; if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_PREVENT); if (error == 0) { softc->flags &= ~DA_FLAG_PACK_INVALID; softc->flags |= DA_FLAG_OPEN; } cam_periph_unhold(periph); cam_periph_unlock(periph); if (error != 0) cam_periph_release(periph); return (error); } static int daclose(struct disk *dp) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; periph = (struct cam_periph *)dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH, ("daclose\n")); if (cam_periph_hold(periph, PRIBIO) == 0) { /* Flush disk cache. */ if ((softc->flags & DA_FLAG_DIRTY) != 0 && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, 5 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR, softc->disk->d_devstat); if (error == 0) softc->flags &= ~DA_FLAG_DIRTY; xpt_release_ccb(ccb); } /* Allow medium removal. 
*/ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 && (softc->quirks & DA_Q_NO_PREVENT) == 0) daprevent(periph, PR_ALLOW); cam_periph_unhold(periph); } /* * If we've got removeable media, mark the blocksize as * unavailable, since it could change when new media is * inserted. */ if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE; softc->flags &= ~DA_FLAG_OPEN; while (softc->refcount != 0) cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1); cam_periph_unlock(periph); cam_periph_release(periph); return (0); } static void daschedule(struct cam_periph *periph) { struct da_softc *softc = (struct da_softc *)periph->softc; if (softc->state != DA_STATE_NORMAL) return; cam_iosched_schedule(softc->cam_iosched, periph); } /* * Actually translate the requested transfer into one the physical driver * can understand. The transfer is described by a buf and will include * only one physical transfer. */ static void dastrategy(struct bio *bp) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)bp->bio_disk->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); /* * If the device has been made invalid, error out */ if ((softc->flags & DA_FLAG_PACK_INVALID)) { cam_periph_unlock(periph); biofinish(bp, NULL, ENXIO); return; } CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp)); /* + * Zone commands must be ordered, because they can depend on the + * effects of previously issued commands, and they may affect + * commands after them. + */ + if (bp->bio_cmd == BIO_ZONE) + bp->bio_flags |= BIO_ORDERED; + + /* * Place it in the queue of disk activities for this disk */ cam_iosched_queue_work(softc->cam_iosched, bp); /* * Schedule ourselves for performing the work. */ daschedule(periph); cam_periph_unlock(periph); return; } static int dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct cam_periph *periph; struct da_softc *softc; u_int secsize; struct ccb_scsiio csio; struct disk *dp; int error = 0; dp = arg; periph = dp->d_drv1; softc = (struct da_softc *)periph->softc; cam_periph_lock(periph); secsize = softc->params.secsize; if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { cam_periph_unlock(periph); return (ENXIO); } if (length > 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_read_write(&csio, /*retries*/0, dadone, MSG_ORDERED_Q_TAG, /*read*/SCSI_RW_WRITE, /*byte2*/0, /*minimum_cmd_size*/ softc->minimum_cmd_size, offset / secsize, length / secsize, /*data_ptr*/(u_int8_t *) virtual, /*dxfer_len*/length, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) printf("Aborting dump due to I/O error.\n"); cam_periph_unlock(periph); return (error); } /* * Sync the disk cache contents to the physical media. 
*/ if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL); csio.ccb_h.ccb_state = DA_CCB_DUMP; scsi_synchronize_cache(&csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0,/* Cover the whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 5 * 1000); xpt_polled_action((union ccb *)&csio); error = cam_periph_error((union ccb *)&csio, 0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL); if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); } cam_periph_unlock(periph); return (error); } static int dagetattr(struct bio *bp) { int ret; struct cam_periph *periph; periph = (struct cam_periph *)bp->bio_disk->d_drv1; cam_periph_lock(periph); ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute, periph->path); cam_periph_unlock(periph); if (ret == 0) bp->bio_completed = bp->bio_length; return ret; } static void dainit(void) { cam_status status; /* * Install a global async callback. This callback will * receive async callbacks like "new device found". */ status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL); if (status != CAM_REQ_CMP) { printf("da: Failed to attach master async callback " "due to status 0x%x!\n", status); } else if (da_send_ordered) { /* Register our shutdown event handler */ if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) printf("dainit: shutdown event registration failed!\n"); } } /* * Callback from GEOM, called when it has finished cleaning up its * resources. */ static void dadiskgonecb(struct disk *dp) { struct cam_periph *periph; periph = (struct cam_periph *)dp->d_drv1; cam_periph_release(periph); } static void daoninvalidate(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; /* * De-register any async callbacks. */ xpt_register_async(0, daasync, periph, periph->path); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif /* * Return all queued I/O with ENXIO. * XXX Handle any transactions queued to the card * with XPT_ABORT_CCB. */ cam_iosched_flush(softc->cam_iosched, NULL, ENXIO); /* * Tell GEOM that we've gone away, we'll get a callback when it is * done cleaning up its resources. */ disk_gone(softc->disk); } static void dacleanup(struct cam_periph *periph) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; cam_periph_unlock(periph); cam_iosched_fini(softc->cam_iosched); /* * If we can't free the sysctl tree, oh well... 
*/ if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) { #ifdef CAM_IO_STATS if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0) xpt_print(periph->path, "can't remove sysctl stats context\n"); #endif if (sysctl_ctx_free(&softc->sysctl_ctx) != 0) xpt_print(periph->path, "can't remove sysctl context\n"); } callout_drain(&softc->mediapoll_c); disk_destroy(softc->disk); callout_drain(&softc->sendordered_c); free(softc, M_DEVBUF); cam_periph_lock(periph); } static void daasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct cam_periph *periph; struct da_softc *softc; periph = (struct cam_periph *)callback_arg; switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cam_status status; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) break; if (cgd->protocol != PROTO_SCSI) break; if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED) break; if (SID_TYPE(&cgd->inq_data) != T_DIRECT && SID_TYPE(&cgd->inq_data) != T_RBC - && SID_TYPE(&cgd->inq_data) != T_OPTICAL) + && SID_TYPE(&cgd->inq_data) != T_OPTICAL + && SID_TYPE(&cgd->inq_data) != T_ZBC_HM) break; /* * Allocate a peripheral instance for * this device and start the probe * process. */ status = cam_periph_alloc(daregister, daoninvalidate, dacleanup, dastart, "da", CAM_PERIPH_BIO, path, daasync, AC_FOUND_DEVICE, cgd); if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG) printf("daasync: Unable to attach to new device " "due to status 0x%x\n", status); return; } case AC_ADVINFO_CHANGED: { uintptr_t buftype; buftype = (uintptr_t)arg; if (buftype == CDAI_TYPE_PHYS_PATH) { struct da_softc *softc; softc = periph->softc; disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); } break; } case AC_UNIT_ATTENTION: { union ccb *ccb; int error_code, sense_key, asc, ascq; softc = (struct da_softc *)periph->softc; ccb = (union ccb *)arg; /* * Handle all UNIT ATTENTIONs except our own, * as they will be handled by daerror(). */ if (xpt_path_periph(ccb->ccb_h.path) != periph && scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (asc == 0x2A && ascq == 0x09) { xpt_print(ccb->ccb_h.path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } else if (asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (asc == 0x3F && ascq == 0x03) { xpt_print(ccb->ccb_h.path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); } } cam_periph_async(periph, code, path, arg); break; } case AC_SCSI_AEN: softc = (struct da_softc *)periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* FALLTHROUGH */ case AC_SENT_BDR: case AC_BUS_RESET: { struct ccb_hdr *ccbh; softc = (struct da_softc *)periph->softc; /* * Don't fail on the expected unit attention * that will occur. 
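For quick reference, the three unit attention ASC/ASCQ pairs that daasync() special-cases above map to these recovery actions. This table is illustrative only; the driver itself open-codes the comparisons:

/* Illustrative summary -- not part of the driver. */
static const struct {
	int asc, ascq;
	const char *meaning;
} da_ua_summary[] = {
	{ 0x2A, 0x09, "capacity data has changed -> dareprobe()" },
	{ 0x28, 0x00, "not ready to ready (new media) -> disk_media_changed()" },
	{ 0x3F, 0x03, "INQUIRY data has changed -> dareprobe()" },
};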
*/ softc->flags |= DA_FLAG_RETRY_UA; LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) ccbh->ccb_state |= DA_CCB_RETRY_UA; break; } case AC_INQ_CHANGED: softc = (struct da_softc *)periph->softc; softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); break; default: break; } cam_periph_async(periph, code, path, arg); } static void dasysctlinit(void *context, int pending) { struct cam_periph *periph; struct da_softc *softc; char tmpstr[80], tmpstr2[80]; struct ccb_trans_settings cts; periph = (struct cam_periph *)context; /* * periph was held for us when this task was enqueued */ if (periph->flags & CAM_PERIPH_INVALID) { cam_periph_release(periph); return; } softc = (struct da_softc *)periph->softc; snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number); snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number); sysctl_ctx_init(&softc->sysctl_ctx); softc->flags |= DA_FLAG_SCTX_INIT; softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (softc->sysctl_tree == NULL) { printf("dasysctlinit: unable to allocate sysctl tree\n"); cam_periph_release(periph); return; } /* * Now register the sysctl handler, so the user can change the value on * the fly. */ SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0, dadeletemethodsysctl, "A", "BIO_DELETE execution method"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW, softc, 0, dadeletemaxsysctl, "Q", "Maximum BIO_DELETE size"); SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW, &softc->minimum_cmd_size, 0, dacmdsizesysctl, "I", "Minimum CDB size"); + SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), + OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD, + softc, 0, dazonemodesysctl, "A", + "Zone Mode"); + SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), + OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD, + softc, 0, dazonesupsysctl, "A", + "Zone Support"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones, + "Optimal Number of Open Sequential Write Preferred Zones"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "optimal_nonseq_zones", CTLFLAG_RD, + &softc->optimal_nonseq_zones, + "Optimal Number of Non-Sequentially Written Sequential Write " + "Preferred Zones"); + SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, + SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, + "max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones, + "Maximum Number of Open Sequential Write Required Zones"); + SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "error_inject", CTLFLAG_RW, &softc->error_inject, 0, "error_inject leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "unmapped_io", CTLFLAG_RD, &softc->unmappedio, 0, "Unmapped I/O leaf"); SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "rotating", CTLFLAG_RD, &softc->rotating, 0, "Rotating media"); /* * Add some addressing info. 
*/ memset(&cts, 0, sizeof (cts)); xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; cts.type = CTS_TYPE_CURRENT_SETTINGS; cam_periph_lock(periph); xpt_action((union ccb *)&cts); cam_periph_unlock(periph); if (cts.ccb_h.status != CAM_REQ_CMP) { cam_periph_release(periph); return; } if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) { struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc; if (fc->valid & CTS_FC_VALID_WWPN) { softc->wwpn = fc->wwpn; SYSCTL_ADD_UQUAD(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "wwpn", CTLFLAG_RD, &softc->wwpn, "World Wide Port Name"); } } #ifdef CAM_IO_STATS /* * Now add some useful stats. * XXX These should live in cam_periph and be common to all periphs */ softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "Statistics"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "errors", CTLFLAG_RD, &softc->errors, 0, "Transport errors reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "timeouts", CTLFLAG_RD, &softc->timeouts, 0, "Device timeouts reported by the SIM"); SYSCTL_ADD_INT(&softc->sysctl_stats_ctx, SYSCTL_CHILDREN(softc->sysctl_stats_tree), OID_AUTO, "pack_invalidations", CTLFLAG_RD, &softc->invalidations, 0, "Device pack invalidations"); #endif cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx, softc->sysctl_tree); cam_periph_release(periph); } static int dadeletemaxsysctl(SYSCTL_HANDLER_ARGS) { int error; uint64_t value; struct da_softc *softc; softc = (struct da_softc *)arg1; value = softc->disk->d_delmaxsize; error = sysctl_handle_64(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* only accept values smaller than the calculated value */ if (value > dadeletemaxsize(softc, softc->delete_method)) { return (EINVAL); } softc->disk->d_delmaxsize = value; return (0); } static int dacmdsizesysctl(SYSCTL_HANDLER_ARGS) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* * Acceptable values here are 6, 10, 12 or 16. 
*/ if (value < 6) value = 6; else if ((value > 6) && (value <= 10)) value = 10; else if ((value > 10) && (value <= 12)) value = 12; else if (value > 12) value = 16; *(int *)arg1 = value; return (0); } static int dasysctlsofttimeout(SYSCTL_HANDLER_ARGS) { sbintime_t value; int error; value = da_default_softtimeout / SBT_1MS; error = sysctl_handle_int(oidp, (int *)&value, 0, req); if ((error != 0) || (req->newptr == NULL)) return (error); /* XXX Should clip this to a reasonable level */ if (value > da_default_timeout * 1000) return (EINVAL); da_default_softtimeout = value * SBT_1MS; return (0); } static void dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method) { softc->delete_method = delete_method; softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method); softc->delete_func = da_delete_functions[delete_method]; if (softc->delete_method > DA_DELETE_DISABLE) softc->disk->d_flags |= DISKFLAG_CANDELETE; else softc->disk->d_flags &= ~DISKFLAG_CANDELETE; } static off_t dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method) { off_t sectors; switch(delete_method) { case DA_DELETE_UNMAP: sectors = (off_t)softc->unmap_max_lba; break; case DA_DELETE_ATA_TRIM: sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges; break; case DA_DELETE_WS16: sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS); break; case DA_DELETE_ZERO: case DA_DELETE_WS10: sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS); break; default: return 0; } return (off_t)softc->params.secsize * omin(sectors, softc->params.sectors); } static void daprobedone(struct cam_periph *periph, union ccb *ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; dadeletemethodchoose(softc, DA_DELETE_NONE); if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) { char buf[80]; int i, sep; snprintf(buf, sizeof(buf), "Delete methods: <"); sep = 0; for (i = 0; i <= DA_DELETE_MAX; i++) { if ((softc->delete_available & (1 << i)) == 0 && i != softc->delete_method) continue; if (sep) strlcat(buf, ",", sizeof(buf)); strlcat(buf, da_delete_method_names[i], sizeof(buf)); if (i == softc->delete_method) strlcat(buf, "(*)", sizeof(buf)); sep = 1; } strlcat(buf, ">", sizeof(buf)); printf("%s%d: %s\n", periph->periph_name, periph->unit_number, buf); } /* * Since our peripheral may be invalidated by an error * above or an external event, we must release our CCB * before releasing the probe lock on the peripheral. * The peripheral will only go away once the last lock * is removed, and we need it around for the CCB release * operation. */ xpt_release_ccb(ccb); softc->state = DA_STATE_NORMAL; softc->flags |= DA_FLAG_PROBED; daschedule(periph); wakeup(&softc->disk->d_mediasize); if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) { softc->flags |= DA_FLAG_ANNOUNCED; cam_periph_unhold(periph); } else cam_periph_release_locked(periph); } static void dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method) { int i, methods; /* If available, prefer the method requested by user. */ i = softc->delete_method_pref; methods = softc->delete_available | (1 << DA_DELETE_DISABLE); if (methods & (1 << i)) { dadeletemethodset(softc, i); return; } /* Use the pre-defined order to choose the best performing delete. */ for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) { if (i == DA_DELETE_ZERO) continue; if (softc->delete_available & (1 << i)) { dadeletemethodset(softc, i); return; } } /* Fallback to default. 
*/ dadeletemethodset(softc, default_method); } static int dadeletemethodsysctl(SYSCTL_HANDLER_ARGS) { char buf[16]; const char *p; struct da_softc *softc; int i, error, methods, value; softc = (struct da_softc *)arg1; value = softc->delete_method; if (value < 0 || value > DA_DELETE_MAX) p = "UNKNOWN"; else p = da_delete_method_names[value]; strncpy(buf, p, sizeof(buf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); methods = softc->delete_available | (1 << DA_DELETE_DISABLE); for (i = 0; i <= DA_DELETE_MAX; i++) { if (strcmp(buf, da_delete_method_names[i]) == 0) break; } if (i > DA_DELETE_MAX) return (EINVAL); softc->delete_method_pref = i; dadeletemethodchoose(softc, DA_DELETE_NONE); return (0); } +static int +dazonemodesysctl(SYSCTL_HANDLER_ARGS) +{ + char tmpbuf[40]; + struct da_softc *softc; + int error; + + softc = (struct da_softc *)arg1; + + switch (softc->zone_mode) { + case DA_ZONE_DRIVE_MANAGED: + snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed"); + break; + case DA_ZONE_HOST_AWARE: + snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware"); + break; + case DA_ZONE_HOST_MANAGED: + snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed"); + break; + case DA_ZONE_NONE: + default: + snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned"); + break; + } + + error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req); + + return (error); +} + +static int +dazonesupsysctl(SYSCTL_HANDLER_ARGS) +{ + char tmpbuf[180]; + struct da_softc *softc; + struct sbuf sb; + int error, first; + unsigned int i; + + softc = (struct da_softc *)arg1; + + error = 0; + first = 1; + sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0); + + for (i = 0; i < sizeof(da_zone_desc_table) / + sizeof(da_zone_desc_table[0]); i++) { + if (softc->zone_flags & da_zone_desc_table[i].value) { + if (first == 0) + sbuf_printf(&sb, ", "); + else + first = 0; + sbuf_cat(&sb, da_zone_desc_table[i].desc); + } + } + + if (first == 1) + sbuf_printf(&sb, "None"); + + sbuf_finish(&sb); + + error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); + + return (error); +} + static cam_status daregister(struct cam_periph *periph, void *arg) { struct da_softc *softc; struct ccb_pathinq cpi; struct ccb_getdev *cgd; char tmpstr[80]; caddr_t match; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { printf("daregister: no getdev CCB, can't register device\n"); return(CAM_REQ_CMP_ERR); } softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT|M_ZERO); if (softc == NULL) { printf("daregister: Unable to probe new device. " "Unable to allocate softc\n"); return(CAM_REQ_CMP_ERR); } if (cam_iosched_init(&softc->cam_iosched, periph) != 0) { printf("daregister: Unable to probe new device. " "Unable to allocate iosched memory\n"); return(CAM_REQ_CMP_ERR); } LIST_INIT(&softc->pending_ccbs); softc->state = DA_STATE_PROBE_RC; bioq_init(&softc->delete_run_queue); if (SID_IS_REMOVABLE(&cgd->inq_data)) softc->flags |= DA_FLAG_PACK_REMOVABLE; softc->unmap_max_ranges = UNMAP_MAX_RANGES; softc->unmap_max_lba = UNMAP_RANGE_MAX; softc->ws_max_blks = WS16_MAX_BLKS; softc->trim_max_ranges = ATA_TRIM_MAX_RANGES; softc->rotating = 1; periph->softc = softc; /* * See if this device has any quirks. 
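The zone_mode and zone_support handlers added above export read-only strings, so they can be queried from userland with sysctl(8) (e.g. sysctl kern.cam.da.0.zone_mode) or programmatically. A minimal sketch, assuming unit da0:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[80];
	size_t len = sizeof(buf);

	/* "0" is the da unit number; adjust for the device of interest. */
	if (sysctlbyname("kern.cam.da.0.zone_mode", buf, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("da0 zone mode: %s\n", buf);
	return (0);
}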
*/ match = cam_quirkmatch((caddr_t)&cgd->inq_data, (caddr_t)da_quirk_table, nitems(da_quirk_table), sizeof(*da_quirk_table), scsi_inquiry_match); if (match != NULL) softc->quirks = ((struct da_quirk_entry *)match)->quirks; else softc->quirks = DA_Q_NONE; /* Check if the SIM does not want 6 byte commands */ bzero(&cpi, sizeof(cpi)); xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cpi.ccb_h.func_code = XPT_PATH_INQ; xpt_action((union ccb *)&cpi); if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE)) softc->quirks |= DA_Q_NO_6_BYTE; + if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM) + softc->zone_mode = DA_ZONE_HOST_MANAGED; + else if (softc->quirks & DA_Q_SMR_DM) + softc->zone_mode = DA_ZONE_DRIVE_MANAGED; + else + softc->zone_mode = DA_ZONE_NONE; + + if (softc->zone_mode != DA_ZONE_NONE) { + if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { + if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) + softc->zone_interface = DA_ZONE_IF_ATA_SAT; + else + softc->zone_interface = DA_ZONE_IF_ATA_PASS; + } else + softc->zone_interface = DA_ZONE_IF_SCSI; + } + TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph); /* * Take an exclusive refcount on the periph while dastart is called * to finish the probe. The reference will be dropped in dadone at * the end of probe. */ (void)cam_periph_hold(periph, PRIBIO); /* * Schedule a periodic event to occasionally send an * ordered tag to a device. */ callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0); callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); cam_periph_unlock(periph); /* * RBC devices don't have to support READ(6), only READ(10). */ if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC) softc->minimum_cmd_size = 10; else softc->minimum_cmd_size = 6; /* * Load the user's default, if any. */ snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size", periph->unit_number); TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size); /* * 6, 10, 12 and 16 are the currently permissible values. */ if (softc->minimum_cmd_size < 6) softc->minimum_cmd_size = 6; else if ((softc->minimum_cmd_size > 6) && (softc->minimum_cmd_size <= 10)) softc->minimum_cmd_size = 10; else if ((softc->minimum_cmd_size > 10) && (softc->minimum_cmd_size <= 12)) softc->minimum_cmd_size = 12; else if (softc->minimum_cmd_size > 12) softc->minimum_cmd_size = 16; /* Predict whether device may support READ CAPACITY(16). */ if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 && (softc->quirks & DA_Q_NO_RC16) == 0) { softc->flags |= DA_FLAG_CAN_RC16; softc->state = DA_STATE_PROBE_RC16; } /* * Register this media as a disk. 
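Because drive managed SMR drives like the ST8000AS0002 above do not identify themselves in firmware, new models have to be quirked by hand. A hypothetical entry (not a real drive) would follow the table's conventions, where '?' matches any single character and '*' matches any run of characters:

{
	/*
	 * Hypothetical example only: another drive managed SMR
	 * drive that fails to report itself would get the same
	 * DA_Q_SMR_DM quirk as the Seagate entry above.
	 */
	{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "EXAMPLE DM-SMR*", "*" },
	/*quirks*/DA_Q_SMR_DM
},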
*/ softc->disk = disk_alloc(); softc->disk->d_devstat = devstat_new_entry(periph->periph_name, periph->unit_number, 0, DEVSTAT_BS_UNAVAILABLE, SID_TYPE(&cgd->inq_data) | XPORT_DEVSTAT_TYPE(cpi.transport), DEVSTAT_PRIORITY_DISK); softc->disk->d_open = daopen; softc->disk->d_close = daclose; softc->disk->d_strategy = dastrategy; softc->disk->d_dump = dadump; softc->disk->d_getattr = dagetattr; softc->disk->d_gone = dadiskgonecb; softc->disk->d_name = "da"; softc->disk->d_drv1 = periph; if (cpi.maxio == 0) softc->maxio = DFLTPHYS; /* traditional default */ else if (cpi.maxio > MAXPHYS) softc->maxio = MAXPHYS; /* for safety */ else softc->maxio = cpi.maxio; softc->disk->d_maxsize = softc->maxio; softc->disk->d_unit = periph->unit_number; - softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION; + softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE; if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE; if ((cpi.hba_misc & PIM_UNMAPPED) != 0) { softc->unmappedio = 1; softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO; xpt_print(periph->path, "UNMAPPED\n"); } cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor, sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr)); strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr)); cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)], cgd->inq_data.product, sizeof(cgd->inq_data.product), sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr)); softc->disk->d_hba_vendor = cpi.hba_vendor; softc->disk->d_hba_device = cpi.hba_device; softc->disk->d_hba_subvendor = cpi.hba_subvendor; softc->disk->d_hba_subdevice = cpi.hba_subdevice; /* * Acquire a reference to the periph before we register with GEOM. * We'll release this reference once GEOM calls us back (via * dadiskgonecb()) telling us that our provider has been freed. */ if (cam_periph_acquire(periph) != CAM_REQ_CMP) { xpt_print(periph->path, "%s: lost periph during " "registration!\n", __func__); cam_periph_lock(periph); return (CAM_REQ_CMP_ERR); } disk_create(softc->disk, DISK_VERSION); cam_periph_lock(periph); /* * Add async callbacks for events of interest. * I don't bother checking if this fails as, * in most cases, the system will function just * fine without them and the only alternative * would be to not attach the device on failure. */ xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE | AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION | AC_INQ_CHANGED, daasync, periph, periph->path); /* * Emit an attribute changed notification just in case * physical path information arrived before our async * event handler was registered, but after anyone attaching * to our disk device polled it. */ disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT); /* * Schedule periodic media polling events.
*/ callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0); if ((softc->flags & DA_FLAG_PACK_REMOVABLE) && (cgd->inq_flags & SID_AEN) == 0 && da_poll_period != 0) callout_reset(&softc->mediapoll_c, da_poll_period * hz, damediapoll, periph); xpt_schedule(periph, CAM_PRIORITY_DEV); return(CAM_REQ_CMP); } +static int +da_zone_bio_to_scsi(int disk_zone_cmd) +{ + switch (disk_zone_cmd) { + case DISK_ZONE_OPEN: + return ZBC_OUT_SA_OPEN; + case DISK_ZONE_CLOSE: + return ZBC_OUT_SA_CLOSE; + case DISK_ZONE_FINISH: + return ZBC_OUT_SA_FINISH; + case DISK_ZONE_RWP: + return ZBC_OUT_SA_RWP; + } + + return -1; +} + +static int +da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp, + int *queue_ccb) +{ + struct da_softc *softc; + int error; + + error = 0; + + if (bp->bio_cmd != BIO_ZONE) { + error = EINVAL; + goto bailout; + } + + softc = periph->softc; + + switch (bp->bio_zone.zone_cmd) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: { + int zone_flags; + int zone_sa; + uint64_t lba; + + zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd); + if (zone_sa == -1) { + xpt_print(periph->path, "Cannot translate zone " + "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd); + error = EINVAL; + goto bailout; + } + + zone_flags = 0; + lba = bp->bio_zone.zone_params.rwp.id; + + if (bp->bio_zone.zone_params.rwp.flags & + DISK_ZONE_RWP_FLAG_ALL) + zone_flags |= ZBC_OUT_ALL; + + if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { + scsi_zbc_out(&ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*service_action*/ zone_sa, + /*zone_id*/ lba, + /*zone_flags*/ zone_flags, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + } else { + /* + * Note that in this case, even though we can + * technically use NCQ, we don't bother for several + * reasons: + * 1. It hasn't been tested on a SAT layer that + * supports it. This is new as of SAT-4. + * 2. Even when there is a SAT layer that supports + * it, that SAT layer will also probably support + * ZBC -> ZAC translation, since they are both + * in the SAT-4 spec. + * 3. Translation will likely be preferable to ATA + * passthrough. LSI / Avago at least single + * steps ATA passthrough commands in the HBA, + * regardless of protocol, so unless that + * changes, there is a performance penalty for + * doing ATA passthrough no matter whether + * you're using NCQ/FPDMA, DMA or PIO. + * 4. It requires a 32-byte CDB, which at least at + * this point in CAM requires a CDB pointer, which + * would require us to allocate an additional bit + * of storage separate from the CCB. 
+ */ + error = scsi_ata_zac_mgmt_out(&ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*use_ncq*/ 0, + /*zm_action*/ zone_sa, + /*zone_id*/ lba, + /*zone_flags*/ zone_flags, + /*data_ptr*/ NULL, + /*dxfer_len*/ 0, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + if (error != 0) { + error = EINVAL; + xpt_print(periph->path, + "scsi_ata_zac_mgmt_out() returned an " + "error!"); + goto bailout; + } + } + *queue_ccb = 1; + + break; + } + case DISK_ZONE_REPORT_ZONES: { + uint8_t *rz_ptr; + uint32_t num_entries, alloc_size; + struct disk_zone_report *rep; + + rep = &bp->bio_zone.zone_params.report; + + num_entries = rep->entries_allocated; + if (num_entries == 0) { + xpt_print(periph->path, "No entries allocated for " + "Report Zones request\n"); + error = EINVAL; + goto bailout; + } + alloc_size = sizeof(struct scsi_report_zones_hdr) + + (sizeof(struct scsi_report_zones_desc) * num_entries); + alloc_size = min(alloc_size, softc->disk->d_maxsize); + rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO); + if (rz_ptr == NULL) { + xpt_print(periph->path, "Unable to allocate memory " + "for Report Zones request\n"); + error = ENOMEM; + goto bailout; + } + + if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) { + scsi_zbc_in(&ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*service_action*/ ZBC_IN_SA_REPORT_ZONES, + /*zone_start_lba*/ rep->starting_id, + /*zone_options*/ rep->rep_options, + /*data_ptr*/ rz_ptr, + /*dxfer_len*/ alloc_size, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + } else { + /* + * Note that in this case, even though we can + * technically use NCQ, we don't bother for several + * reasons: + * 1. It hasn't been tested on a SAT layer that + * supports it. This is new as of SAT-4. + * 2. Even when there is a SAT layer that supports + * it, that SAT layer will also probably support + * ZBC -> ZAC translation, since they are both + * in the SAT-4 spec. + * 3. Translation will likely be preferable to ATA + * passthrough. LSI / Avago at least single + * steps ATA passthrough commands in the HBA, + * regardless of protocol, so unless that + * changes, there is a performance penalty for + * doing ATA passthrough no matter whether + * you're using NCQ/FPDMA, DMA or PIO. + * 4. It requires a 32-byte CDB, which at least at + * this point in CAM requires a CDB pointer, which + * would require us to allocate an additional bit + * of storage separate from the CCB. + */ + error = scsi_ata_zac_mgmt_in(&ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*use_ncq*/ 0, + /*zm_action*/ ATA_ZM_REPORT_ZONES, + /*zone_id*/ rep->starting_id, + /*zone_flags*/ rep->rep_options, + /*data_ptr*/ rz_ptr, + /*dxfer_len*/ alloc_size, + /*cdb_storage*/ NULL, + /*cdb_storage_len*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + if (error != 0) { + error = EINVAL; + xpt_print(periph->path, + "scsi_ata_zac_mgmt_in() returned an " + "error!"); + goto bailout; + } + } + + /* + * For BIO_ZONE, this isn't normally needed. However, it + * is used by devstat_end_transaction_bio() to determine + * how much data was transferred. + */ + /* + * XXX KDM we have a problem. But I'm not sure how to fix + * it. devstat uses bio_bcount - bio_resid to calculate + * the amount of data transferred.
The GEOM disk code + * uses bio_length - bio_resid to calculate the amount of + * data in bio_completed. We have different structure + * sizes above and below the ada(4) driver. So, if we + * use the sizes above, the amount transferred won't be + * quite accurate for devstat. If we use different sizes + * for bio_bcount and bio_length (above and below + * respectively), then the residual needs to match one or + * the other. Everything is calculated after the bio + * leaves the driver, so changing the values around isn't + * really an option. For now, just set the count to the + * passed in length. This means that the calculations + * above (e.g. bio_completed) will be correct, but the + * amount of data reported to devstat will be slightly + * under or overstated. + */ + bp->bio_bcount = bp->bio_length; + + *queue_ccb = 1; + + break; + } + case DISK_ZONE_GET_PARAMS: { + struct disk_zone_disk_params *params; + + params = &bp->bio_zone.zone_params.disk_params; + bzero(params, sizeof(*params)); + + switch (softc->zone_mode) { + case DA_ZONE_DRIVE_MANAGED: + params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED; + break; + case DA_ZONE_HOST_AWARE: + params->zone_mode = DISK_ZONE_MODE_HOST_AWARE; + break; + case DA_ZONE_HOST_MANAGED: + params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED; + break; + default: + case DA_ZONE_NONE: + params->zone_mode = DISK_ZONE_MODE_NONE; + break; + } + + if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) + params->flags |= DISK_ZONE_DISK_URSWRZ; + + if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) { + params->optimal_seq_zones = softc->optimal_seq_zones; + params->flags |= DISK_ZONE_OPT_SEQ_SET; + } + + if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) { + params->optimal_nonseq_zones = + softc->optimal_nonseq_zones; + params->flags |= DISK_ZONE_OPT_NONSEQ_SET; + } + + if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) { + params->max_seq_zones = softc->max_seq_zones; + params->flags |= DISK_ZONE_MAX_SEQ_SET; + } + if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP) + params->flags |= DISK_ZONE_RZ_SUP; + + if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP) + params->flags |= DISK_ZONE_OPEN_SUP; + + if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP) + params->flags |= DISK_ZONE_CLOSE_SUP; + + if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP) + params->flags |= DISK_ZONE_FINISH_SUP; + + if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP) + params->flags |= DISK_ZONE_RWP_SUP; + break; + } + default: + break; + } +bailout: + return (error); +} + static void dastart(struct cam_periph *periph, union ccb *start_ccb) { struct da_softc *softc; softc = (struct da_softc *)periph->softc; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n")); skipstate: switch (softc->state) { case DA_STATE_NORMAL: { struct bio *bp; uint8_t tag_code; more: bp = cam_iosched_next_bio(softc->cam_iosched); if (bp == NULL) { if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); scsi_test_unit_ready(&start_ccb->csio, /*retries*/ da_retry_count, dadone, MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_TUR; xpt_action(start_ccb); } else xpt_release_ccb(start_ccb); break; } if (bp->bio_cmd == BIO_DELETE) { if (softc->delete_func != NULL) { softc->delete_func(periph, start_ccb, bp); goto out; } else { /* Not sure this is possible, but failsafe by lying and saying "sure, done." 
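The bio_zone fields consumed by da_zone_cmd() above are filled in from userland through the disk zone ioctl that accompanies this change. Assuming the DIOCZONECMD ioctl and the struct disk_zone_args layout implied by the code above (the header name and field types here are assumptions; treat this as a sketch, not a reference), fetching the parameters reported by the DISK_ZONE_GET_PARAMS case might look like:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disk_zone.h>	/* assumed home of struct disk_zone_args */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct disk_zone_args zone_args;
	int fd;

	if ((fd = open("/dev/da0", O_RDONLY)) == -1) {
		perror("open");
		return (1);
	}
	memset(&zone_args, 0, sizeof(zone_args));
	zone_args.zone_cmd = DISK_ZONE_GET_PARAMS;
	/* DIOCZONECMD is assumed; the GET_PARAMS case above services it. */
	if (ioctl(fd, DIOCZONECMD, &zone_args) == -1) {
		perror("DIOCZONECMD");
		return (1);
	}
	printf("zone mode %u, flags %#jx\n",
	    (unsigned int)zone_args.zone_params.disk_params.zone_mode,
	    (uintmax_t)zone_args.zone_params.disk_params.flags);
	return (0);
}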
*/ biofinish(bp, NULL, 0); goto more; } } if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) { cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR); cam_periph_release_locked(periph); /* XXX is this still valid? I think so but unverified */ } if ((bp->bio_flags & BIO_ORDERED) != 0 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { softc->flags &= ~DA_FLAG_NEED_OTAG; softc->flags |= DA_FLAG_WAS_OTAG; tag_code = MSG_ORDERED_Q_TAG; } else { tag_code = MSG_SIMPLE_Q_TAG; } switch (bp->bio_cmd) { case BIO_WRITE: case BIO_READ: { void *data_ptr; int rw_op; if (bp->bio_cmd == BIO_WRITE) { softc->flags |= DA_FLAG_DIRTY; rw_op = SCSI_RW_WRITE; } else { rw_op = SCSI_RW_READ; } data_ptr = bp->bio_data; if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) { rw_op |= SCSI_RW_BIO; data_ptr = bp; } scsi_read_write(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/tag_code, rw_op, /*byte2*/0, softc->minimum_cmd_size, /*lba*/bp->bio_pblkno, /*block_count*/bp->bio_bcount / softc->params.secsize, data_ptr, /*dxfer_len*/ bp->bio_bcount, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); break; } case BIO_FLUSH: /* * BIO_FLUSH doesn't currently communicate * range data, so we synchronize the cache * over the whole disk. We also force * ordered tag semantics so the flush applies * to all previously queued I/O. */ scsi_synchronize_cache(&start_ccb->csio, /*retries*/1, /*cbfcnp*/dadone, MSG_ORDERED_Q_TAG, /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE, da_default_timeout*1000); break; + case BIO_ZONE: { + int error, queue_ccb; + + queue_ccb = 0; + + error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb); + if ((error != 0) + || (queue_ccb == 0)) { + biofinish(bp, NULL, error); + xpt_release_ccb(start_ccb); + return; + } + break; } + } start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; start_ccb->ccb_h.flags |= CAM_UNLOCKED; start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout); out: LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h, periph_links.le); /* We expect a unit attention from this device */ if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; softc->flags &= ~DA_FLAG_RETRY_UA; } start_ccb->ccb_h.ccb_bp = bp; softc->refcount++; cam_periph_unlock(periph); xpt_action(start_ccb); cam_periph_lock(periph); softc->refcount--; /* May have more work to do, so ensure we stay scheduled */ daschedule(periph); break; } case DA_STATE_PROBE_RC: { struct scsi_read_capacity_data *rcap; rcap = (struct scsi_read_capacity_data *) malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcap == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph??? */ break; } scsi_read_capacity(&start_ccb->csio, /*retries*/da_retry_count, dadone, MSG_SIMPLE_Q_TAG, rcap, SSD_FULL_SIZE, /*timeout*/5000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_RC16: { struct scsi_read_capacity_data_long *rcaplong; rcaplong = (struct scsi_read_capacity_data_long *) malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO); if (rcaplong == NULL) { printf("dastart: Couldn't malloc read_capacity data\n"); /* da_free_periph???
*/ break; } scsi_read_capacity_16(&start_ccb->csio, /*retries*/ da_retry_count, /*cbfcnp*/ dadone, /*tag_action*/ MSG_SIMPLE_Q_TAG, /*lba*/ 0, /*reladr*/ 0, /*pmi*/ 0, /*rcap_buf*/ (uint8_t *)rcaplong, /*rcap_buf_len*/ sizeof(*rcaplong), /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16; xpt_action(start_ccb); break; } case DA_STATE_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; if (!scsi_vpd_supported_page(periph, SVPD_LBP)) { /* * If we get here we don't support any SBC-3 delete * methods with UNMAP as the Logical Block Provisioning * VPD page support is required for devices which * support it according to T10/1799-D Revision 31 * however older revisions of the spec don't mandate * this so we currently don't remove these methods * from the available set. */ softc->state = DA_STATE_PROBE_BLK_LIMITS; goto skipstate; } lbp = (struct scsi_vpd_logical_block_prov *) malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO); if (lbp == NULL) { printf("dastart: Couldn't malloc lbp data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)lbp, /*inq_len*/sizeof(*lbp), /*evpd*/TRUE, /*page_code*/SVPD_LBP, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) { /* Not supported skip to next probe */ softc->state = DA_STATE_PROBE_BDC; goto skipstate; } block_limits = (struct scsi_vpd_block_limits *) malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO); if (block_limits == NULL) { printf("dastart: Couldn't malloc block_limits data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)block_limits, /*inq_len*/sizeof(*block_limits), /*evpd*/TRUE, /*page_code*/SVPD_BLOCK_LIMITS, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS; xpt_action(start_ccb); break; } case DA_STATE_PROBE_BDC: { struct scsi_vpd_block_characteristics *bdc; if (!scsi_vpd_supported_page(periph, SVPD_BDC)) { softc->state = DA_STATE_PROBE_ATA; goto skipstate; } bdc = (struct scsi_vpd_block_characteristics *) malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); if (bdc == NULL) { printf("dastart: Couldn't malloc bdc data\n"); /* da_free_periph??? */ break; } scsi_inquiry(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*inq_buf*/(u_int8_t *)bdc, /*inq_len*/sizeof(*bdc), /*evpd*/TRUE, /*page_code*/SVPD_BDC, /*sense_len*/SSD_MIN_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC; xpt_action(start_ccb); break; } case DA_STATE_PROBE_ATA: { struct ata_params *ata_params; if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) { + if ((softc->zone_mode == DA_ZONE_HOST_AWARE) + || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { + /* + * Note that if the ATA VPD page isn't + * supported, we aren't talking to an ATA + * device anyway. Support for that VPD + * page is mandatory for SCSI to ATA (SAT) + * translation layers. 
+ */ + softc->state = DA_STATE_PROBE_ZONE; + goto skipstate; + } daprobedone(periph, start_ccb); break; } ata_params = (struct ata_params*) - malloc(sizeof(*ata_params), M_SCSIDA, M_NOWAIT|M_ZERO); + malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO); if (ata_params == NULL) { - printf("dastart: Couldn't malloc ata_params data\n"); + xpt_print(periph->path, "Couldn't malloc ata_params " + "data\n"); /* da_free_periph??? */ break; } scsi_ata_identify(&start_ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*data_ptr*/(u_int8_t *)ata_params, /*dxfer_len*/sizeof(*ata_params), /*sense_len*/SSD_FULL_SIZE, /*timeout*/da_default_timeout * 1000); start_ccb->ccb_h.ccb_bp = NULL; start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA; xpt_action(start_ccb); break; } + case DA_STATE_PROBE_ATA_LOGDIR: + { + struct ata_gp_log_dir *log_dir; + int retval; + + retval = 0; + + if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) { + /* + * If we don't have log support, not much point in + * trying to probe zone support. + */ + daprobedone(periph, start_ccb); + break; + } + + /* + * If we have an ATA device (the SCSI ATA Information VPD + * page should be present and the ATA identify should have + * succeeded) and it supports logs, ask for the log directory. + */ + + log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO); + if (log_dir == NULL) { + xpt_print(periph->path, "Couldn't malloc log_dir " + "data\n"); + daprobedone(periph, start_ccb); + break; + } + + retval = scsi_ata_read_log(&start_ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*log_address*/ ATA_LOG_DIRECTORY, + /*page_number*/ 0, + /*block_count*/ 1, + /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? + AP_PROTO_DMA : AP_PROTO_PIO_IN, + /*data_ptr*/ (uint8_t *)log_dir, + /*dxfer_len*/ sizeof(*log_dir), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + + if (retval != 0) { + xpt_print(periph->path, "scsi_ata_read_log() failed!"); + free(log_dir, M_SCSIDA); + daprobedone(periph, start_ccb); + break; + } + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR; + xpt_action(start_ccb); + break; } + case DA_STATE_PROBE_ATA_IDDIR: + { + struct ata_identify_log_pages *id_dir; + int retval; + + retval = 0; + + /* + * Check here to see whether the Identify Device log is + * supported in the directory of logs. If so, continue + * with requesting the log of identify device pages. + */ + if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) { + daprobedone(periph, start_ccb); + break; + } + + id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO); + if (id_dir == NULL) { + xpt_print(periph->path, "Couldn't malloc id_dir " + "data\n"); + daprobedone(periph, start_ccb); + break; + } + + retval = scsi_ata_read_log(&start_ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_PAGE_LIST, + /*block_count*/ 1, + /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? 
+ AP_PROTO_DMA : AP_PROTO_PIO_IN, + /*data_ptr*/ (uint8_t *)id_dir, + /*dxfer_len*/ sizeof(*id_dir), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + + if (retval != 0) { + xpt_print(periph->path, "scsi_ata_read_log() failed!"); + free(id_dir, M_SCSIDA); + daprobedone(periph, start_ccb); + break; + } + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR; + xpt_action(start_ccb); + break; + } + case DA_STATE_PROBE_ATA_SUP: + { + struct ata_identify_log_sup_cap *sup_cap; + int retval; + + retval = 0; + + /* + * Check here to see whether the Supported Capabilities log + * is in the list of Identify Device logs. + */ + if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) { + daprobedone(periph, start_ccb); + break; + } + + sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO); + if (sup_cap == NULL) { + xpt_print(periph->path, "Couldn't malloc sup_cap " + "data\n"); + daprobedone(periph, start_ccb); + break; + } + + retval = scsi_ata_read_log(&start_ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_SUP_CAP, + /*block_count*/ 1, + /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? + AP_PROTO_DMA : AP_PROTO_PIO_IN, + /*data_ptr*/ (uint8_t *)sup_cap, + /*dxfer_len*/ sizeof(*sup_cap), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + + if (retval != 0) { + xpt_print(periph->path, "scsi_ata_read_log() failed!"); + free(sup_cap, M_SCSIDA); + daprobedone(periph, start_ccb); + break; + + } + + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP; + xpt_action(start_ccb); + break; + } + case DA_STATE_PROBE_ATA_ZONE: + { + struct ata_zoned_info_log *ata_zone; + int retval; + + retval = 0; + + /* + * Check here to see whether the zoned device information + * page is supported. If so, continue on to request it. + * If not, skip to DA_STATE_PROBE_LOG or done. + */ + if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) { + daprobedone(periph, start_ccb); + break; + } + ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA, + M_NOWAIT|M_ZERO); + if (ata_zone == NULL) { + xpt_print(periph->path, "Couldn't malloc ata_zone " + "data\n"); + daprobedone(periph, start_ccb); + break; + } + + retval = scsi_ata_read_log(&start_ccb->csio, + /*retries*/ da_retry_count, + /*cbfcnp*/ dadone, + /*tag_action*/ MSG_SIMPLE_Q_TAG, + /*log_address*/ ATA_IDENTIFY_DATA_LOG, + /*page_number*/ ATA_IDL_ZDI, + /*block_count*/ 1, + /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ? + AP_PROTO_DMA : AP_PROTO_PIO_IN, + /*data_ptr*/ (uint8_t *)ata_zone, + /*dxfer_len*/ sizeof(*ata_zone), + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ da_default_timeout * 1000); + + if (retval != 0) { + xpt_print(periph->path, "scsi_ata_read_log() failed!"); + free(ata_zone, M_SCSIDA); + daprobedone(periph, start_ccb); + break; + } + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE; + xpt_action(start_ccb); + + break; + } + case DA_STATE_PROBE_ZONE: + { + struct scsi_vpd_zoned_bdc *bdc; + + /* + * Note that this page will be supported for SCSI protocol + * devices that support ZBC (SMR devices), as well as ATA + * protocol devices that are behind a SAT (SCSI to ATA + * Translation) layer that supports converting ZBC commands + * to their ZAC equivalents. 
+ */ + if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) { + daprobedone(periph, start_ccb); + break; + } + bdc = (struct scsi_vpd_zoned_bdc *) + malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO); + + if (bdc == NULL) { + xpt_release_ccb(start_ccb); + xpt_print(periph->path, "Couldn't malloc zone VPD " + "data\n"); + break; + } + scsi_inquiry(&start_ccb->csio, + /*retries*/da_retry_count, + /*cbfcnp*/dadone, + /*tag_action*/MSG_SIMPLE_Q_TAG, + /*inq_buf*/(u_int8_t *)bdc, + /*inq_len*/sizeof(*bdc), + /*evpd*/TRUE, + /*page_code*/SVPD_ZONED_BDC, + /*sense_len*/SSD_FULL_SIZE, + /*timeout*/da_default_timeout * 1000); + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE; + xpt_action(start_ccb); + break; + } + } } /* * In each of the methods below, while it is the caller's * responsibility to ensure the request will fit into a * single device request, we might have changed the delete * method due to the device incorrectly advertising either * its supported methods or limits. * * To prevent this from causing further issues, we validate * the request against the method's limits and warn, which * would otherwise be unnecessary. */ static void da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lba, lastlba = (uint64_t)-1; uint64_t totalcount = 0; uint64_t count; uint32_t lastcount = 0, c; uint32_t off, ranges = 0; /* * Currently this doesn't take the UNMAP * Granularity and Granularity Alignment * fields into account. * * This could result in both suboptimal unmap * requests as well as UNMAP calls unmapping * fewer LBAs than requested. */ bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { /* * Note: ada and da are different in how they store the * pending bp's in a trim. ada stores all of them in the * trim_req.bps. da stores all but the first one in the * delete_run_queue. ada then completes all the bps in * its adadone() loop. da completes all the bps in the * delete_run_queue in dadone, and relies on the subsequent * biodone to complete them. This should be reconciled since * there's no real reason to do it differently. XXX */ if (bp1 != bp) bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; /* Try to extend the previous range. 
*/ if (lba == lastlba) { c = omin(count, UNMAP_RANGE_MAX - lastcount); lastcount += c; off = ((ranges - 1) * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE; scsi_ulto4b(lastcount, &buf[off + 8]); count -= c; lba += c; totalcount += c; } while (count > 0) { c = omin(count, UNMAP_RANGE_MAX); if (totalcount + c > softc->unmap_max_lba || ranges >= softc->unmap_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld || %d >= %d\n", da_delete_method_desc[softc->delete_method], totalcount + c, softc->unmap_max_lba, ranges, softc->unmap_max_ranges); break; } off = (ranges * UNMAP_RANGE_SIZE) + UNMAP_HEAD_SIZE; scsi_u64to8b(lba, &buf[off + 0]); scsi_ulto4b(c, &buf[off + 8]); lba += c; totalcount += c; ranges++; count -= c; lastcount = c; } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (ranges >= softc->unmap_max_ranges || totalcount + bp1->bio_bcount / softc->params.secsize > softc->unmap_max_lba) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_ulto2b(ranges * 16 + 6, &buf[0]); scsi_ulto2b(ranges * 16, &buf[2]); scsi_unmap(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/0, /*data_ptr*/ buf, /*dxfer_len*/ ranges * 16 + 8, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static void da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc = (struct da_softc *)periph->softc; struct bio *bp1; uint8_t *buf = softc->unmap_buf; uint64_t lastlba = (uint64_t)-1; uint64_t count; uint64_t lba; uint32_t lastcount = 0, c, requestcount; int ranges = 0, off, block_count; bzero(softc->unmap_buf, sizeof(softc->unmap_buf)); bp1 = bp; do { if (bp1 != bp) /* XXX imp XXX */ bioq_insert_tail(&softc->delete_run_queue, bp1); lba = bp1->bio_pblkno; count = bp1->bio_bcount / softc->params.secsize; requestcount = count; /* Try to extend the previous range. 
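
da_delete_unmap() above and da_delete_trim() below fill the same unmap_buf, but with different wire formats: UNMAP takes an 8-byte header followed by 16-byte big-endian descriptors (8-byte LBA, 4-byte block count, 4 reserved bytes), while ATA DSM TRIM takes packed 8-byte little-endian entries holding a 48-bit LBA and a 16-bit count. A minimal sketch of the two encodings (illustrative helpers, not part of this change):

/*
 * Sketch only: encode one deallocation range in each format.  'off' is
 * the byte offset of the descriptor within the buffer.
 */
static void
encode_unmap_range(uint8_t *buf, uint32_t off, uint64_t lba, uint32_t nblks)
{
	scsi_u64to8b(lba, &buf[off + 0]);	/* big-endian LBA */
	scsi_ulto4b(nblks, &buf[off + 8]);	/* big-endian count */
	/* Bytes 12..15 of the descriptor are reserved. */
}

static void
encode_dsm_range(uint8_t *buf, uint32_t off, uint64_t lba, uint16_t nblks)
{
	/* 48-bit LBA in bytes 0..5, 16-bit count in bytes 6..7. */
	le64enc(&buf[off], (lba & 0xffffffffffffULL) |
	    ((uint64_t)nblks << 48));
}

The two scsi_ulto2b() calls above then fill the UNMAP header with the data length (ranges * 16 + 6) and the block descriptor length (ranges * 16).
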
*/ if (lba == lastlba) { c = omin(count, ATA_DSM_RANGE_MAX - lastcount); lastcount += c; off = (ranges - 1) * 8; buf[off + 6] = lastcount & 0xff; buf[off + 7] = (lastcount >> 8) & 0xff; count -= c; lba += c; } while (count > 0) { c = omin(count, ATA_DSM_RANGE_MAX); off = ranges * 8; buf[off + 0] = lba & 0xff; buf[off + 1] = (lba >> 8) & 0xff; buf[off + 2] = (lba >> 16) & 0xff; buf[off + 3] = (lba >> 24) & 0xff; buf[off + 4] = (lba >> 32) & 0xff; buf[off + 5] = (lba >> 40) & 0xff; buf[off + 6] = c & 0xff; buf[off + 7] = (c >> 8) & 0xff; lba += c; ranges++; count -= c; lastcount = c; if (count != 0 && ranges == softc->trim_max_ranges) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], requestcount, (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX); break; } } lastlba = lba; bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (bp1->bio_bcount / softc->params.secsize > (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); block_count = howmany(ranges, ATA_DSM_BLK_RANGES); scsi_ata_trim(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, block_count, /*data_ptr*/buf, /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } /* * We calculate ws_max_blks here based off d_delmaxsize instead * of using softc->ws_max_blks as it is absolute max for the * device not the protocol max which may well be lower. */ static void da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp) { struct da_softc *softc; struct bio *bp1; uint64_t ws_max_blks; uint64_t lba; uint64_t count; /* forward compat with WS32 */ softc = (struct da_softc *)periph->softc; ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize; lba = bp->bio_pblkno; count = 0; bp1 = bp; do { if (bp1 != bp)//XXX imp XXX bioq_insert_tail(&softc->delete_run_queue, bp1); count += bp1->bio_bcount / softc->params.secsize; if (count > ws_max_blks) { xpt_print(periph->path, "%s issuing short delete %ld > %ld\n", da_delete_method_desc[softc->delete_method], count, ws_max_blks); count = omin(count, ws_max_blks); break; } bp1 = cam_iosched_next_trim(softc->cam_iosched); if (bp1 == NULL) break; if (lba + count != bp1->bio_pblkno || count + bp1->bio_bcount / softc->params.secsize > ws_max_blks) { cam_iosched_put_back_trim(softc->cam_iosched, bp1); break; } } while (1); scsi_write_same(&ccb->csio, /*retries*/da_retry_count, /*cbfcnp*/dadone, /*tag_action*/MSG_SIMPLE_Q_TAG, /*byte2*/softc->delete_method == DA_DELETE_ZERO ? 0 : SWS_UNMAP, softc->delete_method == DA_DELETE_WS16 ? 
16 : 10, /*lba*/lba, /*block_count*/count, /*data_ptr*/ __DECONST(void *, zero_region), /*dxfer_len*/ softc->params.secsize, /*sense_len*/SSD_FULL_SIZE, da_default_timeout * 1000); ccb->ccb_h.ccb_state = DA_CCB_DELETE; ccb->ccb_h.flags |= CAM_UNLOCKED; cam_iosched_submit_trim(softc->cam_iosched); } static int cmd6workaround(union ccb *ccb) { struct scsi_rw_6 cmd6; struct scsi_rw_10 *cmd10; struct da_softc *softc; u_int8_t *cdb; struct bio *bp; int frozen; cdb = ccb->csio.cdb_io.cdb_bytes; softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc; if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) { da_delete_methods old_method = softc->delete_method; /* * Typically there are two reasons for failure here * 1. Delete method was detected as supported but isn't * 2. Delete failed due to invalid params e.g. too big * * While we will attempt to choose an alternative delete method * this may result in short deletes if the existing delete * requests from geom are big for the new method chosen. * * This method assumes that the error which triggered this * will not retry the io otherwise a panic will occur */ dadeleteflag(softc, old_method, 0); dadeletemethodchoose(softc, DA_DELETE_DISABLE); if (softc->delete_method == DA_DELETE_DISABLE) xpt_print(ccb->ccb_h.path, "%s failed, disabling BIO_DELETE\n", da_delete_method_desc[old_method]); else xpt_print(ccb->ccb_h.path, "%s failed, switching to %s BIO_DELETE\n", da_delete_method_desc[old_method], da_delete_method_desc[softc->delete_method]); while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL) cam_iosched_queue_work(softc->cam_iosched, bp); cam_iosched_queue_work(softc->cam_iosched, (struct bio *)ccb->ccb_h.ccb_bp); ccb->ccb_h.ccb_bp = NULL; return (0); } /* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == PREVENT_ALLOW) && (softc->quirks & DA_Q_NO_PREVENT) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "PREVENT ALLOW MEDIUM REMOVAL not supported.\n"); softc->quirks |= DA_Q_NO_PREVENT; return (0); } /* Detect unsupported SYNCHRONIZE CACHE(10). */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 && (*cdb == SYNCHRONIZE_CACHE) && (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { if (bootverbose) xpt_print(ccb->ccb_h.path, "SYNCHRONIZE CACHE(10) not supported.\n"); softc->quirks |= DA_Q_NO_SYNC_CACHE; softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE; return (0); } /* Translation only possible if CDB is an array and cmd is R/W6 */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 || (*cdb != READ_6 && *cdb != WRITE_6)) return 0; xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, " "increasing minimum_cmd_size to 10.\n"); softc->minimum_cmd_size = 10; bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6)); cmd10 = (struct scsi_rw_10 *)cdb; cmd10->opcode = (cmd6.opcode == READ_6) ? 
READ_10 : WRITE_10; cmd10->byte2 = 0; scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr); cmd10->reserved = 0; scsi_ulto2b(cmd6.length, cmd10->length); cmd10->control = cmd6.control; ccb->csio.cdb_len = sizeof(*cmd10); /* Requeue request, unfreezing queue if necessary */ frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0; ccb->ccb_h.status = CAM_REQUEUE_REQ; xpt_action(ccb); if (frozen) { cam_release_devq(ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } return (ERESTART); } static void +dazonedone(struct cam_periph *periph, union ccb *ccb) +{ + struct da_softc *softc; + struct bio *bp; + + softc = periph->softc; + bp = (struct bio *)ccb->ccb_h.ccb_bp; + + switch (bp->bio_zone.zone_cmd) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: + break; + case DISK_ZONE_REPORT_ZONES: { + uint32_t avail_len; + struct disk_zone_report *rep; + struct scsi_report_zones_hdr *hdr; + struct scsi_report_zones_desc *desc; + struct disk_zone_rep_entry *entry; + uint32_t num_alloced, hdr_len, num_avail; + uint32_t num_to_fill, i; + int ata; + + rep = &bp->bio_zone.zone_params.report; + avail_len = ccb->csio.dxfer_len - ccb->csio.resid; + /* + * Note that bio_resid isn't normally used for zone + * commands, but it is used by devstat_end_transaction_bio() + * to determine how much data was transferred. Because + * the size of the SCSI/ATA data structures is different + * than the size of the BIO interface structures, the + * amount of data actually transferred from the drive will + * be different than the amount of data transferred to + * the user. + */ + bp->bio_resid = ccb->csio.resid; + num_alloced = rep->entries_allocated; + hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr; + if (avail_len < sizeof(*hdr)) { + /* + * Is there a better error than EIO here? We asked + * for at least the header, and we got less than + * that. + */ + bp->bio_error = EIO; + bp->bio_flags |= BIO_ERROR; + bp->bio_resid = bp->bio_bcount; + break; + } + + if (softc->zone_interface == DA_ZONE_IF_ATA_PASS) + ata = 1; + else + ata = 0; + + hdr_len = ata ? le32dec(hdr->length) : + scsi_4btoul(hdr->length); + if (hdr_len > 0) + rep->entries_available = hdr_len / sizeof(*desc); + else + rep->entries_available = 0; + /* + * NOTE: using the same values for the BIO version of the + * same field as the SCSI/ATA values. This means we could + * get some additional values that aren't defined in bio.h + * if more values of the same field are defined later. + */ + rep->header.same = hdr->byte4 & SRZ_SAME_MASK; + rep->header.maximum_lba = ata ? le64dec(hdr->maximum_lba) : + scsi_8btou64(hdr->maximum_lba); + /* + * If the drive reports no entries that match the query, + * we're done. + */ + if (hdr_len == 0) { + rep->entries_filled = 0; + break; + } + + num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc), + hdr_len / sizeof(*desc)); + /* + * If the drive didn't return any data, then we're done. + */ + if (num_avail == 0) { + rep->entries_filled = 0; + break; + } + + num_to_fill = min(num_avail, rep->entries_allocated); + /* + * If the user didn't allocate any entries for us to fill, + * we're done. + */ + if (num_to_fill == 0) { + rep->entries_filled = 0; + break; + } + + for (i = 0, desc = &hdr->desc_list[0], entry = &rep->entries[0]; + i < num_to_fill; i++, desc++, entry++) { + /* + * NOTE: we're mapping the values here directly + * from the SCSI/ATA bit definitions to the bio.h + * definitions. 
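
The descriptor accounting above is three successive clamps: what the drive says matches the query (hdr_len), what actually arrived on the wire (avail_len), and what the caller allocated. A worked sketch with example numbers, assuming the 64-byte ZBC header and descriptor sizes (illustrative helper, not part of this change):

/*
 * Example: a drive reporting 10000 matching zones (hdr_len = 640000),
 * a 128 KB transfer (avail_len = 131072), and a caller that allocated
 * 1024 entries: min(min(2047, 10000), 1024) = 1024 entries copied out.
 */
static uint32_t
zones_to_fill(uint32_t avail_len, uint32_t hdr_len, uint32_t allocated)
{
	uint32_t hdr_size = sizeof(struct scsi_report_zones_hdr);
	uint32_t desc_size = sizeof(struct scsi_report_zones_desc);
	uint32_t num_avail;

	num_avail = min((avail_len - hdr_size) / desc_size,	/* arrived */
	    hdr_len / desc_size);				/* reported */
	return (min(num_avail, allocated));			/* allocated */
}
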
There is also a warning in + * disk_zone.h, but the impact is that if + * additional values are added in the SCSI/ATA + * specs these will be visible to consumers of + * this interface. + */ + entry->zone_type = desc->zone_type & SRZ_TYPE_MASK; + entry->zone_condition = + (desc->zone_flags & SRZ_ZONE_COND_MASK) >> + SRZ_ZONE_COND_SHIFT; + entry->zone_flags |= desc->zone_flags & + (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET); + entry->zone_length = + ata ? le64dec(desc->zone_length) : + scsi_8btou64(desc->zone_length); + entry->zone_start_lba = + ata ? le64dec(desc->zone_start_lba) : + scsi_8btou64(desc->zone_start_lba); + entry->write_pointer_lba = + ata ? le64dec(desc->write_pointer_lba) : + scsi_8btou64(desc->write_pointer_lba); + } + rep->entries_filled = num_to_fill; + break; + } + case DISK_ZONE_GET_PARAMS: + default: + /* + * In theory we should not get a GET_PARAMS bio, since it + * should be handled without queueing the command to the + * drive. + */ + panic("%s: Invalid zone command %d", __func__, + bp->bio_zone.zone_cmd); + break; + } + + if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) + free(ccb->csio.data_ptr, M_SCSIDA); +} + +static void dadone(struct cam_periph *periph, union ccb *done_ccb) { struct da_softc *softc; struct ccb_scsiio *csio; u_int32_t priority; da_ccb_state state; softc = (struct da_softc *)periph->softc; priority = done_ccb->ccb_h.pinfo.priority; CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n")); csio = &done_ccb->csio; state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK; switch (state) { case DA_CCB_BUFFER_IO: case DA_CCB_DELETE: { struct bio *bp, *bp1; cam_periph_lock(periph); bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { int error; int sf; if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0) sf = SF_RETRY_UA; else sf = 0; error = daerror(done_ccb, CAM_RETRY_SELTO, sf); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ cam_periph_unlock(periph); return; } bp = (struct bio *)done_ccb->ccb_h.ccb_bp; if (error != 0) { int queued_error; /* * return all queued I/O with EIO, so that * the client can retry these I/Os in the * proper order should it attempt to recover. */ queued_error = EIO; if (error == ENXIO && (softc->flags & DA_FLAG_PACK_INVALID)== 0) { /* * Catastrophic error. Mark our pack as * invalid. */ /* * XXX See if this is really a media * XXX change first? 
*/ xpt_print(periph->path, "Invalidating pack\n"); softc->flags |= DA_FLAG_PACK_INVALID; #ifdef CAM_IO_STATS softc->invalidations++; #endif queued_error = ENXIO; } cam_iosched_flush(softc->cam_iosched, NULL, queued_error); if (bp != NULL) { bp->bio_error = error; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; } } else if (bp != NULL) { if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; bp->bio_error = 0; if (bp->bio_resid != 0) bp->bio_flags |= BIO_ERROR; } if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } else if (bp != NULL) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) panic("REQ_CMP with QFRZN"); - if (state == DA_CCB_DELETE) + if (bp->bio_cmd == BIO_ZONE) + dazonedone(periph, done_ccb); + else if (state == DA_CCB_DELETE) bp->bio_resid = 0; else bp->bio_resid = csio->resid; - if (csio->resid > 0) + if ((csio->resid > 0) + && (bp->bio_cmd != BIO_ZONE)) bp->bio_flags |= BIO_ERROR; if (softc->error_inject != 0) { bp->bio_error = softc->error_inject; bp->bio_resid = bp->bio_bcount; bp->bio_flags |= BIO_ERROR; softc->error_inject = 0; } } LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); if (LIST_EMPTY(&softc->pending_ccbs)) softc->flags |= DA_FLAG_WAS_OTAG; cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb); xpt_release_ccb(done_ccb); if (state == DA_CCB_DELETE) { TAILQ_HEAD(, bio) queue; TAILQ_INIT(&queue); TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue); softc->delete_run_queue.insert_point = NULL; /* * Normally, the xpt_release_ccb() above would make sure * that when we have more work to do, that work would * get kicked off. However, we specifically keep * delete_running set to 0 before the call above to * allow other I/O to progress when many BIO_DELETE * requests are pushed down. We set delete_running to 0 * and call daschedule again so that we don't stall if * there are no other I/Os pending apart from BIO_DELETEs. */ cam_iosched_trim_done(softc->cam_iosched); daschedule(periph); cam_periph_unlock(periph); while ((bp1 = TAILQ_FIRST(&queue)) != NULL) { TAILQ_REMOVE(&queue, bp1, bio_queue); bp1->bio_error = bp->bio_error; if (bp->bio_flags & BIO_ERROR) { bp1->bio_flags |= BIO_ERROR; bp1->bio_resid = bp1->bio_bcount; } else bp1->bio_resid = 0; biodone(bp1); } } else { daschedule(periph); cam_periph_unlock(periph); } if (bp != NULL) biodone(bp); return; } case DA_CCB_PROBE_RC: case DA_CCB_PROBE_RC16: { struct scsi_read_capacity_data *rdcap; struct scsi_read_capacity_data_long *rcaplong; char announce_buf[80]; int lbp; lbp = 0; rdcap = NULL; rcaplong = NULL; if (state == DA_CCB_PROBE_RC) rdcap =(struct scsi_read_capacity_data *)csio->data_ptr; else rcaplong = (struct scsi_read_capacity_data_long *) csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { struct disk_params *dp; uint32_t block_size; uint64_t maxsector; u_int lalba; /* Lowest aligned LBA. */ if (state == DA_CCB_PROBE_RC) { block_size = scsi_4btoul(rdcap->length); maxsector = scsi_4btoul(rdcap->addr); lalba = 0; /* * According to SBC-2, if the standard 10 * byte READ CAPACITY command returns 2^32, * we should issue the 16 byte version of * the command, since the device in question * has more sectors than can be represented * with the short version of the command. 
*/ if (maxsector == 0xffffffff) { free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC16; xpt_schedule(periph, priority); return; } } else { block_size = scsi_4btoul(rcaplong->length); maxsector = scsi_8btou64(rcaplong->addr); lalba = scsi_2btoul(rcaplong->lalba_lbp); } /* * Because the GEOM code will panic if we * give it an 'illegal' value, we avoid that * here. */ if (block_size == 0) { block_size = 512; if (maxsector == 0) maxsector = -1; } if (block_size >= MAXPHYS) { xpt_print(periph->path, "unsupportable block size %ju\n", (uintmax_t) block_size); announce_buf[0] = '\0'; cam_periph_invalidate(periph); } else { /* * We pass rcaplong into dasetgeom(), * because it will only use it if it is * non-NULL. */ dasetgeom(periph, block_size, maxsector, rcaplong, sizeof(*rcaplong)); lbp = (lalba & SRC16_LBPME_A); dp = &softc->params; snprintf(announce_buf, sizeof(announce_buf), "%juMB (%ju %u byte sectors)", ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024), (uintmax_t)dp->sectors, dp->secsize); } } else { int error; announce_buf[0] = '\0'; /* * Retry any UNIT ATTENTION type errors. They * are expected at boot. */ error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) { /* * A retry was scheduled, so * just return. */ return; } else if (error != 0) { int asc, ascq; int sense_key, error_code; int have_sense; cam_status status; struct ccb_getdev cgd; /* Don't wedge this device's queue */ status = done_ccb->ccb_h.status; if ((status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path, CAM_PRIORITY_NORMAL); cgd.ccb_h.func_code = XPT_GDEV_TYPE; xpt_action((union ccb *)&cgd); if (scsi_extract_sense_ccb(done_ccb, &error_code, &sense_key, &asc, &ascq)) have_sense = TRUE; else have_sense = FALSE; /* * If we tried READ CAPACITY(16) and failed, * fall back to READ CAPACITY(10). */ if ((state == DA_CCB_PROBE_RC16) && (softc->flags & DA_FLAG_CAN_RC16) && (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) || ((have_sense) && (error_code == SSD_CURRENT_ERROR) && (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) { softc->flags &= ~DA_FLAG_CAN_RC16; free(rdcap, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; } /* * Attach to anything that claims to be a * direct access or optical disk device, * as long as it doesn't return a "Logical * unit not supported" (0x25) error. */ if ((have_sense) && (asc != 0x25) && (error_code == SSD_CURRENT_ERROR)) { const char *sense_key_desc; const char *asc_desc; dasetgeom(periph, 512, -1, NULL, 0); scsi_sense_desc(sense_key, asc, ascq, &cgd.inq_data, &sense_key_desc, &asc_desc); snprintf(announce_buf, sizeof(announce_buf), "Attempt to query device " "size failed: %s, %s", sense_key_desc, asc_desc); } else { if (have_sense) scsi_sense_print( &done_ccb->csio); else { xpt_print(periph->path, "got CAM status %#x\n", done_ccb->ccb_h.status); } xpt_print(periph->path, "fatal error, " "failed to attach to device\n"); /* * Free up resources. */ cam_periph_invalidate(periph); } } } free(csio->data_ptr, M_SCSIDA); if (announce_buf[0] != '\0' && ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) { /* * Create our sysctl variables, now that we know * we have successfully attached. 
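
The capacity probe above can move in both directions: READ CAPACITY(10) reporting 0xffffffff escalates to the 16-byte command, and a READ CAPACITY(16) rejected as ILLEGAL REQUEST falls back to the 10-byte one, clearing DA_FLAG_CAN_RC16 so it is not tried again. Restated as a sketch (hypothetical helper with simplified return values; the real code also frees buffers, reschedules the periph, and picks DA_STATE_PROBE_LBP or DA_STATE_PROBE_BDC afterwards):

/*
 * Sketch of the two capacity-probe transitions handled above.
 */
static int
next_rc_state(struct da_softc *softc, int was_rc16, uint64_t maxsector,
    int illegal_req)
{
	if (!was_rc16 && maxsector == 0xffffffff)
		return (DA_STATE_PROBE_RC16);	/* need 64-bit LBA count */
	if (was_rc16 && illegal_req) {
		softc->flags &= ~DA_FLAG_CAN_RC16;
		return (DA_STATE_PROBE_RC);	/* device lacks RC16 */
	}
	return (DA_STATE_PROBE_LBP);		/* capacity known; move on */
}
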
*/ /* increase the refcount */ if (cam_periph_acquire(periph) == CAM_REQ_CMP) { taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task); xpt_announce_periph(periph, announce_buf); xpt_announce_quirks(periph, softc->quirks, DA_Q_BIT_STRING); } else { xpt_print(periph->path, "fatal error, " "could not acquire reference count\n"); } } /* We already probed the device. */ if (softc->flags & DA_FLAG_PROBED) { daprobedone(periph, done_ccb); return; } /* Ensure re-probe doesn't see old delete. */ softc->delete_available = 0; dadeleteflag(softc, DA_DELETE_ZERO, 1); if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) { /* * Based on older SBC-3 spec revisions * any of the UNMAP methods "may" be * available via LBP given this flag so * we flag all of them as available and * then remove those which further * probes confirm aren't available * later. * * We could also check readcap(16) p_type * flag to exclude one or more invalid * write same (X) types here */ dadeleteflag(softc, DA_DELETE_WS16, 1); dadeleteflag(softc, DA_DELETE_WS10, 1); dadeleteflag(softc, DA_DELETE_UNMAP, 1); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_LBP; xpt_schedule(periph, priority); return; } xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_LBP: { struct scsi_vpd_logical_block_prov *lbp; lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { /* * T10/1799-D Revision 31 states at least one of these * must be supported but we don't currently enforce this. */ dadeleteflag(softc, DA_DELETE_WS16, (lbp->flags & SVPD_LBP_WS16)); dadeleteflag(softc, DA_DELETE_WS10, (lbp->flags & SVPD_LBP_WS10)); dadeleteflag(softc, DA_DELETE_UNMAP, (lbp->flags & SVPD_LBP_UNMAP)); } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure indicates we don't support any SBC-3 * delete methods with UNMAP */ } } free(lbp, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BLK_LIMITS; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BLK_LIMITS: { struct scsi_vpd_block_limits *block_limits; block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint32_t max_txfer_len = scsi_4btoul( block_limits->max_txfer_len); uint32_t max_unmap_lba_cnt = scsi_4btoul( block_limits->max_unmap_lba_cnt); uint32_t max_unmap_blk_cnt = scsi_4btoul( block_limits->max_unmap_blk_cnt); uint64_t ws_max_blks = scsi_8btou64( block_limits->max_write_same_length); if (max_txfer_len != 0) { softc->disk->d_maxsize = MIN(softc->maxio, (off_t)max_txfer_len * softc->params.secsize); } /* * We should already support UNMAP but we check lba * and block count to be sure */ if (max_unmap_lba_cnt != 0x00L && max_unmap_blk_cnt != 0x00L) { softc->unmap_max_lba = max_unmap_lba_cnt; softc->unmap_max_ranges = min(max_unmap_blk_cnt, UNMAP_MAX_RANGES); } else { /* * Unexpected UNMAP limits which means the * device doesn't actually support UNMAP */ dadeleteflag(softc, DA_DELETE_UNMAP, 0); } if (ws_max_blks != 0x00L) softc->ws_max_blks = ws_max_blks; } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else 
if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } /* * Failure here doesn't mean UNMAP is not * supported as this is an optional page. */ softc->unmap_max_lba = 1; softc->unmap_max_ranges = 1; } } free(block_limits, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_BDC; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_BDC: { - struct scsi_vpd_block_characteristics *bdc; + struct scsi_vpd_block_device_characteristics *bdc; - bdc = (struct scsi_vpd_block_characteristics *)csio->data_ptr; + bdc = (struct scsi_vpd_block_device_characteristics *) + csio->data_ptr; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + uint32_t valid_len; + /* * Disable queue sorting for non-rotational media * by default. */ u_int16_t old_rate = softc->disk->d_rotation_rate; - softc->disk->d_rotation_rate = - scsi_2btoul(bdc->medium_rotation_rate); - if (softc->disk->d_rotation_rate == - SVPD_BDC_RATE_NON_ROTATING) { - cam_iosched_set_sort_queue(softc->cam_iosched, 0); - softc->rotating = 0; + valid_len = csio->dxfer_len - csio->resid; + if (SBDC_IS_PRESENT(bdc, valid_len, + medium_rotation_rate)) { + softc->disk->d_rotation_rate = + scsi_2btoul(bdc->medium_rotation_rate); + if (softc->disk->d_rotation_rate == + SVPD_BDC_RATE_NON_ROTATING) { + cam_iosched_set_sort_queue( + softc->cam_iosched, 0); + softc->rotating = 0; + } + if (softc->disk->d_rotation_rate != old_rate) { + disk_attr_changed(softc->disk, + "GEOM::rotation_rate", M_NOWAIT); + } } - if (softc->disk->d_rotation_rate != old_rate) { - disk_attr_changed(softc->disk, - "GEOM::rotation_rate", M_NOWAIT); + if ((SBDC_IS_PRESENT(bdc, valid_len, flags)) + && (softc->zone_mode == DA_ZONE_NONE)) { + int ata_proto; + + if (scsi_vpd_supported_page(periph, + SVPD_ATA_INFORMATION)) + ata_proto = 1; + else + ata_proto = 0; + + /* + * The Zoned field will only be set for + * Drive Managed and Host Aware drives. If + * they are Host Managed, the device type + * in the standard INQUIRY data should be + * set to T_ZBC_HM (0x14). + */ + if ((bdc->flags & SVPD_ZBC_MASK) == + SVPD_HAW_ZBC) { + softc->zone_mode = DA_ZONE_HOST_AWARE; + softc->zone_interface = (ata_proto) ? + DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; + } else if ((bdc->flags & SVPD_ZBC_MASK) == + SVPD_DM_ZBC) { + softc->zone_mode =DA_ZONE_DRIVE_MANAGED; + softc->zone_interface = (ata_proto) ? 
+ DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI; + } else if ((bdc->flags & SVPD_ZBC_MASK) != + SVPD_ZBC_NR) { + xpt_print(periph->path, "Unknown zoned " + "type %#x\n", + bdc->flags & SVPD_ZBC_MASK); + } } } else { int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(bdc, M_SCSIDA); xpt_release_ccb(done_ccb); softc->state = DA_STATE_PROBE_ATA; xpt_schedule(periph, priority); return; } case DA_CCB_PROBE_ATA: { int i; struct ata_params *ata_params; + int continue_probe; + int error; uint16_t *ptr; ata_params = (struct ata_params *)csio->data_ptr; ptr = (uint16_t *)ata_params; + continue_probe = 0; + error = 0; if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { uint16_t old_rate; for (i = 0; i < sizeof(*ata_params) / 2; i++) ptr[i] = le16toh(ptr[i]); if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM && (softc->quirks & DA_Q_NO_UNMAP) == 0) { dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1); if (ata_params->max_dsm_blocks != 0) softc->trim_max_ranges = min( softc->trim_max_ranges, ata_params->max_dsm_blocks * ATA_DSM_BLK_RANGES); } /* * Disable queue sorting for non-rotational media * by default. */ old_rate = softc->disk->d_rotation_rate; softc->disk->d_rotation_rate = ata_params->media_rotation_rate; if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) { cam_iosched_set_sort_queue(softc->cam_iosched, 0); softc->rotating = 0; } if (softc->disk->d_rotation_rate != old_rate) { disk_attr_changed(softc->disk, "GEOM::rotation_rate", M_NOWAIT); } + + if (ata_params->capabilities1 & ATA_SUPPORT_DMA) + softc->flags |= DA_FLAG_CAN_ATA_DMA; + + if (ata_params->support.extension & + ATA_SUPPORT_GENLOG) + softc->flags |= DA_FLAG_CAN_ATA_LOG; + + /* + * At this point, if we have a SATA host aware drive, + * we communicate via ATA passthrough unless the + * SAT layer supports ZBC -> ZAC translation. In + * that case, we use the SAT layer's translation + * instead. + */ + /* + * XXX KDM figure out how to detect a host managed + * SATA drive. + */ + if (softc->zone_mode == DA_ZONE_NONE) { + /* + * Note that we don't override the zone + * mode or interface if it has already been + * set. This is because it has either been + * set as a quirk, or when we probed the + * SCSI Block Device Characteristics page, + * the zoned field was set. The latter + * means that the SAT layer supports ZBC to + * ZAC translation, and we would prefer to + * use that if it is available. 
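
The precedence rule spelled out in the comments above amounts to a three-way choice: an already-set zone mode (from a quirk, or from the BDC page indicating a translating SAT layer) always wins, and raw ATA passthrough is only the fallback. As a compact sketch (hypothetical helper; the driver open-codes this across the BDC and ATA probe steps):

/*
 * Sketch of the zone-interface selection policy described above.
 */
static int
choose_zone_interface(struct da_softc *softc, int sat_translates_zbc)
{
	if (softc->zone_mode != DA_ZONE_NONE)
		return (softc->zone_interface); /* quirk or BDC page won */
	if (sat_translates_zbc)
		return (DA_ZONE_IF_ATA_SAT);	/* prefer SAT translation */
	return (DA_ZONE_IF_ATA_PASS);		/* raw ZAC passthrough */
}
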
+ */ + if ((ata_params->support3 & + ATA_SUPPORT_ZONE_MASK) == + ATA_SUPPORT_ZONE_HOST_AWARE) { + softc->zone_mode = DA_ZONE_HOST_AWARE; + softc->zone_interface = + DA_ZONE_IF_ATA_PASS; + } else if ((ata_params->support3 & + ATA_SUPPORT_ZONE_MASK) == + ATA_SUPPORT_ZONE_DEV_MANAGED) { + softc->zone_mode =DA_ZONE_DRIVE_MANAGED; + softc->zone_interface = + DA_ZONE_IF_ATA_PASS; + } + } + } else { - int error; error = daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA|SF_NO_PRINT); if (error == ERESTART) return; else if (error != 0) { - if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { /* Don't wedge this device's queue */ cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } } } free(ata_params, M_SCSIDA); + if ((softc->zone_mode == DA_ZONE_HOST_AWARE) + || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) { + /* + * If the ATA IDENTIFY failed, we could be talking + * to a SCSI drive, although that seems unlikely, + * since the drive did report that it supported the + * ATA Information VPD page. If the ATA IDENTIFY + * succeeded, and the SAT layer doesn't support + * ZBC -> ZAC translation, continue on to get the + * directory of ATA logs, and complete the rest of + * the ZAC probe. If the SAT layer does support + * ZBC -> ZAC translation, we want to use that, + * and we'll probe the SCSI Zoned Block Device + * Characteristics VPD page next. + */ + if ((error == 0) + && (softc->flags & DA_FLAG_CAN_ATA_LOG) + && (softc->zone_interface == DA_ZONE_IF_ATA_PASS)) + softc->state = DA_STATE_PROBE_ATA_LOGDIR; + else + softc->state = DA_STATE_PROBE_ZONE; + continue_probe = 1; + } + if (continue_probe != 0) { + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + return; + } else + daprobedone(periph, done_ccb); + return; + } + case DA_CCB_PROBE_ATA_LOGDIR: + { + int error; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + error = 0; + softc->valid_logdir_len = 0; + bzero(&softc->ata_logdir, sizeof(softc->ata_logdir)); + softc->valid_logdir_len = + csio->dxfer_len - csio->resid; + if (softc->valid_logdir_len > 0) + bcopy(csio->data_ptr, &softc->ata_logdir, + min(softc->valid_logdir_len, + sizeof(softc->ata_logdir))); + /* + * Figure out whether the Identify Device log is + * supported. The General Purpose log directory + * has a header, and lists the number of pages + * available for each GP log identified by the + * offset into the list. + */ + if ((softc->valid_logdir_len >= + ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t))) + && (le16dec(softc->ata_logdir.header) == + ATA_GP_LOG_DIR_VERSION) + && (le16dec(&softc->ata_logdir.num_pages[ + (ATA_IDENTIFY_DATA_LOG * + sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){ + softc->flags |= DA_FLAG_CAN_ATA_IDLOG; + } else { + softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; + } + } else { + error = daerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA log directory, + * then ATA logs are effectively not + * supported even if the bit is set in the + * identify data. 
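
The log directory test above relies on the fixed layout of the ATA General Purpose log directory: a 512-byte structure whose first 16-bit word is the version (ATA_GP_LOG_DIR_VERSION) and whose following little-endian words give the page count for each log address, so the count for log N sits N words in, which is (N * 2) - 2 bytes into the num_pages[] array used above. As an illustrative sketch (helper not part of this change):

/*
 * Number of pages the directory advertises for GP log 'log_addr'.
 */
static u_int
gp_log_pages(struct ata_gp_log_dir *dir, uint8_t log_addr)
{
	if (log_addr == 0 ||
	    le16dec(dir->header) != ATA_GP_LOG_DIR_VERSION)
		return (0);
	return (le16dec(&dir->num_pages[(log_addr * sizeof(uint16_t)) -
	    sizeof(uint16_t)]));
}
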
+ */ + softc->flags &= ~(DA_FLAG_CAN_ATA_LOG | + DA_FLAG_CAN_ATA_IDLOG); + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + + free(csio->data_ptr, M_SCSIDA); + + if ((error == 0) + && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) { + softc->state = DA_STATE_PROBE_ATA_IDDIR; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + return; + } daprobedone(periph, done_ccb); return; } + case DA_CCB_PROBE_ATA_IDDIR: + { + int error; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + off_t entries_offset, max_entries; + error = 0; + + softc->valid_iddir_len = 0; + bzero(&softc->ata_iddir, sizeof(softc->ata_iddir)); + softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP | + DA_FLAG_CAN_ATA_ZONE); + softc->valid_iddir_len = + csio->dxfer_len - csio->resid; + if (softc->valid_iddir_len > 0) + bcopy(csio->data_ptr, &softc->ata_iddir, + min(softc->valid_iddir_len, + sizeof(softc->ata_iddir))); + + entries_offset = + __offsetof(struct ata_identify_log_pages, entries); + max_entries = softc->valid_iddir_len - entries_offset; + if ((softc->valid_iddir_len > (entries_offset + 1)) + && (le64dec(softc->ata_iddir.header) == + ATA_IDLOG_REVISION) + && (softc->ata_iddir.entry_count > 0)) { + int num_entries, i; + + num_entries = softc->ata_iddir.entry_count; + num_entries = min(num_entries, + softc->valid_iddir_len - entries_offset); + for (i = 0; i < num_entries && + i < max_entries; i++) { + if (softc->ata_iddir.entries[i] == + ATA_IDL_SUP_CAP) + softc->flags |= + DA_FLAG_CAN_ATA_SUPCAP; + else if (softc->ata_iddir.entries[i] == + ATA_IDL_ZDI) + softc->flags |= + DA_FLAG_CAN_ATA_ZONE; + + if ((softc->flags & + DA_FLAG_CAN_ATA_SUPCAP) + && (softc->flags & + DA_FLAG_CAN_ATA_ZONE)) + break; + } + } + } else { + error = daerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA Identify Data log + * directory, then it effectively isn't + * supported even if the ATA Log directory + * lists a non-zero number of pages for + * this log. + */ + softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG; + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + + free(csio->data_ptr, M_SCSIDA); + + if ((error == 0) + && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) { + softc->state = DA_STATE_PROBE_ATA_SUP; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + return; + } + daprobedone(periph, done_ccb); + return; + } + case DA_CCB_PROBE_ATA_SUP: + { + int error; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + uint32_t valid_len; + size_t needed_size; + struct ata_identify_log_sup_cap *sup_cap; + error = 0; + + sup_cap = (struct ata_identify_log_sup_cap *) + csio->data_ptr; + valid_len = csio->dxfer_len - csio->resid; + needed_size = + __offsetof(struct ata_identify_log_sup_cap, + sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap); + if (valid_len >= needed_size) { + uint64_t zoned, zac_cap; + + zoned = le64dec(sup_cap->zoned_cap); + if (zoned & ATA_ZONED_VALID) { + /* + * This should have already been + * set, because this is also in the + * ATA identify data. 
+ */ + if ((zoned & ATA_ZONED_MASK) == + ATA_SUPPORT_ZONE_HOST_AWARE) + softc->zone_mode = + DA_ZONE_HOST_AWARE; + else if ((zoned & ATA_ZONED_MASK) == + ATA_SUPPORT_ZONE_DEV_MANAGED) + softc->zone_mode = + DA_ZONE_DRIVE_MANAGED; + } + + zac_cap = le64dec(sup_cap->sup_zac_cap); + if (zac_cap & ATA_SUP_ZAC_CAP_VALID) { + if (zac_cap & ATA_REPORT_ZONES_SUP) + softc->zone_flags |= + DA_ZONE_FLAG_RZ_SUP; + if (zac_cap & ATA_ND_OPEN_ZONE_SUP) + softc->zone_flags |= + DA_ZONE_FLAG_OPEN_SUP; + if (zac_cap & ATA_ND_CLOSE_ZONE_SUP) + softc->zone_flags |= + DA_ZONE_FLAG_CLOSE_SUP; + if (zac_cap & ATA_ND_FINISH_ZONE_SUP) + softc->zone_flags |= + DA_ZONE_FLAG_FINISH_SUP; + if (zac_cap & ATA_ND_RWP_SUP) + softc->zone_flags |= + DA_ZONE_FLAG_RWP_SUP; + } else { + /* + * This field was introduced in + * ACS-4, r08 on April 28th, 2015. + * If the drive firmware was written + * to an earlier spec, it won't have + * the field. So, assume all + * commands are supported. + */ + softc->zone_flags |= + DA_ZONE_FLAG_SUP_MASK; + } + + } + } else { + error = daerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + /* + * If we can't get the ATA Identify Data + * Supported Capabilities page, clear the + * flag... + */ + softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP; + /* + * And clear zone capabilities. + */ + softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK; + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + + free(csio->data_ptr, M_SCSIDA); + + if ((error == 0) + && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) { + softc->state = DA_STATE_PROBE_ATA_ZONE; + xpt_release_ccb(done_ccb); + xpt_schedule(periph, priority); + return; + } + daprobedone(periph, done_ccb); + return; + } + case DA_CCB_PROBE_ATA_ZONE: + { + int error; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + struct ata_zoned_info_log *zi_log; + uint32_t valid_len; + size_t needed_size; + + zi_log = (struct ata_zoned_info_log *)csio->data_ptr; + + valid_len = csio->dxfer_len - csio->resid; + needed_size = __offsetof(struct ata_zoned_info_log, + version_info) + 1 + sizeof(zi_log->version_info); + if (valid_len >= needed_size) { + uint64_t tmpvar; + + tmpvar = le64dec(zi_log->zoned_cap); + if (tmpvar & ATA_ZDI_CAP_VALID) { + if (tmpvar & ATA_ZDI_CAP_URSWRZ) + softc->zone_flags |= + DA_ZONE_FLAG_URSWRZ; + else + softc->zone_flags &= + ~DA_ZONE_FLAG_URSWRZ; + } + tmpvar = le64dec(zi_log->optimal_seq_zones); + if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) { + softc->zone_flags |= + DA_ZONE_FLAG_OPT_SEQ_SET; + softc->optimal_seq_zones = (tmpvar & + ATA_ZDI_OPT_SEQ_MASK); + } else { + softc->zone_flags &= + ~DA_ZONE_FLAG_OPT_SEQ_SET; + softc->optimal_seq_zones = 0; + } + + tmpvar =le64dec(zi_log->optimal_nonseq_zones); + if (tmpvar & ATA_ZDI_OPT_NS_VALID) { + softc->zone_flags |= + DA_ZONE_FLAG_OPT_NONSEQ_SET; + softc->optimal_nonseq_zones = + (tmpvar & ATA_ZDI_OPT_NS_MASK); + } else { + softc->zone_flags &= + ~DA_ZONE_FLAG_OPT_NONSEQ_SET; + softc->optimal_nonseq_zones = 0; + } + + tmpvar = le64dec(zi_log->max_seq_req_zones); + if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) { + softc->zone_flags |= + DA_ZONE_FLAG_MAX_SEQ_SET; + softc->max_seq_zones = + (tmpvar & ATA_ZDI_MAX_SEQ_MASK); + } else { + softc->zone_flags &= + ~DA_ZONE_FLAG_MAX_SEQ_SET; + softc->max_seq_zones = 0; + } + } + } else { + error = daerror(done_ccb, CAM_RETRY_SELTO, 
+ SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + softc->flags &= ~DA_FLAG_CAN_ATA_ZONE; + softc->flags &= ~DA_ZONE_FLAG_SET_MASK; + + if ((done_ccb->ccb_h.status & + CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + + } + free(csio->data_ptr, M_SCSIDA); + + daprobedone(periph, done_ccb); + return; + } + case DA_CCB_PROBE_ZONE: + { + int error; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + uint32_t valid_len; + size_t needed_len; + struct scsi_vpd_zoned_bdc *zoned_bdc; + + error = 0; + zoned_bdc = (struct scsi_vpd_zoned_bdc *) + csio->data_ptr; + valid_len = csio->dxfer_len - csio->resid; + needed_len = __offsetof(struct scsi_vpd_zoned_bdc, + max_seq_req_zones) + 1 + + sizeof(zoned_bdc->max_seq_req_zones); + if ((valid_len >= needed_len) + && (scsi_2btoul(zoned_bdc->page_length) >= + SVPD_ZBDC_PL)) { + if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ) + softc->zone_flags |= + DA_ZONE_FLAG_URSWRZ; + else + softc->zone_flags &= + ~DA_ZONE_FLAG_URSWRZ; + softc->optimal_seq_zones = + scsi_4btoul(zoned_bdc->optimal_seq_zones); + softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET; + softc->optimal_nonseq_zones = scsi_4btoul( + zoned_bdc->optimal_nonseq_zones); + softc->zone_flags |= + DA_ZONE_FLAG_OPT_NONSEQ_SET; + softc->max_seq_zones = + scsi_4btoul(zoned_bdc->max_seq_req_zones); + softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET; + } + /* + * All of the zone commands are mandatory for SCSI + * devices. + * + * XXX KDM this is valid as of September 2015. + * Re-check this assumption once the SAT spec is + * updated to support SCSI ZBC to ATA ZAC mapping. + * Since ATA allows zone commands to be reported + * as supported or not, this may not necessarily + * be true for an ATA device behind a SAT (SCSI to + * ATA Translation) layer. + */ + softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK; + } else { + error = daerror(done_ccb, CAM_RETRY_SELTO, + SF_RETRY_UA|SF_NO_PRINT); + if (error == ERESTART) + return; + else if (error != 0) { + if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } + } + } + daprobedone(periph, done_ccb); + return; + } case DA_CCB_DUMP: /* No-op. We're polling */ return; case DA_CCB_TUR: { if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { if (daerror(done_ccb, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART) return; if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(done_ccb->ccb_h.path, /*relsim_flags*/0, /*reduction*/0, /*timeout*/0, /*getcount_only*/0); } xpt_release_ccb(done_ccb); cam_periph_release_locked(periph); return; } default: break; } xpt_release_ccb(done_ccb); } static void dareprobe(struct cam_periph *periph) { struct da_softc *softc; cam_status status; softc = (struct da_softc *)periph->softc; /* Probe in progress; don't interfere. 
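
Whichever path filled them in (the ATA Zoned Device Information log above, or the SCSI Zoned Block Device Characteristics page here), the same softc fields come out: the URSWRZ flag, the optimal zone counts, and the maximum open sequential zones, each guarded by a _SET flag recording whether the device actually reported a value. A hypothetical consumer, sketched with sbuf(9) output and not part of this change, would check those flags before trusting the numbers:

/*
 * Hypothetical consumer sketch: only _SET-flagged values are
 * meaningful.
 */
static void
report_zone_params(struct da_softc *softc, struct sbuf *sb)
{
	if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET)
		sbuf_printf(sb, "optimal open seq zones: %ju\n",
		    (uintmax_t)softc->optimal_seq_zones);
	if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET)
		sbuf_printf(sb, "max open seq req zones: %ju\n",
		    (uintmax_t)softc->max_seq_zones);
	sbuf_printf(sb, "unrestricted read in SWR zones: %s\n",
	    (softc->zone_flags & DA_ZONE_FLAG_URSWRZ) ? "yes" : "no");
}
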
*/ if (softc->state != DA_STATE_NORMAL) return; status = cam_periph_acquire(periph); KASSERT(status == CAM_REQ_CMP, ("dareprobe: cam_periph_acquire failed")); if (softc->flags & DA_FLAG_CAN_RC16) softc->state = DA_STATE_PROBE_RC16; else softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, CAM_PRIORITY_DEV); } static int daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) { struct da_softc *softc; struct cam_periph *periph; int error, error_code, sense_key, asc, ascq; periph = xpt_path_periph(ccb->ccb_h.path); softc = (struct da_softc *)periph->softc; /* * Automatically detect devices that do not support * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs. */ error = 0; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) { error = cmd6workaround(ccb); } else if (scsi_extract_sense_ccb(ccb, &error_code, &sense_key, &asc, &ascq)) { if (sense_key == SSD_KEY_ILLEGAL_REQUEST) error = cmd6workaround(ccb); /* * If the target replied with CAPACITY DATA HAS CHANGED UA, * query the capacity and notify upper layers. */ else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x2A && ascq == 0x09) { xpt_print(periph->path, "Capacity data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x28 && ascq == 0x00) { softc->flags &= ~DA_FLAG_PROBED; disk_media_changed(softc->disk, M_NOWAIT); } else if (sense_key == SSD_KEY_UNIT_ATTENTION && asc == 0x3F && ascq == 0x03) { xpt_print(periph->path, "INQUIRY data has changed\n"); softc->flags &= ~DA_FLAG_PROBED; dareprobe(periph); sense_flags |= SF_NO_PRINT; } else if (sense_key == SSD_KEY_NOT_READY && asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) { softc->flags |= DA_FLAG_PACK_INVALID; disk_media_gone(softc->disk, M_NOWAIT); } } if (error == ERESTART) return (ERESTART); #ifdef CAM_IO_STATS switch (ccb->ccb_h.status & CAM_STATUS_MASK) { case CAM_CMD_TIMEOUT: softc->timeouts++; break; case CAM_REQ_ABORTED: case CAM_REQ_CMP_ERR: case CAM_REQ_TERMIO: case CAM_UNREC_HBA_ERROR: case CAM_DATA_RUN_ERR: softc->errors++; break; default: break; } #endif /* * XXX * Until we have a better way of doing pack validation, * don't treat UAs as errors. 
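
Beyond the generic retry logic, daerror()'s special cases above reduce to a small (sense key, ASC/ASCQ) dispatch. Summarized as an illustrative comment block (not code from the commit):

/*
 * Sense-code cases handled above:
 *
 *   Sense key        ASC/ASCQ   Meaning                  Action
 *   ---------        --------   -------                  ------
 *   ILLEGAL REQUEST  --         READ/WRITE(6) rejected   cmd6workaround()
 *   UNIT ATTENTION   2A/09      capacity data changed    dareprobe()
 *   UNIT ATTENTION   28/00      medium may have changed  disk_media_changed()
 *   UNIT ATTENTION   3F/03      INQUIRY data changed     dareprobe()
 *   NOT READY        3A/xx      medium not present       disk_media_gone()
 */
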
*/ sense_flags |= SF_RETRY_UA; if (softc->quirks & DA_Q_RETRY_BUSY) sense_flags |= SF_RETRY_BUSY; return(cam_periph_error(ccb, cam_flags, sense_flags, &softc->saved_ccb)); } static void damediapoll(void *arg) { struct cam_periph *periph = arg; struct da_softc *softc = periph->softc; if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) && LIST_EMPTY(&softc->pending_ccbs)) { if (cam_periph_acquire(periph) == CAM_REQ_CMP) { cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR); daschedule(periph); } } /* Queue us up again */ if (da_poll_period != 0) callout_schedule(&softc->mediapoll_c, da_poll_period * hz); } static void daprevent(struct cam_periph *periph, int action) { struct da_softc *softc; union ccb *ccb; int error; softc = (struct da_softc *)periph->softc; if (((action == PR_ALLOW) && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) || ((action == PR_PREVENT) && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { return; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_prevent(&ccb->csio, /*retries*/1, /*cbcfp*/dadone, MSG_SIMPLE_Q_TAG, action, SSD_FULL_SIZE, 5000); error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO, SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat); if (error == 0) { if (action == PR_ALLOW) softc->flags &= ~DA_FLAG_PACK_LOCKED; else softc->flags |= DA_FLAG_PACK_LOCKED; } xpt_release_ccb(ccb); } static void dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector, struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len) { struct ccb_calc_geometry ccg; struct da_softc *softc; struct disk_params *dp; u_int lbppbe, lalba; int error; softc = (struct da_softc *)periph->softc; dp = &softc->params; dp->secsize = block_len; dp->sectors = maxsector + 1; if (rcaplong != NULL) { lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE; lalba = scsi_2btoul(rcaplong->lalba_lbp); lalba &= SRC16_LALBA_A; } else { lbppbe = 0; lalba = 0; } if (lbppbe > 0) { dp->stripesize = block_len << lbppbe; dp->stripeoffset = (dp->stripesize - block_len * lalba) % dp->stripesize; } else if (softc->quirks & DA_Q_4K) { dp->stripesize = 4096; dp->stripeoffset = 0; } else { dp->stripesize = 0; dp->stripeoffset = 0; } /* * Have the controller provide us with a geometry * for this disk. The only time the geometry * matters is when we boot and the controller * is the only one knowledgeable enough to come * up with something that will make this a bootable * device. */ xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL); ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; ccg.block_size = dp->secsize; ccg.volume_size = dp->sectors; ccg.heads = 0; ccg.secs_per_track = 0; ccg.cylinders = 0; xpt_action((union ccb*)&ccg); if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { /* * We don't know what went wrong here- but just pick * a geometry so we don't have nasty things like divide * by zero. */ dp->heads = 255; dp->secs_per_track = 255; dp->cylinders = dp->sectors / (255 * 255); if (dp->cylinders == 0) { dp->cylinders = 1; } } else { dp->heads = ccg.heads; dp->secs_per_track = ccg.secs_per_track; dp->cylinders = ccg.cylinders; } /* * If the user supplied a read capacity buffer, and if it is * different than the previous buffer, update the data in the EDT. * If it's the same, we don't bother. This avoids sending an * update every time someone opens this device. 
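
dasetgeom()'s stripe computation below turns the READ CAPACITY(16) "logical blocks per physical block exponent" into a physical-sector (stripe) size, and the lowest-aligned-LBA field into the byte offset of the first physical-sector boundary. A worked example, not from the commit, assuming 512-byte logical blocks:

/*
 * secsize = 512, lbppbe = 3 (2^3 = 8 logical blocks per physical
 * block), lalba = 1 (LBA 1 is the first logical block aligned to a
 * physical boundary).
 */
u_int secsize = 512, lbppbe = 3, lalba = 1;
u_int stripesize, stripeoffset;

stripesize = secsize << lbppbe;		/* 4096 bytes */
stripeoffset = (stripesize - secsize * lalba) %
    stripesize;				/* (4096 - 512) % 4096 = 3584 */
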
*/ if ((rcaplong != NULL) && (bcmp(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)) != 0)) { struct ccb_dev_advinfo cdai; xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL); cdai.ccb_h.func_code = XPT_DEV_ADVINFO; cdai.buftype = CDAI_TYPE_RCAPLONG; cdai.flags = CDAI_FLAG_STORE; cdai.bufsiz = rcap_len; cdai.buf = (uint8_t *)rcaplong; xpt_action((union ccb *)&cdai); if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); if (cdai.ccb_h.status != CAM_REQ_CMP) { xpt_print(periph->path, "%s: failed to set read " "capacity advinfo\n", __func__); /* Use cam_error_print() to decode the status */ cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS, CAM_EPF_ALL); } else { bcopy(rcaplong, &softc->rcaplong, min(sizeof(softc->rcaplong), rcap_len)); } } softc->disk->d_sectorsize = softc->params.secsize; softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors; softc->disk->d_stripesize = softc->params.stripesize; softc->disk->d_stripeoffset = softc->params.stripeoffset; /* XXX: these are not actually "firmware" values, so they may be wrong */ softc->disk->d_fwsectors = softc->params.secs_per_track; softc->disk->d_fwheads = softc->params.heads; softc->disk->d_devstat->block_size = softc->params.secsize; softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE; error = disk_resize(softc->disk, M_NOWAIT); if (error != 0) xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error); } static void dasendorderedtag(void *arg) { struct da_softc *softc = arg; if (da_send_ordered) { if (!LIST_EMPTY(&softc->pending_ccbs)) { if ((softc->flags & DA_FLAG_WAS_OTAG) == 0) softc->flags |= DA_FLAG_NEED_OTAG; softc->flags &= ~DA_FLAG_WAS_OTAG; } } /* Queue us up again */ callout_reset(&softc->sendordered_c, (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL, dasendorderedtag, softc); } /* * Step through all DA peripheral drivers, and if the device is still open, * sync the disk cache to physical media. */ static void dashutdown(void * arg, int howto) { struct cam_periph *periph; struct da_softc *softc; union ccb *ccb; int error; CAM_PERIPH_FOREACH(periph, &dadriver) { softc = (struct da_softc *)periph->softc; if (SCHEDULER_STOPPED()) { /* If we paniced with the lock held, do not recurse. */ if (!cam_periph_owned(periph) && (softc->flags & DA_FLAG_OPEN)) { dadump(softc->disk, NULL, 0, 0, 0); } continue; } cam_periph_lock(periph); /* * We only sync the cache if the drive is still open, and * if the drive is capable of it.. */ if (((softc->flags & DA_FLAG_OPEN) == 0) || (softc->quirks & DA_Q_NO_SYNC_CACHE)) { cam_periph_unlock(periph); continue; } ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); scsi_synchronize_cache(&ccb->csio, /*retries*/0, /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG, /*begin_lba*/0, /* whole disk */ /*lb_count*/0, SSD_FULL_SIZE, 60 * 60 * 1000); error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, softc->disk->d_devstat); if (error != 0) xpt_print(periph->path, "Synchronize cache failed\n"); xpt_release_ccb(ccb); cam_periph_unlock(periph); } } #else /* !_KERNEL */ /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. 
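
For the new ZBC CDB builders added at the end of this file, a typical caller looks like the sketch below: REPORT ZONES through scsi_zbc_in(). The service action value (0x00 for REPORT ZONES) and the zeroed zone_options (report all zones from LBA 0) are assumptions from ZBC, and buf/buflen are hypothetical:

/*
 * Hedged usage sketch, not part of this change.  In userland-style
 * code the completion callback is NULL and the CCB is run
 * synchronously.
 */
scsi_zbc_in(&ccb->csio,
    /*retries*/ 1,
    /*cbfcnp*/ NULL,
    /*tag_action*/ MSG_SIMPLE_Q_TAG,
    /*service_action*/ 0x00,		/* REPORT ZONES (assumed value) */
    /*zone_start_lba*/ 0,
    /*zone_options*/ 0,			/* report all zones */
    /*data_ptr*/ buf,
    /*dxfer_len*/ buflen,
    /*sense_len*/ SSD_FULL_SIZE,
    /*timeout*/ 30 * 1000);
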
*/ void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_format_unit *scsi_cmd; scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = FORMAT_UNIT; scsi_cmd->byte2 = byte2; scsi_ulto2b(ileave, scsi_cmd->interleave); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout) { uint8_t cdb_len; /* * These conditions allow using the 10 byte command. Otherwise we * need to use the 12 byte command. */ if ((minimum_cmd_size <= 10) && (addr_desc_index == 0) && (dxfer_len <= SRDD10_MAX_LENGTH)) { struct scsi_read_defect_data_10 *cdb10; cdb10 = (struct scsi_read_defect_data_10 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb10); bzero(cdb10, cdb_len); cdb10->opcode = READ_DEFECT_DATA_10; cdb10->format = list_format; scsi_ulto2b(dxfer_len, cdb10->alloc_length); } else { struct scsi_read_defect_data_12 *cdb12; cdb12 = (struct scsi_read_defect_data_12 *) &csio->cdb_io.cdb_bytes; cdb_len = sizeof(*cdb12); bzero(cdb12, cdb_len); cdb12->opcode = READ_DEFECT_DATA_12; cdb12->format = list_format; scsi_ulto4b(dxfer_len, cdb12->alloc_length); scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index); } cam_fill_csio(csio, retries, cbfcnp, /*flags*/ CAM_DIR_IN, tag_action, data_ptr, dxfer_len, sense_len, cdb_len, timeout); } void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) { struct scsi_sanitize *scsi_cmd; scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes; scsi_cmd->opcode = SANITIZE; scsi_cmd->byte2 = byte2; scsi_cmd->control = control; scsi_ulto2b(dxfer_len, scsi_cmd->length); cam_fill_csio(csio, retries, cbfcnp, /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, tag_action, data_ptr, dxfer_len, sense_len, sizeof(*scsi_cmd), timeout); } #endif /* _KERNEL */ + +void +scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint8_t service_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout) +{ + struct scsi_zbc_out *scsi_cmd; + + scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; + scsi_cmd->opcode = ZBC_OUT; + scsi_cmd->service_action = service_action; + scsi_u64to8b(zone_id, scsi_cmd->zone_id); + scsi_cmd->zone_flags = zone_flags; + + cam_fill_csio(csio, + retries, + cbfcnp, + /*flags*/ (dxfer_len > 0) ? 
+ +void +scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint8_t service_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout) +{ + struct scsi_zbc_out *scsi_cmd; + + scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes; + scsi_cmd->opcode = ZBC_OUT; + scsi_cmd->service_action = service_action; + scsi_u64to8b(zone_id, scsi_cmd->zone_id); + scsi_cmd->zone_flags = zone_flags; + + cam_fill_csio(csio, + retries, + cbfcnp, + /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, + tag_action, + data_ptr, + dxfer_len, + sense_len, + sizeof(*scsi_cmd), + timeout); +} + +void +scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba, + uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout) +{ + struct scsi_zbc_in *scsi_cmd; + + scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes; + scsi_cmd->opcode = ZBC_IN; + scsi_cmd->service_action = service_action; + scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba); + scsi_cmd->zone_options = zone_options; + + cam_fill_csio(csio, + retries, + cbfcnp, + /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE, + tag_action, + data_ptr, + dxfer_len, + sense_len, + sizeof(*scsi_cmd), + timeout); + +} + +int +scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, int use_ncq, + uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, + uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t *cdb_storage, size_t cdb_storage_len, + uint8_t sense_len, uint32_t timeout) +{ + uint8_t command_out, protocol, ata_flags; + uint16_t features_out; + uint32_t sectors_out, auxiliary; + int retval; + + retval = 0; + + if (use_ncq == 0) { + command_out = ATA_ZAC_MANAGEMENT_OUT; + features_out = (zm_action & 0xf) | (zone_flags << 8); + ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; + if (dxfer_len == 0) { + protocol = AP_PROTO_NON_DATA; + ata_flags |= AP_FLAG_TLEN_NO_DATA; + sectors_out = 0; + } else { + protocol = AP_PROTO_DMA; + ata_flags |= AP_FLAG_TLEN_SECT_CNT | + AP_FLAG_TDIR_TO_DEV; + sectors_out = ((dxfer_len >> 9) & 0xffff); + } + auxiliary = 0; + } else { + ata_flags = AP_FLAG_BYT_BLOK_BLOCKS; + if (dxfer_len == 0) { + command_out = ATA_NCQ_NON_DATA; + features_out = ATA_NCQ_ZAC_MGMT_OUT; + /* + * We're assuming the SCSI to ATA translation layer + * will set the NCQ tag number in the tag field. + * That isn't clear from the SAT-4 spec (as of rev 05). + */ + sectors_out = 0; + ata_flags |= AP_FLAG_TLEN_NO_DATA; + } else { + command_out = ATA_SEND_FPDMA_QUEUED; + /* + * Note that we're defaulting to normal priority, + * and assuming that the SCSI to ATA translation + * layer will insert the NCQ tag number in the tag + * field. That isn't clear in the SAT-4 spec (as + * of rev 05). + */ + sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8; + + ata_flags |= AP_FLAG_TLEN_FEAT | + AP_FLAG_TDIR_TO_DEV; + + /* + * For SEND FPDMA QUEUED, the transfer length is + * encoded in the FEATURE register, and 0 means + * that 65536 512 byte blocks are to be transferred. + * In practice, it seems unlikely that we'll see + * a transfer that large, and it may confuse + * the SAT layer, because generally that means that + * 0 bytes should be transferred. + */ + if (dxfer_len == (65536 * 512)) { + features_out = 0; + } else if (dxfer_len <= (65535 * 512)) { + features_out = ((dxfer_len >> 9) & 0xffff); + } else { + /* The transfer is too big. */ + retval = 1; + goto bailout; + } + + } + + auxiliary = (zm_action & 0xf) | (zone_flags << 8); + protocol = AP_PROTO_FPDMA; + } + + protocol |= AP_EXTEND; + + retval = scsi_ata_pass(csio, + retries, + cbfcnp, + /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE, + tag_action, + /*protocol*/ protocol, + /*ata_flags*/ ata_flags, + /*features*/ features_out, + /*sector_count*/ sectors_out, + /*lba*/ zone_id, + /*command*/ command_out, + /*device*/ 0, + /*icc*/ 0, + /*auxiliary*/ auxiliary, + /*control*/ 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ dxfer_len, + /*cdb_storage*/ cdb_storage, + /*cdb_storage_len*/ cdb_storage_len, + /*minimum_cmd_size*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout); + +bailout: + + return (retval); +}
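/*
 * Illustrative sketch, not part of this change: the FEATURE-register length
 * encoding used by both ZAC management routines here, factored into a
 * hypothetical helper. For the queued commands the transfer length is a
 * 16-bit block count where 0 means 65536 blocks, so only 512-byte multiples
 * up to 32 MiB are representable.
 */
static int
zac_encode_fpdma_len(uint32_t dxfer_len, uint16_t *features_out)
{

	if (dxfer_len == 65536 * 512)
		*features_out = 0;		/* 0 encodes 65536 blocks */
	else if (dxfer_len <= 65535 * 512)
		*features_out = (dxfer_len >> 9) & 0xffff;
	else
		return (1);			/* too large to encode */
	return (0);
}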
+ +int +scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, int use_ncq, + uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags, + uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t *cdb_storage, size_t cdb_storage_len, + uint8_t sense_len, uint32_t timeout) +{ + uint8_t command_out, protocol; + uint16_t features_out, sectors_out; + uint32_t auxiliary; + int ata_flags; + int retval; + + retval = 0; + ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS; + + if (use_ncq == 0) { + command_out = ATA_ZAC_MANAGEMENT_IN; + /* XXX KDM put a macro here */ + features_out = (zm_action & 0xf) | (zone_flags << 8); + sectors_out = dxfer_len >> 9; /* XXX KDM macro */ + protocol = AP_PROTO_DMA; + ata_flags |= AP_FLAG_TLEN_SECT_CNT; + auxiliary = 0; + } else { + ata_flags |= AP_FLAG_TLEN_FEAT; + + command_out = ATA_RECV_FPDMA_QUEUED; + sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8; + + /* + * For RECEIVE FPDMA QUEUED, the transfer length is + * encoded in the FEATURE register, and 0 means + * that 65536 512 byte blocks are to be transferred. + * In practice, it seems unlikely that we'll see + * a transfer that large, and it may confuse + * the SAT layer, because generally that means that + * 0 bytes should be transferred. + */ + if (dxfer_len == (65536 * 512)) { + features_out = 0; + } else if (dxfer_len <= (65535 * 512)) { + features_out = ((dxfer_len >> 9) & 0xffff); + } else { + /* The transfer is too big. */ + retval = 1; + goto bailout; + } + auxiliary = (zm_action & 0xf) | (zone_flags << 8); + protocol = AP_PROTO_FPDMA; + } + + protocol |= AP_EXTEND; + + retval = scsi_ata_pass(csio, + retries, + cbfcnp, + /*flags*/ CAM_DIR_IN, + tag_action, + /*protocol*/ protocol, + /*ata_flags*/ ata_flags, + /*features*/ features_out, + /*sector_count*/ sectors_out, + /*lba*/ zone_id, + /*command*/ command_out, + /*device*/ 0, + /*icc*/ 0, + /*auxiliary*/ auxiliary, + /*control*/ 0, + /*data_ptr*/ data_ptr, + /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */ + /*cdb_storage*/ cdb_storage, + /*cdb_storage_len*/ cdb_storage_len, + /*minimum_cmd_size*/ 0, + /*sense_len*/ SSD_FULL_SIZE, + /*timeout*/ timeout); + +bailout: + return (retval); +}
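/*
 * Illustrative userland sketch, not part of this change: issuing REPORT
 * ZONES through scsi_zbc_in() with the cam(3) library (assumes <camlib.h>,
 * <cam/scsi/scsi_message.h>, <err.h>, <fcntl.h>, <string.h>). Error
 * handling is trimmed; "/dev/da0" and the buffer size are arbitrary. A
 * RESET WRITE POINTER for every zone would be the analogous scsi_zbc_out()
 * call with ZBC_OUT_SA_RWP and ZBC_OUT_ALL.
 */
static void
report_zones_example(void)
{
	struct cam_device *dev;
	union ccb *ccb;
	static uint8_t rz_buf[128 * 1024];

	if ((dev = cam_open_device("/dev/da0", O_RDWR)) == NULL)
		errx(1, "%s", cam_errbuf);
	if ((ccb = cam_getccb(dev)) == NULL)
		errx(1, "cam_getccb failed");
	/* Clear everything below the CCB header before filling it in. */
	bzero(&(&ccb->ccb_h)[1],
	    sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
	scsi_zbc_in(&ccb->csio,
		    /*retries*/ 1,
		    /*cbfcnp*/ NULL,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
		    /*zone_start_lba*/ 0,
		    /*zone_options*/ ZBC_IN_REP_ALL_ZONES,
		    /*data_ptr*/ rz_buf,
		    /*dxfer_len*/ sizeof(rz_buf),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ 60 * 1000);
	ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
	if (cam_send_ccb(dev, ccb) < 0 ||
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		errx(1, "REPORT ZONES failed");
	cam_freeccb(ccb);
	cam_close_device(dev);
}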
Index: head/sys/cam/scsi/scsi_da.h =================================================================== --- head/sys/cam/scsi/scsi_da.h (revision 300206) +++ head/sys/cam/scsi/scsi_da.h (revision 300207) @@ -1,586 +1,698 @@ /* * Structures and definitions for SCSI commands to Direct Access Devices */ /*- * Some lines of this file come from a file of the name "scsi.h" * distributed by OSF as part of mach2.5, * so the following disclaimer has been kept. * * Copyright 1990 by Open Software Foundation, * Grenoble, FRANCE * * All Rights Reserved * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby granted, * provided that the above copyright notice appears in all copies and * that both the copyright notice and this permission notice appear in * supporting documentation, and that the name of OSF or Open Software * Foundation not be used in advertising or publicity pertaining to * distribution of the software without specific, written prior * permission. * * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /*- * Largely written by Julian Elischer (julian@tfs.com) * for TRW Financial Systems. * * TRW Financial Systems, in accordance with their agreement with Carnegie * Mellon University, makes this software available to CMU to distribute * or use in any manner that they see fit as long as this message is kept with * the software. For this reason TFS also grants any other persons or * organisations permission to use or modify this software. * * TFS supplies this software to be publicly redistributed * on the understanding that TFS is not responsible for the correct * functioning of this software in any circumstances. * * Ported to run under 386BSD by Julian Elischer (julian@tfs.com) Sept 1992 * * $FreeBSD$ */ #ifndef _SCSI_SCSI_DA_H #define _SCSI_SCSI_DA_H 1 #include struct scsi_rezero_unit { u_int8_t opcode; #define SRZU_LUN_MASK 0xE0 u_int8_t byte2; u_int8_t reserved[3]; u_int8_t control; }; /* * NOTE: The lower three bits of byte2 of the format CDB are the same as * the lower three bits of byte2 of the read defect data CDB, below.
*/ struct scsi_format_unit { u_int8_t opcode; u_int8_t byte2; #define FU_FORMAT_MASK SRDD10_DLIST_FORMAT_MASK #define FU_BLOCK_FORMAT SRDD10_BLOCK_FORMAT #define FU_BFI_FORMAT SRDD10_BYTES_FROM_INDEX_FORMAT #define FU_PHYS_FORMAT SRDD10_PHYSICAL_SECTOR_FORMAT #define FU_CMPLST 0x08 #define FU_FMT_DATA 0x10 u_int8_t vendor_specific; u_int8_t interleave[2]; u_int8_t control; }; struct scsi_reassign_blocks { u_int8_t opcode; u_int8_t byte2; u_int8_t unused[3]; u_int8_t control; }; struct scsi_read_defect_data_10 { uint8_t opcode; uint8_t byte2; #define SRDD10_GLIST 0x08 #define SRDD10_PLIST 0x10 #define SRDD10_DLIST_FORMAT_MASK 0x07 #define SRDD10_BLOCK_FORMAT 0x00 #define SRDD10_EXT_BFI_FORMAT 0x01 #define SRDD10_EXT_PHYS_FORMAT 0x02 #define SRDD10_LONG_BLOCK_FORMAT 0x03 #define SRDD10_BYTES_FROM_INDEX_FORMAT 0x04 #define SRDD10_PHYSICAL_SECTOR_FORMAT 0x05 #define SRDD10_VENDOR_FORMAT 0x06 uint8_t format; uint8_t reserved[4]; uint8_t alloc_length[2]; #define SRDD10_MAX_LENGTH 0xffff uint8_t control; }; struct scsi_sanitize { u_int8_t opcode; u_int8_t byte2; #define SSZ_SERVICE_ACTION_OVERWRITE 0x01 #define SSZ_SERVICE_ACTION_BLOCK_ERASE 0x02 #define SSZ_SERVICE_ACTION_CRYPTO_ERASE 0x03 #define SSZ_SERVICE_ACTION_EXIT_MODE_FAILURE 0x1F #define SSZ_UNRESTRICTED_EXIT 0x20 #define SSZ_IMMED 0x80 u_int8_t reserved[5]; u_int8_t length[2]; u_int8_t control; }; struct scsi_sanitize_parameter_list { u_int8_t byte1; #define SSZPL_INVERT 0x80 u_int8_t reserved; u_int8_t length[2]; /* Variable length initialization pattern. */ #define SSZPL_MAX_PATTERN_LENGTH 65535 }; struct scsi_read_defect_data_12 { uint8_t opcode; #define SRDD12_GLIST 0x08 #define SRDD12_PLIST 0x10 #define SRDD12_DLIST_FORMAT_MASK 0x07 #define SRDD12_BLOCK_FORMAT SRDD10_BLOCK_FORMAT #define SRDD12_BYTES_FROM_INDEX_FORMAT SRDD10_BYTES_FROM_INDEX_FORMAT #define SRDD12_PHYSICAL_SECTOR_FORMAT SRDD10_PHYSICAL_SECTOR_FORMAT uint8_t format; uint8_t address_descriptor_index[4]; uint8_t alloc_length[4]; #define SRDD12_MAX_LENGTH 0xffffffff uint8_t reserved; uint8_t control; }; +struct scsi_zbc_out +{ + uint8_t opcode; + uint8_t service_action; +#define ZBC_OUT_SA_CLOSE 0x01 +#define ZBC_OUT_SA_FINISH 0x02 +#define ZBC_OUT_SA_OPEN 0x03 +#define ZBC_OUT_SA_RWP 0x04 + uint8_t zone_id[8]; + uint8_t reserved[4]; + uint8_t zone_flags; +#define ZBC_OUT_ALL 0x01 + uint8_t control; +}; +struct scsi_zbc_in +{ + uint8_t opcode; + uint8_t service_action; +#define ZBC_IN_SA_REPORT_ZONES 0x00 + uint8_t zone_start_lba[8]; + uint8_t length[4]; + uint8_t zone_options; +#define ZBC_IN_PARTIAL 0x80 +#define ZBC_IN_REP_ALL_ZONES 0x00 +#define ZBC_IN_REP_EMPTY 0x01 +#define ZBC_IN_REP_IMP_OPEN 0x02 +#define ZBC_IN_REP_EXP_OPEN 0x03 +#define ZBC_IN_REP_CLOSED 0x04 +#define ZBC_IN_REP_FULL 0x05 +#define ZBC_IN_REP_READONLY 0x06 +#define ZBC_IN_REP_OFFLINE 0x07 +#define ZBC_IN_REP_RESET 0x10 +#define ZBC_IN_REP_NON_SEQ 0x11 +#define ZBC_IN_REP_NON_WP 0x3f +#define ZBC_IN_REP_MASK 0x3f + uint8_t control; +}; + +struct scsi_report_zones_desc { + uint8_t zone_type; +#define SRZ_TYPE_CONVENTIONAL 0x01 +#define SRZ_TYPE_SEQ_REQUIRED 0x02 +#define SRZ_TYPE_SEQ_PREFERRED 0x03 +#define SRZ_TYPE_MASK 0x0f + uint8_t zone_flags; +#define SRZ_ZONE_COND_SHIFT 4 +#define SRZ_ZONE_COND_MASK 0xf0 +#define SRZ_ZONE_COND_NWP 0x00 +#define SRZ_ZONE_COND_EMPTY 0x10 +#define SRZ_ZONE_COND_IMP_OPEN 0x20 +#define SRZ_ZONE_COND_EXP_OPEN 0x30 +#define SRZ_ZONE_COND_CLOSED 0x40 +#define SRZ_ZONE_COND_READONLY 0xd0 +#define SRZ_ZONE_COND_FULL 0xe0 +#define SRZ_ZONE_COND_OFFLINE 0xf0 +#define 
SRZ_ZONE_NON_SEQ 0x02 +#define SRZ_ZONE_RESET 0x01 + uint8_t reserved[6]; + uint8_t zone_length[8]; + uint8_t zone_start_lba[8]; + uint8_t write_pointer_lba[8]; + uint8_t reserved2[32]; +}; + +struct scsi_report_zones_hdr { + uint8_t length[4]; + uint8_t byte4; +#define SRZ_SAME_ALL_DIFFERENT 0x00 /* Lengths and types vary */ +#define SRZ_SAME_ALL_SAME 0x01 /* Lengths and types the same */ +#define SRZ_SAME_LAST_DIFFERENT 0x02 /* Types same, last length varies */ +#define SRZ_SAME_TYPES_DIFFERENT 0x03 /* Types vary, length the same */ +#define SRZ_SAME_MASK 0x0f + uint8_t reserved[3]; + uint8_t maximum_lba[8]; + uint8_t reserved2[48]; + struct scsi_report_zones_desc desc_list[]; +}; + /* * Opcodes */ #define REZERO_UNIT 0x01 #define FORMAT_UNIT 0x04 #define REASSIGN_BLOCKS 0x07 #define MODE_SELECT 0x15 #define MODE_SENSE 0x1a #define READ_FORMAT_CAPACITIES 0x23 #define WRITE_AND_VERIFY 0x2e #define VERIFY 0x2f #define READ_DEFECT_DATA_10 0x37 #define SANITIZE 0x48 +#define ZBC_OUT 0x94 +#define ZBC_IN 0x95 #define READ_DEFECT_DATA_12 0xb7 struct format_defect_list_header { u_int8_t reserved; u_int8_t byte2; #define FU_DLH_VS 0x01 #define FU_DLH_IMMED 0x02 #define FU_DLH_DSP 0x04 #define FU_DLH_IP 0x08 #define FU_DLH_STPF 0x10 #define FU_DLH_DCRT 0x20 #define FU_DLH_DPRY 0x40 #define FU_DLH_FOV 0x80 u_int8_t defect_list_length[2]; }; struct format_ipat_descriptor { u_int8_t byte1; #define FU_INIT_NO_HDR 0x00 #define FU_INIT_LBA_MSB 0x40 #define FU_INIT_LBA_EACH 0x80 #define FU_INIT_SI 0x20 u_int8_t pattern_type; #define FU_INIT_PAT_DEFAULT 0x00 #define FU_INIT_PAT_REPEAT 0x01 u_int8_t pat_length[2]; }; struct scsi_read_format_capacities { uint8_t opcode; /* READ_FORMAT_CAPACITIES */ uint8_t byte2; #define SRFC_LUN_MASK 0xE0 uint8_t reserved0[5]; uint8_t alloc_length[2]; uint8_t reserved1[3]; }; struct scsi_verify_10 { uint8_t opcode; /* VERIFY(10) */ uint8_t byte2; #define SVFY_LUN_MASK 0xE0 #define SVFY_RELADR 0x01 #define SVFY_BYTCHK 0x02 #define SVFY_DPO 0x10 uint8_t addr[4]; /* LBA to begin verification at */ uint8_t group; uint8_t length[2]; /* number of blocks to verify */ uint8_t control; }; struct scsi_verify_12 { uint8_t opcode; /* VERIFY(12) */ uint8_t byte2; uint8_t addr[4]; /* LBA to begin verification at */ uint8_t length[4]; /* number of blocks to verify */ uint8_t group; uint8_t control; }; struct scsi_verify_16 { uint8_t opcode; /* VERIFY(16) */ uint8_t byte2; uint8_t addr[8]; /* LBA to begin verification at */ uint8_t length[4]; /* number of blocks to verify */ uint8_t group; uint8_t control; }; struct scsi_compare_and_write { uint8_t opcode; /* COMPARE AND WRITE */ uint8_t byte2; uint8_t addr[8]; /* LBA to begin verification at */ uint8_t reserved[3]; uint8_t length; /* number of blocks */ uint8_t group; uint8_t control; }; struct scsi_write_and_verify { uint8_t opcode; /* WRITE_AND_VERIFY */ uint8_t byte2; #define SWVY_LUN_MASK 0xE0 #define SWVY_RELADR 0x01 #define SWVY_BYTECHK 0x02 #define SWVY_DPO 0x10 uint8_t addr[4]; /* LBA to begin verification at */ uint8_t reserved0[1]; uint8_t len[2]; /* number of blocks to write and verify */ uint8_t reserved1[3]; };
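/*
 * Illustrative sketch, not part of this change: walking a REPORT ZONES
 * payload using scsi_report_zones_hdr and scsi_report_zones_desc defined
 * above. It assumes the LENGTH field counts descriptor bytes, per ZBC, and
 * that "buf"/"valid_len" describe the data actually returned; it would
 * live in a consumer of this header, not here.
 */
static void
report_zones_walk(uint8_t *buf, uint32_t valid_len)
{
	struct scsi_report_zones_hdr *hdr =
	    (struct scsi_report_zones_hdr *)buf;
	struct scsi_report_zones_desc *desc = &hdr->desc_list[0];
	uint32_t resid = scsi_4btoul(hdr->length);

	while (resid >= sizeof(*desc) &&
	    (uint8_t *)(desc + 1) <= buf + valid_len) {
		printf("type %#x cond %#x start %ju wp %ju\n",
		    desc->zone_type & SRZ_TYPE_MASK,
		    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
		    SRZ_ZONE_COND_SHIFT,
		    (uintmax_t)scsi_8btou64(desc->zone_start_lba),
		    (uintmax_t)scsi_8btou64(desc->write_pointer_lba));
		resid -= sizeof(*desc);
		desc++;
	}
}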
/* * Replies to READ_FORMAT_CAPACITIES look like this: * * struct format_capacity_list_header * struct format_capacity_descriptor[1..n] * * These are similar, but not totally identical to, the * defect list used to format a rigid disk. * * The appropriate csio_decode() format string looks like this: * "{} *i3 {Len} i1 {Blocks} i4 {} *b6 {Code} b2 {Blocklen} i3" * * If the capacity_list_length is greater than * sizeof(struct format_capacity_descriptor), then there are * additional format capacity descriptors available which * denote which format(s) the drive can handle. * * (Source: USB Mass Storage UFI Specification) */ struct format_capacity_list_header { uint8_t unused[3]; uint8_t capacity_list_length; }; struct format_capacity_descriptor { uint8_t nblocks[4]; /* total number of LBAs */ uint8_t byte4; /* only present in max/cur descriptor */ #define FCD_CODE_MASK 0x03 /* mask for code field above */ #define FCD_UNFORMATTED 0x01 /* unformatted media present, * maximum capacity returned */ #define FCD_FORMATTED 0x02 /* formatted media present, * current capacity returned */ #define FCD_NOMEDIA 0x03 /* no media present, * maximum device capacity returned */ uint8_t block_length[3]; /* length of an LBA in bytes */ }; struct scsi_reassign_blocks_data { u_int8_t reserved[2]; u_int8_t length[2]; struct { u_int8_t dlbaddr[4]; /* defect logical block address */ } defect_descriptor[1]; }; /* * This is the list header for the READ DEFECT DATA(10) command above. * It may be a bit wrong to append the 10 at the end of the data structure, * since it's only 4 bytes but it does tie it to the 10 byte command. */ struct scsi_read_defect_data_hdr_10 { u_int8_t reserved; #define SRDDH10_GLIST 0x08 #define SRDDH10_PLIST 0x10 #define SRDDH10_DLIST_FORMAT_MASK 0x07 #define SRDDH10_BLOCK_FORMAT 0x00 #define SRDDH10_BYTES_FROM_INDEX_FORMAT 0x04 #define SRDDH10_PHYSICAL_SECTOR_FORMAT 0x05 u_int8_t format; u_int8_t length[2]; #define SRDDH10_MAX_LENGTH SRDD10_MAX_LENGTH - \ sizeof(struct scsi_read_defect_data_hdr_10) }; struct scsi_defect_desc_block { u_int8_t address[4]; }; struct scsi_defect_desc_long_block { u_int8_t address[8]; }; struct scsi_defect_desc_bytes_from_index { u_int8_t cylinder[3]; u_int8_t head; #define SDD_EXT_BFI_MADS 0x80000000 #define SDD_EXT_BFI_FLAG_MASK 0xf0000000 #define SDD_EXT_BFI_ENTIRE_TRACK 0x0fffffff u_int8_t bytes_from_index[4]; }; struct scsi_defect_desc_phys_sector { u_int8_t cylinder[3]; u_int8_t head; #define SDD_EXT_PHYS_MADS 0x80000000 #define SDD_EXT_PHYS_FLAG_MASK 0xf0000000 #define SDD_EXT_PHYS_ENTIRE_TRACK 0x0fffffff u_int8_t sector[4]; }; struct scsi_read_defect_data_hdr_12 { u_int8_t reserved; #define SRDDH12_GLIST 0x08 #define SRDDH12_PLIST 0x10 #define SRDDH12_DLIST_FORMAT_MASK 0x07 #define SRDDH12_BLOCK_FORMAT 0x00 #define SRDDH12_BYTES_FROM_INDEX_FORMAT 0x04 #define SRDDH12_PHYSICAL_SECTOR_FORMAT 0x05 u_int8_t format; u_int8_t generation[2]; u_int8_t length[4]; #define SRDDH12_MAX_LENGTH SRDD12_MAX_LENGTH - \ sizeof(struct scsi_read_defect_data_hdr_12) }; union disk_pages /* this is the structure copied from osf */ { struct format_device_page { u_int8_t pg_code; /* page code (should be 3) */ #define SMS_FORMAT_DEVICE_PAGE 0x03 /* only 6 bits valid */ u_int8_t pg_length; /* page length (should be 0x16) */ #define SMS_FORMAT_DEVICE_PLEN 0x16 u_int8_t trk_z_1; /* tracks per zone (MSB) */ u_int8_t trk_z_0; /* tracks per zone (LSB) */ u_int8_t alt_sec_1; /* alternate sectors per zone (MSB) */ u_int8_t alt_sec_0; /* alternate sectors per zone (LSB) */ u_int8_t alt_trk_z_1; /* alternate tracks per zone (MSB) */ u_int8_t alt_trk_z_0; /* alternate tracks per zone (LSB) */ u_int8_t alt_trk_v_1; /* alternate tracks per volume (MSB) */ u_int8_t alt_trk_v_0; /* alternate tracks per volume (LSB) */ u_int8_t
ph_sec_t_1; /* physical sectors per track (MSB) */ u_int8_t ph_sec_t_0; /* physical sectors per track (LSB) */ u_int8_t bytes_s_1; /* bytes per sector (MSB) */ u_int8_t bytes_s_0; /* bytes per sector (LSB) */ u_int8_t interleave_1; /* interleave (MSB) */ u_int8_t interleave_0; /* interleave (LSB) */ u_int8_t trk_skew_1; /* track skew factor (MSB) */ u_int8_t trk_skew_0; /* track skew factor (LSB) */ u_int8_t cyl_skew_1; /* cylinder skew (MSB) */ u_int8_t cyl_skew_0; /* cylinder skew (LSB) */ u_int8_t flags; /* various */ #define DISK_FMT_SURF 0x10 #define DISK_FMT_RMB 0x20 #define DISK_FMT_HSEC 0x40 #define DISK_FMT_SSEC 0x80 u_int8_t reserved21; u_int8_t reserved22; u_int8_t reserved23; } format_device; struct rigid_geometry_page { u_int8_t pg_code; /* page code (should be 4) */ #define SMS_RIGID_GEOMETRY_PAGE 0x04 u_int8_t pg_length; /* page length (should be 0x16) */ #define SMS_RIGID_GEOMETRY_PLEN 0x16 u_int8_t ncyl_2; /* number of cylinders (MSB) */ u_int8_t ncyl_1; /* number of cylinders */ u_int8_t ncyl_0; /* number of cylinders (LSB) */ u_int8_t nheads; /* number of heads */ u_int8_t st_cyl_wp_2; /* starting cyl., write precomp (MSB) */ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp */ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */ u_int8_t st_cyl_rwc_2; /* starting cyl., red. write cur (MSB)*/ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur */ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/ u_int8_t driv_step_1; /* drive step rate (MSB) */ u_int8_t driv_step_0; /* drive step rate (LSB) */ u_int8_t land_zone_2; /* landing zone cylinder (MSB) */ u_int8_t land_zone_1; /* landing zone cylinder */ u_int8_t land_zone_0; /* landing zone cylinder (LSB) */ u_int8_t rpl; /* rotational position locking (2 bits) */ u_int8_t rot_offset; /* rotational offset */ u_int8_t reserved19; u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */ u_int8_t reserved22; u_int8_t reserved23; } rigid_geometry; struct flexible_disk_page { u_int8_t pg_code; /* page code (should be 5) */ #define SMS_FLEXIBLE_GEOMETRY_PAGE 0x05 u_int8_t pg_length; /* page length (should be 0x1E) */ #define SMS_FLEXIBLE_GEOMETRY_PLEN 0x1E u_int8_t xfr_rate_1; /* transfer rate (MSB) */ u_int8_t xfr_rate_0; /* transfer rate (LSB) */ u_int8_t nheads; /* number of heads */ u_int8_t sec_per_track; /* Sectors per track */ u_int8_t bytes_s_1; /* bytes per sector (MSB) */ u_int8_t bytes_s_0; /* bytes per sector (LSB) */ u_int8_t ncyl_1; /* number of cylinders (MSB) */ u_int8_t ncyl_0; /* number of cylinders (LSB) */ u_int8_t st_cyl_wp_1; /* starting cyl., write precomp (MSB) */ u_int8_t st_cyl_wp_0; /* starting cyl., write precomp (LSB) */ u_int8_t st_cyl_rwc_1; /* starting cyl., red. write cur (MSB)*/ u_int8_t st_cyl_rwc_0; /* starting cyl., red. write cur (LSB)*/ u_int8_t driv_step_1; /* drive step rate (MSB) */ u_int8_t driv_step_0; /* drive step rate (LSB) */ u_int8_t driv_step_pw; /* drive step pulse width */ u_int8_t head_stl_del_1;/* Head settle delay (MSB) */ u_int8_t head_stl_del_0;/* Head settle delay (LSB) */ u_int8_t motor_on_del; /* Motor on delay */ u_int8_t motor_off_del; /* Motor off delay */ u_int8_t trdy_ssn_mo; /* XXX ??? */ u_int8_t spc; /* XXX ??? 
*/ u_int8_t write_comp; /* Write compensation */ u_int8_t head_load_del; /* Head load delay */ u_int8_t head_uload_del;/* Head un-load delay */ u_int8_t pin32_pin2; u_int8_t pin4_pint1; u_int8_t medium_rot_rate_1; /* medium rotation rate (RPM) (MSB) */ u_int8_t medium_rot_rate_0; /* medium rotation rate (RPM) (LSB) */ u_int8_t reserved30; u_int8_t reserved31; } flexible_disk; }; /* * XXX KDM * Here for CTL compatibility, reconcile this. */ struct scsi_format_page { uint8_t page_code; uint8_t page_length; uint8_t tracks_per_zone[2]; uint8_t alt_sectors_per_zone[2]; uint8_t alt_tracks_per_zone[2]; uint8_t alt_tracks_per_lun[2]; uint8_t sectors_per_track[2]; uint8_t bytes_per_sector[2]; uint8_t interleave[2]; uint8_t track_skew[2]; uint8_t cylinder_skew[2]; uint8_t flags; #define SFP_SSEC 0x80 #define SFP_HSEC 0x40 #define SFP_RMB 0x20 #define SFP_SURF 0x10 uint8_t reserved[3]; }; /* * XXX KDM * Here for CTL compatibility, reconcile this. */ struct scsi_rigid_disk_page { uint8_t page_code; #define SMS_RIGID_DISK_PAGE 0x04 uint8_t page_length; uint8_t cylinders[3]; uint8_t heads; uint8_t start_write_precomp[3]; uint8_t start_reduced_current[3]; uint8_t step_rate[2]; uint8_t landing_zone_cylinder[3]; uint8_t rpl; #define SRDP_RPL_DISABLED 0x00 #define SRDP_RPL_SLAVE 0x01 #define SRDP_RPL_MASTER 0x02 #define SRDP_RPL_MASTER_CONTROL 0x03 uint8_t rotational_offset; uint8_t reserved1; uint8_t rotation_rate[2]; uint8_t reserved2[2]; }; struct scsi_da_rw_recovery_page { u_int8_t page_code; #define SMS_RW_ERROR_RECOVERY_PAGE 0x01 u_int8_t page_length; u_int8_t byte3; #define SMS_RWER_AWRE 0x80 #define SMS_RWER_ARRE 0x40 #define SMS_RWER_TB 0x20 #define SMS_RWER_RC 0x10 #define SMS_RWER_EER 0x08 #define SMS_RWER_PER 0x04 #define SMS_RWER_DTE 0x02 #define SMS_RWER_DCR 0x01 u_int8_t read_retry_count; u_int8_t correction_span; u_int8_t head_offset_count; u_int8_t data_strobe_offset_cnt; u_int8_t byte8; #define SMS_RWER_LBPERE 0x80 u_int8_t write_retry_count; u_int8_t reserved2; u_int8_t recovery_time_limit[2]; }; __BEGIN_DECLS /* * XXX These are only left out of the kernel build to silence warnings. If, * for some reason these functions are used in the kernel, the ifdefs should * be moved so they are included both in the kernel and userland. 
*/ #ifndef _KERNEL void scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); void scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), uint8_t tag_action, uint8_t list_format, uint32_t addr_desc_index, uint8_t *data_ptr, uint32_t dxfer_len, int minimum_cmd_size, uint8_t sense_len, uint32_t timeout); void scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries, void (*cbfcnp)(struct cam_periph *, union ccb *), u_int8_t tag_action, u_int8_t byte2, u_int16_t control, u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout); #endif /* !_KERNEL */ + +void scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint8_t service_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len, + uint8_t sense_len, uint32_t timeout); + +void scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, uint8_t service_action, + uint64_t zone_start_lba, uint8_t zone_options, + uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len, + uint32_t timeout); + +int scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, int use_ncq, + uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, + uint32_t dxfer_len, uint8_t *cdb_storage, + size_t cdb_storage_len, uint8_t sense_len, + uint32_t timeout); + +int scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries, + void (*cbfcnp)(struct cam_periph *, union ccb *), + uint8_t tag_action, int use_ncq, + uint8_t zm_action, uint64_t zone_id, + uint8_t zone_flags, uint8_t *data_ptr, + uint32_t dxfer_len, uint8_t *cdb_storage, + size_t cdb_storage_len, uint8_t sense_len, + uint32_t timeout); + __END_DECLS #endif /* _SCSI_SCSI_DA_H */ Index: head/sys/dev/ahci/ahci.c =================================================================== --- head/sys/dev/ahci/ahci.c (revision 300206) +++ head/sys/dev/ahci/ahci.c (revision 300207) @@ -1,2736 +1,2735 @@ /*- * Copyright (c) 2009-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ahci.h" #include #include #include #include #include /* local prototypes */ static void ahci_intr(void *data); static void ahci_intr_one(void *data); static void ahci_intr_one_edge(void *data); static int ahci_ch_init(device_t dev); static int ahci_ch_deinit(device_t dev); static int ahci_ch_suspend(device_t dev); static int ahci_ch_resume(device_t dev); static void ahci_ch_pm(void *arg); static void ahci_ch_intr(void *arg); static void ahci_ch_intr_direct(void *arg); static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_execute_transaction(struct ahci_slot *slot); static void ahci_timeout(struct ahci_slot *slot); static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); static void ahci_dmainit(device_t dev); static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void ahci_dmafini(device_t dev); static void ahci_slotsalloc(device_t dev); static void ahci_slotsfree(device_t dev); static void ahci_reset(struct ahci_channel *ch); static void ahci_start(struct ahci_channel *ch, int fbs); static void ahci_stop(struct ahci_channel *ch); static void ahci_clo(struct ahci_channel *ch); static void ahci_start_fr(struct ahci_channel *ch); static void ahci_stop_fr(struct ahci_channel *ch); static int ahci_sata_connect(struct ahci_channel *ch); static int ahci_sata_phy_reset(struct ahci_channel *ch); static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); static void ahci_issue_recovery(struct ahci_channel *ch); static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); static void ahciaction(struct cam_sim *sim, union ccb *ccb); static void ahcipoll(struct cam_sim *sim); static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); #define recovery_type spriv_field0 #define RECOVERY_NONE 0 #define RECOVERY_READ_LOG 1 #define RECOVERY_REQUEST_SENSE 2 #define recovery_slot spriv_field1 int ahci_ctlr_setup(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Clear interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); /* Configure CCC */ if (ctlr->ccc) { ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); ATA_OUTL(ctlr->r_mem, AHCI_CCCC, (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | (4 << AHCI_CCCC_CC_SHIFT) | AHCI_CCCC_EN); ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; if (bootverbose) { device_printf(dev, "CCC with %dms/4cmd enabled on vector %d\n", ctlr->ccc, ctlr->cccv); 
} } /* Enable AHCI interrupts */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); return (0); } int ahci_ctlr_reset(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int timeout; /* Enable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); /* Reset AHCI controller */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); for (timeout = 1000; timeout > 0; timeout--) { DELAY(1000); if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) break; } if (timeout == 0) { device_printf(dev, "AHCI controller reset failure\n"); return (ENXIO); } /* Reenable AHCI mode */ ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { /* * Restore capability field. * This is write to a read-only register to restore its state. * On fully standard-compliant hardware this is not needed and * this operation shall not take place. See ahci_pci.c for * platforms using this quirk. */ ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); } return (0); } int ahci_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int error, i, u, speed, unit; u_int32_t version; device_t child; ctlr->dev = dev; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { ahci_free_mem(dev); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } /* Get the HW capabilities */ version = ATA_INL(ctlr->r_mem, AHCI_VS); ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); if (version >= 0x00010200) ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); if (ctlr->caps & AHCI_CAP_EMS) ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); if (ctlr->quirks & AHCI_Q_FORCE_PI) { /* * Enable ports. * The spec says that BIOS sets up bits corresponding to * available ports. On platforms where this information * is missing, the driver can define available ports on its own. */ int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; int nmask = (1 << nports) - 1; ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", nports, nmask); } ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ if ((ctlr->quirks & AHCI_Q_ALTSIG) && (ctlr->caps & AHCI_CAP_SPM) == 0) ctlr->quirks |= AHCI_Q_NOBSYRES; if (ctlr->quirks & AHCI_Q_1CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->ichannels &= 0x01; } if (ctlr->quirks & AHCI_Q_2CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 1; ctlr->ichannels &= 0x03; } if (ctlr->quirks & AHCI_Q_4CH) { ctlr->caps &= ~AHCI_CAP_NPMASK; ctlr->caps |= 3; ctlr->ichannels &= 0x0f; } ctlr->channels = MAX(flsl(ctlr->ichannels), (ctlr->caps & AHCI_CAP_NPMASK) + 1); if (ctlr->quirks & AHCI_Q_NOPMP) ctlr->caps &= ~AHCI_CAP_SPM; if (ctlr->quirks & AHCI_Q_NONCQ) ctlr->caps &= ~AHCI_CAP_SNCQ; if ((ctlr->caps & AHCI_CAP_CCCS) == 0) ctlr->ccc = 0; ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); /* Create controller-wide DMA tag. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, (ctlr->caps & AHCI_CAP_64BIT) ? 
BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &ctlr->dma_tag)) { ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (ENXIO); } ahci_ctlr_setup(dev); /* Setup interrupts. */ if ((error = ahci_setup_interrupt(dev)) != 0) { bus_dma_tag_destroy(ctlr->dma_tag); ahci_free_mem(dev); rman_fini(&ctlr->sc_iomem); return (error); } i = 0; for (u = ctlr->ichannels; u != 0; u >>= 1) i += (u & 1); ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); resource_int_value(device_get_name(dev), device_get_unit(dev), "direct", &ctlr->direct); /* Announce HW capabilities. */ speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; device_printf(dev, "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), ((version >> 4) & 0xf0) + (version & 0x0f), (ctlr->caps & AHCI_CAP_NPMASK) + 1, ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?"))), (ctlr->caps & AHCI_CAP_SPM) ? "supported" : "not supported", (ctlr->caps & AHCI_CAP_FBSS) ? " with FBS" : ""); if (ctlr->quirks != 0) { device_printf(dev, "quirks=0x%b\n", ctlr->quirks, AHCI_Q_BIT_STRING); } if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", ((speed == 1) ? "1.5":((speed == 2) ? "3": ((speed == 3) ? "6":"?")))); printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", (ctlr->caps & AHCI_CAP_NPMASK) + 1); } if (bootverbose && version >= 0x00010200) { device_printf(dev, "Caps2:%s%s%s%s%s%s\n", (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "ahcich", -1); if (child == NULL) { device_printf(dev, "failed to add channel device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)unit); if ((ctlr->ichannels & (1 << unit)) == 0) device_disable(child); } if (ctlr->caps & AHCI_CAP_EMS) { child = device_add_child(dev, "ahciem", -1); if (child == NULL) device_printf(dev, "failed to add enclosure device\n"); else device_set_ivars(child, (void *)(intptr_t)-1); } bus_generic_attach(dev); return (0); } int ahci_detach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Detach & delete all children */ device_delete_children(dev); /* Free interrupts. 
*/ for (i = 0; i < ctlr->numirqs; i++) { if (ctlr->irqs[i].r_irq) { bus_teardown_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); } } bus_dma_tag_destroy(ctlr->dma_tag); /* Free memory. */ rman_fini(&ctlr->sc_iomem); ahci_free_mem(dev); return (0); } void ahci_free_mem(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); /* Release memory resources */ if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); if (ctlr->r_msix_table) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_tab_rid, ctlr->r_msix_table); if (ctlr->r_msix_pba) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_msix_pba_rid, ctlr->r_msix_pba); ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; } int ahci_setup_interrupt(device_t dev) { struct ahci_controller *ctlr = device_get_softc(dev); int i; /* Check for single MSI vector fallback. */ if (ctlr->numirqs > 1 && (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { device_printf(dev, "Falling back to one MSI\n"); ctlr->numirqs = 1; } /* Ensure we don't overrun irqs. */ if (ctlr->numirqs > AHCI_MAX_IRQS) { device_printf(dev, "Too many irqs %d > %d (clamping)\n", ctlr->numirqs, AHCI_MAX_IRQS); ctlr->numirqs = AHCI_MAX_IRQS; } /* Allocate all IRQs. */ for (i = 0; i < ctlr->numirqs; i++) { ctlr->irqs[i].ctlr = ctlr; ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; else if (ctlr->numirqs == 1 || i >= ctlr->channels || (ctlr->ccc && i == ctlr->cccv)) ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; else if (i == ctlr->numirqs - 1) ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; else ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : ahci_intr_one), &ctlr->irqs[i], &ctlr->irqs[i].handle))) { /* SOS XXX release r_irq */ device_printf(dev, "unable to setup interrupt\n"); return (ENXIO); } if (ctlr->numirqs > 1) { bus_describe_intr(dev, ctlr->irqs[i].r_irq, ctlr->irqs[i].handle, ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? "ch%d" : "%d", i); } } return (0); } /* * Common case interrupt handler. */ static void ahci_intr(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; u_int32_t is, ise = 0; void *arg; int unit; if (irq->mode == AHCI_IRQ_MODE_ALL) { unit = 0; if (ctlr->ccc) is = ctlr->ichannels; else is = ATA_INL(ctlr->r_mem, AHCI_IS); } else { /* AHCI_IRQ_MODE_AFTER */ unit = irq->r_irq_rid - 1; is = ATA_INL(ctlr->r_mem, AHCI_IS); } /* CCC interrupt is edge triggered. */ if (ctlr->ccc) ise = 1 << ctlr->cccv; /* Some controllers have edge triggered IS. */ if (ctlr->quirks & AHCI_Q_EDGEIS) ise |= is; if (ise != 0) ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); for (; unit < ctlr->channels; unit++) { if ((is & (1 << unit)) != 0 && (arg = ctlr->interrupt[unit].argument)) { ctlr->interrupt[unit].function(arg); } } /* AHCI declares level triggered IS. */ if (!(ctlr->quirks & AHCI_Q_EDGEIS)) ATA_OUTL(ctlr->r_mem, AHCI_IS, is); ATA_RBL(ctlr->r_mem, AHCI_IS); } /* * Simplified interrupt handler for multivector MSI mode. 
*/ static void ahci_intr_one(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); /* AHCI declares level triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); ATA_RBL(ctlr->r_mem, AHCI_IS); } static void ahci_intr_one_edge(void *data) { struct ahci_controller_irq *irq = data; struct ahci_controller *ctlr = irq->ctlr; void *arg; int unit; unit = irq->r_irq_rid - 1; /* Some controllers have edge triggered IS. */ ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); if ((arg = ctlr->interrupt[unit].argument)) ctlr->interrupt[unit].function(arg); ATA_RBL(ctlr->r_mem, AHCI_IS); } struct resource * ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ahci_controller *ctlr = device_get_softc(dev); struct resource *res; rman_res_t st; int offset, size, unit; unit = (intptr_t)device_get_ivars(child); res = NULL; switch (type) { case SYS_RES_MEMORY: if (unit >= 0) { offset = AHCI_OFFSET + (unit << 7); size = 128; } else if (*rid == 0) { offset = AHCI_EM_CTL; size = 4; } else { offset = (ctlr->emloc & 0xffff0000) >> 14; size = (ctlr->emloc & 0x0000ffff) << 2; if (*rid != 1) { if (*rid == 2 && (ctlr->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) offset += size; else break; } } st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + size - 1, size, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, 128, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irqs[0].r_irq; break; } return (res); } int ahci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return (ENOENT); return (0); } return (EINVAL); } int ahci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("ahci.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } int ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct ahci_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } int ahci_print_child(device_t dev, device_t child) { int retval, channel; retval = bus_print_child_header(dev, child); channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) retval += printf(" at channel %d", channel); retval += bus_print_child_footer(dev, child); return (retval); } int ahci_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { int channel; channel = (int)(intptr_t)device_get_ivars(child); if (channel >= 0) snprintf(buf, buflen, "channel=%d", channel); return (0); } bus_dma_tag_t ahci_get_dma_tag(device_t dev, device_t child) { struct ahci_controller *ctlr = 
device_get_softc(dev); return (ctlr->dma_tag); } static int ahci_ch_probe(device_t dev) { device_set_desc_copy(dev, "AHCI channel"); return (BUS_PROBE_DEFAULT); } static int ahci_ch_attach(device_t dev) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); struct ahci_channel *ch = device_get_softc(dev); struct cam_devq *devq; int rid, error, i, sata_rev = 0; u_int32_t version; ch->dev = dev; ch->unit = (intptr_t)device_get_ivars(dev); ch->caps = ctlr->caps; ch->caps2 = ctlr->caps2; ch->start = ctlr->ch_start; ch->quirks = ctlr->quirks; ch->vendorid = ctlr->vendorid; ch->deviceid = ctlr->deviceid; ch->subvendorid = ctlr->subvendorid; ch->subdeviceid = ctlr->subdeviceid; ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); ch->pm_level = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "pm_level", &ch->pm_level); STAILQ_INIT(&ch->doneq); if (ch->pm_level > 3) callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); /* JMicron external ports (0) sometimes limited */ if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) sata_rev = 1; if (ch->quirks & AHCI_Q_SATA2) sata_rev = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &sata_rev); for (i = 0; i < 16; i++) { ch->user[i].revision = sata_rev; ch->user[i].mode = 0; ch->user[i].bytecount = 8192; ch->user[i].tags = ch->numslots; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->pm_level) { ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | CTS_SATA_CAPS_H_APST | CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; } ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | CTS_SATA_CAPS_H_AN; } rid = 0; if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE))) return (ENXIO); ahci_dmainit(dev); ahci_slotsalloc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); rid = ATA_IRQ_RID; if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "Unable to map interrupt\n"); error = ENXIO; goto err0; } if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, ch, &ch->ih))) { device_printf(dev, "Unable to setup interrupt\n"); error = ENXIO; goto err1; } ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); version = ATA_INL(ctlr->r_mem, AHCI_VS); if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) ch->chcaps |= AHCI_P_CMD_FBSCP; if (ch->caps2 & AHCI_CAP2_SDS) ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); if (bootverbose) { device_printf(dev, "Caps:%s%s%s%s%s%s\n", (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); } /* Create the device queue for our SIM. */ devq = cam_simq_alloc(ch->numslots); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, device_get_unit(dev), (struct mtx *)&ch->mtx, min(2, ch->numslots), (ch->caps & AHCI_CAP_SNCQ) ? 
ch->numslots : 0, devq); if (ch->sim == NULL) { cam_simq_free(devq); device_printf(dev, "unable to allocate sim\n"); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } if (ch->pm_level > 3) { callout_reset(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8, ahci_ch_pm, ch); } mtx_unlock(&ch->mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); err1: bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); err0: bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_unlock(&ch->mtx); mtx_destroy(&ch->mtx); return (error); } static int ahci_ch_detach(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_async(AC_LOST_DEVICE, ch->path, NULL); /* Forget about reset. */ if (ch->resetting) { ch->resetting = 0; xpt_release_simq(ch->sim, TRUE); } xpt_free_path(ch->path); xpt_bus_deregister(cam_sim_path(ch->sim)); cam_sim_free(ch->sim, /*free_devq*/TRUE); mtx_unlock(&ch->mtx); if (ch->pm_level > 3) callout_drain(&ch->pm_timer); callout_drain(&ch->reset_timer); bus_teardown_intr(dev, ch->r_irq, ch->ih); bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); ahci_ch_deinit(dev); ahci_slotsfree(dev); ahci_dmafini(dev); bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); mtx_destroy(&ch->mtx); return (0); } static int ahci_ch_init(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); uint64_t work; /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Setup work areas */ work = ch->dma.work_bus + AHCI_CL_OFFSET; ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); work = ch->dma.rfis_bus; ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); /* Activate the channel and power/spin up device */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); ahci_start_fr(ch); ahci_start(ch, 1); return (0); } static int ahci_ch_deinit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); /* Disable port interrupts. */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset command register. */ ahci_stop(ch); ahci_stop_fr(ch); ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); /* Allow everything, including partial and slumber modes. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); /* Request slumber mode transition and give some time to get there. */ ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); DELAY(100); /* Disable PHY. */ ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } static int ahci_ch_suspend(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); xpt_freeze_simq(ch->sim, 1); /* Forget about reset. 
*/ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } while (ch->oslots) msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); ahci_ch_deinit(dev); mtx_unlock(&ch->mtx); return (0); } static int ahci_ch_resume(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); mtx_lock(&ch->mtx); ahci_ch_init(dev); ahci_reset(ch); xpt_release_simq(ch->sim, TRUE); mtx_unlock(&ch->mtx); return (0); } devclass_t ahcich_devclass; static device_method_t ahcich_methods[] = { DEVMETHOD(device_probe, ahci_ch_probe), DEVMETHOD(device_attach, ahci_ch_attach), DEVMETHOD(device_detach, ahci_ch_detach), DEVMETHOD(device_suspend, ahci_ch_suspend), DEVMETHOD(device_resume, ahci_ch_resume), DEVMETHOD_END }; static driver_t ahcich_driver = { "ahcich", ahcich_methods, sizeof(struct ahci_channel) }; DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); struct ahci_dc_cb_args { bus_addr_t maddr; int error; }; static void ahci_dmainit(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); struct ahci_dc_cb_args dcba; size_t rfsize; /* Command area. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 0, NULL, NULL, &ch->dma.work_tag)) goto error; if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, BUS_DMA_ZERO, &ch->dma.work_map)) goto error; if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); goto error; } ch->dma.work_bus = dcba.maddr; /* FIS receive area. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) rfsize = 4096; else rfsize = 256; if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rfsize, 1, rfsize, 0, NULL, NULL, &ch->dma.rfis_tag)) goto error; if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, &ch->dma.rfis_map)) goto error; if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); goto error; } ch->dma.rfis_bus = dcba.maddr; /* Data area. 
*/ if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, AHCI_SG_ENTRIES, AHCI_PRD_MAX, 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { goto error; } return; error: device_printf(dev, "WARNING - DMA initialization failed\n"); ahci_dmafini(dev); } static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; if (!(dcba->error = error)) dcba->maddr = segs[0].ds_addr; } static void ahci_dmafini(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); if (ch->dma.data_tag) { bus_dma_tag_destroy(ch->dma.data_tag); ch->dma.data_tag = NULL; } if (ch->dma.rfis_bus) { bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); ch->dma.rfis_bus = 0; ch->dma.rfis = NULL; } if (ch->dma.work_bus) { bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); ch->dma.work_bus = 0; ch->dma.work = NULL; } if (ch->dma.work_tag) { bus_dma_tag_destroy(ch->dma.work_tag); ch->dma.work_tag = NULL; } } static void ahci_slotsalloc(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Alloc and setup command/dma slots */ bzero(ch->slot, sizeof(ch->slot)); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; slot->ch = ch; slot->slot = i; slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; callout_init_mtx(&slot->timeout, &ch->mtx, 0); if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) device_printf(ch->dev, "FAILURE - create data_map\n"); } } static void ahci_slotsfree(device_t dev) { struct ahci_channel *ch = device_get_softc(dev); int i; /* Free all dma slots */ for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; callout_drain(&slot->timeout); if (slot->dma.data_map) { bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); slot->dma.data_map = NULL; } } } static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) { if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); union ccb *ccb; if (bootverbose) { if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) device_printf(ch->dev, "CONNECT requested\n"); else device_printf(ch->dev, "DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return (0); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return (0); } xpt_rescan(ccb); return (1); } return (0); } static void ahci_cpd_check_events(struct ahci_channel *ch) { u_int32_t status; union ccb *ccb; device_t dev; if (ch->pm_level == 0) return; status = ATA_INL(ch->r_mem, AHCI_P_CMD); if ((status & AHCI_P_CMD_CPD) == 0) return; if (bootverbose) { dev = ch->dev; if (status & AHCI_P_CMD_CPS) { device_printf(dev, "COLD CONNECT requested\n"); } else device_printf(dev, "COLD DISCONNECT requested\n"); } ahci_reset(ch); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) return; if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } static void ahci_notify_events(struct ahci_channel *ch, u_int32_t status) { struct cam_path *dpath; int i; if (ch->caps & 
AHCI_CAP_SSNTF) ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); if (bootverbose) device_printf(ch->dev, "SNTF 0x%04x\n", status); for (i = 0; i < 16; i++) { if ((status & (1 << i)) == 0) continue; if (xpt_create_path(&dpath, NULL, xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { xpt_async(AC_SCSI_AEN, dpath, NULL); xpt_free_path(dpath); } } } static void ahci_done(struct ahci_channel *ch, union ccb *ccb) { mtx_assert(&ch->mtx, MA_OWNED); if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || ch->batch == 0) { xpt_done(ccb); return; } STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); } static void ahci_ch_intr(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t istatus; /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus == 0) return; mtx_lock(&ch->mtx); ahci_ch_intr_main(ch, istatus); mtx_unlock(&ch->mtx); } static void ahci_ch_intr_direct(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; struct ccb_hdr *ccb_h; uint32_t istatus; STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); /* Read interrupt statuses. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus == 0) return; mtx_lock(&ch->mtx); ch->batch = 1; ahci_ch_intr_main(ch, istatus); ch->batch = 0; /* * Prevent the possibility of issues caused by processing the queue * while unlocked below by moving the contents to a local queue. */ STAILQ_CONCAT(&tmp_doneq, &ch->doneq); mtx_unlock(&ch->mtx); while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); xpt_done_direct((union ccb *)ccb_h); } } static void ahci_ch_pm(void *arg) { struct ahci_channel *ch = (struct ahci_channel *)arg; uint32_t work; if (ch->numrslots != 0) return; work = ATA_INL(ch->r_mem, AHCI_P_CMD); if (ch->pm_level == 4) work |= AHCI_P_CMD_PARTIAL; else work |= AHCI_P_CMD_SLUMBER; ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); } static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) { uint32_t cstatus, serr = 0, sntf = 0, ok, err; enum ahci_err_type et; int i, ccs, port, reset = 0; /* Clear interrupt statuses. */ ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); /* Read command statuses. */ if (ch->numtslots != 0) cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); else cstatus = 0; if (ch->numrslots != ch->numtslots) cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); /* Read SNTF in one of possible ways. 
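* Three sources are tried in order below: the PxSNTF register when AHCI_CAP_SSNTF is present, the per-port Set Device Bits FIS copies (one 256-byte region each, at offset 0x58) in the FIS receive area when FIS-based switching is enabled, and the single received SDB FIS otherwise.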
*/ if ((istatus & AHCI_P_IX_SDB) && (ch->pm_present || ch->curr[0].atapi != 0)) { if (ch->caps & AHCI_CAP_SSNTF) sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); else if (ch->fbs_enabled) { u_int8_t *fis = ch->dma.rfis + 0x58; for (i = 0; i < 16; i++) { if (fis[1] & 0x80) { fis[1] &= 0x7f; sntf |= 1 << i; } fis += 256; } } else { u_int8_t *fis = ch->dma.rfis + 0x58; if (fis[1] & 0x80) sntf = (1 << (fis[1] & 0x0f)); } } /* Process PHY events */ if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { serr = ATA_INL(ch->r_mem, AHCI_P_SERR); if (serr) { ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); reset = ahci_phy_check_events(ch, serr); } } /* Process cold presence detection events */ if ((istatus & AHCI_P_IX_CPD) && !reset) ahci_cpd_check_events(ch); /* Process command errors */ if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); port = -1; if (ch->fbs_enabled) { uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); if (fbs & AHCI_P_FBS_SDE) { port = (fbs & AHCI_P_FBS_DWE) >> AHCI_P_FBS_DWE_SHIFT; } else { for (i = 0; i < 16; i++) { if (ch->numrslotspd[i] == 0) continue; if (port == -1) port = i; else if (port != i) { port = -2; break; } } } } err = ch->rslots & cstatus; } else { ccs = 0; err = 0; port = -1; } /* Complete all successful commands. */ ok = ch->rslots & ~cstatus; for (i = 0; i < ch->numslots; i++) { if ((ok >> i) & 1) ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); } /* On error, complete the rest of the commands with error statuses. */ if (err) { if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } for (i = 0; i < ch->numslots; i++) { /* XXX: requests in loading state. */ if (((err >> i) & 1) == 0) continue; if (port >= 0 && ch->slot[i].ccb->ccb_h.target_id != port) continue; if (istatus & AHCI_P_IX_TFE) { if (port != -2) { /* Task File Error */ if (ch->numtslotspd[ ch->slot[i].ccb->ccb_h.target_id] == 0) { /* Untagged operation. */ if (i == ccs) et = AHCI_ERR_TFE; else et = AHCI_ERR_INNOCENT; } else { /* Tagged operation. */ et = AHCI_ERR_NCQ; } } else { et = AHCI_ERR_TFE; ch->fatalerr = 1; } } else if (istatus & AHCI_P_IX_IF) { if (ch->numtslots == 0 && i != ccs && port != -2) et = AHCI_ERR_INNOCENT; else et = AHCI_ERR_SATA; } else et = AHCI_ERR_INVALID; ahci_end_transaction(&ch->slot[i], et); } /* * We can't reinit the port if some other * commands are active; use resume to complete them. */ if (ch->rslots != 0 && !ch->recoverycmd) ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); } /* Process NOTIFY events */ if (sntf) ahci_notify_events(ch, sntf); } /* Must be called with channel locked. */ static int ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) { int t = ccb->ccb_h.target_id; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { /* Tagged command while no supported tag is free.
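* The mask below covers the tags this target may use, e.g. tags = 32 yields 0xffffffff and tags = 5 yields 0x0000001f; the command collides when every permitted tag is already set in oslots.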
*/ if (((~ch->oslots) & (0xffffffff >> (32 - ch->curr[t].tags))) == 0) return (1); /* If we have FBS */ if (ch->fbs_enabled) { /* Tagged command while untagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) return (1); } else { /* Tagged command while untagged are active. */ if (ch->numrslots != 0 && ch->numtslots == 0) return (1); /* Tagged command while tagged to other target is active. */ if (ch->numtslots != 0 && ch->taggedtarget != ccb->ccb_h.target_id) return (1); } } else { /* If we have FBS */ if (ch->fbs_enabled) { /* Untagged command while tagged are active. */ if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) return (1); } else { /* Untagged command while tagged are active. */ if (ch->numrslots != 0 && ch->numtslots != 0) return (1); } } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { /* Atomic command while anything active. */ if (ch->numrslots != 0) return (1); } /* We have some atomic command running. */ if (ch->aslots != 0) return (1); return (0); } /* Must be called with channel locked. */ static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) { struct ahci_slot *slot; int tag, tags; /* Choose empty slot. */ tags = ch->numslots; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) tags = ch->curr[ccb->ccb_h.target_id].tags; if (ch->lastslot + 1 < tags) tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); else tag = 0; if (tag == 0 || tag + ch->lastslot >= tags) tag = ffs(~ch->oslots) - 1; else tag += ch->lastslot; ch->lastslot = tag; /* Occupy chosen slot. */ slot = &ch->slot[tag]; slot->ccb = ccb; /* Stop PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3) callout_stop(&ch->pm_timer); /* Update channel stats. */ ch->oslots |= (1 << tag); ch->numrslots++; ch->numrslotspd[ccb->ccb_h.target_id]++; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots++; ch->numtslotspd[ccb->ccb_h.target_id]++; ch->taggedtarget = ccb->ccb_h.target_id; } if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) ch->aslots |= (1 << tag); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { slot->state = AHCI_SLOT_LOADING; bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, ahci_dmasetprd, slot, 0); } else { slot->dma.nsegs = 0; ahci_execute_transaction(slot); } } /* Locked by busdma engine. */ static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ahci_slot *slot = arg; struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_dma_prd *prd; int i; if (error) { device_printf(ch->dev, "DMA load error\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Fill S/G table */ prd = &ctp->prd_tab[0]; for (i = 0; i < nsegs; i++) { prd[i].dba = htole64(segs[i].ds_addr); prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); } slot->dma.nsegs = nsegs; bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); ahci_execute_transaction(slot); } /* Must be called with channel locked. 
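* The channel mutex protects the slot bookkeeping (oslots, rslots and the per-target counters) read and updated here, and serializes the port register writes that issue the command.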
*/ static void ahci_execute_transaction(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; struct ahci_cmd_tab *ctp; struct ahci_cmd_list *clp; union ccb *ccb = slot->ccb; int port = ccb->ccb_h.target_id & 0x0f; int fis_size, i, softreset; uint8_t *fis = ch->dma.rfis + 0x40; uint8_t val; /* Get a piece of the workspace for this request */ ctp = (struct ahci_cmd_tab *) (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); /* Setup the FIS for this request */ if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { device_printf(ch->dev, "Setting up SATA FIS failed\n"); ahci_end_transaction(slot, AHCI_ERR_INVALID); return; } /* Setup the command list entry */ clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); clp->cmd_flags = htole16( (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | (ccb->ccb_h.func_code == XPT_SCSI_IO ? (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | (fis_size / sizeof(u_int32_t)) | (port << 12)); clp->prd_length = htole16(slot->dma.nsegs); /* Special handling for Soft Reset command. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { if (ccb->ataio.cmd.control & ATA_A_RESET) { softreset = 1; /* Kick controller into sane state */ ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 0); clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; } else { softreset = 2; /* Prepare FIS receive area for check. */ for (i = 0; i < 20; i++) fis[i] = 0xff; } } else softreset = 0; clp->bytecount = 0; clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); /* Set ACTIVE bit for NCQ commands. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); } /* If FBS is enabled, set PMP port. */ if (ch->fbs_enabled) { ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | (port << AHCI_P_FBS_DEV_SHIFT)); } /* Issue command to the controller. */ slot->state = AHCI_SLOT_RUNNING; ch->rslots |= (1 << slot->slot); ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); /* Device reset commands don't interrupt. Poll them. */ if (ccb->ccb_h.func_code == XPT_ATA_IO && (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { int count, timeout = ccb->ccb_h.timeout * 100; enum ahci_err_type et = AHCI_ERR_NONE; for (count = 0; count < timeout; count++) { DELAY(10); if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) break; if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && softreset != 1) { #if 0 device_printf(ch->dev, "Poll error on slot %d, TFD: %04x\n", slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); #endif et = AHCI_ERR_TFE; break; } /* Workaround for ATI SB600/SB700 chipsets. */ if (ccb->ccb_h.target_id == 15 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) && (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { et = AHCI_ERR_TIMEOUT; break; } } /* * Marvell HBAs with non-RAID firmware do not wait for * readiness after soft reset, so we have to wait here. * Marvell RAIDs do not have this problem, but instead * sometimes forget to update the FIS receive area, breaking * this wait.
*/ if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && softreset == 2 && et == AHCI_ERR_NONE) { for ( ; count < timeout; count++) { bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); val = fis[2]; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_PREREAD); if ((val & ATA_S_BUSY) == 0) break; DELAY(10); } } if (timeout && (count >= timeout)) { device_printf(ch->dev, "Poll timeout on slot %d port %d\n", slot->slot, port); device_printf(ch->dev, "is %08x cs %08x ss %08x " "rs %08x tfd %02x serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); et = AHCI_ERR_TIMEOUT; } /* Kick controller into sane state and enable FBS. */ if (softreset == 2) ch->eslots |= (1 << slot->slot); ahci_end_transaction(slot, et); return; } /* Start command execution timeout */ callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 0, (timeout_t*)ahci_timeout, slot, 0); return; } /* Must be called with channel locked. */ static void ahci_process_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); } } /* Must be called with channel locked. */ static void ahci_rearm_timeout(struct ahci_channel *ch) { int i; mtx_assert(&ch->mtx, MA_OWNED); for (i = 0; i < ch->numslots; i++) { struct ahci_slot *slot = &ch->slot[i]; /* Do we have a running request on slot? */ if (slot->state < AHCI_SLOT_RUNNING) continue; if ((ch->toslots & (1 << i)) == 0) continue; callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, (timeout_t*)ahci_timeout, slot, 0); } } /* Locked by callout mechanism. */ static void ahci_timeout(struct ahci_slot *slot) { struct ahci_channel *ch = slot->ch; device_t dev = ch->dev; uint32_t sstatus; int ccs; int i; /* Check for stale timeout. */ if (slot->state < AHCI_SLOT_RUNNING) return; /* Check if slot was not being executed last time we checked. */ if (slot->state < AHCI_SLOT_EXECUTING) { /* Check if slot started executing. */ sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || ch->fbs_enabled || ch->wrongccs) slot->state = AHCI_SLOT_EXECUTING; else if ((ch->rslots & (1 << ccs)) == 0) { ch->wrongccs = 1; slot->state = AHCI_SLOT_EXECUTING; } callout_reset_sbt(&slot->timeout, SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, (timeout_t*)ahci_timeout, slot, 0); return; } device_printf(dev, "Timeout on slot %d port %d\n", slot->slot, slot->ccb->ccb_h.target_id & 0x0f); device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " "serr %08x cmd %08x\n", ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), ATA_INL(ch->r_mem, AHCI_P_CMD)); /* Handle frozen command. 
*/ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } if (!ch->fbs_enabled && !ch->wrongccs) { /* Without FBS we know real timeout source. */ ch->fatalerr = 1; /* Handle command with timeout. */ ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); /* Handle the rest of commands. */ for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } } else { /* With FBS we wait for other commands timeout and pray. */ if (ch->toslots == 0) xpt_freeze_simq(ch->sim, 1); ch->toslots |= (1 << slot->slot); if ((ch->rslots & ~ch->toslots) == 0) ahci_process_timeout(ch); else device_printf(dev, " ... waiting for slots %08x\n", ch->rslots & ~ch->toslots); } } /* Must be called with channel locked. */ static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) { struct ahci_channel *ch = slot->ch; union ccb *ccb = slot->ccb; struct ahci_cmd_list *clp; int lastto; uint32_t sig; bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); clp = (struct ahci_cmd_list *) (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); /* Read result registers to the result struct * May be incorrect if several commands finished same time, * so read only when sure or have to. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { struct ata_res *res = &ccb->ataio.res; if ((et == AHCI_ERR_TFE) || (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { u_int8_t *fis = ch->dma.rfis + 0x40; bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); if (ch->fbs_enabled) { fis += ccb->ccb_h.target_id * 256; res->status = fis[2]; res->error = fis[3]; } else { uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); res->status = tfd; res->error = tfd >> 8; } res->lba_low = fis[4]; res->lba_mid = fis[5]; res->lba_high = fis[6]; res->device = fis[7]; res->lba_low_exp = fis[8]; res->lba_mid_exp = fis[9]; res->lba_high_exp = fis[10]; res->sector_count = fis[12]; res->sector_count_exp = fis[13]; /* * Some weird controllers do not return signature in * FIS receive area. Read it from PxSIG register. */ if ((ch->quirks & AHCI_Q_ALTSIG) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { sig = ATA_INL(ch->r_mem, AHCI_P_SIG); res->lba_high = sig >> 24; res->lba_mid = sig >> 16; res->lba_low = sig >> 8; res->sector_count = sig; } } else bzero(res, sizeof(*res)); if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->ataio.resid = ccb->ataio.dxfer_len - le32toh(clp->bytecount); } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && (ch->quirks & AHCI_Q_NOCOUNT) == 0) { ccb->csio.resid = ccb->csio.dxfer_len - le32toh(clp->bytecount); } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, (ccb->ccb_h.flags & CAM_DIR_IN) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); } if (et != AHCI_ERR_NONE) ch->eslots |= (1 << slot->slot); /* In case of error, freeze device for proper recovery. 
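* Freezing the device queue keeps subsequent requests ordered behind the failed one while the recovery command (READ LOG EXT or REQUEST SENSE, see ahci_issue_recovery()) fetches the error details.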
*/ if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(ccb->ccb_h.path, 1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } /* Set proper result status. */ ccb->ccb_h.status &= ~CAM_STATUS_MASK; switch (et) { case AHCI_ERR_NONE: ccb->ccb_h.status |= CAM_REQ_CMP; if (ccb->ccb_h.func_code == XPT_SCSI_IO) ccb->csio.scsi_status = SCSI_STATUS_OK; break; case AHCI_ERR_INVALID: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_INVALID; break; case AHCI_ERR_INNOCENT: ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; case AHCI_ERR_TFE: case AHCI_ERR_NCQ: if (ccb->ccb_h.func_code == XPT_SCSI_IO) { ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; } else { ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; } break; case AHCI_ERR_SATA: ch->fatalerr = 1; if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_UNCOR_PARITY; break; case AHCI_ERR_TIMEOUT: if (!ch->recoverycmd) { xpt_freeze_simq(ch->sim, 1); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } ccb->ccb_h.status |= CAM_CMD_TIMEOUT; break; default: ch->fatalerr = 1; ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } /* Free slot. */ ch->oslots &= ~(1 << slot->slot); ch->rslots &= ~(1 << slot->slot); ch->aslots &= ~(1 << slot->slot); slot->state = AHCI_SLOT_EMPTY; slot->ccb = NULL; /* Update channel stats. */ ch->numrslots--; ch->numrslotspd[ccb->ccb_h.target_id]--; if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { ch->numtslots--; ch->numtslotspd[ccb->ccb_h.target_id]--; } /* Cancel timeout state if request completed normally. */ if (et != AHCI_ERR_TIMEOUT) { lastto = (ch->toslots == (1 << slot->slot)); ch->toslots &= ~(1 << slot->slot); if (lastto) xpt_release_simq(ch->sim, TRUE); } /* If it was the first request of a reset sequence and there was no error, * proceed to the second request. */ if ((ccb->ccb_h.func_code == XPT_ATA_IO) && (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && (ccb->ataio.cmd.control & ATA_A_RESET) && et == AHCI_ERR_NONE) { ccb->ataio.cmd.control &= ~ATA_A_RESET; ahci_begin_transaction(ch, ccb); return; } /* If it was our READ LOG command, process it. */ if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { ahci_process_read_log(ch, ccb); /* If it was our REQUEST SENSE command, process it. */ } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { ahci_process_request_sense(ch, ccb); /* If it was an NCQ or ATAPI command error, put the result on hold. */ } else if (et == AHCI_ERR_NCQ || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { ch->hold[slot->slot] = ccb; ch->numhslots++; } else ahci_done(ch, ccb); /* If we have no other active commands, ... */ if (ch->rslots == 0) { /* if there was a fatal error, reset the port. */ if (ch->toslots != 0 || ch->fatalerr) { ahci_reset(ch); } else { /* if we have slots in error, we can reinit the port. */ if (ch->eslots != 0) { ahci_stop(ch); ahci_clo(ch); ahci_start(ch, 1); } /* if there are commands on hold, we can do READ LOG. */ if (!ch->recoverycmd && ch->numhslots) ahci_issue_recovery(ch); } /* If all of the remaining commands are in timeout, give them a chance. */ } else if ((ch->rslots & ~ch->toslots) == 0 && et != AHCI_ERR_TIMEOUT) ahci_rearm_timeout(ch); /* Unfreeze frozen command.
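* With a slot now free, the previously collided command can be retried and the extra SIM queue freeze taken when it was frozen can be released.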
*/ if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { union ccb *fccb = ch->frozen; ch->frozen = NULL; ahci_begin_transaction(ch, fccb); xpt_release_simq(ch->sim, TRUE); } /* Start PM timer. */ if (ch->numrslots == 0 && ch->pm_level > 3 && (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { callout_schedule(&ch->pm_timer, (ch->pm_level == 4) ? hz / 1000 : hz / 8); } } static void ahci_issue_recovery(struct ahci_channel *ch) { union ccb *ccb; struct ccb_ataio *ataio; struct ccb_scsiio *csio; int i; /* Find some held command. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i]) break; } ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { device_printf(ch->dev, "Unable to allocate recovery command\n"); completeall: /* We can't do anything -- complete held commands. */ for (i = 0; i < ch->numslots; i++) { if (ch->hold[i] == NULL) continue; ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } ahci_reset(ch); return; } ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* READ LOG */ ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; ccb->ccb_h.func_code = XPT_ATA_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ ataio = &ccb->ataio; ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); if (ataio->data_ptr == NULL) { xpt_free_ccb(ccb); device_printf(ch->dev, "Unable to allocate memory for READ LOG command\n"); goto completeall; } ataio->dxfer_len = 512; bzero(&ataio->cmd, sizeof(ataio->cmd)); ataio->cmd.flags = CAM_ATAIO_48BIT; ataio->cmd.command = 0x2F; /* READ LOG EXT */ ataio->cmd.sector_count = 1; ataio->cmd.sector_count_exp = 0; ataio->cmd.lba_low = 0x10; ataio->cmd.lba_mid = 0; ataio->cmd.lba_mid_exp = 0; } else { /* REQUEST SENSE */ ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; ccb->ccb_h.recovery_slot = i; ccb->ccb_h.func_code = XPT_SCSI_IO; ccb->ccb_h.flags = CAM_DIR_IN; ccb->ccb_h.status = 0; ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ csio = &ccb->csio; csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; csio->dxfer_len = ch->hold[i]->csio.sense_len; csio->cdb_len = 6; bzero(&csio->cdb_io, sizeof(csio->cdb_io)); csio->cdb_io.cdb_bytes[0] = 0x03; csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; } /* Freeze SIM while doing recovery. 
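* The recoverycmd flag marks the recovery request itself, so the error paths above do not recurse; the SIM queue stays frozen until ahci_process_read_log() or ahci_process_request_sense() releases it.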
*/ ch->recoverycmd = 1; xpt_freeze_simq(ch->sim, 1); ahci_begin_transaction(ch, ccb); } static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) { uint8_t *data; struct ata_res *res; int i; ch->recoverycmd = 0; data = ccb->ataio.data_ptr; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (data[0] & 0x80) == 0) { for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; if ((data[0] & 0x1F) == i) { res = &ch->hold[i]->ataio.res; res->status = data[2]; res->error = data[3]; res->lba_low = data[4]; res->lba_mid = data[5]; res->lba_high = data[6]; res->device = data[7]; res->lba_low_exp = data[8]; res->lba_mid_exp = data[9]; res->lba_high_exp = data[10]; res->sector_count = data[12]; res->sector_count_exp = data[13]; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } else { if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) device_printf(ch->dev, "Error while READ LOG EXT\n"); else if ((data[0] & 0x80) == 0) { device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } } free(ccb->ataio.data_ptr, M_AHCI); xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) { int i; ch->recoverycmd = 0; i = ccb->ccb_h.recovery_slot; if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; } else { ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; } ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; xpt_free_ccb(ccb); xpt_release_simq(ch->sim, TRUE); } static void ahci_start(struct ahci_channel *ch, int fbs) { u_int32_t cmd; /* Run the channel start callback, if any. */ if (ch->start) ch->start(ch); /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); /* Clear any interrupts pending on this channel */ ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); /* Configure FIS-based switching if supported. */ if (ch->chcaps & AHCI_P_CMD_FBSCP) { ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; ATA_OUTL(ch->r_mem, AHCI_P_FBS, ch->fbs_enabled ? AHCI_P_FBS_EN : 0); } /* Start operations on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd &= ~AHCI_P_CMD_PMA; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | (ch->pm_present ? AHCI_P_CMD_PMA : 0)); } static void ahci_stop(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all activity on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); /* Wait for activity stop. 
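* The loop below polls AHCI_P_CMD_CR in 10us steps for up to 50000 iterations, i.e. roughly half a second, before declaring failure.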
*/ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); ch->eslots = 0; } static void ahci_clo(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Issue Command List Override if supported */ if (ch->caps & AHCI_CAP_SCLO) { cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); cmd |= AHCI_P_CMD_CLO; ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "executing CLO failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); } } static void ahci_stop_fr(struct ahci_channel *ch) { u_int32_t cmd; int timeout; /* Kill all FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); /* Wait for FIS reception stop. */ timeout = 0; do { DELAY(10); if (timeout++ > 50000) { device_printf(ch->dev, "stopping AHCI FR engine failed\n"); break; } } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); } static void ahci_start_fr(struct ahci_channel *ch) { u_int32_t cmd; /* Start FIS reception on this channel */ cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); } static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0) { int timeout = 0; uint32_t val; while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & (ATA_S_BUSY | ATA_S_DRQ)) { if (timeout > t) { if (t != 0) { device_printf(ch->dev, "AHCI reset: device not ready after %dms " "(tfd = %08x)\n", MAX(t, 0) + t0, val); } return (EBUSY); } DELAY(1000); timeout++; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device ready after %dms\n", timeout + t0); return (0); } static void ahci_reset_to(void *arg) { struct ahci_channel *ch = arg; if (ch->resetting == 0) return; ch->resetting--; if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, (310 - ch->resetting) * 100) == 0) { ch->resetting = 0; ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } if (ch->resetting == 0) { ahci_clo(ch); ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); return; } callout_schedule(&ch->reset_timer, hz / 10); } static void ahci_reset(struct ahci_channel *ch) { struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); int i; xpt_freeze_simq(ch->sim, 1); if (bootverbose) device_printf(ch->dev, "AHCI reset...\n"); /* Forget about previous reset. */ if (ch->resetting) { ch->resetting = 0; callout_stop(&ch->reset_timer); xpt_release_simq(ch->sim, TRUE); } /* Requeue frozen command. */ if (ch->frozen) { union ccb *fccb = ch->frozen; ch->frozen = NULL; fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { xpt_freeze_devq(fccb->ccb_h.path, 1); fccb->ccb_h.status |= CAM_DEV_QFRZN; } ahci_done(ch, fccb); } /* Kill the engine and requeue all running commands. */ ahci_stop(ch); for (i = 0; i < ch->numslots; i++) { /* Do we have a running request on slot? */ if (ch->slot[i].state < AHCI_SLOT_RUNNING) continue; /* XXX: Commands in loading state.
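* Slots still in AHCI_SLOT_LOADING (their busdma load callback has not run yet) are skipped by the state check above; presumably they complete once the callback fires.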
*/ ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); } for (i = 0; i < ch->numslots; i++) { if (!ch->hold[i]) continue; ahci_done(ch, ch->hold[i]); ch->hold[i] = NULL; ch->numhslots--; } if (ch->toslots != 0) xpt_release_simq(ch->sim, TRUE); ch->eslots = 0; ch->toslots = 0; ch->wrongccs = 0; ch->fatalerr = 0; /* Tell the XPT about the event */ xpt_async(AC_BUS_RESET, ch->path, NULL); /* Disable port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); /* Reset and reconnect PHY, */ if (!ahci_sata_phy_reset(ch)) { if (bootverbose) device_printf(ch->dev, "AHCI reset: device not found\n"); ch->devices = 0; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_PRC | AHCI_P_IX_PC)); xpt_release_simq(ch->sim, TRUE); return; } if (bootverbose) device_printf(ch->dev, "AHCI reset: device found\n"); /* Wait for clearing busy status. */ if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { if (dumping) ahci_clo(ch); else ch->resetting = 310; } ch->devices = 1; /* Enable wanted port interrupts */ ATA_OUTL(ch->r_mem, AHCI_P_IE, (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | AHCI_P_IX_TFE | AHCI_P_IX_HBF | AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); if (ch->resetting) callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); else { ahci_start(ch, 1); xpt_release_simq(ch->sim, TRUE); } } static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) { u_int8_t *fis = &ctp->cfis[0]; bzero(fis, 20); fis[0] = 0x27; /* host to device */ fis[1] = (ccb->ccb_h.target_id & 0x0f); if (ccb->ccb_h.func_code == XPT_SCSI_IO) { fis[1] |= 0x80; fis[2] = ATA_PACKET_CMD; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) fis[3] = ATA_F_DMA; else { fis[5] = ccb->csio.dxfer_len; fis[6] = ccb->csio.dxfer_len >> 8; } fis[7] = ATA_D_LBA; fis[15] = ATA_A_4BIT; bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, ctp->acmd, ccb->csio.cdb_len); bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { fis[1] |= 0x80; fis[2] = ccb->ataio.cmd.command; fis[3] = ccb->ataio.cmd.features; fis[4] = ccb->ataio.cmd.lba_low; fis[5] = ccb->ataio.cmd.lba_mid; fis[6] = ccb->ataio.cmd.lba_high; fis[7] = ccb->ataio.cmd.device; fis[8] = ccb->ataio.cmd.lba_low_exp; fis[9] = ccb->ataio.cmd.lba_mid_exp; fis[10] = ccb->ataio.cmd.lba_high_exp; fis[11] = ccb->ataio.cmd.features_exp; if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { fis[12] = tag << 3; - fis[13] = 0; } else { fis[12] = ccb->ataio.cmd.sector_count; - fis[13] = ccb->ataio.cmd.sector_count_exp; } + fis[13] = ccb->ataio.cmd.sector_count_exp; fis[15] = ATA_A_4BIT; } else { fis[15] = ccb->ataio.cmd.control; } if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { fis[16] = ccb->ataio.aux & 0xff; fis[17] = (ccb->ataio.aux >> 8) & 0xff; fis[18] = (ccb->ataio.aux >> 16) & 0xff; fis[19] = (ccb->ataio.aux >> 24) & 0xff; } return (20); } static int ahci_sata_connect(struct ahci_channel *ch) { u_int32_t status; int timeout, found = 0; /* Wait up to 100ms for "connect well" */ for (timeout = 0; timeout < 1000 ; timeout++) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS); if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) found = 1; if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) break; if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { if (bootverbose) { device_printf(ch->dev, "SATA offline status=%08x\n", status); } return (0); } if (found == 0 && timeout >= 100) break; DELAY(100); } if (timeout >= 1000 || !found) { if (bootverbose) { device_printf(ch->dev, "SATA connect timeout time=%dus status=%08x\n", timeout * 100, status); } return (0); } if (bootverbose) { device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", timeout * 100, status); } /* Clear SATA error register */ ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); return (1); } static int ahci_sata_phy_reset(struct ahci_channel *ch) { int sata_rev; uint32_t val; if (ch->listening) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val |= AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 0; } sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; if (sata_rev == 1) val = ATA_SC_SPD_SPEED_GEN1; else if (sata_rev == 2) val = ATA_SC_SPD_SPEED_GEN2; else if (sata_rev == 3) val = ATA_SC_SPD_SPEED_GEN3; else val = 0; ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_RESET | val | ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); DELAY(1000); ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 : (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); if (!ahci_sata_connect(ch)) { if (ch->caps & AHCI_CAP_SSS) { val = ATA_INL(ch->r_mem, AHCI_P_CMD); val &= ~AHCI_P_CMD_SUD; ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); ch->listening = 1; } else if (ch->pm_level > 0) ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); return (0); } return (1); } static int ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) { if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 
15 : 0)) { ccb->ccb_h.status = CAM_TID_INVALID; ahci_done(ch, ccb); return (-1); } if (ccb->ccb_h.target_lun != 0) { ccb->ccb_h.status = CAM_LUN_INVALID; ahci_done(ch, ccb); return (-1); } return (0); } static void ahciaction(struct cam_sim *sim, union ccb *ccb) { struct ahci_channel *ch; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", ccb->ccb_h.func_code)); ch = (struct ahci_channel *)cam_sim_softc(sim); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_ATA_IO: /* Execute the requested I/O operation */ case XPT_SCSI_IO: if (ahci_check_ids(ch, ccb)) return; if (ch->devices == 0 || (ch->pm_present == 0 && ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } ccb->ccb_h.recovery_type = RECOVERY_NONE; /* Check for command collision. */ if (ahci_check_collision(ch, ccb)) { /* Freeze command. */ ch->frozen = ccb; /* We have only one frozen slot, so freeze simq also. */ xpt_freeze_simq(ch->sim, 1); return; } ahci_begin_transaction(ch, ccb); return; case XPT_EN_LUN: /* Enable LUN as a target */ case XPT_TARGET_IO: /* Execute target I/O request */ case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) d->revision = cts->xport_specific.sata.revision; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) d->mode = cts->xport_specific.sata.mode; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) d->bytecount = min(8192, cts->xport_specific.sata.bytecount); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) d->tags = min(ch->numslots, cts->xport_specific.sata.tags); if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) ch->pm_present = cts->xport_specific.sata.pm_present; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) d->atapi = cts->xport_specific.sata.atapi; if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) d->caps = cts->xport_specific.sata.caps; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings *cts = &ccb->cts; struct ahci_device *d; uint32_t status; if (ahci_check_ids(ch, ccb)) return; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) d = &ch->curr[ccb->ccb_h.target_id]; else d = &ch->user[ccb->ccb_h.target_id]; cts->protocol = PROTO_UNSPECIFIED; cts->protocol_version = PROTO_VERSION_UNSPECIFIED; cts->transport = XPORT_SATA; cts->transport_version = XPORT_VERSION_UNSPECIFIED; cts->proto_specific.valid = 0; cts->xport_specific.sata.valid = 0; if (cts->type == CTS_TYPE_CURRENT_SETTINGS && (ccb->ccb_h.target_id == 15 || (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; if (status & 0x0f0) { cts->xport_specific.sata.revision = (status & 0x0f0) >> 4; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; } cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; if (ch->pm_level) { if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->caps2 & 
AHCI_CAP2_APST) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; } if ((ch->caps & AHCI_CAP_SNCQ) && (ch->quirks & AHCI_Q_NOAA) == 0) cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; cts->xport_specific.sata.caps &= ch->user[ccb->ccb_h.target_id].caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } else { cts->xport_specific.sata.revision = d->revision; cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; cts->xport_specific.sata.caps = d->caps; cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; } cts->xport_specific.sata.mode = d->mode; cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; cts->xport_specific.sata.bytecount = d->bytecount; cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; cts->xport_specific.sata.pm_present = ch->pm_present; cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; cts->xport_specific.sata.tags = d->tags; cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; cts->xport_specific.sata.atapi = d->atapi; cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ ahci_reset(ch); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE; if (ch->caps & AHCI_CAP_SNCQ) cpi->hba_inquiry |= PI_TAG_ABLE; if (ch->caps & AHCI_CAP_SPM) cpi->hba_inquiry |= PI_SATAPM; cpi->target_sprt = 0; cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_ATA_EXT; cpi->hba_eng_cnt = 0; if (ch->caps & AHCI_CAP_SPM) cpi->max_target = 15; else cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 0; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->transport = XPORT_SATA; cpi->transport_version = XPORT_VERSION_UNSPECIFIED; cpi->protocol = PROTO_ATA; cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; cpi->maxio = MAXPHYS; /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ if (ch->quirks & AHCI_Q_MAXIO_64K) cpi->maxio = min(cpi->maxio, 128 * 512); cpi->hba_vendor = ch->vendorid; cpi->hba_device = ch->deviceid; cpi->hba_subvendor = ch->subvendorid; cpi->hba_subdevice = ch->subdeviceid; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } ahci_done(ch, ccb); } static void ahcipoll(struct cam_sim *sim) { struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); uint32_t istatus; /* Read interrupt statuses and process if any. */ istatus = ATA_INL(ch->r_mem, AHCI_P_IS); if (istatus != 0) ahci_ch_intr_main(ch, istatus); if (ch->resetting != 0 && (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { ch->resetpolldiv = 1000; ahci_reset_to(ch); } } MODULE_VERSION(ahci, 1); MODULE_DEPEND(ahci, cam, 1, 1, 1); Index: head/sys/geom/eli/g_eli.c =================================================================== --- head/sys/geom/eli/g_eli.c (revision 300206) +++ head/sys/geom/eli/g_eli.c (revision 300207) @@ -1,1270 +1,1272 @@ /*- * Copyright (c) 2005-2011 Pawel Jakub Dawidek * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include FEATURE(geom_eli, "GEOM crypto module"); MALLOC_DEFINE(M_ELI, "eli data", "GEOM_ELI Data"); SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, eli, CTLFLAG_RW, 0, "GEOM_ELI stuff"); static int g_eli_version = G_ELI_VERSION; SYSCTL_INT(_kern_geom_eli, OID_AUTO, version, CTLFLAG_RD, &g_eli_version, 0, "GELI version"); int g_eli_debug = 0; SYSCTL_INT(_kern_geom_eli, OID_AUTO, debug, CTLFLAG_RWTUN, &g_eli_debug, 0, "Debug level"); static u_int g_eli_tries = 3; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, tries, CTLFLAG_RWTUN, &g_eli_tries, 0, "Number of tries for entering the passphrase"); static u_int g_eli_visible_passphrase = GETS_NOECHO; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, visible_passphrase, CTLFLAG_RWTUN, &g_eli_visible_passphrase, 0, "Visibility of passphrase prompt (0 = invisible, 1 = visible, 2 = asterisk)"); u_int g_eli_overwrites = G_ELI_OVERWRITES; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, overwrites, CTLFLAG_RWTUN, &g_eli_overwrites, 0, "Number of times on-disk keys should be overwritten when destroying them"); static u_int g_eli_threads = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, threads, CTLFLAG_RWTUN, &g_eli_threads, 0, "Number of threads doing crypto work"); u_int g_eli_batch = 0; SYSCTL_UINT(_kern_geom_eli, OID_AUTO, batch, CTLFLAG_RWTUN, &g_eli_batch, 0, "Use crypto operations batching"); /* * Passphrase cached during boot, in order to be more user-friendly if * there are multiple providers using the same passphrase. */ static char cached_passphrase[256]; static u_int g_eli_boot_passcache = 1; TUNABLE_INT("kern.geom.eli.boot_passcache", &g_eli_boot_passcache); SYSCTL_UINT(_kern_geom_eli, OID_AUTO, boot_passcache, CTLFLAG_RD, &g_eli_boot_passcache, 0, "Passphrases are cached during boot process for possible reuse"); static void fetch_loader_passphrase(void * dummy) { char * env_passphrase; KASSERT(dynamic_kenv, ("need dynamic kenv")); if ((env_passphrase = kern_getenv("kern.geom.eli.passphrase")) != NULL) { /* Extract passphrase from the environment. 
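* The cached copy is zeroed from the mountroot event handler below, and the variable is removed from the kernel environment right away so the passphrase does not linger there.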
*/ strlcpy(cached_passphrase, env_passphrase, sizeof(cached_passphrase)); freeenv(env_passphrase); /* Wipe the passphrase from the environment. */ kern_unsetenv("kern.geom.eli.passphrase"); } } SYSINIT(geli_fetch_loader_passphrase, SI_SUB_KMEM + 1, SI_ORDER_ANY, fetch_loader_passphrase, NULL); static void zero_boot_passcache(void * dummy) { memset(cached_passphrase, 0, sizeof(cached_passphrase)); } EVENTHANDLER_DEFINE(mountroot, zero_boot_passcache, NULL, 0); static eventhandler_tag g_eli_pre_sync = NULL; static int g_eli_destroy_geom(struct gctl_req *req, struct g_class *mp, struct g_geom *gp); static void g_eli_init(struct g_class *mp); static void g_eli_fini(struct g_class *mp); static g_taste_t g_eli_taste; static g_dumpconf_t g_eli_dumpconf; struct g_class g_eli_class = { .name = G_ELI_CLASS_NAME, .version = G_VERSION, .ctlreq = g_eli_config, .taste = g_eli_taste, .destroy_geom = g_eli_destroy_geom, .init = g_eli_init, .fini = g_eli_fini }; /* * Code paths: * BIO_READ: * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ /* * EAGAIN from crypto(9) means that we were probably rebalanced to another crypto * accelerator or something like that. * The function updates the SID and reruns the operation. */ int g_eli_crypto_rerun(struct cryptop *crp) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; bp = (struct bio *)crp->crp_opaque; sc = bp->bio_to->geom->softc; LIST_FOREACH(wr, &sc->sc_workers, w_next) { if (wr->w_number == bp->bio_pflags) break; } KASSERT(wr != NULL, ("Invalid worker (%u).", bp->bio_pflags)); G_ELI_DEBUG(1, "Rerunning crypto %s request (sid: %ju -> %ju).", bp->bio_cmd == BIO_READ ? "READ" : "WRITE", (uintmax_t)wr->w_sid, (uintmax_t)crp->crp_sid); wr->w_sid = crp->crp_sid; crp->crp_etype = 0; error = crypto_dispatch(crp); if (error == 0) return (0); G_ELI_DEBUG(1, "%s: crypto_dispatch() returned %d.", __func__, error); crp->crp_etype = error; return (error); } /* * The function is called after reading encrypted data from the provider. * * g_eli_start -> g_eli_crypto_read -> g_io_request -> G_ELI_READ_DONE -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver */ void g_eli_read_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; sc = pbp->bio_to->geom->softc; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; if (pbp->bio_driver2 != NULL) { free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; } g_io_deliver(pbp, pbp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); return; } mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, pbp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); } /* * The function is called after we encrypt and write data.
* * g_eli_start -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> G_ELI_WRITE_DONE -> g_io_deliver */ void g_eli_write_done(struct bio *bp) { struct g_eli_softc *sc; struct bio *pbp; G_ELI_LOGREQ(2, bp, "Request done."); pbp = bp->bio_parent; if (pbp->bio_error == 0 && bp->bio_error != 0) pbp->bio_error = bp->bio_error; g_destroy_bio(bp); /* * Do we have all sectors already? */ pbp->bio_inbed++; if (pbp->bio_inbed < pbp->bio_children) return; free(pbp->bio_driver2, M_ELI); pbp->bio_driver2 = NULL; if (pbp->bio_error != 0) { G_ELI_LOGREQ(0, pbp, "%s() failed (error=%d)", __func__, pbp->bio_error); pbp->bio_completed = 0; } else pbp->bio_completed = pbp->bio_length; /* * Write is finished, send it up. */ sc = pbp->bio_to->geom->softc; g_io_deliver(pbp, pbp->bio_error); atomic_subtract_int(&sc->sc_inflight, 1); } /* * This function should never be called, but GEOM requires that an ->orphan() * method be set for every geom. */ static void g_eli_orphan_spoil_assert(struct g_consumer *cp) { panic("Function %s() called for %s.", __func__, cp->geom->name); } static void g_eli_orphan(struct g_consumer *cp) { struct g_eli_softc *sc; g_topology_assert(); sc = cp->geom->softc; if (sc == NULL) return; g_eli_destroy(sc, TRUE); } /* * BIO_READ: * G_ELI_START -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> g_eli_crypto_read_done -> g_io_deliver * BIO_WRITE: * G_ELI_START -> g_eli_crypto_run -> g_eli_crypto_write_done -> g_io_request -> g_eli_write_done -> g_io_deliver */ static void g_eli_start(struct bio *bp) { struct g_eli_softc *sc; struct g_consumer *cp; struct bio *cbp; sc = bp->bio_to->geom->softc; KASSERT(sc != NULL, ("Provider's error should be set (error=%d)(device=%s).", bp->bio_to->error, bp->bio_to->name)); G_ELI_LOGREQ(2, bp, "Request received."); switch (bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_GETATTR: case BIO_FLUSH: + case BIO_ZONE: break; case BIO_DELETE: /* * If the user hasn't set the NODELETE flag, we just pass * it down the stack and let the layers beneath us do (or * not) whatever they do with it. If they have, we * reject it. A possible extension would be an * additional flag to take it as a hint to shred the data * with [multiple?] overwrites.
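* (Refusing BIO_DELETE under NODELETE presumably also avoids revealing to an observer of the backing provider which encrypted sectors are unused.)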
*/ if (!(sc->sc_flags & G_ELI_FLAG_NODELETE)) break; default: g_io_deliver(bp, EOPNOTSUPP); return; } cbp = g_clone_bio(bp); if (cbp == NULL) { g_io_deliver(bp, ENOMEM); return; } bp->bio_driver1 = cbp; bp->bio_pflags = G_ELI_NEW_BIO; switch (bp->bio_cmd) { case BIO_READ: if (!(sc->sc_flags & G_ELI_FLAG_AUTH)) { g_eli_crypto_read(sc, bp, 0); break; } /* FALLTHROUGH */ case BIO_WRITE: mtx_lock(&sc->sc_queue_mtx); bioq_insert_tail(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); wakeup(sc); break; case BIO_GETATTR: case BIO_FLUSH: case BIO_DELETE: + case BIO_ZONE: cbp->bio_done = g_std_done; cp = LIST_FIRST(&sc->sc_geom->consumer); cbp->bio_to = cp->provider; G_ELI_LOGREQ(2, cbp, "Sending request."); g_io_request(cbp, cp); break; } } static int g_eli_newsession(struct g_eli_worker *wr) { struct g_eli_softc *sc; struct cryptoini crie, cria; int error; sc = wr->w_softc; bzero(&crie, sizeof(crie)); crie.cri_alg = sc->sc_ealgo; crie.cri_klen = sc->sc_ekeylen; if (sc->sc_ealgo == CRYPTO_AES_XTS) crie.cri_klen <<= 1; if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) { crie.cri_key = g_eli_key_hold(sc, 0, LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize); } else { crie.cri_key = sc->sc_ekey; } if (sc->sc_flags & G_ELI_FLAG_AUTH) { bzero(&cria, sizeof(cria)); cria.cri_alg = sc->sc_aalgo; cria.cri_klen = sc->sc_akeylen; cria.cri_key = sc->sc_akey; crie.cri_next = &cria; } switch (sc->sc_crypto) { case G_ELI_CRYPTO_SW: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_SOFTWARE); break; case G_ELI_CRYPTO_HW: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_HARDWARE); break; case G_ELI_CRYPTO_UNKNOWN: error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_HARDWARE); if (error == 0) { mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) sc->sc_crypto = G_ELI_CRYPTO_HW; mtx_unlock(&sc->sc_queue_mtx); } else { error = crypto_newsession(&wr->w_sid, &crie, CRYPTOCAP_F_SOFTWARE); mtx_lock(&sc->sc_queue_mtx); if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN) sc->sc_crypto = G_ELI_CRYPTO_SW; mtx_unlock(&sc->sc_queue_mtx); } break; default: panic("%s: invalid condition", __func__); } if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) g_eli_key_drop(sc, crie.cri_key); return (error); } static void g_eli_freesession(struct g_eli_worker *wr) { crypto_freesession(wr->w_sid); } static void g_eli_cancel(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { KASSERT(bp->bio_pflags == G_ELI_NEW_BIO, ("Not new bio when canceling (bp=%p).", bp)); g_io_deliver(bp, ENXIO); } } static struct bio * g_eli_takefirst(struct g_eli_softc *sc) { struct bio *bp; mtx_assert(&sc->sc_queue_mtx, MA_OWNED); if (!(sc->sc_flags & G_ELI_FLAG_SUSPEND)) return (bioq_takefirst(&sc->sc_queue)); /* * Device suspended, so we skip new I/O requests. */ TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { if (bp->bio_pflags != G_ELI_NEW_BIO) break; } if (bp != NULL) bioq_remove(&sc->sc_queue, bp); return (bp); } /* * This is the main function for kernel worker thread when we don't have * hardware acceleration and we have to do cryptography in software. * Dedicated thread is needed, so we don't slow down g_up/g_down GEOM * threads with crypto work. */ static void g_eli_worker(void *arg) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct bio *bp; int error; wr = arg; sc = wr->w_softc; #ifdef EARLY_AP_STARTUP MPASS(!sc->sc_cpubind || smp_started); #elif defined(SMP) /* Before sched_bind() to a CPU, wait for all CPUs to go on-line. 
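* Without EARLY_AP_STARTUP the worker threads can be created before the application processors are running, so poll smp_started every hz/4 ticks before binding.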
*/ if (sc->sc_cpubind) { while (!smp_started) tsleep(wr, 0, "geli:smp", hz / 4); } #endif thread_lock(curthread); sched_prio(curthread, PUSER); if (sc->sc_cpubind) sched_bind(curthread, wr->w_number % mp_ncpus); thread_unlock(curthread); G_ELI_DEBUG(1, "Thread %s started.", curthread->td_proc->p_comm); for (;;) { mtx_lock(&sc->sc_queue_mtx); again: bp = g_eli_takefirst(sc); if (bp == NULL) { if (sc->sc_flags & G_ELI_FLAG_DESTROY) { g_eli_cancel(sc); LIST_REMOVE(wr, w_next); g_eli_freesession(wr); free(wr, M_ELI); G_ELI_DEBUG(1, "Thread %s exiting.", curthread->td_proc->p_comm); wakeup(&sc->sc_workers); mtx_unlock(&sc->sc_queue_mtx); kproc_exit(0); } while (sc->sc_flags & G_ELI_FLAG_SUSPEND) { if (sc->sc_inflight > 0) { G_ELI_DEBUG(0, "inflight=%d", sc->sc_inflight); /* * We still have inflight BIOs, so * sleep and retry. */ msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:inf", hz / 5); goto again; } /* * Suspend requested, mark the worker as * suspended and go to sleep. */ if (wr->w_active) { g_eli_freesession(wr); wr->w_active = FALSE; } wakeup(&sc->sc_workers); msleep(sc, &sc->sc_queue_mtx, PRIBIO, "geli:suspend", 0); if (!wr->w_active && !(sc->sc_flags & G_ELI_FLAG_SUSPEND)) { error = g_eli_newsession(wr); KASSERT(error == 0, ("g_eli_newsession() failed on resume (error=%d)", error)); wr->w_active = TRUE; } goto again; } msleep(sc, &sc->sc_queue_mtx, PDROP, "geli:w", 0); continue; } if (bp->bio_pflags == G_ELI_NEW_BIO) atomic_add_int(&sc->sc_inflight, 1); mtx_unlock(&sc->sc_queue_mtx); if (bp->bio_pflags == G_ELI_NEW_BIO) { bp->bio_pflags = 0; if (sc->sc_flags & G_ELI_FLAG_AUTH) { if (bp->bio_cmd == BIO_READ) g_eli_auth_read(sc, bp); else g_eli_auth_run(wr, bp); } else { if (bp->bio_cmd == BIO_READ) g_eli_crypto_read(sc, bp, 1); else g_eli_crypto_run(wr, bp); } } else { if (sc->sc_flags & G_ELI_FLAG_AUTH) g_eli_auth_run(wr, bp); else g_eli_crypto_run(wr, bp); } } } int g_eli_read_metadata(struct g_class *mp, struct g_provider *pp, struct g_eli_metadata *md) { struct g_geom *gp; struct g_consumer *cp; u_char *buf = NULL; int error; g_topology_assert(); gp = g_new_geomf(mp, "eli:taste"); gp->start = g_eli_start; gp->access = g_std_access; /* * g_eli_read_metadata() is always called from the event thread. * Our geom is created and destroyed in the same event, so there * can be no orphan or spoil event in the meantime. */ gp->orphan = g_eli_orphan_spoil_assert; gp->spoiled = g_eli_orphan_spoil_assert; cp = g_new_consumer(gp); error = g_attach(cp, pp); if (error != 0) goto end; error = g_access(cp, 1, 0, 0); if (error != 0) goto end; g_topology_unlock(); buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, &error); g_topology_lock(); if (buf == NULL) goto end; error = eli_metadata_decode(buf, md); if (error != 0) goto end; /* Metadata was read and decoded successfully. */ end: if (buf != NULL) g_free(buf); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, 0, 0); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); return (error); } /* * The function is called on the last close of the provider, if the user * requested that it be detached when that happens.
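* It runs from the GEOM event thread, posted via g_post_event() in g_eli_access() below, so it may use and assert the topology lock.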
/* * The function is called when we had the last close on the provider and the * user requested to close it in this situation. */ static void g_eli_last_close(void *arg, int flags __unused) { struct g_geom *gp; char gpname[64]; int error; g_topology_assert(); gp = arg; strlcpy(gpname, gp->name, sizeof(gpname)); error = g_eli_destroy(gp->softc, TRUE); KASSERT(error == 0, ("Cannot detach %s on last close (error=%d).", gpname, error)); G_ELI_DEBUG(0, "Detached %s on last close.", gpname); } int g_eli_access(struct g_provider *pp, int dr, int dw, int de) { struct g_eli_softc *sc; struct g_geom *gp; gp = pp->geom; sc = gp->softc; if (dw > 0) { if (sc->sc_flags & G_ELI_FLAG_RO) { /* Deny write attempts. */ return (EROFS); } /* Someone is opening us for write, we need to remember that. */ sc->sc_flags |= G_ELI_FLAG_WOPEN; return (0); } /* Is this the last close? */ if (pp->acr + dr > 0 || pp->acw + dw > 0 || pp->ace + de > 0) return (0); /* * Automatically detach on last close if requested. */ if ((sc->sc_flags & G_ELI_FLAG_RW_DETACH) || (sc->sc_flags & G_ELI_FLAG_WOPEN)) { g_post_event(g_eli_last_close, gp, M_WAITOK, NULL); } return (0); }
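The last-close test in g_eli_access() adds the requested deltas to the provider's current access counts and treats the close as final only when all three sums reach zero. The arithmetic, as a tiny runnable sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the last-close test in g_eli_access(): the provider's access
     * counts (acr/acw/ace) plus the requested deltas must all reach zero. */
    static bool
    is_last_close(int acr, int acw, int ace, int dr, int dw, int de)
    {
            return (acr + dr <= 0 && acw + dw <= 0 && ace + de <= 0);
    }

    int
    main(void)
    {
            /* Sole reader dropping its read access: last close. */
            printf("%d\n", is_last_close(1, 0, 0, -1, 0, 0));       /* 1 */
            /* One of two readers closing: not the last close. */
            printf("%d\n", is_last_close(2, 1, 0, -1, 0, 0));       /* 0 */
            return (0);
    }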
static int g_eli_cpu_is_disabled(int cpu) { #ifdef SMP return (CPU_ISSET(cpu, &hlt_cpus_mask)); #else return (0); #endif } struct g_geom * g_eli_create(struct gctl_req *req, struct g_class *mp, struct g_provider *bpp, const struct g_eli_metadata *md, const u_char *mkey, int nkey) { struct g_eli_softc *sc; struct g_eli_worker *wr; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; u_int i, threads; int error; G_ELI_DEBUG(1, "Creating device %s%s.", bpp->name, G_ELI_SUFFIX); gp = g_new_geomf(mp, "%s%s", bpp->name, G_ELI_SUFFIX); sc = malloc(sizeof(*sc), M_ELI, M_WAITOK | M_ZERO); gp->start = g_eli_start; /* * Spoiling can happen even though we have the provider open * exclusively, e.g. through media change events. */ gp->spoiled = g_eli_orphan; gp->orphan = g_eli_orphan; gp->dumpconf = g_eli_dumpconf; /* * If the detach-on-last-close feature is not enabled and we don't * operate on a read-only provider, we can simply use g_std_access(). */ if (md->md_flags & (G_ELI_FLAG_WO_DETACH | G_ELI_FLAG_RO)) gp->access = g_eli_access; else gp->access = g_std_access; eli_metadata_softc(sc, md, bpp->sectorsize, bpp->mediasize); sc->sc_nkey = nkey; gp->softc = sc; sc->sc_geom = gp; bioq_init(&sc->sc_queue); mtx_init(&sc->sc_queue_mtx, "geli:queue", NULL, MTX_DEF); mtx_init(&sc->sc_ekeys_lock, "geli:ekeys", NULL, MTX_DEF); pp = NULL; cp = g_new_consumer(gp); error = g_attach(cp, bpp); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot attach to %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot attach to %s (error=%d).", bpp->name, error); } goto failed; } /* * Keep the provider open all the time, so we can run critical tasks, * like Master Keys deletion, without wondering whether we can open * the provider or not. * We skip opening the provider for writing only when the user * requested read-only access. */ if (sc->sc_flags & G_ELI_FLAG_RO) error = g_access(cp, 1, 0, 1); else error = g_access(cp, 1, 1, 1); if (error != 0) { if (req != NULL) { gctl_error(req, "Cannot access %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot access %s (error=%d).", bpp->name, error); } goto failed; } /* * Remember the keys in our softc structure. */ g_eli_mkey_propagate(sc, mkey); LIST_INIT(&sc->sc_workers); threads = g_eli_threads; if (threads == 0) threads = mp_ncpus; sc->sc_cpubind = (mp_ncpus > 1 && threads == mp_ncpus); for (i = 0; i < threads; i++) { if (g_eli_cpu_is_disabled(i)) { G_ELI_DEBUG(1, "%s: CPU %u disabled, skipping.", bpp->name, i); continue; } wr = malloc(sizeof(*wr), M_ELI, M_WAITOK | M_ZERO); wr->w_softc = sc; wr->w_number = i; wr->w_active = TRUE; error = g_eli_newsession(wr); if (error != 0) { free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot set up crypto session " "for %s (error=%d).", bpp->name, error); } goto failed; } error = kproc_create(g_eli_worker, wr, &wr->w_proc, 0, 0, "g_eli[%u] %s", i, bpp->name); if (error != 0) { g_eli_freesession(wr); free(wr, M_ELI); if (req != NULL) { gctl_error(req, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } else { G_ELI_DEBUG(1, "Cannot create kernel thread " "for %s (error=%d).", bpp->name, error); } goto failed; } LIST_INSERT_HEAD(&sc->sc_workers, wr, w_next); } /* * Create the decrypted provider. */ pp = g_new_providerf(gp, "%s%s", bpp->name, G_ELI_SUFFIX); pp->mediasize = sc->sc_mediasize; pp->sectorsize = sc->sc_sectorsize; g_error_provider(pp, 0); G_ELI_DEBUG(0, "Device %s created.", pp->name); G_ELI_DEBUG(0, "Encryption: %s %u", g_eli_algo2str(sc->sc_ealgo), sc->sc_ekeylen); if (sc->sc_flags & G_ELI_FLAG_AUTH) G_ELI_DEBUG(0, " Integrity: %s", g_eli_algo2str(sc->sc_aalgo)); G_ELI_DEBUG(0, " Crypto: %s", sc->sc_crypto == G_ELI_CRYPTO_SW ? "software" : "hardware"); return (gp); failed: mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); /* * Wait for the kernel threads to self-destruct. */ while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); if (cp->provider != NULL) { if (cp->acr == 1) g_access(cp, -1, -1, -1); g_detach(cp); } g_destroy_consumer(cp); g_destroy_geom(gp); g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); return (NULL); }
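Worker threads in g_eli_create() above are bound to CPUs round-robin via wr->w_number % mp_ncpus, and no worker is created at all for a CPU in hlt_cpus_mask. A sketch of that assignment; ncpus and disabled_mask are stand-ins for mp_ncpus and hlt_cpus_mask:

    #include <stdio.h>

    /* Sketch of the worker-to-CPU mapping in g_eli_create(): worker i binds
     * to CPU i % ncpus; workers whose CPU is administratively disabled are
     * skipped entirely (no thread is created for them). */
    int
    main(void)
    {
            const int ncpus = 4;                    /* stand-in for mp_ncpus */
            const int disabled_mask = 1 << 2;       /* stand-in for hlt_cpus_mask */
            int i;

            for (i = 0; i < ncpus; i++) {
                    if (disabled_mask & (1 << i)) {
                            printf("worker %d: CPU disabled, skipped\n", i);
                            continue;
                    }
                    printf("worker %d -> CPU %d\n", i, i % ncpus);
            }
            return (0);
    }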
int g_eli_destroy(struct g_eli_softc *sc, boolean_t force) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (sc == NULL) return (ENXIO); gp = sc->sc_geom; pp = LIST_FIRST(&gp->provider); if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { if (force) { G_ELI_DEBUG(1, "Device %s is still open, so it " "cannot be definitely removed.", pp->name); sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; g_wither_provider(pp, ENXIO); return (EBUSY); } else { G_ELI_DEBUG(1, "Device %s is still open (r%dw%de%d).", pp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } } mtx_lock(&sc->sc_queue_mtx); sc->sc_flags |= G_ELI_FLAG_DESTROY; wakeup(sc); while (!LIST_EMPTY(&sc->sc_workers)) { msleep(&sc->sc_workers, &sc->sc_queue_mtx, PRIBIO, "geli:destroy", 0); } mtx_destroy(&sc->sc_queue_mtx); gp->softc = NULL; g_eli_key_destroy(sc); bzero(sc, sizeof(*sc)); free(sc, M_ELI); if (pp == NULL || (pp->acr == 0 && pp->acw == 0 && pp->ace == 0)) G_ELI_DEBUG(0, "Device %s destroyed.", gp->name); g_wither_geom_close(gp, ENXIO); return (0); } static int g_eli_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, struct g_geom *gp) { struct g_eli_softc *sc; sc = gp->softc; return (g_eli_destroy(sc, FALSE)); } static int g_eli_keyfiles_load(struct hmac_ctx *ctx, const char *provider) { u_char *keyfile, *data; char *file, name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL && i == 0) { /* * If there is only one keyfile, allow a simpler name. */ snprintf(name, sizeof(name), "%s:geli_keyfile", provider); keyfile = preload_search_by_type(name); } if (keyfile == NULL) return (i); /* Return the number of loaded keyfiles. */ data = preload_fetch_addr(keyfile); if (data == NULL) { G_ELI_DEBUG(0, "Cannot find key file data for %s.", name); return (0); } size = preload_fetch_size(keyfile); if (size == 0) { G_ELI_DEBUG(0, "Cannot find key file size for %s.", name); return (0); } file = preload_search_info(keyfile, MODINFO_NAME); if (file == NULL) { G_ELI_DEBUG(0, "Cannot find key file name for %s.", name); return (0); } G_ELI_DEBUG(1, "Loaded keyfile %s for %s (type: %s).", file, provider, name); g_eli_crypto_hmac_update(ctx, data, size); } } static void g_eli_keyfiles_clear(const char *provider) { u_char *keyfile, *data; char name[64]; size_t size; int i; for (i = 0; ; i++) { snprintf(name, sizeof(name), "%s:geli_keyfile%d", provider, i); keyfile = preload_search_by_type(name); if (keyfile == NULL) return; data = preload_fetch_addr(keyfile); size = preload_fetch_size(keyfile); if (data != NULL && size != 0) bzero(data, size); } }
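g_eli_keyfiles_load() above finds preloaded key files by the metadata type string "<provider>:geli_keyfile<N>" (with plain "<provider>:geli_keyfile" accepted when there is exactly one file), and g_eli_keyfiles_clear() scrubs them afterwards. A hypothetical loader.conf(5) fragment that would preload a single keyfile for a provider named da0, following the convention geli(8) documents; the device name and file path are placeholders:

    geli_da0_keyfile0_load="YES"
    geli_da0_keyfile0_type="da0:geli_keyfile0"
    geli_da0_keyfile0_name="/boot/keys/da0.key"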
/* * Tasting is done only on boot. * We detect providers which should be attached before root is mounted. */ static struct g_geom * g_eli_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_eli_metadata md; struct g_geom *gp; struct hmac_ctx ctx; char passphrase[256]; u_char key[G_ELI_USERKEYLEN], mkey[G_ELI_DATAIVKEYLEN]; u_int i, nkey, nkeyfiles, tries; int error; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); if (root_mounted() || g_eli_tries == 0) return (NULL); G_ELI_DEBUG(3, "Tasting %s.", pp->name); error = g_eli_read_metadata(mp, pp, &md); if (error != 0) return (NULL); gp = NULL; if (strcmp(md.md_magic, G_ELI_MAGIC) != 0) return (NULL); if (md.md_version > G_ELI_VERSION) { printf("geom_eli.ko module is too old to handle %s.\n", pp->name); return (NULL); } if (md.md_provsize != pp->mediasize) return (NULL); /* Should we attach it on boot? */ if (!(md.md_flags & G_ELI_FLAG_BOOT)) return (NULL); if (md.md_keys == 0x00) { G_ELI_DEBUG(0, "No valid keys on %s.", pp->name); return (NULL); } if (md.md_iterations == -1) { /* If there is no passphrase, we try only once. */ tries = 1; } else { /* Ask for the passphrase no more than g_eli_tries times. */ tries = g_eli_tries; } for (i = 0; i <= tries; i++) { g_eli_crypto_hmac_init(&ctx, NULL, 0); /* * Load all key files. */ nkeyfiles = g_eli_keyfiles_load(&ctx, pp->name); if (nkeyfiles == 0 && md.md_iterations == -1) { /* * No key files and no passphrase, something is * definitely wrong here. * geli(8) doesn't allow for such a situation, so assume * that there was really no passphrase and in that case * key files are not properly defined in loader.conf. */ G_ELI_DEBUG(0, "Found no key files in loader.conf for %s.", pp->name); return (NULL); } /* Ask for the passphrase if defined. */ if (md.md_iterations >= 0) { /* Try first with cached passphrase. */ if (i == 0) { if (!g_eli_boot_passcache) continue; memcpy(passphrase, cached_passphrase, sizeof(passphrase)); } else { printf("Enter passphrase for %s: ", pp->name); cngets(passphrase, sizeof(passphrase), g_eli_visible_passphrase); memcpy(cached_passphrase, passphrase, sizeof(passphrase)); } } /* * Prepare Derived-Key from the user passphrase. */ if (md.md_iterations == 0) { g_eli_crypto_hmac_update(&ctx, md.md_salt, sizeof(md.md_salt)); g_eli_crypto_hmac_update(&ctx, passphrase, strlen(passphrase)); bzero(passphrase, sizeof(passphrase)); } else if (md.md_iterations > 0) { u_char dkey[G_ELI_USERKEYLEN]; pkcs5v2_genkey(dkey, sizeof(dkey), md.md_salt, sizeof(md.md_salt), passphrase, md.md_iterations); bzero(passphrase, sizeof(passphrase)); g_eli_crypto_hmac_update(&ctx, dkey, sizeof(dkey)); bzero(dkey, sizeof(dkey)); } g_eli_crypto_hmac_final(&ctx, key, 0); /* * Decrypt Master-Key. */ error = g_eli_mkey_decrypt(&md, key, mkey, &nkey); bzero(key, sizeof(key)); if (error == -1) { if (i == tries) { G_ELI_DEBUG(0, "Wrong key for %s. No tries left.", pp->name); g_eli_keyfiles_clear(pp->name); return (NULL); } if (i > 0) { G_ELI_DEBUG(0, "Wrong key for %s. Tries left: %u.", pp->name, tries - i); } /* Try again. */ continue; } else if (error > 0) { G_ELI_DEBUG(0, "Cannot decrypt Master Key for %s (error=%d).", pp->name, error); g_eli_keyfiles_clear(pp->name); return (NULL); } g_eli_keyfiles_clear(pp->name); G_ELI_DEBUG(1, "Using Master Key %u for %s.", nkey, pp->name); break; } /* * We have the correct key, so let's attach the provider. */ gp = g_eli_create(NULL, mp, pp, &md, mkey, nkey); bzero(mkey, sizeof(mkey)); bzero(&md, sizeof(md)); if (gp == NULL) { G_ELI_DEBUG(0, "Cannot create device %s%s.", pp->name, G_ELI_SUFFIX); return (NULL); } return (gp); }
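The Derived-Key in g_eli_taste() is an HMAC accumulated in a fixed order: every keyfile first, then either the salt and the raw passphrase (md_iterations == 0) or a PKCS#5v2-stretched key (md_iterations > 0). A sketch of just that control flow; the hmac_* and pkcs5v2() functions are trivial stubs standing in for g_eli_crypto_hmac_*() and pkcs5v2_genkey(), and the 64-byte key size is an assumption:

    #include <stddef.h>
    #include <string.h>

    struct hmac_ctx { int dummy; };     /* stub, not the real geli context */
    static void hmac_init(struct hmac_ctx *c) { (void)c; }
    static void hmac_update(struct hmac_ctx *c, const void *d, size_t l) { (void)c; (void)d; (void)l; }
    static void hmac_final(struct hmac_ctx *c, unsigned char *key) { (void)c; (void)key; }
    static void pkcs5v2(unsigned char *dk, size_t dklen, const unsigned char *salt,
        size_t saltlen, const char *pass, int iter) { memset(dk, 0, dklen);
            (void)salt; (void)saltlen; (void)pass; (void)iter; }

    static void
    derive_user_key(const unsigned char *salt, size_t saltlen, char *passphrase,
        int iterations, unsigned char key[64])
    {
            struct hmac_ctx ctx;
            unsigned char dkey[64];

            hmac_init(&ctx);
            /* 1. All keyfiles are fed in first (one update per file, omitted). */
            if (iterations == 0) {
                    /* 2a. No stretching: HMAC over salt, then raw passphrase. */
                    hmac_update(&ctx, salt, saltlen);
                    hmac_update(&ctx, passphrase, strlen(passphrase));
            } else if (iterations > 0) {
                    /* 2b. Stretch with PKCS#5v2 first, HMAC over the result. */
                    pkcs5v2(dkey, sizeof(dkey), salt, saltlen, passphrase, iterations);
                    hmac_update(&ctx, dkey, sizeof(dkey));
                    memset(dkey, 0, sizeof(dkey));          /* scrub derived key */
            }
            memset(passphrase, 0, strlen(passphrase));      /* scrub passphrase */
            hmac_final(&ctx, key);
    }

    int
    main(void)
    {
            unsigned char salt[32] = { 0 }, key[64];
            char pass[] = "example";

            derive_user_key(salt, sizeof(salt), pass, 1000, key);
            return (0);
    }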
static void g_eli_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct g_eli_softc *sc; g_topology_assert(); sc = gp->softc; if (sc == NULL) return; if (pp != NULL || cp != NULL) return; /* Nothing here. */ sbuf_printf(sb, "%s<KeysTotal>%ju</KeysTotal>\n", indent, (uintmax_t)sc->sc_ekeys_total); sbuf_printf(sb, "%s<KeysAllocated>%ju</KeysAllocated>\n", indent, (uintmax_t)sc->sc_ekeys_allocated); sbuf_printf(sb, "%s<Flags>", indent); if (sc->sc_flags == 0) sbuf_printf(sb, "NONE"); else { int first = 1; #define ADD_FLAG(flag, name) do { \ if (sc->sc_flags & (flag)) { \ if (!first) \ sbuf_printf(sb, ", "); \ else \ first = 0; \ sbuf_printf(sb, name); \ } \ } while (0) ADD_FLAG(G_ELI_FLAG_SUSPEND, "SUSPEND"); ADD_FLAG(G_ELI_FLAG_SINGLE_KEY, "SINGLE-KEY"); ADD_FLAG(G_ELI_FLAG_NATIVE_BYTE_ORDER, "NATIVE-BYTE-ORDER"); ADD_FLAG(G_ELI_FLAG_ONETIME, "ONETIME"); ADD_FLAG(G_ELI_FLAG_BOOT, "BOOT"); ADD_FLAG(G_ELI_FLAG_WO_DETACH, "W-DETACH"); ADD_FLAG(G_ELI_FLAG_RW_DETACH, "RW-DETACH"); ADD_FLAG(G_ELI_FLAG_AUTH, "AUTH"); ADD_FLAG(G_ELI_FLAG_WOPEN, "W-OPEN"); ADD_FLAG(G_ELI_FLAG_DESTROY, "DESTROY"); ADD_FLAG(G_ELI_FLAG_RO, "READ-ONLY"); ADD_FLAG(G_ELI_FLAG_NODELETE, "NODELETE"); ADD_FLAG(G_ELI_FLAG_GELIBOOT, "GELIBOOT"); #undef ADD_FLAG } sbuf_printf(sb, "</Flags>\n"); if (!(sc->sc_flags & G_ELI_FLAG_ONETIME)) { sbuf_printf(sb, "%s<UsedKey>%u</UsedKey>\n", indent, sc->sc_nkey); } sbuf_printf(sb, "%s<Version>%u</Version>\n", indent, sc->sc_version); sbuf_printf(sb, "%s<Crypto>", indent); switch (sc->sc_crypto) { case G_ELI_CRYPTO_HW: sbuf_printf(sb, "hardware"); break; case G_ELI_CRYPTO_SW: sbuf_printf(sb, "software"); break; default: sbuf_printf(sb, "UNKNOWN"); break; } sbuf_printf(sb, "</Crypto>\n"); if (sc->sc_flags & G_ELI_FLAG_AUTH) { sbuf_printf(sb, "%s<AuthenticationAlgorithm>%s</AuthenticationAlgorithm>\n", indent, g_eli_algo2str(sc->sc_aalgo)); } sbuf_printf(sb, "%s<KeyLength>%u</KeyLength>\n", indent, sc->sc_ekeylen); sbuf_printf(sb, "%s<EncryptionAlgorithm>%s</EncryptionAlgorithm>\n", indent, g_eli_algo2str(sc->sc_ealgo)); sbuf_printf(sb, "%s<State>%s</State>\n", indent, (sc->sc_flags & G_ELI_FLAG_SUSPEND) ? "SUSPENDED" : "ACTIVE"); } static void g_eli_shutdown_pre_sync(void *arg, int howto) { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp; struct g_eli_softc *sc; int error; mp = arg; DROP_GIANT(); g_topology_lock(); LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { sc = gp->softc; if (sc == NULL) continue; pp = LIST_FIRST(&gp->provider); KASSERT(pp != NULL, ("No provider? gp=%p (%s)", gp, gp->name)); if (pp->acr + pp->acw + pp->ace == 0) error = g_eli_destroy(sc, TRUE); else { sc->sc_flags |= G_ELI_FLAG_RW_DETACH; gp->access = g_eli_access; } } g_topology_unlock(); PICKUP_GIANT(); } static void g_eli_init(struct g_class *mp) { g_eli_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, g_eli_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); if (g_eli_pre_sync == NULL) G_ELI_DEBUG(0, "Warning! Cannot register shutdown event."); } static void g_eli_fini(struct g_class *mp) { if (g_eli_pre_sync != NULL) EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_eli_pre_sync); } DECLARE_GEOM_CLASS(g_eli_class, g_eli); MODULE_DEPEND(g_eli, crypto, 1, 1, 1); Index: head/sys/geom/geom.h =================================================================== --- head/sys/geom/geom.h (revision 300206) +++ head/sys/geom/geom.h (revision 300207) @@ -1,417 +1,419 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc.
* All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _GEOM_GEOM_H_ #define _GEOM_GEOM_H_ #include #include #include #include #include #include #include struct g_class; struct g_geom; struct g_consumer; struct g_provider; struct g_stat; struct thread; struct bio; struct sbuf; struct gctl_req; struct g_configargs; +struct disk_zone_args; typedef int g_config_t (struct g_configargs *ca); typedef void g_ctl_req_t (struct gctl_req *, struct g_class *cp, char const *verb); typedef int g_ctl_create_geom_t (struct gctl_req *, struct g_class *cp, struct g_provider *pp); typedef int g_ctl_destroy_geom_t (struct gctl_req *, struct g_class *cp, struct g_geom *gp); typedef int g_ctl_config_geom_t (struct gctl_req *, struct g_geom *gp, const char *verb); typedef void g_init_t (struct g_class *mp); typedef void g_fini_t (struct g_class *mp); typedef struct g_geom * g_taste_t (struct g_class *, struct g_provider *, int flags); typedef int g_ioctl_t(struct g_provider *pp, u_long cmd, void *data, int fflag, struct thread *td); #define G_TF_NORMAL 0 #define G_TF_INSIST 1 #define G_TF_TRANSPARENT 2 typedef int g_access_t (struct g_provider *, int, int, int); /* XXX: not sure about the thread arg */ typedef void g_orphan_t (struct g_consumer *); typedef void g_start_t (struct bio *); typedef void g_spoiled_t (struct g_consumer *); typedef void g_attrchanged_t (struct g_consumer *, const char *attr); typedef void g_provgone_t (struct g_provider *); typedef void g_dumpconf_t (struct sbuf *, const char *indent, struct g_geom *, struct g_consumer *, struct g_provider *); typedef void g_resize_t(struct g_consumer *cp); /* * The g_class structure describes a transformation class. In other words * all BSD disklabel handlers share one g_class, all MBR handlers share * one common g_class and so on. 
* Certain operations are instantiated on the class, most notably the * taste and config_geom functions. */ struct g_class { const char *name; u_int version; u_int spare0; g_taste_t *taste; g_config_t *config; g_ctl_req_t *ctlreq; g_init_t *init; g_fini_t *fini; g_ctl_destroy_geom_t *destroy_geom; /* * Default values for geom methods */ g_start_t *start; g_spoiled_t *spoiled; g_attrchanged_t *attrchanged; g_dumpconf_t *dumpconf; g_access_t *access; g_orphan_t *orphan; g_ioctl_t *ioctl; g_provgone_t *providergone; g_resize_t *resize; void *spare1; void *spare2; /* * The remaining elements are private */ LIST_ENTRY(g_class) class; LIST_HEAD(,g_geom) geom; }; #define G_VERSION_00 0x19950323 #define G_VERSION_01 0x20041207 /* add fflag to g_ioctl_t */ #define G_VERSION G_VERSION_01 /* * The g_geom is an instance of a g_class. */ struct g_geom { char *name; struct g_class *class; LIST_ENTRY(g_geom) geom; LIST_HEAD(,g_consumer) consumer; LIST_HEAD(,g_provider) provider; TAILQ_ENTRY(g_geom) geoms; /* XXX: better name */ int rank; g_start_t *start; g_spoiled_t *spoiled; g_attrchanged_t *attrchanged; g_dumpconf_t *dumpconf; g_access_t *access; g_orphan_t *orphan; g_ioctl_t *ioctl; g_provgone_t *providergone; g_resize_t *resize; void *spare0; void *spare1; void *softc; unsigned flags; #define G_GEOM_WITHER 1 #define G_GEOM_VOLATILE_BIO 2 }; /* * The g_bioq is a queue of struct bio's. * XXX: possibly collection point for statistics. * XXX: should (possibly) be collapsed with sys/bio.h::bio_queue_head. */ struct g_bioq { TAILQ_HEAD(, bio) bio_queue; struct mtx bio_queue_lock; int bio_queue_length; }; /* * A g_consumer is an attachment point for a g_provider. One g_consumer * can only be attached to one g_provider, but multiple g_consumers * can be attached to one g_provider. */ struct g_consumer { struct g_geom *geom; LIST_ENTRY(g_consumer) consumer; struct g_provider *provider; LIST_ENTRY(g_consumer) consumers; /* XXX: better name */ int acr, acw, ace; int flags; #define G_CF_SPOILED 0x1 #define G_CF_ORPHAN 0x4 #define G_CF_DIRECT_SEND 0x10 #define G_CF_DIRECT_RECEIVE 0x20 struct devstat *stat; u_int nstart, nend; /* Two fields for the implementing class to use */ void *private; u_int index; }; /* * A g_provider is a "logical disk". */ struct g_provider { char *name; LIST_ENTRY(g_provider) provider; struct g_geom *geom; LIST_HEAD(,g_consumer) consumers; int acr, acw, ace; int error; TAILQ_ENTRY(g_provider) orphan; off_t mediasize; u_int sectorsize; u_int stripesize; u_int stripeoffset; struct devstat *stat; u_int nstart, nend; u_int flags; #define G_PF_WITHER 0x2 #define G_PF_ORPHAN 0x4 #define G_PF_ACCEPT_UNMAPPED 0x8 #define G_PF_DIRECT_SEND 0x10 #define G_PF_DIRECT_RECEIVE 0x20 /* Two fields for the implementing class to use */ void *private; u_int index; }; /* * Descriptor of a classifier. We can register a function and * an argument, which is called by g_io_request() on bio's * that are not previously classified. */ struct g_classifier_hook { TAILQ_ENTRY(g_classifier_hook) link; int (*func)(void *arg, struct bio *bp); void *arg; }; /* BIO_GETATTR("GEOM::setstate") argument values. 
*/ #define G_STATE_FAILED 0 #define G_STATE_REBUILD 1 #define G_STATE_RESYNC 2 #define G_STATE_ACTIVE 3 /* geom_dev.c */ struct cdev; void g_dev_print(void); void g_dev_physpath_changed(void); struct g_provider *g_dev_getprovider(struct cdev *dev); /* geom_dump.c */ void g_trace(int level, const char *, ...); # define G_T_TOPOLOGY 1 # define G_T_BIO 2 # define G_T_ACCESS 4 /* geom_event.c */ typedef void g_event_t(void *, int flag); #define EV_CANCEL 1 int g_post_event(g_event_t *func, void *arg, int flag, ...); int g_waitfor_event(g_event_t *func, void *arg, int flag, ...); void g_cancel_event(void *ref); int g_attr_changed(struct g_provider *pp, const char *attr, int flag); int g_media_changed(struct g_provider *pp, int flag); int g_media_gone(struct g_provider *pp, int flag); void g_orphan_provider(struct g_provider *pp, int error); void g_waitidlelock(void); /* geom_subr.c */ int g_access(struct g_consumer *cp, int nread, int nwrite, int nexcl); int g_attach(struct g_consumer *cp, struct g_provider *pp); int g_compare_names(const char *namea, const char *nameb); void g_destroy_consumer(struct g_consumer *cp); void g_destroy_geom(struct g_geom *pp); void g_destroy_provider(struct g_provider *pp); void g_detach(struct g_consumer *cp); void g_error_provider(struct g_provider *pp, int error); struct g_provider *g_provider_by_name(char const *arg); int g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len); #define g_getattr(a, c, v) g_getattr__((a), (c), (v), sizeof *(v)) int g_handleattr(struct bio *bp, const char *attribute, const void *val, int len); int g_handleattr_int(struct bio *bp, const char *attribute, int val); int g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val); int g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val); int g_handleattr_str(struct bio *bp, const char *attribute, const char *str); struct g_consumer * g_new_consumer(struct g_geom *gp); struct g_geom * g_new_geomf(struct g_class *mp, const char *fmt, ...) __printflike(2, 3); struct g_provider * g_new_providerf(struct g_geom *gp, const char *fmt, ...) 
__printflike(2, 3); void g_resize_provider(struct g_provider *pp, off_t size); int g_retaste(struct g_class *mp); void g_spoil(struct g_provider *pp, struct g_consumer *cp); int g_std_access(struct g_provider *pp, int dr, int dw, int de); void g_std_done(struct bio *bp); void g_std_spoiled(struct g_consumer *cp); void g_wither_geom(struct g_geom *gp, int error); void g_wither_geom_close(struct g_geom *gp, int error); void g_wither_provider(struct g_provider *pp, int error); #if defined(DIAGNOSTIC) || defined(DDB) int g_valid_obj(void const *ptr); #endif #ifdef DIAGNOSTIC #define G_VALID_CLASS(foo) \ KASSERT(g_valid_obj(foo) == 1, ("%p is not a g_class", foo)) #define G_VALID_GEOM(foo) \ KASSERT(g_valid_obj(foo) == 2, ("%p is not a g_geom", foo)) #define G_VALID_CONSUMER(foo) \ KASSERT(g_valid_obj(foo) == 3, ("%p is not a g_consumer", foo)) #define G_VALID_PROVIDER(foo) \ KASSERT(g_valid_obj(foo) == 4, ("%p is not a g_provider", foo)) #else #define G_VALID_CLASS(foo) do { } while (0) #define G_VALID_GEOM(foo) do { } while (0) #define G_VALID_CONSUMER(foo) do { } while (0) #define G_VALID_PROVIDER(foo) do { } while (0) #endif int g_modevent(module_t, int, void *); /* geom_io.c */ struct bio * g_clone_bio(struct bio *); struct bio * g_duplicate_bio(struct bio *); void g_destroy_bio(struct bio *); void g_io_deliver(struct bio *bp, int error); int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr); +int g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp); int g_io_flush(struct g_consumer *cp); int g_register_classifier(struct g_classifier_hook *hook); void g_unregister_classifier(struct g_classifier_hook *hook); void g_io_request(struct bio *bp, struct g_consumer *cp); struct bio *g_new_bio(void); struct bio *g_alloc_bio(void); void g_reset_bio(struct bio *); void * g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error); int g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length); int g_delete_data(struct g_consumer *cp, off_t offset, off_t length); void g_print_bio(struct bio *bp); /* geom_kern.c / geom_kernsim.c */ #ifdef _KERNEL extern struct sx topology_lock; struct g_kerneldump { off_t offset; off_t length; struct dumperinfo di; }; MALLOC_DECLARE(M_GEOM); static __inline void * g_malloc(int size, int flags) { void *p; p = malloc(size, M_GEOM, flags); return (p); } static __inline void g_free(void *ptr) { #ifdef DIAGNOSTIC if (sx_xlocked(&topology_lock)) { KASSERT(g_valid_obj(ptr) == 0, ("g_free(%p) of live object, type %d", ptr, g_valid_obj(ptr))); } #endif free(ptr, M_GEOM); } #define g_topology_lock() \ do { \ mtx_assert(&Giant, MA_NOTOWNED); \ sx_xlock(&topology_lock); \ } while (0) #define g_topology_try_lock() sx_try_xlock(&topology_lock) #define g_topology_unlock() \ do { \ sx_xunlock(&topology_lock); \ } while (0) #define g_topology_assert() \ do { \ sx_assert(&topology_lock, SX_XLOCKED); \ } while (0) #define g_topology_assert_not() \ do { \ sx_assert(&topology_lock, SX_UNLOCKED); \ } while (0) #define g_topology_sleep(chan, timo) \ sx_sleep(chan, &topology_lock, 0, "gtopol", timo) #define DECLARE_GEOM_CLASS(class, name) \ static moduledata_t name##_mod = { \ #name, g_modevent, &class \ }; \ DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); int g_is_geom_thread(struct thread *td); #endif /* _KERNEL */ /* geom_ctl.c */ int gctl_set_param(struct gctl_req *req, const char *param, void const *ptr, int len); void gctl_set_param_err(struct gctl_req *req, const char *param, void const *ptr, 
int len); void *gctl_get_param(struct gctl_req *req, const char *param, int *len); char const *gctl_get_asciiparam(struct gctl_req *req, const char *param); void *gctl_get_paraml(struct gctl_req *req, const char *param, int len); int gctl_error(struct gctl_req *req, const char *fmt, ...) __printflike(2, 3); struct g_class *gctl_get_class(struct gctl_req *req, char const *arg); struct g_geom *gctl_get_geom(struct gctl_req *req, struct g_class *mpr, char const *arg); struct g_provider *gctl_get_provider(struct gctl_req *req, char const *arg); #endif /* _GEOM_GEOM_H_ */ Index: head/sys/geom/geom_dev.c =================================================================== --- head/sys/geom/geom_dev.c (revision 300206) +++ head/sys/geom/geom_dev.c (revision 300207) @@ -1,711 +1,751 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct g_dev_softc { struct mtx sc_mtx; struct cdev *sc_dev; struct cdev *sc_alias; int sc_open; int sc_active; }; static d_open_t g_dev_open; static d_close_t g_dev_close; static d_strategy_t g_dev_strategy; static d_ioctl_t g_dev_ioctl; static struct cdevsw g_dev_cdevsw = { .d_version = D_VERSION, .d_open = g_dev_open, .d_close = g_dev_close, .d_read = physread, .d_write = physwrite, .d_ioctl = g_dev_ioctl, .d_strategy = g_dev_strategy, .d_name = "g_dev", .d_flags = D_DISK | D_TRACKCLOSE, }; static g_init_t g_dev_init; static g_fini_t g_dev_fini; static g_taste_t g_dev_taste; static g_orphan_t g_dev_orphan; static g_attrchanged_t g_dev_attrchanged; static struct g_class g_dev_class = { .name = "DEV", .version = G_VERSION, .init = g_dev_init, .fini = g_dev_fini, .taste = g_dev_taste, .orphan = g_dev_orphan, .attrchanged = g_dev_attrchanged }; /* * We target 262144 (8 x 32768) sectors by default as this significantly * increases the throughput on commonly used SSD's with a marginal * increase in non-interruptible request latency. */ static uint64_t g_dev_del_max_sectors = 262144; SYSCTL_DECL(_kern_geom); SYSCTL_NODE(_kern_geom, OID_AUTO, dev, CTLFLAG_RW, 0, "GEOM_DEV stuff"); SYSCTL_QUAD(_kern_geom_dev, OID_AUTO, delete_max_sectors, CTLFLAG_RW, &g_dev_del_max_sectors, 0, "Maximum number of sectors in a single " "delete request sent to the provider. Larger requests are chunked " "so they can be interrupted. (0 = disable chunking)"); static char *dumpdev = NULL; static void g_dev_init(struct g_class *mp) { dumpdev = kern_getenv("dumpdev"); } static void g_dev_fini(struct g_class *mp) { freeenv(dumpdev); dumpdev = NULL; } static int g_dev_setdumpdev(struct cdev *dev, struct thread *td) { struct g_kerneldump kd; struct g_consumer *cp; int error, len; if (dev == NULL) return (set_dumper(NULL, NULL, td)); cp = dev->si_drv2; len = sizeof(kd); kd.offset = 0; kd.length = OFF_MAX; error = g_io_getattr("GEOM::kerneldump", cp, &len, &kd); if (error == 0) { error = set_dumper(&kd.di, devtoname(dev), td); if (error == 0) dev->si_flags |= SI_DUMPDEV; } return (error); } static int init_dumpdev(struct cdev *dev) { struct g_consumer *cp; const char *devprefix = "/dev/", *devname; int error; size_t len; if (dumpdev == NULL) return (0); len = strlen(devprefix); devname = devtoname(dev); if (strcmp(devname, dumpdev) != 0 && (strncmp(dumpdev, devprefix, len) != 0 || strcmp(devname, dumpdev + len) != 0)) return (0); cp = (struct g_consumer *)dev->si_drv2; error = g_access(cp, 1, 0, 0); if (error != 0) return (error); error = g_dev_setdumpdev(dev, curthread); if (error == 0) { freeenv(dumpdev); dumpdev = NULL; } (void)g_access(cp, -1, 0, 0); return (error); } static void g_dev_destroy(void *arg, int flags __unused) { struct g_consumer *cp; struct g_geom *gp; struct g_dev_softc *sc; char buf[SPECNAMELEN + 6]; g_topology_assert(); cp = arg; gp = cp->geom; sc = cp->private; g_trace(G_T_TOPOLOGY, "g_dev_destroy(%p(%s))", cp, gp->name); snprintf(buf, sizeof(buf), "cdev=%s", gp->name); devctl_notify_f("GEOM", "DEV", "DESTROY", buf, M_WAITOK); if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) g_access(cp, -cp->acr, -cp->acw, -cp->ace); g_detach(cp); g_destroy_consumer(cp); g_destroy_geom(gp); mtx_destroy(&sc->sc_mtx); g_free(sc); } void g_dev_print(void) { struct g_geom *gp; char const *p = ""; LIST_FOREACH(gp, 
&g_dev_class.geom, geom) { printf("%s%s", p, gp->name); p = " "; } printf("\n"); } static void g_dev_attrchanged(struct g_consumer *cp, const char *attr) { struct g_dev_softc *sc; struct cdev *dev; char buf[SPECNAMELEN + 6]; sc = cp->private; if (strcmp(attr, "GEOM::media") == 0) { dev = sc->sc_dev; snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name); devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK); devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK); dev = sc->sc_alias; if (dev != NULL) { snprintf(buf, sizeof(buf), "cdev=%s", dev->si_name); devctl_notify_f("DEVFS", "CDEV", "MEDIACHANGE", buf, M_WAITOK); devctl_notify_f("GEOM", "DEV", "MEDIACHANGE", buf, M_WAITOK); } return; } if (strcmp(attr, "GEOM::physpath") != 0) return; if (g_access(cp, 1, 0, 0) == 0) { char *physpath; int error, physpath_len; physpath_len = MAXPATHLEN; physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO); error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath); g_access(cp, -1, 0, 0); if (error == 0 && strlen(physpath) != 0) { struct cdev *old_alias_dev; struct cdev **alias_devp; dev = sc->sc_dev; old_alias_dev = sc->sc_alias; alias_devp = (struct cdev **)&sc->sc_alias; make_dev_physpath_alias(MAKEDEV_WAITOK, alias_devp, dev, old_alias_dev, physpath); } else if (sc->sc_alias) { destroy_dev((struct cdev *)sc->sc_alias); sc->sc_alias = NULL; } g_free(physpath); } } struct g_provider * g_dev_getprovider(struct cdev *dev) { struct g_consumer *cp; g_topology_assert(); if (dev == NULL) return (NULL); if (dev->si_devsw != &g_dev_cdevsw) return (NULL); cp = dev->si_drv2; return (cp->provider); } static struct g_geom * g_dev_taste(struct g_class *mp, struct g_provider *pp, int insist __unused) { struct g_geom *gp; struct g_consumer *cp; struct g_dev_softc *sc; int error; struct cdev *dev; char buf[SPECNAMELEN + 6]; g_trace(G_T_TOPOLOGY, "dev_taste(%s,%s)", mp->name, pp->name); g_topology_assert(); gp = g_new_geomf(mp, "%s", pp->name); sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); mtx_init(&sc->sc_mtx, "g_dev", NULL, MTX_DEF); cp = g_new_consumer(gp); cp->private = sc; cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; error = g_attach(cp, pp); KASSERT(error == 0, ("g_dev_taste(%s) failed to g_attach, err=%d", pp->name, error)); error = make_dev_p(MAKEDEV_CHECKNAME | MAKEDEV_WAITOK, &dev, &g_dev_cdevsw, NULL, UID_ROOT, GID_OPERATOR, 0640, "%s", gp->name); if (error != 0) { printf("%s: make_dev_p() failed (gp->name=%s, error=%d)\n", __func__, gp->name, error); g_detach(cp); g_destroy_consumer(cp); g_destroy_geom(gp); mtx_destroy(&sc->sc_mtx); g_free(sc); return (NULL); } dev->si_flags |= SI_UNMAPPED; sc->sc_dev = dev; dev->si_iosize_max = MAXPHYS; dev->si_drv2 = cp; error = init_dumpdev(dev); if (error != 0) printf("%s: init_dumpdev() failed (gp->name=%s, error=%d)\n", __func__, gp->name, error); g_dev_attrchanged(cp, "GEOM::physpath"); snprintf(buf, sizeof(buf), "cdev=%s", gp->name); devctl_notify_f("GEOM", "DEV", "CREATE", buf, M_WAITOK); return (gp); } static int g_dev_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct g_consumer *cp; struct g_dev_softc *sc; int error, r, w, e; cp = dev->si_drv2; if (cp == NULL) return (ENXIO); /* g_dev_taste() not done yet */ g_trace(G_T_ACCESS, "g_dev_open(%s, %d, %d, %p)", cp->geom->name, flags, fmt, td); r = flags & FREAD ? 1 : 0; w = flags & FWRITE ? 1 : 0; #ifdef notyet e = flags & O_EXCL ? 1 : 0; #else e = 0; #endif /* * This happens on attempt to open a device node with O_EXEC. 
*/ if (r + w + e == 0) return (EINVAL); if (w) { /* * When running in very secure mode, do not allow * opens for writing of any disks. */ error = securelevel_ge(td->td_ucred, 2); if (error) return (error); } g_topology_lock(); error = g_access(cp, r, w, e); g_topology_unlock(); if (error == 0) { sc = cp->private; mtx_lock(&sc->sc_mtx); if (sc->sc_open == 0 && sc->sc_active != 0) wakeup(&sc->sc_active); sc->sc_open += r + w + e; mtx_unlock(&sc->sc_mtx); } return (error); } static int g_dev_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct g_consumer *cp; struct g_dev_softc *sc; int error, r, w, e; cp = dev->si_drv2; if (cp == NULL) return (ENXIO); g_trace(G_T_ACCESS, "g_dev_close(%s, %d, %d, %p)", cp->geom->name, flags, fmt, td); r = flags & FREAD ? -1 : 0; w = flags & FWRITE ? -1 : 0; #ifdef notyet e = flags & O_EXCL ? -1 : 0; #else e = 0; #endif /* * The vgonel(9) - caused by eg. forced unmount of devfs - calls * VOP_CLOSE(9) on devfs vnode without any FREAD or FWRITE flags, * which would result in zero deltas, which in turn would cause * panic in g_access(9). * * Note that we cannot zero the counters (ie. do "r = cp->acr" * etc) instead, because the consumer might be opened in another * devfs instance. */ if (r + w + e == 0) return (EINVAL); sc = cp->private; mtx_lock(&sc->sc_mtx); sc->sc_open += r + w + e; while (sc->sc_open == 0 && sc->sc_active != 0) msleep(&sc->sc_active, &sc->sc_mtx, 0, "PRIBIO", 0); mtx_unlock(&sc->sc_mtx); g_topology_lock(); error = g_access(cp, r, w, e); g_topology_unlock(); return (error); } /* * XXX: Until we have unmessed the ioctl situation, there is a race against * XXX: a concurrent orphanization. We cannot close it by holding topology * XXX: since that would prevent us from doing our job, and stalling events * XXX: will break (actually: stall) the BSD disklabel hacks. 
*/ static int g_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct g_consumer *cp; struct g_provider *pp; off_t offset, length, chunk; int i, error; cp = dev->si_drv2; pp = cp->provider; error = 0; KASSERT(cp->acr || cp->acw, ("Consumer with zero access count in g_dev_ioctl")); i = IOCPARM_LEN(cmd); switch (cmd) { case DIOCGSECTORSIZE: *(u_int *)data = cp->provider->sectorsize; if (*(u_int *)data == 0) error = ENOENT; break; case DIOCGMEDIASIZE: *(off_t *)data = cp->provider->mediasize; if (*(off_t *)data == 0) error = ENOENT; break; case DIOCGFWSECTORS: error = g_io_getattr("GEOM::fwsectors", cp, &i, data); if (error == 0 && *(u_int *)data == 0) error = ENOENT; break; case DIOCGFWHEADS: error = g_io_getattr("GEOM::fwheads", cp, &i, data); if (error == 0 && *(u_int *)data == 0) error = ENOENT; break; case DIOCGFRONTSTUFF: error = g_io_getattr("GEOM::frontstuff", cp, &i, data); break; case DIOCSKERNELDUMP: if (*(u_int *)data == 0) error = g_dev_setdumpdev(NULL, td); else error = g_dev_setdumpdev(dev, td); break; case DIOCGFLUSH: error = g_io_flush(cp); break; case DIOCGDELETE: offset = ((off_t *)data)[0]; length = ((off_t *)data)[1]; if ((offset % cp->provider->sectorsize) != 0 || (length % cp->provider->sectorsize) != 0 || length <= 0) { printf("%s: offset=%jd length=%jd\n", __func__, offset, length); error = EINVAL; break; } while (length > 0) { chunk = length; if (g_dev_del_max_sectors != 0 && chunk > g_dev_del_max_sectors * cp->provider->sectorsize) { chunk = g_dev_del_max_sectors * cp->provider->sectorsize; } error = g_delete_data(cp, offset, chunk); length -= chunk; offset += chunk; if (error) break; /* * Since the request size can be large, the service * time can be likewise. We make this ioctl * interruptible by checking for signals for each bio. */ if (SIGPENDING(td)) break; } break; case DIOCGIDENT: error = g_io_getattr("GEOM::ident", cp, &i, data); break; case DIOCGPROVIDERNAME: if (pp == NULL) return (ENOENT); strlcpy(data, pp->name, i); break; case DIOCGSTRIPESIZE: *(off_t *)data = cp->provider->stripesize; break; case DIOCGSTRIPEOFFSET: *(off_t *)data = cp->provider->stripeoffset; break; case DIOCGPHYSPATH: error = g_io_getattr("GEOM::physpath", cp, &i, data); if (error == 0 && *(char *)data == '\0') error = ENOENT; break; case DIOCGATTR: { struct diocgattr_arg *arg = (struct diocgattr_arg *)data; if (arg->len > sizeof(arg->value)) { error = EINVAL; break; } error = g_io_getattr(arg->name, cp, &arg->len, &arg->value); break; } + case DIOCZONECMD: { + struct disk_zone_args *zone_args =(struct disk_zone_args *)data; + struct disk_zone_rep_entry *new_entries, *old_entries; + struct disk_zone_report *rep; + size_t alloc_size; + + old_entries = NULL; + new_entries = NULL; + rep = NULL; + alloc_size = 0; + + if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) { + + rep = &zone_args->zone_params.report; + alloc_size = rep->entries_allocated * + sizeof(struct disk_zone_rep_entry); + if (alloc_size != 0) + new_entries = g_malloc(alloc_size, + M_WAITOK| M_ZERO); + old_entries = rep->entries; + rep->entries = new_entries; + } + error = g_io_zonecmd(zone_args, cp); + if ((zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) + && (alloc_size != 0) + && (error == 0)) { + error = copyout(new_entries, old_entries, alloc_size); + } + if ((old_entries != NULL) + && (rep != NULL)) + rep->entries = old_entries; + + if (new_entries != NULL) + g_free(new_entries); + break; + } default: if (cp->provider->geom->ioctl != NULL) { error = cp->provider->geom->ioctl(cp->provider, cmd, data, fflag, td); } else { error = ENOIOCTL; } } return (error); }
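The new DIOCZONECMD case above swaps the caller's report-entry pointer for a freshly allocated kernel buffer before issuing the command, copies the results out only on success, and restores the original pointer unconditionally. A userland model of that pointer-swap-and-copyout dance; the struct names are simplified, and memcpy() stands in for copyout():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct rep_entry { int zone_type; };
    struct report { struct rep_entry *entries; size_t entries_allocated; };

    /* Pretend the provider fills in one zone entry. */
    static int
    run_zone_report(struct report *rep)
    {
            if (rep->entries_allocated > 0)
                    rep->entries[0].zone_type = 1;
            return (0);
    }

    int
    main(void)
    {
            struct rep_entry user_entries[4];
            struct report rep = { user_entries, 4 };
            struct rep_entry *old_entries, *new_entries;
            size_t alloc_size;
            int error;

            alloc_size = rep.entries_allocated * sizeof(struct rep_entry);
            new_entries = calloc(1, alloc_size);    /* kernel-side staging buffer */
            if (new_entries == NULL)
                    return (1);
            old_entries = rep.entries;              /* remember caller's pointer */
            rep.entries = new_entries;

            error = run_zone_report(&rep);
            if (error == 0 && alloc_size != 0)
                    memcpy(old_entries, new_entries, alloc_size);   /* "copyout" */
            rep.entries = old_entries;              /* always restore */
            free(new_entries);
            printf("error=%d first zone_type=%d\n", error, user_entries[0].zone_type);
            return (0);
    }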
static void g_dev_done(struct bio *bp2) { struct g_consumer *cp; struct g_dev_softc *sc; struct bio *bp; int destroy; cp = bp2->bio_from; sc = cp->private; bp = bp2->bio_parent; bp->bio_error = bp2->bio_error; bp->bio_completed = bp2->bio_completed; bp->bio_resid = bp->bio_length - bp2->bio_completed; + if (bp2->bio_cmd == BIO_ZONE) + bcopy(&bp2->bio_zone, &bp->bio_zone, sizeof(bp->bio_zone)); + if (bp2->bio_error != 0) { g_trace(G_T_BIO, "g_dev_done(%p) had error %d", bp2, bp2->bio_error); bp->bio_flags |= BIO_ERROR; } else { g_trace(G_T_BIO, "g_dev_done(%p/%p) resid %ld completed %jd", bp2, bp, bp2->bio_resid, (intmax_t)bp2->bio_completed); } g_destroy_bio(bp2); destroy = 0; mtx_lock(&sc->sc_mtx); if ((--sc->sc_active) == 0) { if (sc->sc_open == 0) wakeup(&sc->sc_active); if (sc->sc_dev == NULL) destroy = 1; } mtx_unlock(&sc->sc_mtx); if (destroy) g_post_event(g_dev_destroy, cp, M_NOWAIT, NULL); biodone(bp); }
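In g_dev_done() above, the last completing request (sc_active dropping to zero) is responsible for waking any thread waiting in close and, if the cdev is already gone, for scheduling destruction of the geom. A runnable model of that completion accounting; the struct and function names are simplified stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct softc { int active; int open; bool dev_gone; };

    /* Model of g_dev_done(): the request that drops active to 0 wakes any
     * closer and, if the device node is gone, schedules destruction. */
    static bool
    request_done(struct softc *sc)
    {
            bool destroy = false;

            if (--sc->active == 0) {
                    if (sc->open == 0)
                            printf("wakeup closer\n"); /* wakeup(&sc->sc_active) */
                    if (sc->dev_gone)
                            destroy = true; /* g_post_event(g_dev_destroy, ...) */
            }
            return (destroy);
    }

    int
    main(void)
    {
            struct softc sc = { 2, 0, true };

            printf("destroy=%d\n", request_done(&sc));      /* 0: one inflight */
            printf("destroy=%d\n", request_done(&sc));      /* 1: last one */
            return (0);
    }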
static void g_dev_strategy(struct bio *bp) { struct g_consumer *cp; struct bio *bp2; struct cdev *dev; struct g_dev_softc *sc; KASSERT(bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE || - bp->bio_cmd == BIO_FLUSH, + bp->bio_cmd == BIO_FLUSH || + bp->bio_cmd == BIO_ZONE, ("Wrong bio_cmd bio=%p cmd=%d", bp, bp->bio_cmd)); dev = bp->bio_dev; cp = dev->si_drv2; sc = cp->private; KASSERT(cp->acr || cp->acw, ("Consumer with zero access count in g_dev_strategy")); #ifdef INVARIANTS if ((bp->bio_offset % cp->provider->sectorsize) != 0 || (bp->bio_bcount % cp->provider->sectorsize) != 0) { bp->bio_resid = bp->bio_bcount; biofinish(bp, NULL, EINVAL); return; } #endif mtx_lock(&sc->sc_mtx); KASSERT(sc->sc_open > 0, ("Closed device in g_dev_strategy")); sc->sc_active++; mtx_unlock(&sc->sc_mtx); for (;;) { /* * XXX: This is not an ideal solution, but I believe it to * XXX: deadlock safely, all things considered. */ bp2 = g_clone_bio(bp); if (bp2 != NULL) break; pause("gdstrat", hz / 10); } KASSERT(bp2 != NULL, ("XXX: ENOMEM in a bad place")); bp2->bio_done = g_dev_done; g_trace(G_T_BIO, "g_dev_strategy(%p/%p) offset %jd length %jd data %p cmd %d", bp, bp2, (intmax_t)bp->bio_offset, (intmax_t)bp2->bio_length, bp2->bio_data, bp2->bio_cmd); g_io_request(bp2, cp); KASSERT(cp->acr || cp->acw, ("g_dev_strategy raced with g_dev_close and lost")); } /* * g_dev_callback() * * Called by devfs when asynchronous device destruction is completed. * - Mark that we have no attached device any more. * - If there are no outstanding requests, schedule geom destruction. * Otherwise destruction will be scheduled later by g_dev_done(). */ static void g_dev_callback(void *arg) { struct g_consumer *cp; struct g_dev_softc *sc; int destroy; cp = arg; sc = cp->private; g_trace(G_T_TOPOLOGY, "g_dev_callback(%p(%s))", cp, cp->geom->name); mtx_lock(&sc->sc_mtx); sc->sc_dev = NULL; sc->sc_alias = NULL; destroy = (sc->sc_active == 0); mtx_unlock(&sc->sc_mtx); if (destroy) g_post_event(g_dev_destroy, cp, M_WAITOK, NULL); } /* * g_dev_orphan() * * Called from below when the provider orphaned us. * - Clear any dump settings. * - Request asynchronous device destruction to prevent any more requests * from coming in. The provider is already marked with an error, so * anything which comes in the interim will be returned immediately. */ static void g_dev_orphan(struct g_consumer *cp) { struct cdev *dev; struct g_dev_softc *sc; g_topology_assert(); sc = cp->private; dev = sc->sc_dev; g_trace(G_T_TOPOLOGY, "g_dev_orphan(%p(%s))", cp, cp->geom->name); /* Reset any dump-area set on this device */ if (dev->si_flags & SI_DUMPDEV) (void)set_dumper(NULL, NULL, curthread); /* Destroy the struct cdev *so we get no more requests */ destroy_dev_sched_cb(dev, g_dev_callback, cp); } DECLARE_GEOM_CLASS(g_dev_class, g_dev); Index: head/sys/geom/geom_disk.c =================================================================== --- head/sys/geom/geom_disk.c (revision 300206) +++ head/sys/geom/geom_disk.c (revision 300207) @@ -1,930 +1,944 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_geom.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct g_disk_softc { struct mtx done_mtx; struct disk *dp; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; char led[64]; uint32_t state; struct mtx start_mtx; }; static g_access_t g_disk_access; static g_start_t g_disk_start; static g_ioctl_t g_disk_ioctl; static g_dumpconf_t g_disk_dumpconf; static g_provgone_t g_disk_providergone; static struct g_class g_disk_class = { .name = G_DISK_CLASS_NAME, .version = G_VERSION, .start = g_disk_start, .access = g_disk_access, .ioctl = g_disk_ioctl, .providergone = g_disk_providergone, .dumpconf = g_disk_dumpconf, }; SYSCTL_DECL(_kern_geom); static SYSCTL_NODE(_kern_geom, OID_AUTO, disk, CTLFLAG_RW, 0, "GEOM_DISK stuff"); DECLARE_GEOM_CLASS(g_disk_class, g_disk); static int g_disk_access(struct g_provider *pp, int r, int w, int e) { struct disk *dp; struct g_disk_softc *sc; int error; g_trace(G_T_ACCESS, "g_disk_access(%s, %d, %d, %d)", pp->name, r, w, e); g_topology_assert(); sc = pp->private; if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) { /* * Allow decreasing access count even if disk is not * available anymore. 
*/ if (r <= 0 && w <= 0 && e <= 0) return (0); return (ENXIO); } r += pp->acr; w += pp->acw; e += pp->ace; error = 0; if ((pp->acr + pp->acw + pp->ace) == 0 && (r + w + e) > 0) { if (dp->d_open != NULL) { error = dp->d_open(dp); if (bootverbose && error != 0) printf("Opened disk %s -> %d\n", pp->name, error); if (error != 0) return (error); } pp->mediasize = dp->d_mediasize; pp->sectorsize = dp->d_sectorsize; if (dp->d_maxsize == 0) { printf("WARNING: Disk drive %s%d has no d_maxsize\n", dp->d_name, dp->d_unit); dp->d_maxsize = DFLTPHYS; } if (dp->d_delmaxsize == 0) { if (bootverbose && dp->d_flags & DISKFLAG_CANDELETE) { printf("WARNING: Disk drive %s%d has no " "d_delmaxsize\n", dp->d_name, dp->d_unit); } dp->d_delmaxsize = dp->d_maxsize; } pp->stripeoffset = dp->d_stripeoffset; pp->stripesize = dp->d_stripesize; dp->d_flags |= DISKFLAG_OPEN; } else if ((pp->acr + pp->acw + pp->ace) > 0 && (r + w + e) == 0) { if (dp->d_close != NULL) { error = dp->d_close(dp); if (error != 0) printf("Closed disk %s -> %d\n", pp->name, error); } sc->state = G_STATE_ACTIVE; if (sc->led[0] != 0) led_set(sc->led, "0"); dp->d_flags &= ~DISKFLAG_OPEN; } return (error); } static void g_disk_kerneldump(struct bio *bp, struct disk *dp) { struct g_kerneldump *gkd; struct g_geom *gp; gkd = (struct g_kerneldump*)bp->bio_data; gp = bp->bio_to->geom; g_trace(G_T_TOPOLOGY, "g_disk_kerneldump(%s, %jd, %jd)", gp->name, (intmax_t)gkd->offset, (intmax_t)gkd->length); if (dp->d_dump == NULL) { g_io_deliver(bp, ENODEV); return; } gkd->di.dumper = dp->d_dump; gkd->di.priv = dp; gkd->di.blocksize = dp->d_sectorsize; gkd->di.maxiosize = dp->d_maxsize; gkd->di.mediaoffset = gkd->offset; if ((gkd->offset + gkd->length) > dp->d_mediasize) gkd->length = dp->d_mediasize - gkd->offset; gkd->di.mediasize = gkd->length; g_io_deliver(bp, 0); } static void g_disk_setstate(struct bio *bp, struct g_disk_softc *sc) { const char *cmd; memcpy(&sc->state, bp->bio_data, sizeof(sc->state)); if (sc->led[0] != 0) { switch (sc->state) { case G_STATE_FAILED: cmd = "1"; break; case G_STATE_REBUILD: cmd = "f5"; break; case G_STATE_RESYNC: cmd = "f1"; break; default: cmd = "0"; break; } led_set(sc->led, cmd); } g_io_deliver(bp, 0); } static void g_disk_done(struct bio *bp) { struct bintime now; struct bio *bp2; struct g_disk_softc *sc; /* See "notes" for why we need a mutex here */ /* XXX: will witness accept a mix of Giant/unGiant drivers here ? 
*/ bp2 = bp->bio_parent; sc = bp2->bio_to->private; bp->bio_completed = bp->bio_length - bp->bio_resid; binuptime(&now); mtx_lock(&sc->done_mtx); if (bp2->bio_error == 0) bp2->bio_error = bp->bio_error; bp2->bio_completed += bp->bio_completed; + switch (bp->bio_cmd) { + case BIO_ZONE: + bcopy(&bp->bio_zone, &bp2->bio_zone, sizeof(bp->bio_zone)); + /*FALLTHROUGH*/ case BIO_READ: case BIO_WRITE: case BIO_DELETE: case BIO_FLUSH: devstat_end_transaction_bio_bt(sc->dp->d_devstat, bp, &now); break; default: break; } bp2->bio_inbed++; if (bp2->bio_children == bp2->bio_inbed) { mtx_unlock(&sc->done_mtx); bp2->bio_resid = bp2->bio_bcount - bp2->bio_completed; g_io_deliver(bp2, bp2->bio_error); } else mtx_unlock(&sc->done_mtx); g_destroy_bio(bp); } static int g_disk_ioctl(struct g_provider *pp, u_long cmd, void * data, int fflag, struct thread *td) { struct disk *dp; struct g_disk_softc *sc; int error; sc = pp->private; dp = sc->dp; if (dp->d_ioctl == NULL) return (ENOIOCTL); error = dp->d_ioctl(dp, cmd, data, fflag, td); return (error); } static off_t g_disk_maxsize(struct disk *dp, struct bio *bp) { if (bp->bio_cmd == BIO_DELETE) return (dp->d_delmaxsize); return (dp->d_maxsize); } static int g_disk_maxsegs(struct disk *dp, struct bio *bp) { return ((g_disk_maxsize(dp, bp) / PAGE_SIZE) + 1); } static void g_disk_advance(struct disk *dp, struct bio *bp, off_t off) { bp->bio_offset += off; bp->bio_length -= off; if ((bp->bio_flags & BIO_VLIST) != 0) { bus_dma_segment_t *seg, *end; seg = (bus_dma_segment_t *)bp->bio_data; end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n; off += bp->bio_ma_offset; while (off >= seg->ds_len) { KASSERT((seg != end), ("vlist request runs off the end")); off -= seg->ds_len; seg++; } bp->bio_ma_offset = off; bp->bio_ma_n = end - seg; bp->bio_data = (void *)seg; } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { bp->bio_ma += off / PAGE_SIZE; bp->bio_ma_offset += off; bp->bio_ma_offset %= PAGE_SIZE; bp->bio_ma_n -= off / PAGE_SIZE; } else { bp->bio_data += off; } } static void g_disk_seg_limit(bus_dma_segment_t *seg, off_t *poffset, off_t *plength, int *ppages) { uintptr_t seg_page_base; uintptr_t seg_page_end; off_t offset; off_t length; int seg_pages; offset = *poffset; length = *plength; if (length > seg->ds_len - offset) length = seg->ds_len - offset; seg_page_base = trunc_page(seg->ds_addr + offset); seg_page_end = round_page(seg->ds_addr + offset + length); seg_pages = (seg_page_end - seg_page_base) >> PAGE_SHIFT; if (seg_pages > *ppages) { seg_pages = *ppages; length = (seg_page_base + (seg_pages << PAGE_SHIFT)) - (seg->ds_addr + offset); } *poffset = 0; *plength -= length; *ppages -= seg_pages; } static off_t g_disk_vlist_limit(struct disk *dp, struct bio *bp, bus_dma_segment_t **pendseg) { bus_dma_segment_t *seg, *end; off_t residual; off_t offset; int pages; seg = (bus_dma_segment_t *)bp->bio_data; end = (bus_dma_segment_t *)bp->bio_data + bp->bio_ma_n; residual = bp->bio_length; offset = bp->bio_ma_offset; pages = g_disk_maxsegs(dp, bp); while (residual != 0 && pages != 0) { KASSERT((seg != end), ("vlist limit runs off the end")); g_disk_seg_limit(seg, &offset, &residual, &pages); seg++; } if (pendseg != NULL) *pendseg = seg; return (residual); } static bool g_disk_limit(struct disk *dp, struct bio *bp) { bool limited = false; off_t maxsz; maxsz = g_disk_maxsize(dp, bp); /* * XXX: If we have a stripesize we should really use it here. 
* Care should be taken in the delete case if this is done * as deletes can be very sensitive to size given how they * are processed. */ if (bp->bio_length > maxsz) { bp->bio_length = maxsz; limited = true; } if ((bp->bio_flags & BIO_VLIST) != 0) { bus_dma_segment_t *firstseg, *endseg; off_t residual; firstseg = (bus_dma_segment_t*)bp->bio_data; residual = g_disk_vlist_limit(dp, bp, &endseg); if (residual != 0) { bp->bio_ma_n = endseg - firstseg; bp->bio_length -= residual; limited = true; } } else if ((bp->bio_flags & BIO_UNMAPPED) != 0) { bp->bio_ma_n = howmany(bp->bio_ma_offset + bp->bio_length, PAGE_SIZE); } return (limited); } static void g_disk_start(struct bio *bp) { struct bio *bp2, *bp3; struct disk *dp; struct g_disk_softc *sc; int error; off_t off; sc = bp->bio_to->private; if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) { g_io_deliver(bp, ENXIO); return; } error = EJUSTRETURN; switch(bp->bio_cmd) { case BIO_DELETE: if (!(dp->d_flags & DISKFLAG_CANDELETE)) { error = EOPNOTSUPP; break; } /* fall-through */ case BIO_READ: case BIO_WRITE: KASSERT((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0 || (bp->bio_flags & BIO_UNMAPPED) == 0, ("unmapped bio not supported by disk %s", dp->d_name)); off = 0; bp3 = NULL; bp2 = g_clone_bio(bp); if (bp2 == NULL) { error = ENOMEM; break; } for (;;) { if (g_disk_limit(dp, bp2)) { off += bp2->bio_length; /* * To avoid a race, we need to grab the next bio * before we schedule this one. See "notes". */ bp3 = g_clone_bio(bp); if (bp3 == NULL) bp->bio_error = ENOMEM; } bp2->bio_done = g_disk_done; bp2->bio_pblkno = bp2->bio_offset / dp->d_sectorsize; bp2->bio_bcount = bp2->bio_length; bp2->bio_disk = dp; mtx_lock(&sc->start_mtx); devstat_start_transaction_bio(dp->d_devstat, bp2); mtx_unlock(&sc->start_mtx); dp->d_strategy(bp2); if (bp3 == NULL) break; bp2 = bp3; bp3 = NULL; g_disk_advance(dp, bp2, off); } break; case BIO_GETATTR: /* Give the driver a chance to override */ if (dp->d_getattr != NULL) { if (bp->bio_disk == NULL) bp->bio_disk = dp; error = dp->d_getattr(bp); if (error != -1) break; error = EJUSTRETURN; } if (g_handleattr_int(bp, "GEOM::candelete", (dp->d_flags & DISKFLAG_CANDELETE) != 0)) break; else if (g_handleattr_int(bp, "GEOM::fwsectors", dp->d_fwsectors)) break; else if (g_handleattr_int(bp, "GEOM::fwheads", dp->d_fwheads)) break; else if (g_handleattr_off_t(bp, "GEOM::frontstuff", 0)) break; else if (g_handleattr_str(bp, "GEOM::ident", dp->d_ident)) break; else if (g_handleattr_uint16_t(bp, "GEOM::hba_vendor", dp->d_hba_vendor)) break; else if (g_handleattr_uint16_t(bp, "GEOM::hba_device", dp->d_hba_device)) break; else if (g_handleattr_uint16_t(bp, "GEOM::hba_subvendor", dp->d_hba_subvendor)) break; else if (g_handleattr_uint16_t(bp, "GEOM::hba_subdevice", dp->d_hba_subdevice)) break; else if (!strcmp(bp->bio_attribute, "GEOM::kerneldump")) g_disk_kerneldump(bp, dp); else if (!strcmp(bp->bio_attribute, "GEOM::setstate")) g_disk_setstate(bp, sc); else if (g_handleattr_uint16_t(bp, "GEOM::rotation_rate", dp->d_rotation_rate)) break; else error = ENOIOCTL; break; case BIO_FLUSH: g_trace(G_T_BIO, "g_disk_flushcache(%s)", bp->bio_to->name); if (!(dp->d_flags & DISKFLAG_CANFLUSHCACHE)) { error = EOPNOTSUPP; break; + } + /*FALLTHROUGH*/ + case BIO_ZONE: + if (bp->bio_cmd == BIO_ZONE) { + if (!(dp->d_flags & DISKFLAG_CANZONE)) { + error = EOPNOTSUPP; + break; + } + g_trace(G_T_BIO, "g_disk_zone(%s)", + bp->bio_to->name); } bp2 = g_clone_bio(bp); if (bp2 == NULL) { g_io_deliver(bp, ENOMEM); return; } bp2->bio_done = g_disk_done; 
bp2->bio_disk = dp; mtx_lock(&sc->start_mtx); devstat_start_transaction_bio(dp->d_devstat, bp2); mtx_unlock(&sc->start_mtx); dp->d_strategy(bp2); break; default: error = EOPNOTSUPP; break; } if (error != EJUSTRETURN) g_io_deliver(bp, error); return; } static void g_disk_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp) { struct bio *bp; struct disk *dp; struct g_disk_softc *sc; char *buf; int res = 0; sc = gp->softc; if (sc == NULL || (dp = sc->dp) == NULL) return; if (indent == NULL) { sbuf_printf(sb, " hd %u", dp->d_fwheads); sbuf_printf(sb, " sc %u", dp->d_fwsectors); return; } if (pp != NULL) { sbuf_printf(sb, "%s<fwheads>%u</fwheads>\n", indent, dp->d_fwheads); sbuf_printf(sb, "%s<fwsectors>%u</fwsectors>\n", indent, dp->d_fwsectors); /* * "rotationrate" is a little complicated, because the value * returned by the drive might not be the RPM; 0 and 1 are * special cases, and there's also a valid range. */ sbuf_printf(sb, "%s<rotationrate>", indent); if (dp->d_rotation_rate == 0) /* Old drives don't */ sbuf_printf(sb, "unknown"); /* report RPM. */ else if (dp->d_rotation_rate == 1) /* Since 0 is used */ sbuf_printf(sb, "0"); /* above, SSDs use 1. */ else if ((dp->d_rotation_rate >= 0x041) && (dp->d_rotation_rate <= 0xfffe)) sbuf_printf(sb, "%u", dp->d_rotation_rate); else sbuf_printf(sb, "invalid"); sbuf_printf(sb, "</rotationrate>\n"); if (dp->d_getattr != NULL) { buf = g_malloc(DISK_IDENT_SIZE, M_WAITOK); bp = g_alloc_bio(); bp->bio_disk = dp; bp->bio_attribute = "GEOM::ident"; bp->bio_length = DISK_IDENT_SIZE; bp->bio_data = buf; res = dp->d_getattr(bp); sbuf_printf(sb, "%s<ident>", indent); g_conf_printf_escaped(sb, "%s", res == 0 ? buf: dp->d_ident); sbuf_printf(sb, "</ident>\n"); bp->bio_attribute = "GEOM::lunid"; bp->bio_length = DISK_IDENT_SIZE; bp->bio_data = buf; if (dp->d_getattr(bp) == 0) { sbuf_printf(sb, "%s<lunid>", indent); g_conf_printf_escaped(sb, "%s", buf); sbuf_printf(sb, "</lunid>\n"); } bp->bio_attribute = "GEOM::lunname"; bp->bio_length = DISK_IDENT_SIZE; bp->bio_data = buf; if (dp->d_getattr(bp) == 0) { sbuf_printf(sb, "%s<lunname>", indent); g_conf_printf_escaped(sb, "%s", buf); sbuf_printf(sb, "</lunname>\n"); } g_destroy_bio(bp); g_free(buf); } else { sbuf_printf(sb, "%s<ident>", indent); g_conf_printf_escaped(sb, "%s", dp->d_ident); sbuf_printf(sb, "</ident>\n"); } sbuf_printf(sb, "%s<descr>", indent); g_conf_printf_escaped(sb, "%s", dp->d_descr); sbuf_printf(sb, "</descr>\n"); } } static void g_disk_resize(void *ptr, int flag) { struct disk *dp; struct g_geom *gp; struct g_provider *pp; if (flag == EV_CANCEL) return; g_topology_assert(); dp = ptr; gp = dp->d_geom; if (dp->d_destroyed || gp == NULL) return; LIST_FOREACH(pp, &gp->provider, provider) { if (pp->sectorsize != 0 && pp->sectorsize != dp->d_sectorsize) g_wither_provider(pp, ENXIO); else g_resize_provider(pp, dp->d_mediasize); } } static void g_disk_create(void *arg, int flag) { struct g_geom *gp; struct g_provider *pp; struct disk *dp; struct g_disk_softc *sc; char tmpstr[80]; if (flag == EV_CANCEL) return; g_topology_assert(); dp = arg; sc = g_malloc(sizeof(*sc), M_WAITOK | M_ZERO); mtx_init(&sc->start_mtx, "g_disk_start", NULL, MTX_DEF); mtx_init(&sc->done_mtx, "g_disk_done", NULL, MTX_DEF); sc->dp = dp; gp = g_new_geomf(&g_disk_class, "%s%d", dp->d_name, dp->d_unit); gp->softc = sc; pp = g_new_providerf(gp, "%s", gp->name); devstat_remove_entry(pp->stat); pp->stat = NULL; dp->d_devstat->id = pp; pp->mediasize = dp->d_mediasize; pp->sectorsize = dp->d_sectorsize; pp->stripeoffset = dp->d_stripeoffset; pp->stripesize = dp->d_stripesize; if ((dp->d_flags & DISKFLAG_UNMAPPED_BIO) != 0)
pp->flags |= G_PF_ACCEPT_UNMAPPED; if ((dp->d_flags & DISKFLAG_DIRECT_COMPLETION) != 0) pp->flags |= G_PF_DIRECT_SEND; pp->flags |= G_PF_DIRECT_RECEIVE; if (bootverbose) printf("GEOM: new disk %s\n", gp->name); sysctl_ctx_init(&sc->sysctl_ctx); snprintf(tmpstr, sizeof(tmpstr), "GEOM disk %s", gp->name); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_kern_geom_disk), OID_AUTO, gp->name, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree != NULL) { SYSCTL_ADD_STRING(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "led", CTLFLAG_RWTUN, sc->led, sizeof(sc->led), "LED name"); } pp->private = sc; dp->d_geom = gp; g_error_provider(pp, 0); } /* * We get this callback after all of the consumers have gone away, and just * before the provider is freed. If the disk driver provided a d_gone * callback, let them know that it is okay to free resources -- they won't * be getting any more accesses from GEOM. */ static void g_disk_providergone(struct g_provider *pp) { struct disk *dp; struct g_disk_softc *sc; sc = (struct g_disk_softc *)pp->private; dp = sc->dp; if (dp != NULL && dp->d_gone != NULL) dp->d_gone(dp); if (sc->sysctl_tree != NULL) { sysctl_ctx_free(&sc->sysctl_ctx); sc->sysctl_tree = NULL; } if (sc->led[0] != 0) { led_set(sc->led, "0"); sc->led[0] = 0; } pp->private = NULL; pp->geom->softc = NULL; mtx_destroy(&sc->done_mtx); mtx_destroy(&sc->start_mtx); g_free(sc); } static void g_disk_destroy(void *ptr, int flag) { struct disk *dp; struct g_geom *gp; struct g_disk_softc *sc; g_topology_assert(); dp = ptr; gp = dp->d_geom; if (gp != NULL) { sc = gp->softc; if (sc != NULL) sc->dp = NULL; dp->d_geom = NULL; g_wither_geom(gp, ENXIO); } g_free(dp); } /* * We only allow printable characters in disk ident, * the rest is converted to 'x<HH>'.
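 * For example, a 0x09 (TAB) byte in the ident is emitted as the three
 * characters "x09" by the snprintf() below.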
*/ static void g_disk_ident_adjust(char *ident, size_t size) { char *p, tmp[4], newid[DISK_IDENT_SIZE]; newid[0] = '\0'; for (p = ident; *p != '\0'; p++) { if (isprint(*p)) { tmp[0] = *p; tmp[1] = '\0'; } else { snprintf(tmp, sizeof(tmp), "x%02hhx", *(unsigned char *)p); } if (strlcat(newid, tmp, sizeof(newid)) >= sizeof(newid)) break; } bzero(ident, size); strlcpy(ident, newid, size); } struct disk * disk_alloc(void) { return (g_malloc(sizeof(struct disk), M_WAITOK | M_ZERO)); } void disk_create(struct disk *dp, int version) { if (version != DISK_VERSION) { printf("WARNING: Attempt to add disk %s%d %s", dp->d_name, dp->d_unit, " using incompatible ABI version of disk(9)\n"); printf("WARNING: Ignoring disk %s%d\n", dp->d_name, dp->d_unit); return; } if (dp->d_flags & DISKFLAG_RESERVED) { printf("WARNING: Attempt to add non-MPSAFE disk %s%d\n", dp->d_name, dp->d_unit); printf("WARNING: Ignoring disk %s%d\n", dp->d_name, dp->d_unit); return; } KASSERT(dp->d_strategy != NULL, ("disk_create need d_strategy")); KASSERT(dp->d_name != NULL, ("disk_create need d_name")); KASSERT(*dp->d_name != 0, ("disk_create need d_name")); KASSERT(strlen(dp->d_name) < SPECNAMELEN - 4, ("disk name too long")); if (dp->d_devstat == NULL) dp->d_devstat = devstat_new_entry(dp->d_name, dp->d_unit, dp->d_sectorsize, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); dp->d_geom = NULL; g_disk_ident_adjust(dp->d_ident, sizeof(dp->d_ident)); g_post_event(g_disk_create, dp, M_WAITOK, dp, NULL); } void disk_destroy(struct disk *dp) { g_cancel_event(dp); dp->d_destroyed = 1; if (dp->d_devstat != NULL) devstat_remove_entry(dp->d_devstat); g_post_event(g_disk_destroy, dp, M_WAITOK, NULL); } void disk_gone(struct disk *dp) { struct g_geom *gp; struct g_provider *pp; gp = dp->d_geom; if (gp != NULL) { pp = LIST_FIRST(&gp->provider); if (pp != NULL) { KASSERT(LIST_NEXT(pp, provider) == NULL, ("geom %p has more than one provider", gp)); g_wither_provider(pp, ENXIO); } } } void disk_attr_changed(struct disk *dp, const char *attr, int flag) { struct g_geom *gp; struct g_provider *pp; char devnamebuf[128]; gp = dp->d_geom; if (gp != NULL) LIST_FOREACH(pp, &gp->provider, provider) (void)g_attr_changed(pp, attr, flag); snprintf(devnamebuf, sizeof(devnamebuf), "devname=%s%d", dp->d_name, dp->d_unit); devctl_notify("GEOM", "disk", attr, devnamebuf); } void disk_media_changed(struct disk *dp, int flag) { struct g_geom *gp; struct g_provider *pp; gp = dp->d_geom; if (gp != NULL) { pp = LIST_FIRST(&gp->provider); if (pp != NULL) { KASSERT(LIST_NEXT(pp, provider) == NULL, ("geom %p has more than one provider", gp)); g_media_changed(pp, flag); } } } void disk_media_gone(struct disk *dp, int flag) { struct g_geom *gp; struct g_provider *pp; gp = dp->d_geom; if (gp != NULL) { pp = LIST_FIRST(&gp->provider); if (pp != NULL) { KASSERT(LIST_NEXT(pp, provider) == NULL, ("geom %p has more than one provider", gp)); g_media_gone(pp, flag); } } } int disk_resize(struct disk *dp, int flag) { if (dp->d_destroyed || dp->d_geom == NULL) return (0); return (g_post_event(g_disk_resize, dp, flag, NULL)); } static void g_kern_disks(void *p, int flag __unused) { struct sbuf *sb; struct g_geom *gp; char *sp; sb = p; sp = ""; g_topology_assert(); LIST_FOREACH(gp, &g_disk_class.geom, geom) { sbuf_printf(sb, "%s%s", sp, gp->name); sp = " "; } sbuf_finish(sb); } static int sysctl_disks(SYSCTL_HANDLER_ARGS) { int error; struct sbuf *sb; sb = sbuf_new_auto(); g_waitfor_event(g_kern_disks, sb, M_WAITOK, NULL); error = SYSCTL_OUT(req, sbuf_data(sb), 
sbuf_len(sb) + 1); sbuf_delete(sb); return error; } SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_disks, "A", "names of available disks"); Index: head/sys/geom/geom_disk.h =================================================================== --- head/sys/geom/geom_disk.h (revision 300206) +++ head/sys/geom/geom_disk.h (revision 300207) @@ -1,130 +1,131 @@ /*- * Copyright (c) 2003 Poul-Henning Kamp * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _GEOM_GEOM_DISK_H_ #define _GEOM_GEOM_DISK_H_ #ifdef _KERNEL #include #include #include #include #define G_DISK_CLASS_NAME "DISK" struct disk; typedef int disk_open_t(struct disk *); typedef int disk_close_t(struct disk *); typedef void disk_strategy_t(struct bio *bp); typedef int disk_getattr_t(struct bio *bp); typedef void disk_gone_t(struct disk *); typedef int disk_ioctl_t(struct disk *, u_long cmd, void *data, int fflag, struct thread *td); /* NB: disk_ioctl_t SHALL be cast'able to d_ioctl_t */ struct g_geom; struct devstat; struct disk { /* Fields which are private to geom_disk */ struct g_geom *d_geom; struct devstat *d_devstat; int d_destroyed; /* Shared fields */ u_int d_flags; const char *d_name; u_int d_unit; struct bio_queue_head *d_queue; struct mtx *d_lock; /* Disk methods */ disk_open_t *d_open; disk_close_t *d_close; disk_strategy_t *d_strategy; disk_ioctl_t *d_ioctl; dumper_t *d_dump; disk_getattr_t *d_getattr; disk_gone_t *d_gone; /* Info fields from driver to geom_disk.c. 
Valid when open */ u_int d_sectorsize; off_t d_mediasize; u_int d_fwsectors; u_int d_fwheads; u_int d_maxsize; off_t d_delmaxsize; u_int d_stripeoffset; u_int d_stripesize; char d_ident[DISK_IDENT_SIZE]; char d_descr[DISK_IDENT_SIZE]; uint16_t d_hba_vendor; uint16_t d_hba_device; uint16_t d_hba_subvendor; uint16_t d_hba_subdevice; uint16_t d_rotation_rate; /* Fields private to the driver */ void *d_drv1; }; #define DISKFLAG_RESERVED 0x1 /* Was NEEDSGIANT */ #define DISKFLAG_OPEN 0x2 #define DISKFLAG_CANDELETE 0x4 #define DISKFLAG_CANFLUSHCACHE 0x8 #define DISKFLAG_UNMAPPED_BIO 0x10 #define DISKFLAG_DIRECT_COMPLETION 0x20 +#define DISKFLAG_CANZONE 0x80 struct disk *disk_alloc(void); void disk_create(struct disk *disk, int version); void disk_destroy(struct disk *disk); void disk_gone(struct disk *disk); void disk_attr_changed(struct disk *dp, const char *attr, int flag); void disk_media_changed(struct disk *dp, int flag); void disk_media_gone(struct disk *dp, int flag); int disk_resize(struct disk *dp, int flag); #define DISK_VERSION_00 0x58561059 #define DISK_VERSION_01 0x5856105a #define DISK_VERSION_02 0x5856105b #define DISK_VERSION_03 0x5856105c #define DISK_VERSION_04 0x5856105d #define DISK_VERSION DISK_VERSION_04 #endif /* _KERNEL */ #endif /* _GEOM_GEOM_DISK_H_ */ Index: head/sys/geom/geom_io.c =================================================================== --- head/sys/geom/geom_io.c (revision 300206) +++ head/sys/geom/geom_io.c (revision 300207) @@ -1,1007 +1,1075 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Portions of this software were developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int g_io_transient_map_bio(struct bio *bp); static struct g_bioq g_bio_run_down; static struct g_bioq g_bio_run_up; static struct g_bioq g_bio_run_task; /* * Pace is a hint that we've had some trouble recently allocating * bios, so we should back off trying to send I/O down the stack * a bit to let the problem resolve. When pacing, we also turn * off direct dispatch to also reduce memory pressure from I/Os * there, at the expense of some added latency while the memory * pressures exist. See g_io_schedule_down() for more details * and limitations. */ static volatile u_int pace; static uma_zone_t biozone; /* * The head of the list of classifiers used in g_io_request. * Use g_register_classifier() and g_unregister_classifier() * to add/remove entries to the list. * Classifiers are invoked in registration order. */ static TAILQ_HEAD(g_classifier_tailq, g_classifier_hook) g_classifier_tailq = TAILQ_HEAD_INITIALIZER(g_classifier_tailq); #include static void g_bioq_lock(struct g_bioq *bq) { mtx_lock(&bq->bio_queue_lock); } static void g_bioq_unlock(struct g_bioq *bq) { mtx_unlock(&bq->bio_queue_lock); } #if 0 static void g_bioq_destroy(struct g_bioq *bq) { mtx_destroy(&bq->bio_queue_lock); } #endif static void g_bioq_init(struct g_bioq *bq) { TAILQ_INIT(&bq->bio_queue); mtx_init(&bq->bio_queue_lock, "bio queue", NULL, MTX_DEF); } static struct bio * g_bioq_first(struct g_bioq *bq) { struct bio *bp; bp = TAILQ_FIRST(&bq->bio_queue); if (bp != NULL) { KASSERT((bp->bio_flags & BIO_ONQUEUE), ("Bio not on queue bp=%p target %p", bp, bq)); bp->bio_flags &= ~BIO_ONQUEUE; TAILQ_REMOVE(&bq->bio_queue, bp, bio_queue); bq->bio_queue_length--; } return (bp); } struct bio * g_new_bio(void) { struct bio *bp; bp = uma_zalloc(biozone, M_NOWAIT | M_ZERO); #ifdef KTR if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { struct stack st; CTR1(KTR_GEOM, "g_new_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3, 0); } #endif return (bp); } struct bio * g_alloc_bio(void) { struct bio *bp; bp = uma_zalloc(biozone, M_WAITOK | M_ZERO); #ifdef KTR if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { struct stack st; CTR1(KTR_GEOM, "g_alloc_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3, 0); } #endif return (bp); } void g_destroy_bio(struct bio *bp) { #ifdef KTR if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { struct stack st; CTR1(KTR_GEOM, "g_destroy_bio(): %p", bp); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3, 0); } #endif uma_zfree(biozone, bp); } struct bio * g_clone_bio(struct bio *bp) { struct bio *bp2; bp2 = uma_zalloc(biozone, M_NOWAIT | M_ZERO); if (bp2 != NULL) { bp2->bio_parent = bp; bp2->bio_cmd = bp->bio_cmd; /* * BIO_ORDERED flag may be used by disk drivers to enforce * ordering restrictions, so this flag needs to be cloned. * BIO_UNMAPPED and BIO_VLIST should be inherited, to properly * indicate which way the buffer is passed. * Other bio flags are not suitable for cloning.
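 * BIO_ZONE requests carry their arguments in bio_zone rather than in
 * bio_data, so that structure is copied for them below as well.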
*/ bp2->bio_flags = bp->bio_flags & (BIO_ORDERED | BIO_UNMAPPED | BIO_VLIST); bp2->bio_length = bp->bio_length; bp2->bio_offset = bp->bio_offset; bp2->bio_data = bp->bio_data; bp2->bio_ma = bp->bio_ma; bp2->bio_ma_n = bp->bio_ma_n; bp2->bio_ma_offset = bp->bio_ma_offset; bp2->bio_attribute = bp->bio_attribute; + if (bp->bio_cmd == BIO_ZONE) + bcopy(&bp->bio_zone, &bp2->bio_zone, + sizeof(bp->bio_zone)); /* Inherit classification info from the parent */ bp2->bio_classifier1 = bp->bio_classifier1; bp2->bio_classifier2 = bp->bio_classifier2; bp->bio_children++; } #ifdef KTR if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { struct stack st; CTR2(KTR_GEOM, "g_clone_bio(%p): %p", bp, bp2); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3, 0); } #endif return(bp2); } struct bio * g_duplicate_bio(struct bio *bp) { struct bio *bp2; bp2 = uma_zalloc(biozone, M_WAITOK | M_ZERO); bp2->bio_flags = bp->bio_flags & (BIO_UNMAPPED | BIO_VLIST); bp2->bio_parent = bp; bp2->bio_cmd = bp->bio_cmd; bp2->bio_length = bp->bio_length; bp2->bio_offset = bp->bio_offset; bp2->bio_data = bp->bio_data; bp2->bio_ma = bp->bio_ma; bp2->bio_ma_n = bp->bio_ma_n; bp2->bio_ma_offset = bp->bio_ma_offset; bp2->bio_attribute = bp->bio_attribute; bp->bio_children++; #ifdef KTR if ((KTR_COMPILE & KTR_GEOM) && (ktr_mask & KTR_GEOM)) { struct stack st; CTR2(KTR_GEOM, "g_duplicate_bio(%p): %p", bp, bp2); stack_save(&st); CTRSTACK(KTR_GEOM, &st, 3, 0); } #endif return(bp2); } void g_reset_bio(struct bio *bp) { bzero(bp, sizeof(*bp)); } void g_io_init() { g_bioq_init(&g_bio_run_down); g_bioq_init(&g_bio_run_up); g_bioq_init(&g_bio_run_task); biozone = uma_zcreate("g_bio", sizeof (struct bio), NULL, NULL, NULL, NULL, 0, 0); } int g_io_getattr(const char *attr, struct g_consumer *cp, int *len, void *ptr) { struct bio *bp; int error; g_trace(G_T_BIO, "bio_getattr(%s)", attr); bp = g_alloc_bio(); bp->bio_cmd = BIO_GETATTR; bp->bio_done = NULL; bp->bio_attribute = attr; bp->bio_length = *len; bp->bio_data = ptr; g_io_request(bp, cp); error = biowait(bp, "ggetattr"); *len = bp->bio_completed; g_destroy_bio(bp); return (error); } int +g_io_zonecmd(struct disk_zone_args *zone_args, struct g_consumer *cp) +{ + struct bio *bp; + int error; + + g_trace(G_T_BIO, "bio_zone(%d)", zone_args->zone_cmd); + bp = g_alloc_bio(); + bp->bio_cmd = BIO_ZONE; + bp->bio_done = NULL; + /* + * XXX KDM need to handle report zone data. 
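+ *
+ * For DISK_ZONE_REPORT_ZONES the buffer is sized below from the
+ * caller-supplied entries_allocated count. A typical caller fills a
+ * struct disk_zone_args and ships it down, e.g. (illustrative sketch
+ * only):
+ *
+ *	struct disk_zone_args zone_args;
+ *
+ *	bzero(&zone_args, sizeof(zone_args));
+ *	zone_args.zone_cmd = DISK_ZONE_GET_PARAMS;
+ *	error = g_io_zonecmd(&zone_args, cp);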
+ */ + bcopy(zone_args, &bp->bio_zone, sizeof(*zone_args)); + if (zone_args->zone_cmd == DISK_ZONE_REPORT_ZONES) + bp->bio_length = + zone_args->zone_params.report.entries_allocated * + sizeof(struct disk_zone_rep_entry); + else + bp->bio_length = 0; + + g_io_request(bp, cp); + error = biowait(bp, "gzone"); + bcopy(&bp->bio_zone, zone_args, sizeof(*zone_args)); + g_destroy_bio(bp); + return (error); +} + +int g_io_flush(struct g_consumer *cp) { struct bio *bp; int error; g_trace(G_T_BIO, "bio_flush(%s)", cp->provider->name); bp = g_alloc_bio(); bp->bio_cmd = BIO_FLUSH; bp->bio_flags |= BIO_ORDERED; bp->bio_done = NULL; bp->bio_attribute = NULL; bp->bio_offset = cp->provider->mediasize; bp->bio_length = 0; bp->bio_data = NULL; g_io_request(bp, cp); error = biowait(bp, "gflush"); g_destroy_bio(bp); return (error); } static int g_io_check(struct bio *bp) { struct g_consumer *cp; struct g_provider *pp; off_t excess; int error; cp = bp->bio_from; pp = bp->bio_to; /* Fail if access counters don't allow the operation */ switch(bp->bio_cmd) { case BIO_READ: case BIO_GETATTR: if (cp->acr == 0) return (EPERM); break; case BIO_WRITE: case BIO_DELETE: case BIO_FLUSH: if (cp->acw == 0) return (EPERM); break; + case BIO_ZONE: + if ((bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES) || + (bp->bio_zone.zone_cmd == DISK_ZONE_GET_PARAMS)) { + if (cp->acr == 0) + return (EPERM); + } else if (cp->acw == 0) + return (EPERM); + break; default: return (EPERM); } /* if provider is marked for error, don't disturb. */ if (pp->error) return (pp->error); if (cp->flags & G_CF_ORPHAN) return (ENXIO); switch(bp->bio_cmd) { case BIO_READ: case BIO_WRITE: case BIO_DELETE: /* Zero sectorsize or mediasize is probably a lack of media. */ if (pp->sectorsize == 0 || pp->mediasize == 0) return (ENXIO); /* Reject I/O not on sector boundary */ if (bp->bio_offset % pp->sectorsize) return (EINVAL); /* Reject I/O not integral sector long */ if (bp->bio_length % pp->sectorsize) return (EINVAL); /* Reject requests before or past the end of media. */ if (bp->bio_offset < 0) return (EIO); if (bp->bio_offset > pp->mediasize) return (EIO); /* Truncate requests to the end of provider's media. */ excess = bp->bio_offset + bp->bio_length; if (excess > bp->bio_to->mediasize) { KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 || round_page(bp->bio_ma_offset + bp->bio_length) / PAGE_SIZE == bp->bio_ma_n, ("excess bio %p too short", bp)); excess -= bp->bio_to->mediasize; bp->bio_length -= excess; if ((bp->bio_flags & BIO_UNMAPPED) != 0) { bp->bio_ma_n = round_page(bp->bio_ma_offset + bp->bio_length) / PAGE_SIZE; } if (excess > 0) CTR3(KTR_GEOM, "g_down truncated bio " "%p provider %s by %d", bp, bp->bio_to->name, excess); } /* Deliver zero length transfers right here. */ if (bp->bio_length == 0) { CTR2(KTR_GEOM, "g_down terminated 0-length " "bp %p provider %s", bp, bp->bio_to->name); return (0); } if ((bp->bio_flags & BIO_UNMAPPED) != 0 && (bp->bio_to->flags & G_PF_ACCEPT_UNMAPPED) == 0 && (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { if ((error = g_io_transient_map_bio(bp)) >= 0) return (error); } break; default: break; } return (EJUSTRETURN); } /* * bio classification support. * * g_register_classifier() and g_unregister_classifier() * are used to add/remove a classifier from the list. * The list is protected using the g_bio_run_down lock, * because the classifiers are called in this path. * * g_io_request() passes bio's that are not already classified * (i.e. those with bio_classifier1 == NULL) to g_run_classifiers().
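 * A hook might be registered as, e.g. (illustrative sketch;
 * my_classify is a made-up callback name):
 *
 *	static int my_classify(void *arg, struct bio *bp);
 *	static struct g_classifier_hook my_hook = { .func = my_classify };
 *
 *	g_register_classifier(&my_hook);
 *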
* Classifiers can store their result in the two fields * bio_classifier1 and bio_classifier2. * A classifier that updates one of the fields should * return a non-zero value. * If no classifier updates the field, g_run_classifiers() sets * bio_classifier1 = BIO_NOTCLASSIFIED to avoid further calls. */ int g_register_classifier(struct g_classifier_hook *hook) { g_bioq_lock(&g_bio_run_down); TAILQ_INSERT_TAIL(&g_classifier_tailq, hook, link); g_bioq_unlock(&g_bio_run_down); return (0); } void g_unregister_classifier(struct g_classifier_hook *hook) { struct g_classifier_hook *entry; g_bioq_lock(&g_bio_run_down); TAILQ_FOREACH(entry, &g_classifier_tailq, link) { if (entry == hook) { TAILQ_REMOVE(&g_classifier_tailq, hook, link); break; } } g_bioq_unlock(&g_bio_run_down); } static void g_run_classifiers(struct bio *bp) { struct g_classifier_hook *hook; int classified = 0; TAILQ_FOREACH(hook, &g_classifier_tailq, link) classified |= hook->func(hook->arg, bp); if (!classified) bp->bio_classifier1 = BIO_NOTCLASSIFIED; } void g_io_request(struct bio *bp, struct g_consumer *cp) { struct g_provider *pp; struct mtx *mtxp; int direct, error, first; uint8_t cmd; KASSERT(cp != NULL, ("NULL cp in g_io_request")); KASSERT(bp != NULL, ("NULL bp in g_io_request")); pp = cp->provider; KASSERT(pp != NULL, ("consumer not attached in g_io_request")); #ifdef DIAGNOSTIC KASSERT(bp->bio_driver1 == NULL, ("bio_driver1 used by the consumer (geom %s)", cp->geom->name)); KASSERT(bp->bio_driver2 == NULL, ("bio_driver2 used by the consumer (geom %s)", cp->geom->name)); KASSERT(bp->bio_pflags == 0, ("bio_pflags used by the consumer (geom %s)", cp->geom->name)); /* * Remember consumer's private fields, so we can detect if they were * modified by the provider. */ bp->_bio_caller1 = bp->bio_caller1; bp->_bio_caller2 = bp->bio_caller2; bp->_bio_cflags = bp->bio_cflags; #endif cmd = bp->bio_cmd; if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_GETATTR) { KASSERT(bp->bio_data != NULL, ("NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); } if (cmd == BIO_DELETE || cmd == BIO_FLUSH) { KASSERT(bp->bio_data == NULL, ("non-NULL bp->data in g_io_request(cmd=%hu)", bp->bio_cmd)); } if (cmd == BIO_READ || cmd == BIO_WRITE || cmd == BIO_DELETE) { KASSERT(bp->bio_offset % cp->provider->sectorsize == 0, ("wrong offset %jd for sectorsize %u", bp->bio_offset, cp->provider->sectorsize)); KASSERT(bp->bio_length % cp->provider->sectorsize == 0, ("wrong length %jd for sectorsize %u", bp->bio_length, cp->provider->sectorsize)); } g_trace(G_T_BIO, "bio_request(%p) from %p(%s) to %p(%s) cmd %d", bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd); bp->bio_from = cp; bp->bio_to = pp; bp->bio_error = 0; bp->bio_completed = 0; KASSERT(!(bp->bio_flags & BIO_ONQUEUE), ("Bio already on queue bp=%p", bp)); if ((g_collectstats & G_STATS_CONSUMERS) != 0 || ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) binuptime(&bp->bio_t0); else getbinuptime(&bp->bio_t0); #ifdef GET_STACK_USAGE direct = (cp->flags & G_CF_DIRECT_SEND) != 0 && (pp->flags & G_PF_DIRECT_RECEIVE) != 0 && !g_is_geom_thread(curthread) && ((pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 || (bp->bio_flags & BIO_UNMAPPED) == 0 || THREAD_CAN_SLEEP()) && pace == 0; if (direct) { /* Block direct execution if less than half of the stack is left.
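 * GET_STACK_USAGE() returns the thread's total stack size in st and the
 * amount already consumed in su, so the su * 2 > st test below falls
 * back to queueing once more than half of the stack is in use.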
*/ size_t st, su; GET_STACK_USAGE(st, su); if (su * 2 > st) direct = 0; } #else direct = 0; #endif if (!TAILQ_EMPTY(&g_classifier_tailq) && !bp->bio_classifier1) { g_bioq_lock(&g_bio_run_down); g_run_classifiers(bp); g_bioq_unlock(&g_bio_run_down); } /* * The statistics collection is lockless, as such, but we * cannot update one instance of the statistics from more * than one thread at a time, so grab the lock first. */ mtxp = mtx_pool_find(mtxpool_sleep, pp); mtx_lock(mtxp); if (g_collectstats & G_STATS_PROVIDERS) devstat_start_transaction(pp->stat, &bp->bio_t0); if (g_collectstats & G_STATS_CONSUMERS) devstat_start_transaction(cp->stat, &bp->bio_t0); pp->nstart++; cp->nstart++; mtx_unlock(mtxp); if (direct) { error = g_io_check(bp); if (error >= 0) { CTR3(KTR_GEOM, "g_io_request g_io_check on bp %p " "provider %s returned %d", bp, bp->bio_to->name, error); g_io_deliver(bp, error); return; } bp->bio_to->geom->start(bp); } else { g_bioq_lock(&g_bio_run_down); first = TAILQ_EMPTY(&g_bio_run_down.bio_queue); TAILQ_INSERT_TAIL(&g_bio_run_down.bio_queue, bp, bio_queue); bp->bio_flags |= BIO_ONQUEUE; g_bio_run_down.bio_queue_length++; g_bioq_unlock(&g_bio_run_down); /* Pass it on down. */ if (first) wakeup(&g_wait_down); } } void g_io_deliver(struct bio *bp, int error) { struct bintime now; struct g_consumer *cp; struct g_provider *pp; struct mtx *mtxp; int direct, first; KASSERT(bp != NULL, ("NULL bp in g_io_deliver")); pp = bp->bio_to; KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver")); cp = bp->bio_from; if (cp == NULL) { bp->bio_error = error; bp->bio_done(bp); return; } KASSERT(cp != NULL, ("NULL bio_from in g_io_deliver")); KASSERT(cp->geom != NULL, ("NULL bio_from->geom in g_io_deliver")); #ifdef DIAGNOSTIC /* * Some classes - GJournal in particular - can modify bio's * private fields while the bio is in transit; G_GEOM_VOLATILE_BIO * flag means it's an expected behaviour for that particular geom. */ if ((cp->geom->flags & G_GEOM_VOLATILE_BIO) == 0) { KASSERT(bp->bio_caller1 == bp->_bio_caller1, ("bio_caller1 used by the provider %s", pp->name)); KASSERT(bp->bio_caller2 == bp->_bio_caller2, ("bio_caller2 used by the provider %s", pp->name)); KASSERT(bp->bio_cflags == bp->_bio_cflags, ("bio_cflags used by the provider %s", pp->name)); } #endif KASSERT(bp->bio_completed >= 0, ("bio_completed can't be less than 0")); KASSERT(bp->bio_completed <= bp->bio_length, ("bio_completed can't be greater than bio_length")); g_trace(G_T_BIO, "g_io_deliver(%p) from %p(%s) to %p(%s) cmd %d error %d off %jd len %jd", bp, cp, cp->geom->name, pp, pp->name, bp->bio_cmd, error, (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); KASSERT(!(bp->bio_flags & BIO_ONQUEUE), ("Bio already on queue bp=%p", bp)); /* * XXX: the next two don't belong here */ bp->bio_bcount = bp->bio_length; bp->bio_resid = bp->bio_bcount - bp->bio_completed; #ifdef GET_STACK_USAGE direct = (pp->flags & G_PF_DIRECT_SEND) && (cp->flags & G_CF_DIRECT_RECEIVE) && !g_is_geom_thread(curthread); if (direct) { /* Block direct execution if less than half of the stack is left. */ size_t st, su; GET_STACK_USAGE(st, su); if (su * 2 > st) direct = 0; } #else direct = 0; #endif /* * The statistics collection is lockless, as such, but we * cannot update one instance of the statistics from more * than one thread at a time, so grab the lock first.
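 * The mutex below comes from the mtxpool_sleep pool, hashed on the
 * consumer pointer, mirroring what g_io_request() does with the
 * provider on the way down.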
*/ if ((g_collectstats & G_STATS_CONSUMERS) != 0 || ((g_collectstats & G_STATS_PROVIDERS) != 0 && pp->stat != NULL)) binuptime(&now); mtxp = mtx_pool_find(mtxpool_sleep, cp); mtx_lock(mtxp); if (g_collectstats & G_STATS_PROVIDERS) devstat_end_transaction_bio_bt(pp->stat, bp, &now); if (g_collectstats & G_STATS_CONSUMERS) devstat_end_transaction_bio_bt(cp->stat, bp, &now); cp->nend++; pp->nend++; mtx_unlock(mtxp); if (error != ENOMEM) { bp->bio_error = error; if (direct) { biodone(bp); } else { g_bioq_lock(&g_bio_run_up); first = TAILQ_EMPTY(&g_bio_run_up.bio_queue); TAILQ_INSERT_TAIL(&g_bio_run_up.bio_queue, bp, bio_queue); bp->bio_flags |= BIO_ONQUEUE; g_bio_run_up.bio_queue_length++; g_bioq_unlock(&g_bio_run_up); if (first) wakeup(&g_wait_up); } return; } if (bootverbose) printf("ENOMEM %p on %p(%s)\n", bp, pp, pp->name); bp->bio_children = 0; bp->bio_inbed = 0; bp->bio_driver1 = NULL; bp->bio_driver2 = NULL; bp->bio_pflags = 0; g_io_request(bp, cp); pace = 1; return; } SYSCTL_DECL(_kern_geom); static long transient_maps; SYSCTL_LONG(_kern_geom, OID_AUTO, transient_maps, CTLFLAG_RD, &transient_maps, 0, "Total count of the transient mapping requests"); u_int transient_map_retries = 10; SYSCTL_UINT(_kern_geom, OID_AUTO, transient_map_retries, CTLFLAG_RW, &transient_map_retries, 0, "Max count of retries used before giving up on creating transient map"); int transient_map_hard_failures; SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_hard_failures, CTLFLAG_RD, &transient_map_hard_failures, 0, "Failures to establish the transient mapping due to retry attempts " "exhausted"); int transient_map_soft_failures; SYSCTL_INT(_kern_geom, OID_AUTO, transient_map_soft_failures, CTLFLAG_RD, &transient_map_soft_failures, 0, "Count of retried failures to establish the transient mapping"); int inflight_transient_maps; SYSCTL_INT(_kern_geom, OID_AUTO, inflight_transient_maps, CTLFLAG_RD, &inflight_transient_maps, 0, "Current count of the active transient maps"); static int g_io_transient_map_bio(struct bio *bp) { vm_offset_t addr; long size; u_int retried; KASSERT(unmapped_buf_allowed, ("unmapped disabled")); size = round_page(bp->bio_ma_offset + bp->bio_length); KASSERT(size / PAGE_SIZE == bp->bio_ma_n, ("Bio too short %p", bp)); addr = 0; retried = 0; atomic_add_long(&transient_maps, 1); retry: if (vmem_alloc(transient_arena, size, M_BESTFIT | M_NOWAIT, &addr)) { if (transient_map_retries != 0 && retried >= transient_map_retries) { CTR2(KTR_GEOM, "g_down cannot map bp %p provider %s", bp, bp->bio_to->name); atomic_add_int(&transient_map_hard_failures, 1); return (EDEADLK/* XXXKIB */); } else { /* * Naive attempt to quiesce the I/O to get more * in-flight requests completed and defragment * the transient_arena.
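 * Each retry pauses for roughly 100ms (hz / 10 ticks), so with the
 * default transient_map_retries of 10 the mapping attempt is abandoned
 * after about one second.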
*/ CTR3(KTR_GEOM, "g_down retrymap bp %p provider %s r %d", bp, bp->bio_to->name, retried); pause("g_d_tra", hz / 10); retried++; atomic_add_int(&transient_map_soft_failures, 1); goto retry; } } atomic_add_int(&inflight_transient_maps, 1); pmap_qenter((vm_offset_t)addr, bp->bio_ma, OFF_TO_IDX(size)); bp->bio_data = (caddr_t)addr + bp->bio_ma_offset; bp->bio_flags |= BIO_TRANSIENT_MAPPING; bp->bio_flags &= ~BIO_UNMAPPED; return (EJUSTRETURN); } void g_io_schedule_down(struct thread *tp __unused) { struct bio *bp; int error; for(;;) { g_bioq_lock(&g_bio_run_down); bp = g_bioq_first(&g_bio_run_down); if (bp == NULL) { CTR0(KTR_GEOM, "g_down going to sleep"); msleep(&g_wait_down, &g_bio_run_down.bio_queue_lock, PRIBIO | PDROP, "-", 0); continue; } CTR0(KTR_GEOM, "g_down has work to do"); g_bioq_unlock(&g_bio_run_down); if (pace != 0) { /* * There has been at least one memory allocation * failure since the last I/O completed. Pause 1ms to * give the system a chance to free up memory. We only * do this once because a large number of allocations * can fail in the direct dispatch case and there's no * relationship between the number of these failures and * the length of the outage. If there's still an outage, * we'll pause again and again until it's * resolved. Older versions paused longer and once per * allocation failure. This was OK for a single threaded * g_down, but with direct dispatch would lead to max of * 10 IOPs for minutes at a time when transient memory * issues prevented allocation for a batch of requests * from the upper layers. * * XXX This pacing is really lame. It needs to be solved * by other methods. This is OK only because the worst * case scenario is so rare. In the worst case scenario * all memory is tied up waiting for I/O to complete * which can never happen since we can't allocate bios * for that I/O. */ CTR0(KTR_GEOM, "g_down pacing self"); pause("g_down", min(hz/1000, 1)); pace = 0; } CTR2(KTR_GEOM, "g_down processing bp %p provider %s", bp, bp->bio_to->name); error = g_io_check(bp); if (error >= 0) { CTR3(KTR_GEOM, "g_down g_io_check on bp %p provider " "%s returned %d", bp, bp->bio_to->name, error); g_io_deliver(bp, error); continue; } THREAD_NO_SLEEPING(); CTR4(KTR_GEOM, "g_down starting bp %p provider %s off %ld " "len %ld", bp, bp->bio_to->name, bp->bio_offset, bp->bio_length); bp->bio_to->geom->start(bp); THREAD_SLEEPING_OK(); } } void bio_taskqueue(struct bio *bp, bio_task_t *func, void *arg) { bp->bio_task = func; bp->bio_task_arg = arg; /* * The taskqueue is actually just a second queue off the "up" * queue, so we use the same lock. 
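 * That is why the code below takes the g_bio_run_up lock but links the
 * bio onto g_bio_run_task's queue, and why the wakeup targets
 * g_wait_up: g_io_schedule_up() drains the task queue first.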
*/ g_bioq_lock(&g_bio_run_up); KASSERT(!(bp->bio_flags & BIO_ONQUEUE), ("Bio already on queue bp=%p target taskq", bp)); bp->bio_flags |= BIO_ONQUEUE; TAILQ_INSERT_TAIL(&g_bio_run_task.bio_queue, bp, bio_queue); g_bio_run_task.bio_queue_length++; wakeup(&g_wait_up); g_bioq_unlock(&g_bio_run_up); } void g_io_schedule_up(struct thread *tp __unused) { struct bio *bp; for(;;) { g_bioq_lock(&g_bio_run_up); bp = g_bioq_first(&g_bio_run_task); if (bp != NULL) { g_bioq_unlock(&g_bio_run_up); THREAD_NO_SLEEPING(); CTR1(KTR_GEOM, "g_up processing task bp %p", bp); bp->bio_task(bp->bio_task_arg); THREAD_SLEEPING_OK(); continue; } bp = g_bioq_first(&g_bio_run_up); if (bp != NULL) { g_bioq_unlock(&g_bio_run_up); THREAD_NO_SLEEPING(); CTR4(KTR_GEOM, "g_up biodone bp %p provider %s off " "%jd len %ld", bp, bp->bio_to->name, bp->bio_offset, bp->bio_length); biodone(bp); THREAD_SLEEPING_OK(); continue; } CTR0(KTR_GEOM, "g_up going to sleep"); msleep(&g_wait_up, &g_bio_run_up.bio_queue_lock, PRIBIO | PDROP, "-", 0); } } void * g_read_data(struct g_consumer *cp, off_t offset, off_t length, int *error) { struct bio *bp; void *ptr; int errorc; KASSERT(length > 0 && length >= cp->provider->sectorsize && length <= MAXPHYS, ("g_read_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_READ; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; ptr = g_malloc(length, M_WAITOK); bp->bio_data = ptr; g_io_request(bp, cp); errorc = biowait(bp, "gread"); if (error != NULL) *error = errorc; g_destroy_bio(bp); if (errorc) { g_free(ptr); ptr = NULL; } return (ptr); } int g_write_data(struct g_consumer *cp, off_t offset, void *ptr, off_t length) { struct bio *bp; int error; KASSERT(length > 0 && length >= cp->provider->sectorsize && length <= MAXPHYS, ("g_write_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_WRITE; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; bp->bio_data = ptr; g_io_request(bp, cp); error = biowait(bp, "gwrite"); g_destroy_bio(bp); return (error); } int g_delete_data(struct g_consumer *cp, off_t offset, off_t length) { struct bio *bp; int error; KASSERT(length > 0 && length >= cp->provider->sectorsize, ("g_delete_data(): invalid length %jd", (intmax_t)length)); bp = g_alloc_bio(); bp->bio_cmd = BIO_DELETE; bp->bio_done = NULL; bp->bio_offset = offset; bp->bio_length = length; bp->bio_data = NULL; g_io_request(bp, cp); error = biowait(bp, "gdelete"); g_destroy_bio(bp); return (error); } void g_print_bio(struct bio *bp) { const char *pname, *cmd = NULL; if (bp->bio_to != NULL) pname = bp->bio_to->name; else pname = "[unknown]"; switch (bp->bio_cmd) { case BIO_GETATTR: cmd = "GETATTR"; printf("%s[%s(attr=%s)]", pname, cmd, bp->bio_attribute); return; case BIO_FLUSH: cmd = "FLUSH"; printf("%s[%s]", pname, cmd); return; + case BIO_ZONE: { + char *subcmd = NULL; + cmd = "ZONE"; + switch (bp->bio_zone.zone_cmd) { + case DISK_ZONE_OPEN: + subcmd = "OPEN"; + break; + case DISK_ZONE_CLOSE: + subcmd = "CLOSE"; + break; + case DISK_ZONE_FINISH: + subcmd = "FINISH"; + break; + case DISK_ZONE_RWP: + subcmd = "RWP"; + break; + case DISK_ZONE_REPORT_ZONES: + subcmd = "REPORT ZONES"; + break; + case DISK_ZONE_GET_PARAMS: + subcmd = "GET PARAMS"; + break; + default: + subcmd = "UNKNOWN"; + break; + } + printf("%s[%s,%s]", pname, cmd, subcmd); + return; + } case BIO_READ: cmd = "READ"; break; case BIO_WRITE: cmd = "WRITE"; break; case BIO_DELETE: cmd = "DELETE"; break; default: cmd = "UNKNOWN"; 
printf("%s[%s()]", pname, cmd); return; } printf("%s[%s(offset=%jd, length=%jd)]", pname, cmd, (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length); } Index: head/sys/geom/geom_subr.c =================================================================== --- head/sys/geom/geom_subr.c (revision 300206) +++ head/sys/geom/geom_subr.c (revision 300207) @@ -1,1535 +1,1536 @@ /*- * Copyright (c) 2002 Poul-Henning Kamp * Copyright (c) 2002 Networks Associates Technology, Inc. * All rights reserved. * * This software was developed for the FreeBSD Project by Poul-Henning Kamp * and NAI Labs, the Security Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #ifdef KDB #include #endif struct class_list_head g_classes = LIST_HEAD_INITIALIZER(g_classes); static struct g_tailq_head geoms = TAILQ_HEAD_INITIALIZER(geoms); char *g_wait_event, *g_wait_up, *g_wait_down, *g_wait_sim; struct g_hh00 { struct g_class *mp; struct g_provider *pp; off_t size; int error; int post; }; /* * This event offers a new class a chance to taste all preexisting providers. */ static void g_load_class(void *arg, int flag) { struct g_hh00 *hh; struct g_class *mp2, *mp; struct g_geom *gp; struct g_provider *pp; g_topology_assert(); if (flag == EV_CANCEL) /* XXX: can't happen ? 
*/ return; if (g_shutdown) return; hh = arg; mp = hh->mp; hh->error = 0; if (hh->post) { g_free(hh); hh = NULL; } g_trace(G_T_TOPOLOGY, "g_load_class(%s)", mp->name); KASSERT(mp->name != NULL && *mp->name != '\0', ("GEOM class has no name")); LIST_FOREACH(mp2, &g_classes, class) { if (mp2 == mp) { printf("The GEOM class %s is already loaded.\n", mp2->name); if (hh != NULL) hh->error = EEXIST; return; } else if (strcmp(mp2->name, mp->name) == 0) { printf("A GEOM class %s is already loaded.\n", mp2->name); if (hh != NULL) hh->error = EEXIST; return; } } LIST_INIT(&mp->geom); LIST_INSERT_HEAD(&g_classes, mp, class); if (mp->init != NULL) mp->init(mp); if (mp->taste == NULL) return; LIST_FOREACH(mp2, &g_classes, class) { if (mp == mp2) continue; LIST_FOREACH(gp, &mp2->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { mp->taste(mp, pp, 0); g_topology_assert(); } } } } static int g_unload_class(struct g_class *mp) { struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp; int error; g_topology_lock(); g_trace(G_T_TOPOLOGY, "g_unload_class(%s)", mp->name); retry: G_VALID_CLASS(mp); LIST_FOREACH(gp, &mp->geom, geom) { /* We refuse to unload if anything is open */ LIST_FOREACH(pp, &gp->provider, provider) if (pp->acr || pp->acw || pp->ace) { g_topology_unlock(); return (EBUSY); } LIST_FOREACH(cp, &gp->consumer, consumer) if (cp->acr || cp->acw || cp->ace) { g_topology_unlock(); return (EBUSY); } /* If the geom is withering, wait for it to finish. */ if (gp->flags & G_GEOM_WITHER) { g_topology_sleep(mp, 1); goto retry; } } /* * We allow unloading if we have no geoms, or a class * method we can use to get rid of them. */ if (!LIST_EMPTY(&mp->geom) && mp->destroy_geom == NULL) { g_topology_unlock(); return (EOPNOTSUPP); } /* Bar new entries */ mp->taste = NULL; mp->config = NULL; LIST_FOREACH(gp, &mp->geom, geom) { error = mp->destroy_geom(NULL, mp, gp); if (error != 0) { g_topology_unlock(); return (error); } } /* Wait for withering to finish. */ for (;;) { gp = LIST_FIRST(&mp->geom); if (gp == NULL) break; KASSERT(gp->flags & G_GEOM_WITHER, ("Non-withering geom in class %s", mp->name)); g_topology_sleep(mp, 1); } G_VALID_CLASS(mp); if (mp->fini != NULL) mp->fini(mp); LIST_REMOVE(mp, class); g_topology_unlock(); return (0); } int g_modevent(module_t mod, int type, void *data) { struct g_hh00 *hh; int error; static int g_ignition; struct g_class *mp; mp = data; if (mp->version != G_VERSION) { printf("GEOM class %s has wrong version %x\n", mp->name, mp->version); return (EINVAL); } if (!g_ignition) { g_ignition++; g_init(); } error = EOPNOTSUPP; switch (type) { case MOD_LOAD: g_trace(G_T_TOPOLOGY, "g_modevent(%s, LOAD)", mp->name); hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); hh->mp = mp; /* * Once the system is not cold, MOD_LOAD calls will be * from userland and the g_event thread will be able * to acknowledge their completion. */
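/*
 * While cold, nothing can sleep waiting for the event, so hh->post = 1
 * below tells g_load_class() to free hh itself once it has run;
 * otherwise we wait for the event and collect hh->error ourselves.
 */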
*/ if (cold) { hh->post = 1; error = g_post_event(g_load_class, hh, M_WAITOK, NULL); } else { error = g_waitfor_event(g_load_class, hh, M_WAITOK, NULL); if (error == 0) error = hh->error; g_free(hh); } break; case MOD_UNLOAD: g_trace(G_T_TOPOLOGY, "g_modevent(%s, UNLOAD)", mp->name); DROP_GIANT(); error = g_unload_class(mp); PICKUP_GIANT(); if (error == 0) { KASSERT(LIST_EMPTY(&mp->geom), ("Unloaded class (%s) still has geom", mp->name)); } break; } return (error); } static void g_retaste_event(void *arg, int flag) { struct g_class *mp, *mp2; struct g_geom *gp; struct g_hh00 *hh; struct g_provider *pp; struct g_consumer *cp; g_topology_assert(); if (flag == EV_CANCEL) /* XXX: can't happen ? */ return; if (g_shutdown || g_notaste) return; hh = arg; mp = hh->mp; hh->error = 0; if (hh->post) { g_free(hh); hh = NULL; } g_trace(G_T_TOPOLOGY, "g_retaste(%s)", mp->name); LIST_FOREACH(mp2, &g_classes, class) { LIST_FOREACH(gp, &mp2->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { if (pp->acr || pp->acw || pp->ace) continue; LIST_FOREACH(cp, &pp->consumers, consumers) { if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; } if (cp != NULL) { cp->flags |= G_CF_ORPHAN; g_wither_geom(cp->geom, ENXIO); } mp->taste(mp, pp, 0); g_topology_assert(); } } } } int g_retaste(struct g_class *mp) { struct g_hh00 *hh; int error; if (mp->taste == NULL) return (EINVAL); hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); hh->mp = mp; if (cold) { hh->post = 1; error = g_post_event(g_retaste_event, hh, M_WAITOK, NULL); } else { error = g_waitfor_event(g_retaste_event, hh, M_WAITOK, NULL); if (error == 0) error = hh->error; g_free(hh); } return (error); } struct g_geom * g_new_geomf(struct g_class *mp, const char *fmt, ...) { struct g_geom *gp; va_list ap; struct sbuf *sb; g_topology_assert(); G_VALID_CLASS(mp); sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); gp = g_malloc(sizeof *gp, M_WAITOK | M_ZERO); gp->name = g_malloc(sbuf_len(sb) + 1, M_WAITOK | M_ZERO); gp->class = mp; gp->rank = 1; LIST_INIT(&gp->consumer); LIST_INIT(&gp->provider); LIST_INSERT_HEAD(&mp->geom, gp, geom); TAILQ_INSERT_HEAD(&geoms, gp, geoms); strcpy(gp->name, sbuf_data(sb)); sbuf_delete(sb); /* Fill in defaults from class */ gp->start = mp->start; gp->spoiled = mp->spoiled; gp->attrchanged = mp->attrchanged; gp->providergone = mp->providergone; gp->dumpconf = mp->dumpconf; gp->access = mp->access; gp->orphan = mp->orphan; gp->ioctl = mp->ioctl; gp->resize = mp->resize; return (gp); } void g_destroy_geom(struct g_geom *gp) { g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_destroy_geom(%p(%s))", gp, gp->name); KASSERT(LIST_EMPTY(&gp->consumer), ("g_destroy_geom(%s) with consumer(s) [%p]", gp->name, LIST_FIRST(&gp->consumer))); KASSERT(LIST_EMPTY(&gp->provider), ("g_destroy_geom(%s) with provider(s) [%p]", gp->name, LIST_FIRST(&gp->provider))); g_cancel_event(gp); LIST_REMOVE(gp, geom); TAILQ_REMOVE(&geoms, gp, geoms); g_free(gp->name); g_free(gp); } /* * This function is called (repeatedly) until the geom has withered away. */ void g_wither_geom(struct g_geom *gp, int error) { struct g_provider *pp; g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_wither_geom(%p(%s))", gp, gp->name); if (!(gp->flags & G_GEOM_WITHER)) { gp->flags |= G_GEOM_WITHER; LIST_FOREACH(pp, &gp->provider, provider) if (!(pp->flags & G_PF_ORPHAN)) g_orphan_provider(pp, error); } g_do_wither(); } /* * Convenience function to destroy a particular provider. 
*/ void g_wither_provider(struct g_provider *pp, int error) { pp->flags |= G_PF_WITHER; if (!(pp->flags & G_PF_ORPHAN)) g_orphan_provider(pp, error); } /* * This function is called (repeatedly) until the geom has withered away. */ void g_wither_geom_close(struct g_geom *gp, int error) { struct g_consumer *cp; g_topology_assert(); G_VALID_GEOM(gp); g_trace(G_T_TOPOLOGY, "g_wither_geom_close(%p(%s))", gp, gp->name); LIST_FOREACH(cp, &gp->consumer, consumer) if (cp->acr || cp->acw || cp->ace) g_access(cp, -cp->acr, -cp->acw, -cp->ace); g_wither_geom(gp, error); } /* * This function is called (repeatedly) until we can't wash away any more * withered bits at present. */ void g_wither_washer() { struct g_class *mp; struct g_geom *gp, *gp2; struct g_provider *pp, *pp2; struct g_consumer *cp, *cp2; g_topology_assert(); LIST_FOREACH(mp, &g_classes, class) { LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { if (!(pp->flags & G_PF_WITHER)) continue; if (LIST_EMPTY(&pp->consumers)) g_destroy_provider(pp); } if (!(gp->flags & G_GEOM_WITHER)) continue; LIST_FOREACH_SAFE(pp, &gp->provider, provider, pp2) { if (LIST_EMPTY(&pp->consumers)) g_destroy_provider(pp); } LIST_FOREACH_SAFE(cp, &gp->consumer, consumer, cp2) { if (cp->acr || cp->acw || cp->ace) continue; if (cp->provider != NULL) g_detach(cp); g_destroy_consumer(cp); } if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) g_destroy_geom(gp); } } } struct g_consumer * g_new_consumer(struct g_geom *gp) { struct g_consumer *cp; g_topology_assert(); G_VALID_GEOM(gp); KASSERT(!(gp->flags & G_GEOM_WITHER), ("g_new_consumer on WITHERing geom(%s) (class %s)", gp->name, gp->class->name)); KASSERT(gp->orphan != NULL, ("g_new_consumer on geom(%s) (class %s) without orphan", gp->name, gp->class->name)); cp = g_malloc(sizeof *cp, M_WAITOK | M_ZERO); cp->geom = gp; cp->stat = devstat_new_entry(cp, -1, 0, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); LIST_INSERT_HEAD(&gp->consumer, cp, consumer); return(cp); } void g_destroy_consumer(struct g_consumer *cp) { struct g_geom *gp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_destroy_consumer(%p)", cp); KASSERT (cp->provider == NULL, ("g_destroy_consumer but attached")); KASSERT (cp->acr == 0, ("g_destroy_consumer with acr")); KASSERT (cp->acw == 0, ("g_destroy_consumer with acw")); KASSERT (cp->ace == 0, ("g_destroy_consumer with ace")); g_cancel_event(cp); gp = cp->geom; LIST_REMOVE(cp, consumer); devstat_remove_entry(cp->stat); g_free(cp); if (gp->flags & G_GEOM_WITHER) g_do_wither(); } static void g_new_provider_event(void *arg, int flag) { struct g_class *mp; struct g_provider *pp; struct g_consumer *cp, *next_cp; g_topology_assert(); if (flag == EV_CANCEL) return; if (g_shutdown) return; pp = arg; G_VALID_PROVIDER(pp); KASSERT(!(pp->flags & G_PF_WITHER), ("g_new_provider_event but withered")); LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, next_cp) { if ((cp->flags & G_CF_ORPHAN) == 0 && cp->geom->attrchanged != NULL) cp->geom->attrchanged(cp, "GEOM::media"); } if (g_notaste) return; LIST_FOREACH(mp, &g_classes, class) { if (mp->taste == NULL) continue; LIST_FOREACH(cp, &pp->consumers, consumers) if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; if (cp != NULL) continue; mp->taste(mp, pp, 0); g_topology_assert(); } } struct g_provider * g_new_providerf(struct g_geom *gp, const char *fmt, ...)
{ struct g_provider *pp; struct sbuf *sb; va_list ap; g_topology_assert(); G_VALID_GEOM(gp); KASSERT(gp->access != NULL, ("new provider on geom(%s) without ->access (class %s)", gp->name, gp->class->name)); KASSERT(gp->start != NULL, ("new provider on geom(%s) without ->start (class %s)", gp->name, gp->class->name)); KASSERT(!(gp->flags & G_GEOM_WITHER), ("new provider on WITHERing geom(%s) (class %s)", gp->name, gp->class->name)); sb = sbuf_new_auto(); va_start(ap, fmt); sbuf_vprintf(sb, fmt, ap); va_end(ap); sbuf_finish(sb); pp = g_malloc(sizeof *pp + sbuf_len(sb) + 1, M_WAITOK | M_ZERO); pp->name = (char *)(pp + 1); strcpy(pp->name, sbuf_data(sb)); sbuf_delete(sb); LIST_INIT(&pp->consumers); pp->error = ENXIO; pp->geom = gp; pp->stat = devstat_new_entry(pp, -1, 0, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT, DEVSTAT_PRIORITY_MAX); LIST_INSERT_HEAD(&gp->provider, pp, provider); g_post_event(g_new_provider_event, pp, M_WAITOK, pp, gp, NULL); return (pp); } void g_error_provider(struct g_provider *pp, int error) { /* G_VALID_PROVIDER(pp); We may not have g_topology */ pp->error = error; } static void g_resize_provider_event(void *arg, int flag) { struct g_hh00 *hh; struct g_class *mp; struct g_geom *gp; struct g_provider *pp; struct g_consumer *cp, *cp2; off_t size; g_topology_assert(); if (g_shutdown) return; hh = arg; pp = hh->pp; size = hh->size; g_free(hh); G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "g_resize_provider_event(%p)", pp); LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { gp = cp->geom; if (gp->resize == NULL && size < pp->mediasize) { cp->flags |= G_CF_ORPHAN; cp->geom->orphan(cp); } } pp->mediasize = size; LIST_FOREACH_SAFE(cp, &pp->consumers, consumers, cp2) { gp = cp->geom; if (gp->resize != NULL) gp->resize(cp); } /* * After resizing, the previously invalid GEOM class metadata * might become valid. This means we should retaste. */ LIST_FOREACH(mp, &g_classes, class) { if (mp->taste == NULL) continue; LIST_FOREACH(cp, &pp->consumers, consumers) if (cp->geom->class == mp && (cp->flags & G_CF_ORPHAN) == 0) break; if (cp != NULL) continue; mp->taste(mp, pp, 0); g_topology_assert(); } } void g_resize_provider(struct g_provider *pp, off_t size) { struct g_hh00 *hh; G_VALID_PROVIDER(pp); if (size == pp->mediasize) return; hh = g_malloc(sizeof *hh, M_WAITOK | M_ZERO); hh->pp = pp; hh->size = size; g_post_event(g_resize_provider_event, hh, M_WAITOK, NULL); } #ifndef _PATH_DEV #define _PATH_DEV "/dev/" #endif struct g_provider * g_provider_by_name(char const *arg) { struct g_class *cp; struct g_geom *gp; struct g_provider *pp, *wpp; if (strncmp(arg, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) arg += sizeof(_PATH_DEV) - 1; wpp = NULL; LIST_FOREACH(cp, &g_classes, class) { LIST_FOREACH(gp, &cp->geom, geom) { LIST_FOREACH(pp, &gp->provider, provider) { if (strcmp(arg, pp->name) != 0) continue; if ((gp->flags & G_GEOM_WITHER) == 0 && (pp->flags & G_PF_WITHER) == 0) return (pp); else wpp = pp; } } } return (wpp); } void g_destroy_provider(struct g_provider *pp) { struct g_geom *gp; g_topology_assert(); G_VALID_PROVIDER(pp); KASSERT(LIST_EMPTY(&pp->consumers), ("g_destroy_provider but attached")); KASSERT (pp->acr == 0, ("g_destroy_provider with acr")); KASSERT (pp->acw == 0, ("g_destroy_provider with acw")); KASSERT (pp->ace == 0, ("g_destroy_provider with ace")); g_cancel_event(pp); LIST_REMOVE(pp, provider); gp = pp->geom; devstat_remove_entry(pp->stat); /* * If a callback was provided, send notification that the provider * is now gone. 
*/ if (gp->providergone != NULL) gp->providergone(pp); g_free(pp); if ((gp->flags & G_GEOM_WITHER)) g_do_wither(); } /* * We keep the "geoms" list sorted by topological order (== increasing * numerical rank) at all times. * When an attach is done, the attaching geom's rank is invalidated * and it is moved to the tail of the list. * All geoms later in the sequence have their ranks reevaluated in * sequence. If we cannot assign rank to a geom because its * prerequisites do not have rank, we move that element to the tail * of the sequence with invalid rank as well. * At some point we encounter our original geom and if we still fail * to assign it a rank, there must be a loop and we fall back to * g_attach(), which detaches again and calls redo_rank again * to fix up the damage. * It would be much simpler code-wise to do it recursively, but we * can't risk that on the kernel stack. */ static int redo_rank(struct g_geom *gp) { struct g_consumer *cp; struct g_geom *gp1, *gp2; int n, m; g_topology_assert(); G_VALID_GEOM(gp); /* Invalidate this geom's rank and move it to the tail */ gp1 = TAILQ_NEXT(gp, geoms); if (gp1 != NULL) { gp->rank = 0; TAILQ_REMOVE(&geoms, gp, geoms); TAILQ_INSERT_TAIL(&geoms, gp, geoms); } else { gp1 = gp; } /* re-rank the rest of the sequence */ for (; gp1 != NULL; gp1 = gp2) { gp1->rank = 0; m = 1; LIST_FOREACH(cp, &gp1->consumer, consumer) { if (cp->provider == NULL) continue; n = cp->provider->geom->rank; if (n == 0) { m = 0; break; } else if (n >= m) m = n + 1; } gp1->rank = m; gp2 = TAILQ_NEXT(gp1, geoms); /* got a rank, moving on */ if (m != 0) continue; /* no rank to original geom means loop */ if (gp == gp1) return (ELOOP); /* no rank, put it at the end and move on */ TAILQ_REMOVE(&geoms, gp1, geoms); TAILQ_INSERT_TAIL(&geoms, gp1, geoms); } return (0); } int g_attach(struct g_consumer *cp, struct g_provider *pp) { int error; g_topology_assert(); G_VALID_CONSUMER(cp); G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "g_attach(%p, %p)", cp, pp); KASSERT(cp->provider == NULL, ("attach but attached")); cp->provider = pp; LIST_INSERT_HEAD(&pp->consumers, cp, consumers); error = redo_rank(cp->geom); if (error) { LIST_REMOVE(cp, consumers); cp->provider = NULL; redo_rank(cp->geom); } return (error); } void g_detach(struct g_consumer *cp) { struct g_provider *pp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_detach(%p)", cp); KASSERT(cp->provider != NULL, ("detach but not attached")); KASSERT(cp->acr == 0, ("detach but nonzero acr")); KASSERT(cp->acw == 0, ("detach but nonzero acw")); KASSERT(cp->ace == 0, ("detach but nonzero ace")); KASSERT(cp->nstart == cp->nend, ("detach with active requests")); pp = cp->provider; LIST_REMOVE(cp, consumers); cp->provider = NULL; if ((cp->geom->flags & G_GEOM_WITHER) || (pp->geom->flags & G_GEOM_WITHER) || (pp->flags & G_PF_WITHER)) g_do_wither(); redo_rank(cp->geom); } /* * g_access() * * Access-check with delta values. The question asked is "can consumer * "cp" change the access counters by the relative amounts dc[rwe] ?"
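 *
 * For example (illustrative): a consumer currently open r1w0e0 that
 * calls g_access(cp, 0, 1, 0) is asking to add one write count; on
 * success both cp->acw and the provider's aggregate pp->acw are
 * incremented by one below.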
*/ int g_access(struct g_consumer *cp, int dcr, int dcw, int dce) { struct g_provider *pp; int pr,pw,pe; int error; g_topology_assert(); G_VALID_CONSUMER(cp); pp = cp->provider; KASSERT(pp != NULL, ("access but not attached")); G_VALID_PROVIDER(pp); g_trace(G_T_ACCESS, "g_access(%p(%s), %d, %d, %d)", cp, pp->name, dcr, dcw, dce); KASSERT(cp->acr + dcr >= 0, ("access resulting in negative acr")); KASSERT(cp->acw + dcw >= 0, ("access resulting in negative acw")); KASSERT(cp->ace + dce >= 0, ("access resulting in negative ace")); KASSERT(dcr != 0 || dcw != 0 || dce != 0, ("NOP access request")); KASSERT(pp->geom->access != NULL, ("NULL geom->access")); /* * If our class cares about being spoiled, and we have been, we * are probably just ahead of the event telling us that. Fail * now rather than having to unravel this later. */ if (cp->geom->spoiled != NULL && (cp->flags & G_CF_SPOILED) && (dcr > 0 || dcw > 0 || dce > 0)) return (ENXIO); /* * Figure out what counts the provider would have had, if this * consumer had (r0w0e0) at this time. */ pr = pp->acr - cp->acr; pw = pp->acw - cp->acw; pe = pp->ace - cp->ace; g_trace(G_T_ACCESS, "open delta:[r%dw%de%d] old:[r%dw%de%d] provider:[r%dw%de%d] %p(%s)", dcr, dcw, dce, cp->acr, cp->acw, cp->ace, pp->acr, pp->acw, pp->ace, pp, pp->name); /* If foot-shooting is enabled, any open on rank#1 is OK */ if ((g_debugflags & 16) && pp->geom->rank == 1) ; /* If we try exclusive but already write: fail */ else if (dce > 0 && pw > 0) return (EPERM); /* If we try write but already exclusive: fail */ else if (dcw > 0 && pe > 0) return (EPERM); /* If we try to open more but provider is error'ed: fail */ else if ((dcr > 0 || dcw > 0 || dce > 0) && pp->error != 0) return (pp->error); /* Ok then... */ error = pp->geom->access(pp, dcr, dcw, dce); KASSERT(dcr > 0 || dcw > 0 || dce > 0 || error == 0, ("Geom provider %s::%s dcr=%d dcw=%d dce=%d error=%d failed " "closing ->access()", pp->geom->class->name, pp->name, dcr, dcw, dce, error)); if (!error) { /* * If we open first write, spoil any partner consumers. * If we close last write and provider is not errored, * trigger re-taste. 
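The conflict rules in the middle of g_access() reduce to two vetoes plus the error check; the hypothetical helper below restates just that predicate, where pw/pe are the provider's write/exclusive counts after this consumer's own contribution has been subtracted out, as computed above.

/* Sketch: the g_access() conflict test in isolation (hypothetical). */
static int
example_access_conflict(int dcr, int dcw, int dce, int pw, int pe, int perror)
{

	if (dce > 0 && pw > 0)
		return (EPERM);	/* new exclusive vs. existing writers */
	if (dcw > 0 && pe > 0)
		return (EPERM);	/* new write vs. existing exclusive */
	if ((dcr > 0 || dcw > 0 || dce > 0) && perror != 0)
		return (perror); /* no new opens on an errored provider */
	return (0);
}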
*/ if (pp->acw == 0 && dcw != 0) g_spoil(pp, cp); else if (pp->acw != 0 && pp->acw == -dcw && pp->error == 0 && !(pp->geom->flags & G_GEOM_WITHER)) g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); pp->acr += dcr; pp->acw += dcw; pp->ace += dce; cp->acr += dcr; cp->acw += dcw; cp->ace += dce; if (pp->acr != 0 || pp->acw != 0 || pp->ace != 0) KASSERT(pp->sectorsize > 0, ("Provider %s lacks sectorsize", pp->name)); if ((cp->geom->flags & G_GEOM_WITHER) && cp->acr == 0 && cp->acw == 0 && cp->ace == 0) g_do_wither(); } return (error); } int g_handleattr_int(struct bio *bp, const char *attribute, int val) { return (g_handleattr(bp, attribute, &val, sizeof val)); } int g_handleattr_uint16_t(struct bio *bp, const char *attribute, uint16_t val) { return (g_handleattr(bp, attribute, &val, sizeof val)); } int g_handleattr_off_t(struct bio *bp, const char *attribute, off_t val) { return (g_handleattr(bp, attribute, &val, sizeof val)); } int g_handleattr_str(struct bio *bp, const char *attribute, const char *str) { return (g_handleattr(bp, attribute, str, 0)); } int g_handleattr(struct bio *bp, const char *attribute, const void *val, int len) { int error = 0; if (strcmp(bp->bio_attribute, attribute)) return (0); if (len == 0) { bzero(bp->bio_data, bp->bio_length); if (strlcpy(bp->bio_data, val, bp->bio_length) >= bp->bio_length) { printf("%s: %s bio_length %jd len %zu -> EFAULT\n", __func__, bp->bio_to->name, (intmax_t)bp->bio_length, strlen(val)); error = EFAULT; } } else if (bp->bio_length == len) { bcopy(val, bp->bio_data, len); } else { printf("%s: %s bio_length %jd len %d -> EFAULT\n", __func__, bp->bio_to->name, (intmax_t)bp->bio_length, len); error = EFAULT; } if (error == 0) bp->bio_completed = bp->bio_length; g_io_deliver(bp, error); return (1); } int g_std_access(struct g_provider *pp, int dr __unused, int dw __unused, int de __unused) { g_topology_assert(); G_VALID_PROVIDER(pp); return (0); } void g_std_done(struct bio *bp) { struct bio *bp2; bp2 = bp->bio_parent; if (bp2->bio_error == 0) bp2->bio_error = bp->bio_error; bp2->bio_completed += bp->bio_completed; g_destroy_bio(bp); bp2->bio_inbed++; if (bp2->bio_children == bp2->bio_inbed) g_io_deliver(bp2, bp2->bio_error); } /* XXX: maybe this is only g_slice_spoiled */ void g_std_spoiled(struct g_consumer *cp) { struct g_geom *gp; struct g_provider *pp; g_topology_assert(); G_VALID_CONSUMER(cp); g_trace(G_T_TOPOLOGY, "g_std_spoiled(%p)", cp); cp->flags |= G_CF_ORPHAN; g_detach(cp); gp = cp->geom; LIST_FOREACH(pp, &gp->provider, provider) g_orphan_provider(pp, ENXIO); g_destroy_consumer(cp); if (LIST_EMPTY(&gp->provider) && LIST_EMPTY(&gp->consumer)) g_destroy_geom(gp); else gp->flags |= G_GEOM_WITHER; } /* * Spoiling happens when a provider is opened for writing, but consumers * which are configured by in-band data are attached (slicers for instance). * Since the write might potentially change the in-band data, such consumers * need to re-evaluate their existence after the writing session closes. * We do this by (offering to) tear them down when the open for write happens * in return for a re-taste when it closes again. * Together with the fact that such consumers grab an 'e' bit whenever they * are open, regardless of mode, this ends up DTRT. 
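Returning to the g_handleattr() helpers above: a class's ->start method can answer a BIO_GETATTR in a single call, because the helpers deliver the bio themselves and return non-zero once they have matched the attribute. A hedged sketch, with a made-up attribute name:

/* Sketch: answering BIO_GETATTR in a hypothetical class's ->start. */
static void
example_start(struct bio *bp)
{

	if (bp->bio_cmd == BIO_GETATTR) {
		/* Returns non-zero once the bio has been delivered. */
		if (g_handleattr_int(bp, "EXAMPLE::answer", 42))
			return;
	}
	g_io_deliver(bp, EOPNOTSUPP);
}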
*/ static void g_spoil_event(void *arg, int flag) { struct g_provider *pp; struct g_consumer *cp, *cp2; g_topology_assert(); if (flag == EV_CANCEL) return; pp = arg; G_VALID_PROVIDER(pp); g_trace(G_T_TOPOLOGY, "%s %p(%s:%s:%s)", __func__, pp, pp->geom->class->name, pp->geom->name, pp->name); for (cp = LIST_FIRST(&pp->consumers); cp != NULL; cp = cp2) { cp2 = LIST_NEXT(cp, consumers); if ((cp->flags & G_CF_SPOILED) == 0) continue; cp->flags &= ~G_CF_SPOILED; if (cp->geom->spoiled == NULL) continue; cp->geom->spoiled(cp); g_topology_assert(); } } void g_spoil(struct g_provider *pp, struct g_consumer *cp) { struct g_consumer *cp2; g_topology_assert(); G_VALID_PROVIDER(pp); G_VALID_CONSUMER(cp); LIST_FOREACH(cp2, &pp->consumers, consumers) { if (cp2 == cp) continue; /* KASSERT(cp2->acr == 0, ("spoiling cp->acr = %d", cp2->acr)); KASSERT(cp2->acw == 0, ("spoiling cp->acw = %d", cp2->acw)); */ KASSERT(cp2->ace == 0, ("spoiling cp->ace = %d", cp2->ace)); cp2->flags |= G_CF_SPOILED; } g_post_event(g_spoil_event, pp, M_WAITOK, pp, NULL); } static void g_media_changed_event(void *arg, int flag) { struct g_provider *pp; int retaste; g_topology_assert(); if (flag == EV_CANCEL) return; pp = arg; G_VALID_PROVIDER(pp); /* * If provider was not open for writing, queue retaste after spoiling. * If it was, retaste will happen automatically on close. */ retaste = (pp->acw == 0 && pp->error == 0 && !(pp->geom->flags & G_GEOM_WITHER)); g_spoil_event(arg, flag); if (retaste) g_post_event(g_new_provider_event, pp, M_WAITOK, pp, NULL); } int g_media_changed(struct g_provider *pp, int flag) { struct g_consumer *cp; LIST_FOREACH(cp, &pp->consumers, consumers) cp->flags |= G_CF_SPOILED; return (g_post_event(g_media_changed_event, pp, flag, pp, NULL)); } int g_media_gone(struct g_provider *pp, int flag) { struct g_consumer *cp; LIST_FOREACH(cp, &pp->consumers, consumers) cp->flags |= G_CF_SPOILED; return (g_post_event(g_spoil_event, pp, flag, pp, NULL)); } int g_getattr__(const char *attr, struct g_consumer *cp, void *var, int len) { int error, i; i = len; error = g_io_getattr(attr, cp, &i, var); if (error) return (error); if (i != len) return (EINVAL); return (0); } static int g_get_device_prefix_len(const char *name) { int len; if (strncmp(name, "ada", 3) == 0) len = 3; else if (strncmp(name, "ad", 2) == 0) len = 2; else return (0); if (name[len] < '0' || name[len] > '9') return (0); do { len++; } while (name[len] >= '0' && name[len] <= '9'); return (len); } int g_compare_names(const char *namea, const char *nameb) { int deva, devb; if (strcmp(namea, nameb) == 0) return (1); deva = g_get_device_prefix_len(namea); if (deva == 0) return (0); devb = g_get_device_prefix_len(nameb); if (devb == 0) return (0); if (strcmp(namea + deva, nameb + devb) == 0) return (1); return (0); } #if defined(DIAGNOSTIC) || defined(DDB) /* * This function walks the mesh and returns a non-zero integer if it * finds the argument pointer is an object. The return value indicates * which type of object it is believed to be. If topology is not locked, * this function is potentially dangerous, but we don't assert that the * topology lock is held when called from debugger. 
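Note what the prefix stripping in g_get_device_prefix_len() implies: the unit digits are removed along with the "ad"/"ada" prefix, so g_compare_names() matches legacy ata(4) names against their CAM counterparts purely by suffix. A sketch of the expected results (hypothetical caller):

/* Sketch: expected g_compare_names() results under the rules above. */
static void
example_name_compat(void)
{

	/* Both reduce to the suffix "s1a" after ad<unit>/ada<unit>. */
	KASSERT(g_compare_names("ad4s1a", "ada0s1a") == 1, ("ad/ada alias"));
	/* "da" is not a recognized prefix; only an exact match counts. */
	KASSERT(g_compare_names("da0s1a", "ada0s1a") == 0, ("no alias"));
}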
*/ int g_valid_obj(void const *ptr) { struct g_class *mp; struct g_geom *gp; struct g_consumer *cp; struct g_provider *pp; #ifdef KDB if (kdb_active == 0) #endif g_topology_assert(); LIST_FOREACH(mp, &g_classes, class) { if (ptr == mp) return (1); LIST_FOREACH(gp, &mp->geom, geom) { if (ptr == gp) return (2); LIST_FOREACH(cp, &gp->consumer, consumer) if (ptr == cp) return (3); LIST_FOREACH(pp, &gp->provider, provider) if (ptr == pp) return (4); } } return(0); } #endif #ifdef DDB #define gprintf(...) do { \ db_printf("%*s", indent, ""); \ db_printf(__VA_ARGS__); \ } while (0) #define gprintln(...) do { \ gprintf(__VA_ARGS__); \ db_printf("\n"); \ } while (0) #define ADDFLAG(obj, flag, sflag) do { \ if ((obj)->flags & (flag)) { \ if (comma) \ strlcat(str, ",", size); \ strlcat(str, (sflag), size); \ comma = 1; \ } \ } while (0) static char * provider_flags_to_string(struct g_provider *pp, char *str, size_t size) { int comma = 0; bzero(str, size); if (pp->flags == 0) { strlcpy(str, "NONE", size); return (str); } ADDFLAG(pp, G_PF_WITHER, "G_PF_WITHER"); ADDFLAG(pp, G_PF_ORPHAN, "G_PF_ORPHAN"); return (str); } static char * geom_flags_to_string(struct g_geom *gp, char *str, size_t size) { int comma = 0; bzero(str, size); if (gp->flags == 0) { strlcpy(str, "NONE", size); return (str); } ADDFLAG(gp, G_GEOM_WITHER, "G_GEOM_WITHER"); return (str); } static void db_show_geom_consumer(int indent, struct g_consumer *cp) { if (indent == 0) { gprintln("consumer: %p", cp); gprintln(" class: %s (%p)", cp->geom->class->name, cp->geom->class); gprintln(" geom: %s (%p)", cp->geom->name, cp->geom); if (cp->provider == NULL) gprintln(" provider: none"); else { gprintln(" provider: %s (%p)", cp->provider->name, cp->provider); } gprintln(" access: r%dw%de%d", cp->acr, cp->acw, cp->ace); gprintln(" flags: 0x%04x", cp->flags); gprintln(" nstart: %u", cp->nstart); gprintln(" nend: %u", cp->nend); } else { gprintf("consumer: %p (%s), access=r%dw%de%d", cp, cp->provider != NULL ? 
cp->provider->name : "none", cp->acr, cp->acw, cp->ace); if (cp->flags) db_printf(", flags=0x%04x", cp->flags); db_printf("\n"); } } static void db_show_geom_provider(int indent, struct g_provider *pp) { struct g_consumer *cp; char flags[64]; if (indent == 0) { gprintln("provider: %s (%p)", pp->name, pp); gprintln(" class: %s (%p)", pp->geom->class->name, pp->geom->class); gprintln(" geom: %s (%p)", pp->geom->name, pp->geom); gprintln(" mediasize: %jd", (intmax_t)pp->mediasize); gprintln(" sectorsize: %u", pp->sectorsize); gprintln(" stripesize: %u", pp->stripesize); gprintln(" stripeoffset: %u", pp->stripeoffset); gprintln(" access: r%dw%de%d", pp->acr, pp->acw, pp->ace); gprintln(" flags: %s (0x%04x)", provider_flags_to_string(pp, flags, sizeof(flags)), pp->flags); gprintln(" error: %d", pp->error); gprintln(" nstart: %u", pp->nstart); gprintln(" nend: %u", pp->nend); if (LIST_EMPTY(&pp->consumers)) gprintln(" consumers: none"); } else { gprintf("provider: %s (%p), access=r%dw%de%d", pp->name, pp, pp->acr, pp->acw, pp->ace); if (pp->flags != 0) { db_printf(", flags=%s (0x%04x)", provider_flags_to_string(pp, flags, sizeof(flags)), pp->flags); } db_printf("\n"); } if (!LIST_EMPTY(&pp->consumers)) { LIST_FOREACH(cp, &pp->consumers, consumers) { db_show_geom_consumer(indent + 2, cp); if (db_pager_quit) break; } } } static void db_show_geom_geom(int indent, struct g_geom *gp) { struct g_provider *pp; struct g_consumer *cp; char flags[64]; if (indent == 0) { gprintln("geom: %s (%p)", gp->name, gp); gprintln(" class: %s (%p)", gp->class->name, gp->class); gprintln(" flags: %s (0x%04x)", geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); gprintln(" rank: %d", gp->rank); if (LIST_EMPTY(&gp->provider)) gprintln(" providers: none"); if (LIST_EMPTY(&gp->consumer)) gprintln(" consumers: none"); } else { gprintf("geom: %s (%p), rank=%d", gp->name, gp, gp->rank); if (gp->flags != 0) { db_printf(", flags=%s (0x%04x)", geom_flags_to_string(gp, flags, sizeof(flags)), gp->flags); } db_printf("\n"); } if (!LIST_EMPTY(&gp->provider)) { LIST_FOREACH(pp, &gp->provider, provider) { db_show_geom_provider(indent + 2, pp); if (db_pager_quit) break; } } if (!LIST_EMPTY(&gp->consumer)) { LIST_FOREACH(cp, &gp->consumer, consumer) { db_show_geom_consumer(indent + 2, cp); if (db_pager_quit) break; } } } static void db_show_geom_class(struct g_class *mp) { struct g_geom *gp; db_printf("class: %s (%p)\n", mp->name, mp); LIST_FOREACH(gp, &mp->geom, geom) { db_show_geom_geom(2, gp); if (db_pager_quit) break; } } /* * Print the GEOM topology or the given object. */ DB_SHOW_COMMAND(geom, db_show_geom) { struct g_class *mp; if (!have_addr) { /* No address given, print the entire topology. 
*/ LIST_FOREACH(mp, &g_classes, class) { db_show_geom_class(mp); db_printf("\n"); if (db_pager_quit) break; } } else { switch (g_valid_obj((void *)addr)) { case 1: db_show_geom_class((struct g_class *)addr); break; case 2: db_show_geom_geom(0, (struct g_geom *)addr); break; case 3: db_show_geom_consumer(0, (struct g_consumer *)addr); break; case 4: db_show_geom_provider(0, (struct g_provider *)addr); break; default: db_printf("Not a GEOM object.\n"); break; } } } static void db_print_bio_cmd(struct bio *bp) { db_printf(" cmd: "); switch (bp->bio_cmd) { case BIO_READ: db_printf("BIO_READ"); break; case BIO_WRITE: db_printf("BIO_WRITE"); break; case BIO_DELETE: db_printf("BIO_DELETE"); break; case BIO_GETATTR: db_printf("BIO_GETATTR"); break; case BIO_FLUSH: db_printf("BIO_FLUSH"); break; case BIO_CMD0: db_printf("BIO_CMD0"); break; case BIO_CMD1: db_printf("BIO_CMD1"); break; case BIO_CMD2: db_printf("BIO_CMD2"); break; + case BIO_ZONE: db_printf("BIO_ZONE"); break; default: db_printf("UNKNOWN"); break; } db_printf("\n"); } static void db_print_bio_flags(struct bio *bp) { int comma; comma = 0; db_printf(" flags: "); if (bp->bio_flags & BIO_ERROR) { db_printf("BIO_ERROR"); comma = 1; } if (bp->bio_flags & BIO_DONE) { db_printf("%sBIO_DONE", (comma ? ", " : "")); comma = 1; } if (bp->bio_flags & BIO_ONQUEUE) db_printf("%sBIO_ONQUEUE", (comma ? ", " : "")); db_printf("\n"); } /* * Print useful information in a BIO */ DB_SHOW_COMMAND(bio, db_show_bio) { struct bio *bp; if (have_addr) { bp = (struct bio *)addr; db_printf("BIO %p\n", bp); db_print_bio_cmd(bp); db_print_bio_flags(bp); db_printf(" cflags: 0x%hx\n", bp->bio_cflags); db_printf(" pflags: 0x%hx\n", bp->bio_pflags); db_printf(" offset: %jd\n", (intmax_t)bp->bio_offset); db_printf(" length: %jd\n", (intmax_t)bp->bio_length); db_printf(" bcount: %ld\n", bp->bio_bcount); db_printf(" resid: %ld\n", bp->bio_resid); db_printf(" completed: %jd\n", (intmax_t)bp->bio_completed); db_printf(" children: %u\n", bp->bio_children); db_printf(" inbed: %u\n", bp->bio_inbed); db_printf(" error: %d\n", bp->bio_error); db_printf(" parent: %p\n", bp->bio_parent); db_printf(" driver1: %p\n", bp->bio_driver1); db_printf(" driver2: %p\n", bp->bio_driver2); db_printf(" caller1: %p\n", bp->bio_caller1); db_printf(" caller2: %p\n", bp->bio_caller2); db_printf(" bio_from: %p\n", bp->bio_from); db_printf(" bio_to: %p\n", bp->bio_to); } } #undef gprintf #undef gprintln #undef ADDFLAG #endif /* DDB */ Index: head/sys/kern/subr_devstat.c =================================================================== --- head/sys/kern/subr_devstat.c (revision 300206) +++ head/sys/kern/subr_devstat.c (revision 300207) @@ -1,578 +1,580 @@ /*- * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include SDT_PROVIDER_DEFINE(io); SDT_PROBE_DEFINE2(io, , , start, "struct bio *", "struct devstat *"); SDT_PROBE_DEFINE2(io, , , done, "struct bio *", "struct devstat *"); SDT_PROBE_DEFINE2(io, , , wait__start, "struct bio *", "struct devstat *"); SDT_PROBE_DEFINE2(io, , , wait__done, "struct bio *", "struct devstat *"); #define DTRACE_DEVSTAT_START() SDT_PROBE2(io, , , start, NULL, ds) #define DTRACE_DEVSTAT_BIO_START() SDT_PROBE2(io, , , start, bp, ds) #define DTRACE_DEVSTAT_DONE() SDT_PROBE2(io, , , done, NULL, ds) #define DTRACE_DEVSTAT_BIO_DONE() SDT_PROBE2(io, , , done, bp, ds) #define DTRACE_DEVSTAT_WAIT_START() SDT_PROBE2(io, , , wait__start, NULL, ds) #define DTRACE_DEVSTAT_WAIT_DONE() SDT_PROBE2(io, , , wait__done, NULL, ds) static int devstat_num_devs; static long devstat_generation = 1; static int devstat_version = DEVSTAT_VERSION; static int devstat_current_devnumber; static struct mtx devstat_mutex; MTX_SYSINIT(devstat_mutex, &devstat_mutex, "devstat", MTX_DEF); static struct devstatlist device_statq = STAILQ_HEAD_INITIALIZER(device_statq); static struct devstat *devstat_alloc(void); static void devstat_free(struct devstat *); static void devstat_add_entry(struct devstat *ds, const void *dev_name, int unit_number, uint32_t block_size, devstat_support_flags flags, devstat_type_flags device_type, devstat_priority priority); /* * Allocate a devstat and initialize it */ struct devstat * devstat_new_entry(const void *dev_name, int unit_number, uint32_t block_size, devstat_support_flags flags, devstat_type_flags device_type, devstat_priority priority) { struct devstat *ds; mtx_assert(&devstat_mutex, MA_NOTOWNED); ds = devstat_alloc(); mtx_lock(&devstat_mutex); if (unit_number == -1) { ds->unit_number = unit_number; ds->id = dev_name; binuptime(&ds->creation_time); devstat_generation++; } else { devstat_add_entry(ds, dev_name, unit_number, block_size, flags, device_type, priority); } mtx_unlock(&devstat_mutex); return (ds); } /* * Take a malloced and zeroed devstat structure given to us, fill it in * and add it to the queue of devices. */ static void devstat_add_entry(struct devstat *ds, const void *dev_name, int unit_number, uint32_t block_size, devstat_support_flags flags, devstat_type_flags device_type, devstat_priority priority) { struct devstatlist *devstat_head; struct devstat *ds_tmp; mtx_assert(&devstat_mutex, MA_OWNED); devstat_num_devs++; devstat_head = &device_statq; /* * Priority sort. Each driver passes in its priority when it adds * its devstat entry. Drivers are sorted first by priority, and * then by probe order. 
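For orientation, this is how a block driver typically obtains and releases its devstat entry (softc and names hypothetical). g_new_providerf() in geom_subr.c above passes unit number -1, which takes the early branch in devstat_new_entry() and skips the priority-sorted insertion described below.

/* Sketch: driver-side devstat registration (hypothetical names). */
static void
example_attach_stats(struct example_softc *sc, int unit)
{

	sc->stats = devstat_new_entry("example", unit, DEV_BSIZE,
	    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
	    DEVSTAT_PRIORITY_DISK);
}

static void
example_detach_stats(struct example_softc *sc)
{

	devstat_remove_entry(sc->stats);
	sc->stats = NULL;
}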
* * For the first device, we just insert it, since the priority * doesn't really matter yet. Subsequent devices are inserted into * the list using the order outlined above. */ if (devstat_num_devs == 1) STAILQ_INSERT_TAIL(devstat_head, ds, dev_links); else { STAILQ_FOREACH(ds_tmp, devstat_head, dev_links) { struct devstat *ds_next; ds_next = STAILQ_NEXT(ds_tmp, dev_links); /* * If we find a break between higher and lower * priority items, and if this item fits in the * break, insert it. This also applies if the * "lower priority item" is the end of the list. */ if ((priority <= ds_tmp->priority) && ((ds_next == NULL) || (priority > ds_next->priority))) { STAILQ_INSERT_AFTER(devstat_head, ds_tmp, ds, dev_links); break; } else if (priority > ds_tmp->priority) { /* * If this is the case, we should be able * to insert ourselves at the head of the * list. If we can't, something is wrong. */ if (ds_tmp == STAILQ_FIRST(devstat_head)) { STAILQ_INSERT_HEAD(devstat_head, ds, dev_links); break; } else { STAILQ_INSERT_TAIL(devstat_head, ds, dev_links); printf("devstat_add_entry: HELP! " "sorting problem detected " "for name %p unit %d\n", dev_name, unit_number); break; } } } } ds->device_number = devstat_current_devnumber++; ds->unit_number = unit_number; strlcpy(ds->device_name, dev_name, DEVSTAT_NAME_LEN); ds->block_size = block_size; ds->flags = flags; ds->device_type = device_type; ds->priority = priority; binuptime(&ds->creation_time); devstat_generation++; } /* * Remove a devstat structure from the list of devices. */ void devstat_remove_entry(struct devstat *ds) { struct devstatlist *devstat_head; mtx_assert(&devstat_mutex, MA_NOTOWNED); if (ds == NULL) return; mtx_lock(&devstat_mutex); devstat_head = &device_statq; /* Remove this entry from the devstat queue */ atomic_add_acq_int(&ds->sequence1, 1); if (ds->unit_number != -1) { devstat_num_devs--; STAILQ_REMOVE(devstat_head, ds, devstat, dev_links); } devstat_free(ds); devstat_generation++; mtx_unlock(&devstat_mutex); } /* * Record a transaction start. * * See comments for devstat_end_transaction(). Ordering is very important * here. */ void devstat_start_transaction(struct devstat *ds, struct bintime *now) { mtx_assert(&devstat_mutex, MA_NOTOWNED); /* sanity check */ if (ds == NULL) return; atomic_add_acq_int(&ds->sequence1, 1); /* * We only want to set the start time when we are going from idle * to busy. The start time is really the start of the latest busy * period. */ if (ds->start_count == ds->end_count) { if (now != NULL) ds->busy_from = *now; else binuptime(&ds->busy_from); } ds->start_count++; atomic_add_rel_int(&ds->sequence0, 1); DTRACE_DEVSTAT_START(); } void devstat_start_transaction_bio(struct devstat *ds, struct bio *bp) { mtx_assert(&devstat_mutex, MA_NOTOWNED); /* sanity check */ if (ds == NULL) return; binuptime(&bp->bio_t0); devstat_start_transaction(ds, &bp->bio_t0); DTRACE_DEVSTAT_BIO_START(); } /* * Record the ending of a transaction, and increment the various counters. * * Ordering in this function, and in devstat_start_transaction() is VERY * important. The idea here is to run without locks, so we are very * careful to only modify some fields on the way "down" (i.e. at * transaction start) and some fields on the way "up" (i.e. at transaction * completion). One exception is busy_from, which we only modify in * devstat_start_transaction() when there are no outstanding transactions, * and thus it can't be modified in devstat_end_transaction() * simultaneously.
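The start/end pair is meant to bracket every bio through the driver's I/O path; a hedged sketch (the submission and completion routines are hypothetical):

/* Sketch: bracketing a bio with the lock-free devstat counters. */
static void
example_strategy(struct example_softc *sc, struct bio *bp)
{

	devstat_start_transaction_bio(sc->stats, bp); /* stamps bp->bio_t0 */
	example_hw_submit(sc, bp);	/* hypothetical hardware submission */
}

static void
example_bio_done(struct example_softc *sc, struct bio *bp)
{

	devstat_end_transaction_bio(sc->stats, bp); /* measures from bio_t0 */
	biodone(bp);
}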
* * The sequence0 and sequence1 fields are provided to enable an application * spying on the structures with mmap(2) to tell when a structure is in a * consistent state or not. * * For this to work 100% reliably, it is important that the two fields * are at opposite ends of the structure and that they are incremented * in the opposite order of how a memcpy(3) in userland would copy them. * We assume that the copying happens front to back, but there is actually * no way short of writing your own memcpy(3) replacement to guarantee * this will be the case. * * In addition to this, being a kind of lock, they must be updated with * atomic instructions using appropriate memory barriers. */ void devstat_end_transaction(struct devstat *ds, uint32_t bytes, devstat_tag_type tag_type, devstat_trans_flags flags, struct bintime *now, struct bintime *then) { struct bintime dt, lnow; /* sanity check */ if (ds == NULL) return; if (now == NULL) { now = &lnow; binuptime(now); } atomic_add_acq_int(&ds->sequence1, 1); /* Update byte and operations counts */ ds->bytes[flags] += bytes; ds->operations[flags]++; /* * Keep a count of the various tag types sent. */ if ((ds->flags & DEVSTAT_NO_ORDERED_TAGS) == 0 && tag_type != DEVSTAT_TAG_NONE) ds->tag_types[tag_type]++; if (then != NULL) { /* Update duration of operations */ dt = *now; bintime_sub(&dt, then); bintime_add(&ds->duration[flags], &dt); } /* Accumulate busy time */ dt = *now; bintime_sub(&dt, &ds->busy_from); bintime_add(&ds->busy_time, &dt); ds->busy_from = *now; ds->end_count++; atomic_add_rel_int(&ds->sequence0, 1); DTRACE_DEVSTAT_DONE(); } void devstat_end_transaction_bio(struct devstat *ds, struct bio *bp) { devstat_end_transaction_bio_bt(ds, bp, NULL); } void devstat_end_transaction_bio_bt(struct devstat *ds, struct bio *bp, struct bintime *now) { devstat_trans_flags flg; /* sanity check */ if (ds == NULL) return; if (bp->bio_cmd == BIO_DELETE) flg = DEVSTAT_FREE; - else if (bp->bio_cmd == BIO_READ) + else if ((bp->bio_cmd == BIO_READ) + || ((bp->bio_cmd == BIO_ZONE) + && (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES))) flg = DEVSTAT_READ; else if (bp->bio_cmd == BIO_WRITE) flg = DEVSTAT_WRITE; else flg = DEVSTAT_NO_DATA; devstat_end_transaction(ds, bp->bio_bcount - bp->bio_resid, DEVSTAT_TAG_SIMPLE, flg, now, &bp->bio_t0); DTRACE_DEVSTAT_BIO_DONE(); } /* * This is the sysctl handler for the devstat package. The data pushed out * on the kern.devstat.all sysctl variable consists of the current devstat * generation number, and then an array of devstat structures, one for each * device in the system. * * This is more cryptic than obvious, but basically we neither can nor * want to hold the devstat_mutex for any amount of time, so we grab it * only when we need to and keep an eye on devstat_generation all the time. */ static int sysctl_devstat(SYSCTL_HANDLER_ARGS) { int error; long mygen; struct devstat *nds; mtx_assert(&devstat_mutex, MA_NOTOWNED); /* * XXX devstat_generation should really be "volatile" but that * XXX freaks out the sysctl macro below. The places where we * XXX change it and inspect it are bracketed in the mutex which * XXX guarantees us proper write barriers. I don't believe the * XXX compiler is allowed to optimize mygen away across calls * XXX to other functions, so the following is believed to be safe.
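The sequence0/sequence1 protocol just described implies a retry loop on the reader's side. A hedged userland sketch, assuming (as the comment does) that memcpy(3) copies low addresses first:

#include <string.h>

/* Sketch: consistent snapshot of one mmap(2)'ed devstat structure. */
static void
devstat_snapshot(const struct devstat *live, struct devstat *snap)
{

	do {
		memcpy(snap, live, sizeof(*snap));
		/* A torn copy shows sequence0 != sequence1; retry. */
	} while (snap->sequence0 != snap->sequence1);
}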
*/ mygen = devstat_generation; error = SYSCTL_OUT(req, &mygen, sizeof(mygen)); if (devstat_num_devs == 0) return(0); if (error != 0) return (error); mtx_lock(&devstat_mutex); nds = STAILQ_FIRST(&device_statq); if (mygen != devstat_generation) error = EBUSY; mtx_unlock(&devstat_mutex); if (error != 0) return (error); for (;nds != NULL;) { error = SYSCTL_OUT(req, nds, sizeof(struct devstat)); if (error != 0) return (error); mtx_lock(&devstat_mutex); if (mygen != devstat_generation) error = EBUSY; else nds = STAILQ_NEXT(nds, dev_links); mtx_unlock(&devstat_mutex); if (error != 0) return (error); } return(error); } /* * Sysctl entries for devstat. The first one is a node that all the rest * hang off of. */ static SYSCTL_NODE(_kern, OID_AUTO, devstat, CTLFLAG_RD, NULL, "Device Statistics"); SYSCTL_PROC(_kern_devstat, OID_AUTO, all, CTLFLAG_RD|CTLTYPE_OPAQUE, NULL, 0, sysctl_devstat, "S,devstat", "All devices in the devstat list"); /* * Export the number of devices in the system so that userland utilities * can determine how much memory to allocate to hold all the devices. */ SYSCTL_INT(_kern_devstat, OID_AUTO, numdevs, CTLFLAG_RD, &devstat_num_devs, 0, "Number of devices in the devstat list"); SYSCTL_LONG(_kern_devstat, OID_AUTO, generation, CTLFLAG_RD, &devstat_generation, 0, "Devstat list generation"); SYSCTL_INT(_kern_devstat, OID_AUTO, version, CTLFLAG_RD, &devstat_version, 0, "Devstat list version number"); /* * Allocator for struct devstat structures. We sub-allocate these from pages * which we get from malloc. These pages are exported for mmap(2)'ing through * a miniature device driver. */ #define statsperpage (PAGE_SIZE / sizeof(struct devstat)) static d_mmap_t devstat_mmap; static struct cdevsw devstat_cdevsw = { .d_version = D_VERSION, .d_mmap = devstat_mmap, .d_name = "devstat", }; struct statspage { TAILQ_ENTRY(statspage) list; struct devstat *stat; u_int nfree; }; static TAILQ_HEAD(, statspage) pagelist = TAILQ_HEAD_INITIALIZER(pagelist); static MALLOC_DEFINE(M_DEVSTAT, "devstat", "Device statistics"); static int devstat_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct statspage *spp; if (nprot != VM_PROT_READ) return (-1); mtx_lock(&devstat_mutex); TAILQ_FOREACH(spp, &pagelist, list) { if (offset == 0) { *paddr = vtophys(spp->stat); mtx_unlock(&devstat_mutex); return (0); } offset -= PAGE_SIZE; } mtx_unlock(&devstat_mutex); return (-1); } static struct devstat * devstat_alloc(void) { struct devstat *dsp; struct statspage *spp, *spp2; u_int u; static int once; mtx_assert(&devstat_mutex, MA_NOTOWNED); if (!once) { make_dev_credf(MAKEDEV_ETERNAL | MAKEDEV_CHECKNAME, &devstat_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0444, DEVSTAT_DEVICE_NAME); once = 1; } spp2 = NULL; mtx_lock(&devstat_mutex); for (;;) { TAILQ_FOREACH(spp, &pagelist, list) { if (spp->nfree > 0) break; } if (spp != NULL) break; mtx_unlock(&devstat_mutex); spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK); spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK); spp2->nfree = statsperpage; /* * If free statspages were added while the lock was released * just reuse them. */ mtx_lock(&devstat_mutex); TAILQ_FOREACH(spp, &pagelist, list) if (spp->nfree > 0) break; if (spp == NULL) { spp = spp2; /* * It would make more sense to add the new page at the * head but the order on the list determines the * sequence of the mapping so we can't do that.
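On the consuming side, the pages handed out by devstat_mmap() are read-only and mapped in list order; a hedged userland sketch of mapping one page (in practice the devstat(3) library wraps this, and the device path follows DEVSTAT_DEVICE_NAME):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

/* Sketch: map one page of exported statistics from /dev/devstat. */
static void *
map_devstat_page(int pageno)
{
	void *p;
	int fd;

	fd = open("/dev/devstat", O_RDONLY);
	if (fd == -1)
		return (NULL);
	/* devstat_mmap() above rejects anything but PROT_READ. */
	p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED, fd,
	    (off_t)pageno * getpagesize());
	close(fd);
	return (p == MAP_FAILED ? NULL : p);
}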
*/ TAILQ_INSERT_TAIL(&pagelist, spp, list); } else break; } dsp = spp->stat; for (u = 0; u < statsperpage; u++) { if (dsp->allocated == 0) break; dsp++; } spp->nfree--; dsp->allocated = 1; mtx_unlock(&devstat_mutex); if (spp2 != NULL && spp2 != spp) { free(spp2->stat, M_DEVSTAT); free(spp2, M_DEVSTAT); } return (dsp); } static void devstat_free(struct devstat *dsp) { struct statspage *spp; mtx_assert(&devstat_mutex, MA_OWNED); bzero(dsp, sizeof *dsp); TAILQ_FOREACH(spp, &pagelist, list) { if (dsp >= spp->stat && dsp < (spp->stat + statsperpage)) { spp->nfree++; return; } } } SYSCTL_INT(_debug_sizeof, OID_AUTO, devstat, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, sizeof(struct devstat), "sizeof(struct devstat)"); Index: head/sys/sys/ata.h =================================================================== --- head/sys/sys/ata.h (revision 300206) +++ head/sys/sys/ata.h (revision 300207) @@ -1,645 +1,1015 @@ /*- * Copyright (c) 2000 - 2008 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SYS_ATA_H_ #define _SYS_ATA_H_ #include /* ATA/ATAPI device parameters */ struct ata_params { /*000*/ u_int16_t config; /* configuration info */ #define ATA_PROTO_MASK 0x8003 #define ATA_PROTO_ATAPI 0x8000 #define ATA_PROTO_ATAPI_12 0x8000 #define ATA_PROTO_ATAPI_16 0x8001 #define ATA_PROTO_CFA 0x848a #define ATA_ATAPI_TYPE_MASK 0x1f00 #define ATA_ATAPI_TYPE_DIRECT 0x0000 /* disk/floppy */ #define ATA_ATAPI_TYPE_TAPE 0x0100 /* streaming tape */ #define ATA_ATAPI_TYPE_CDROM 0x0500 /* CD-ROM device */ #define ATA_ATAPI_TYPE_OPTICAL 0x0700 /* optical disk */ #define ATA_DRQ_MASK 0x0060 #define ATA_DRQ_SLOW 0x0000 /* cpu 3 ms delay */ #define ATA_DRQ_INTR 0x0020 /* interrupt 10 ms delay */ #define ATA_DRQ_FAST 0x0040 /* accel 50 us delay */ #define ATA_RESP_INCOMPLETE 0x0004 /*001*/ u_int16_t cylinders; /* # of cylinders */ /*002*/ u_int16_t specconf; /* specific configuration */ /*003*/ u_int16_t heads; /* # heads */ u_int16_t obsolete4; u_int16_t obsolete5; /*006*/ u_int16_t sectors; /* # sectors/track */ /*007*/ u_int16_t vendor7[3]; /*010*/ u_int8_t serial[20]; /* serial number */ /*020*/ u_int16_t retired20; u_int16_t retired21; u_int16_t obsolete22; /*023*/ u_int8_t revision[8]; /* firmware revision */ /*027*/ u_int8_t model[40]; /* model name */ /*047*/ u_int16_t sectors_intr; /* sectors per interrupt */ /*048*/ u_int16_t usedmovsd; /* double word read/write? */ /*049*/ u_int16_t capabilities1; #define ATA_SUPPORT_DMA 0x0100 #define ATA_SUPPORT_LBA 0x0200 #define ATA_SUPPORT_IORDY 0x0400 #define ATA_SUPPORT_IORDYDIS 0x0800 #define ATA_SUPPORT_OVERLAP 0x4000 /*050*/ u_int16_t capabilities2; /*051*/ u_int16_t retired_piomode; /* PIO modes 0-2 */ #define ATA_RETIRED_PIO_MASK 0x0300 /*052*/ u_int16_t retired_dmamode; /* DMA modes */ #define ATA_RETIRED_DMA_MASK 0x0003 /*053*/ u_int16_t atavalid; /* fields valid */ #define ATA_FLAG_54_58 0x0001 /* words 54-58 valid */ #define ATA_FLAG_64_70 0x0002 /* words 64-70 valid */ #define ATA_FLAG_88 0x0004 /* word 88 valid */ /*054*/ u_int16_t current_cylinders; /*055*/ u_int16_t current_heads; /*056*/ u_int16_t current_sectors; /*057*/ u_int16_t current_size_1; /*058*/ u_int16_t current_size_2; /*059*/ u_int16_t multi; #define ATA_MULTI_VALID 0x0100 /*060*/ u_int16_t lba_size_1; u_int16_t lba_size_2; u_int16_t obsolete62; /*063*/ u_int16_t mwdmamodes; /* multiword DMA modes */ /*064*/ u_int16_t apiomodes; /* advanced PIO modes */ /*065*/ u_int16_t mwdmamin; /* min. M/W DMA time/word ns */ /*066*/ u_int16_t mwdmarec; /* rec. M/W DMA time ns */ /*067*/ u_int16_t pioblind; /* min. PIO cycle w/o flow */ /*068*/ u_int16_t pioiordy; /* min. 
PIO cycle IORDY flow */ /*069*/ u_int16_t support3; #define ATA_SUPPORT_RZAT 0x0020 #define ATA_SUPPORT_DRAT 0x4000 +#define ATA_SUPPORT_ZONE_MASK 0x0003 +#define ATA_SUPPORT_ZONE_NR 0x0000 +#define ATA_SUPPORT_ZONE_HOST_AWARE 0x0001 +#define ATA_SUPPORT_ZONE_DEV_MANAGED 0x0002 u_int16_t reserved70; /*071*/ u_int16_t rlsovlap; /* rel time (us) for overlap */ /*072*/ u_int16_t rlsservice; /* rel time (us) for service */ u_int16_t reserved73; u_int16_t reserved74; /*075*/ u_int16_t queue; #define ATA_QUEUE_LEN(x) ((x) & 0x001f) /*76*/ u_int16_t satacapabilities; #define ATA_SATA_GEN1 0x0002 #define ATA_SATA_GEN2 0x0004 #define ATA_SATA_GEN3 0x0008 #define ATA_SUPPORT_NCQ 0x0100 #define ATA_SUPPORT_IFPWRMNGTRCV 0x0200 #define ATA_SUPPORT_PHYEVENTCNT 0x0400 #define ATA_SUPPORT_NCQ_UNLOAD 0x0800 #define ATA_SUPPORT_NCQ_PRIO 0x1000 #define ATA_SUPPORT_HAPST 0x2000 #define ATA_SUPPORT_DAPST 0x4000 #define ATA_SUPPORT_READLOGDMAEXT 0x8000 /*77*/ u_int16_t satacapabilities2; #define ATA_SATA_CURR_GEN_MASK 0x0006 #define ATA_SUPPORT_NCQ_STREAM 0x0010 #define ATA_SUPPORT_NCQ_QMANAGEMENT 0x0020 #define ATA_SUPPORT_RCVSND_FPDMA_QUEUED 0x0040 /*78*/ u_int16_t satasupport; #define ATA_SUPPORT_NONZERO 0x0002 #define ATA_SUPPORT_AUTOACTIVATE 0x0004 #define ATA_SUPPORT_IFPWRMNGT 0x0008 #define ATA_SUPPORT_INORDERDATA 0x0010 #define ATA_SUPPORT_ASYNCNOTIF 0x0020 #define ATA_SUPPORT_SOFTSETPRESERVE 0x0040 /*79*/ u_int16_t sataenabled; #define ATA_ENABLED_DAPST 0x0080 /*080*/ u_int16_t version_major; /*081*/ u_int16_t version_minor; struct { /*082/085*/ u_int16_t command1; #define ATA_SUPPORT_SMART 0x0001 #define ATA_SUPPORT_SECURITY 0x0002 #define ATA_SUPPORT_REMOVABLE 0x0004 #define ATA_SUPPORT_POWERMGT 0x0008 #define ATA_SUPPORT_PACKET 0x0010 #define ATA_SUPPORT_WRITECACHE 0x0020 #define ATA_SUPPORT_LOOKAHEAD 0x0040 #define ATA_SUPPORT_RELEASEIRQ 0x0080 #define ATA_SUPPORT_SERVICEIRQ 0x0100 #define ATA_SUPPORT_RESET 0x0200 #define ATA_SUPPORT_PROTECTED 0x0400 #define ATA_SUPPORT_WRITEBUFFER 0x1000 #define ATA_SUPPORT_READBUFFER 0x2000 #define ATA_SUPPORT_NOP 0x4000 /*083/086*/ u_int16_t command2; #define ATA_SUPPORT_MICROCODE 0x0001 #define ATA_SUPPORT_QUEUED 0x0002 #define ATA_SUPPORT_CFA 0x0004 #define ATA_SUPPORT_APM 0x0008 #define ATA_SUPPORT_NOTIFY 0x0010 #define ATA_SUPPORT_STANDBY 0x0020 #define ATA_SUPPORT_SPINUP 0x0040 #define ATA_SUPPORT_MAXSECURITY 0x0100 #define ATA_SUPPORT_AUTOACOUSTIC 0x0200 #define ATA_SUPPORT_ADDRESS48 0x0400 #define ATA_SUPPORT_OVERLAY 0x0800 #define ATA_SUPPORT_FLUSHCACHE 0x1000 #define ATA_SUPPORT_FLUSHCACHE48 0x2000 /*084/087*/ u_int16_t extension; #define ATA_SUPPORT_SMARTLOG 0x0001 #define ATA_SUPPORT_SMARTTEST 0x0002 #define ATA_SUPPORT_MEDIASN 0x0004 #define ATA_SUPPORT_MEDIAPASS 0x0008 #define ATA_SUPPORT_STREAMING 0x0010 #define ATA_SUPPORT_GENLOG 0x0020 #define ATA_SUPPORT_WRITEDMAFUAEXT 0x0040 #define ATA_SUPPORT_WRITEDMAQFUAEXT 0x0080 #define ATA_SUPPORT_64BITWWN 0x0100 #define ATA_SUPPORT_UNLOAD 0x2000 } __packed support, enabled; /*088*/ u_int16_t udmamodes; /* UltraDMA modes */ /*089*/ u_int16_t erase_time; /* time req'd in 2min units */ /*090*/ u_int16_t enhanced_erase_time; /* time req'd in 2min units */ /*091*/ u_int16_t apm_value; /*092*/ u_int16_t master_passwd_revision; /* password revision code */ /*093*/ u_int16_t hwres; #define ATA_CABLE_ID 0x2000 /*094*/ u_int16_t acoustic; #define ATA_ACOUSTIC_CURRENT(x) ((x) & 0x00ff) #define ATA_ACOUSTIC_VENDOR(x) (((x) & 0xff00) >> 8) /*095*/ u_int16_t stream_min_req_size; /*096*/ u_int16_t stream_transfer_time; 
/*097*/ u_int16_t stream_access_latency; /*098*/ u_int32_t stream_granularity; /*100*/ u_int16_t lba_size48_1; u_int16_t lba_size48_2; u_int16_t lba_size48_3; u_int16_t lba_size48_4; u_int16_t reserved104; /*105*/ u_int16_t max_dsm_blocks; /*106*/ u_int16_t pss; #define ATA_PSS_LSPPS 0x000F #define ATA_PSS_LSSABOVE512 0x1000 #define ATA_PSS_MULTLS 0x2000 #define ATA_PSS_VALID_MASK 0xC000 #define ATA_PSS_VALID_VALUE 0x4000 /*107*/ u_int16_t isd; /*108*/ u_int16_t wwn[4]; u_int16_t reserved112[5]; /*117*/ u_int16_t lss_1; /*118*/ u_int16_t lss_2; /*119*/ u_int16_t support2; #define ATA_SUPPORT_WRITEREADVERIFY 0x0002 #define ATA_SUPPORT_WRITEUNCORREXT 0x0004 #define ATA_SUPPORT_RWLOGDMAEXT 0x0008 #define ATA_SUPPORT_MICROCODE3 0x0010 #define ATA_SUPPORT_FREEFALL 0x0020 +#define ATA_SUPPORT_SENSE_REPORT 0x0040 +#define ATA_SUPPORT_EPC 0x0080 /*120*/ u_int16_t enabled2; +#define ATA_ENABLED_WRITEREADVERIFY 0x0002 +#define ATA_ENABLED_WRITEUNCORREXT 0x0004 +#define ATA_ENABLED_FREEFALL 0x0020 +#define ATA_ENABLED_SENSE_REPORT 0x0040 +#define ATA_ENABLED_EPC 0x0080 u_int16_t reserved121[6]; /*127*/ u_int16_t removable_status; /*128*/ u_int16_t security_status; #define ATA_SECURITY_LEVEL 0x0100 /* 0: high, 1: maximum */ #define ATA_SECURITY_ENH_SUPP 0x0020 /* enhanced erase supported */ #define ATA_SECURITY_COUNT_EXP 0x0010 /* count expired */ #define ATA_SECURITY_FROZEN 0x0008 /* security config is frozen */ #define ATA_SECURITY_LOCKED 0x0004 /* drive is locked */ #define ATA_SECURITY_ENABLED 0x0002 /* ATA Security is enabled */ #define ATA_SECURITY_SUPPORTED 0x0001 /* ATA Security is supported */ u_int16_t reserved129[31]; /*160*/ u_int16_t cfa_powermode1; u_int16_t reserved161; /*162*/ u_int16_t cfa_kms_support; /*163*/ u_int16_t cfa_trueide_modes; /*164*/ u_int16_t cfa_memory_modes; u_int16_t reserved165[4]; /*169*/ u_int16_t support_dsm; #define ATA_SUPPORT_DSM_TRIM 0x0001 u_int16_t reserved170[6]; /*176*/ u_int8_t media_serial[60]; /*206*/ u_int16_t sct; u_int16_t reserved206[2]; /*209*/ u_int16_t lsalign; /*210*/ u_int16_t wrv_sectors_m3_1; u_int16_t wrv_sectors_m3_2; /*212*/ u_int16_t wrv_sectors_m2_1; u_int16_t wrv_sectors_m2_2; /*214*/ u_int16_t nv_cache_caps; /*215*/ u_int16_t nv_cache_size_1; u_int16_t nv_cache_size_2; /*217*/ u_int16_t media_rotation_rate; #define ATA_RATE_NOT_REPORTED 0x0000 #define ATA_RATE_NON_ROTATING 0x0001 u_int16_t reserved218; /*219*/ u_int16_t nv_cache_opt; /*220*/ u_int16_t wrv_mode; u_int16_t reserved221; /*222*/ u_int16_t transport_major; /*223*/ u_int16_t transport_minor; u_int16_t reserved224[31]; /*255*/ u_int16_t integrity; } __packed; /* ATA Dataset Management */ #define ATA_DSM_BLK_SIZE 512 #define ATA_DSM_BLK_RANGES 64 #define ATA_DSM_RANGE_SIZE 8 #define ATA_DSM_RANGE_MAX 65535 /* * ATA Device Register * * bit 7 Obsolete (was 1 in early ATA specs) * bit 6 Sets LBA/CHS mode. 1=LBA, 0=CHS * bit 5 Obsolete (was 1 in early ATA specs) * bit 4 1 = Slave Drive, 0 = Master Drive * bit 3-0 In LBA mode, 27-24 of address. 
In CHS mode, head number */ #define ATA_DEV_MASTER 0x00 #define ATA_DEV_SLAVE 0x10 #define ATA_DEV_LBA 0x40 /* ATA limits */ #define ATA_MAX_28BIT_LBA 268435455UL /* ATA Status Register */ -#define ATA_STATUS_ERROR 0x01 -#define ATA_STATUS_DEVICE_FAULT 0x20 +#define ATA_STATUS_ERROR 0x01 +#define ATA_STATUS_SENSE_AVAIL 0x02 +#define ATA_STATUS_ALIGN_ERR 0x04 +#define ATA_STATUS_DATA_REQ 0x08 +#define ATA_STATUS_DEF_WRITE_ERR 0x10 +#define ATA_STATUS_DEVICE_FAULT 0x20 +#define ATA_STATUS_DEVICE_READY 0x40 +#define ATA_STATUS_BUSY 0x80 /* ATA Error Register */ #define ATA_ERROR_ABORT 0x04 #define ATA_ERROR_ID_NOT_FOUND 0x10 /* ATA HPA Features */ #define ATA_HPA_FEAT_MAX_ADDR 0x00 #define ATA_HPA_FEAT_SET_PWD 0x01 #define ATA_HPA_FEAT_LOCK 0x02 #define ATA_HPA_FEAT_UNLOCK 0x03 #define ATA_HPA_FEAT_FREEZE 0x04 /* ATA transfer modes */ #define ATA_MODE_MASK 0x0f #define ATA_DMA_MASK 0xf0 #define ATA_PIO 0x00 #define ATA_PIO0 0x08 #define ATA_PIO1 0x09 #define ATA_PIO2 0x0a #define ATA_PIO3 0x0b #define ATA_PIO4 0x0c #define ATA_PIO_MAX 0x0f #define ATA_DMA 0x10 #define ATA_WDMA0 0x20 #define ATA_WDMA1 0x21 #define ATA_WDMA2 0x22 #define ATA_UDMA0 0x40 #define ATA_UDMA1 0x41 #define ATA_UDMA2 0x42 #define ATA_UDMA3 0x43 #define ATA_UDMA4 0x44 #define ATA_UDMA5 0x45 #define ATA_UDMA6 0x46 #define ATA_SA150 0x47 #define ATA_SA300 0x48 #define ATA_SA600 0x49 #define ATA_DMA_MAX 0x4f /* ATA commands */ #define ATA_NOP 0x00 /* NOP */ #define ATA_NF_FLUSHQUEUE 0x00 /* flush queued cmd's */ #define ATA_NF_AUTOPOLL 0x01 /* start autopoll function */ #define ATA_DATA_SET_MANAGEMENT 0x06 #define ATA_DSM_TRIM 0x01 #define ATA_DEVICE_RESET 0x08 /* reset device */ #define ATA_READ 0x20 /* read */ #define ATA_READ48 0x24 /* read 48bit LBA */ #define ATA_READ_DMA48 0x25 /* read DMA 48bit LBA */ #define ATA_READ_DMA_QUEUED48 0x26 /* read DMA QUEUED 48bit LBA */ #define ATA_READ_NATIVE_MAX_ADDRESS48 0x27 /* read native max addr 48bit */ #define ATA_READ_MUL48 0x29 /* read multi 48bit LBA */ #define ATA_READ_STREAM_DMA48 0x2a /* read DMA stream 48bit LBA */ #define ATA_READ_LOG_EXT 0x2f /* read log ext - PIO Data-In */ #define ATA_READ_STREAM48 0x2b /* read stream 48bit LBA */ #define ATA_WRITE 0x30 /* write */ #define ATA_WRITE48 0x34 /* write 48bit LBA */ #define ATA_WRITE_DMA48 0x35 /* write DMA 48bit LBA */ #define ATA_WRITE_DMA_QUEUED48 0x36 /* write DMA QUEUED 48bit LBA*/ #define ATA_SET_MAX_ADDRESS48 0x37 /* set max address 48bit */ #define ATA_WRITE_MUL48 0x39 /* write multi 48bit LBA */ #define ATA_WRITE_STREAM_DMA48 0x3a #define ATA_WRITE_STREAM48 0x3b #define ATA_WRITE_DMA_FUA48 0x3d #define ATA_WRITE_DMA_QUEUED_FUA48 0x3e #define ATA_WRITE_LOG_EXT 0x3f #define ATA_READ_VERIFY 0x40 #define ATA_READ_VERIFY48 0x42 #define ATA_WRITE_UNCORRECTABLE48 0x45 /* write uncorrectable 48bit LBA */ #define ATA_WU_PSEUDO 0x55 /* pseudo-uncorrectable error */ #define ATA_WU_FLAGGED 0xaa /* flagged-uncorrectable error */ #define ATA_READ_LOG_DMA_EXT 0x47 /* read log DMA ext - PIO Data-In */ +#define ATA_ZAC_MANAGEMENT_IN 0x4a /* ZAC management in */ +#define ATA_ZM_REPORT_ZONES 0x00 /* report zones */ #define ATA_READ_FPDMA_QUEUED 0x60 /* read DMA NCQ */ #define ATA_WRITE_FPDMA_QUEUED 0x61 /* write DMA NCQ */ #define ATA_NCQ_NON_DATA 0x63 /* NCQ non-data command */ +#define ATA_ABORT_NCQ_QUEUE 0x00 /* abort NCQ queue */ +#define ATA_DEADLINE_HANDLING 0x01 /* deadline handling */ +#define ATA_SET_FEATURES 0x05 /* set features */ +#define ATA_ZERO_EXT 0x06 /* zero ext */ +#define ATA_NCQ_ZAC_MGMT_OUT 0x07 /* NCQ ZAC 
mgmt out no data */ #define ATA_SEND_FPDMA_QUEUED 0x64 /* send DMA NCQ */ #define ATA_SFPDMA_DSM 0x00 /* Data set management */ #define ATA_SFPDMA_DSM_TRIM 0x01 /* Set trim bit in auxiliary */ #define ATA_SFPDMA_HYBRID_EVICT 0x01 /* Hybrid Evict */ #define ATA_SFPDMA_WLDMA 0x02 /* Write Log DMA EXT */ -#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */ +#define ATA_SFPDMA_ZAC_MGMT_OUT 0x03 /* NCQ ZAC mgmt out w/data */ +#define ATA_RECV_FPDMA_QUEUED 0x65 /* receive DMA NCQ */ +#define ATA_RFPDMA_RL_DMA_EXT 0x00 /* Read Log DMA EXT */ +#define ATA_RFPDMA_ZAC_MGMT_IN 0x02 /* NCQ ZAC mgmt in w/data */ #define ATA_SEP_ATTN 0x67 /* SEP request */ #define ATA_SEEK 0x70 /* seek */ +#define ATA_ZAC_MANAGEMENT_OUT 0x9f /* ZAC management out */ +#define ATA_ZM_CLOSE_ZONE 0x01 /* close zone */ +#define ATA_ZM_FINISH_ZONE 0x02 /* finish zone */ +#define ATA_ZM_OPEN_ZONE 0x03 /* open zone */ +#define ATA_ZM_RWP 0x04 /* reset write pointer */ #define ATA_PACKET_CMD 0xa0 /* packet command */ #define ATA_ATAPI_IDENTIFY 0xa1 /* get ATAPI params*/ #define ATA_SERVICE 0xa2 /* service command */ #define ATA_SMART_CMD 0xb0 /* SMART command */ #define ATA_CFA_ERASE 0xc0 /* CFA erase */ #define ATA_READ_MUL 0xc4 /* read multi */ #define ATA_WRITE_MUL 0xc5 /* write multi */ #define ATA_SET_MULTI 0xc6 /* set multi size */ #define ATA_READ_DMA_QUEUED 0xc7 /* read DMA QUEUED */ #define ATA_READ_DMA 0xc8 /* read DMA */ #define ATA_WRITE_DMA 0xca /* write DMA */ #define ATA_WRITE_DMA_QUEUED 0xcc /* write DMA QUEUED */ #define ATA_WRITE_MUL_FUA48 0xce #define ATA_STANDBY_IMMEDIATE 0xe0 /* standby immediate */ #define ATA_IDLE_IMMEDIATE 0xe1 /* idle immediate */ #define ATA_STANDBY_CMD 0xe2 /* standby */ #define ATA_IDLE_CMD 0xe3 /* idle */ #define ATA_READ_BUFFER 0xe4 /* read buffer */ #define ATA_READ_PM 0xe4 /* read portmultiplier */ #define ATA_CHECK_POWER_MODE 0xe5 /* device power mode */ #define ATA_SLEEP 0xe6 /* sleep */ #define ATA_FLUSHCACHE 0xe7 /* flush cache to disk */ #define ATA_WRITE_PM 0xe8 /* write portmultiplier */ #define ATA_FLUSHCACHE48 0xea /* flush cache to disk */ #define ATA_ATA_IDENTIFY 0xec /* get ATA params */ #define ATA_SETFEATURES 0xef /* features command */ -#define ATA_SF_SETXFER 0x03 /* set transfer mode */ #define ATA_SF_ENAB_WCACHE 0x02 /* enable write cache */ #define ATA_SF_DIS_WCACHE 0x82 /* disable write cache */ +#define ATA_SF_SETXFER 0x03 /* set transfer mode */ +#define ATA_SF_APM 0x05 /* Enable APM feature set */ #define ATA_SF_ENAB_PUIS 0x06 /* enable PUIS */ #define ATA_SF_DIS_PUIS 0x86 /* disable PUIS */ #define ATA_SF_PUIS_SPINUP 0x07 /* PUIS spin-up */ +#define ATA_SF_WRV 0x0b /* Enable Write-Read-Verify */ +#define ATA_SF_DLC 0x0c /* Enable device life control */ +#define ATA_SF_SATA 0x10 /* Enable use of SATA feature */ +#define ATA_SF_FFC 0x41 /* Free-fall Control */ +#define ATA_SF_MHIST 0x43 /* Set Max Host Sect. 
Times */ +#define ATA_SF_RATE 0x45 /* Set Rate Basis */ +#define ATA_SF_EPC 0x4A /* Extended Power Conditions */ #define ATA_SF_ENAB_RCACHE 0xaa /* enable readahead cache */ #define ATA_SF_DIS_RCACHE 0x55 /* disable readahead cache */ #define ATA_SF_ENAB_RELIRQ 0x5d /* enable release interrupt */ #define ATA_SF_DIS_RELIRQ 0xdd /* disable release interrupt */ #define ATA_SF_ENAB_SRVIRQ 0x5e /* enable service interrupt */ #define ATA_SF_DIS_SRVIRQ 0xde /* disable service interrupt */ +#define ATA_SF_LPSAERC 0x62 /* Long Phys Sect Align ErrRep*/ +#define ATA_SF_DSN 0x63 /* Device Stats Notification */ +#define ATA_CHECK_POWER_MODE 0xe5 /* Check Power Mode */ #define ATA_SECURITY_SET_PASSWORD 0xf1 /* set drive password */ #define ATA_SECURITY_UNLOCK 0xf2 /* unlock drive using passwd */ #define ATA_SECURITY_ERASE_PREPARE 0xf3 /* prepare to erase drive */ #define ATA_SECURITY_ERASE_UNIT 0xf4 /* erase all blocks on drive */ #define ATA_SECURITY_FREEZE_LOCK 0xf5 /* freeze security config */ #define ATA_SECURITY_DISABLE_PASSWORD 0xf6 /* disable drive password */ #define ATA_READ_NATIVE_MAX_ADDRESS 0xf8 /* read native max address */ #define ATA_SET_MAX_ADDRESS 0xf9 /* set max address */ /* ATAPI commands */ #define ATAPI_TEST_UNIT_READY 0x00 /* check if device is ready */ #define ATAPI_REZERO 0x01 /* rewind */ #define ATAPI_REQUEST_SENSE 0x03 /* get sense data */ #define ATAPI_FORMAT 0x04 /* format unit */ #define ATAPI_READ 0x08 /* read data */ #define ATAPI_WRITE 0x0a /* write data */ #define ATAPI_WEOF 0x10 /* write filemark */ #define ATAPI_WF_WRITE 0x01 #define ATAPI_SPACE 0x11 /* space command */ #define ATAPI_SP_FM 0x01 #define ATAPI_SP_EOD 0x03 #define ATAPI_INQUIRY 0x12 /* get inquiry data */ #define ATAPI_MODE_SELECT 0x15 /* mode select */ #define ATAPI_ERASE 0x19 /* erase */ #define ATAPI_MODE_SENSE 0x1a /* mode sense */ #define ATAPI_START_STOP 0x1b /* start/stop unit */ #define ATAPI_SS_LOAD 0x01 #define ATAPI_SS_RETENSION 0x02 #define ATAPI_SS_EJECT 0x04 #define ATAPI_PREVENT_ALLOW 0x1e /* media removal */ #define ATAPI_READ_FORMAT_CAPACITIES 0x23 /* get format capacities */ #define ATAPI_READ_CAPACITY 0x25 /* get volume capacity */ #define ATAPI_READ_BIG 0x28 /* read data */ #define ATAPI_WRITE_BIG 0x2a /* write data */ #define ATAPI_LOCATE 0x2b /* locate to position */ #define ATAPI_READ_POSITION 0x34 /* read position */ #define ATAPI_SYNCHRONIZE_CACHE 0x35 /* flush buf, close channel */ #define ATAPI_WRITE_BUFFER 0x3b /* write device buffer */ #define ATAPI_READ_BUFFER 0x3c /* read device buffer */ #define ATAPI_READ_SUBCHANNEL 0x42 /* get subchannel info */ #define ATAPI_READ_TOC 0x43 /* get table of contents */ #define ATAPI_PLAY_10 0x45 /* play by lba */ #define ATAPI_PLAY_MSF 0x47 /* play by MSF address */ #define ATAPI_PLAY_TRACK 0x48 /* play by track number */ #define ATAPI_PAUSE 0x4b /* pause audio operation */ #define ATAPI_READ_DISK_INFO 0x51 /* get disk info structure */ #define ATAPI_READ_TRACK_INFO 0x52 /* get track info structure */ #define ATAPI_RESERVE_TRACK 0x53 /* reserve track */ #define ATAPI_SEND_OPC_INFO 0x54 /* send OPC structure */ #define ATAPI_MODE_SELECT_BIG 0x55 /* set device parameters */ #define ATAPI_REPAIR_TRACK 0x58 /* repair track */ #define ATAPI_READ_MASTER_CUE 0x59 /* read master CUE info */ #define ATAPI_MODE_SENSE_BIG 0x5a /* get device parameters */ #define ATAPI_CLOSE_TRACK 0x5b /* close track/session */ #define ATAPI_READ_BUFFER_CAPACITY 0x5c /* get buffer capacity */ #define ATAPI_SEND_CUE_SHEET 0x5d /* send CUE sheet */ #define
ATAPI_SERVICE_ACTION_IN 0x96 /* get service data */ #define ATAPI_BLANK 0xa1 /* blank the media */ #define ATAPI_SEND_KEY 0xa3 /* send DVD key structure */ #define ATAPI_REPORT_KEY 0xa4 /* get DVD key structure */ #define ATAPI_PLAY_12 0xa5 /* play by lba */ #define ATAPI_LOAD_UNLOAD 0xa6 /* changer control command */ #define ATAPI_READ_STRUCTURE 0xad /* get DVD structure */ #define ATAPI_PLAY_CD 0xb4 /* universal play command */ #define ATAPI_SET_SPEED 0xbb /* set drive speed */ #define ATAPI_MECH_STATUS 0xbd /* get changer status */ #define ATAPI_READ_CD 0xbe /* read data */ #define ATAPI_POLL_DSC 0xff /* poll DSC status bit */ struct ata_ioc_devices { int channel; char name[2][32]; struct ata_params params[2]; }; /* per channel ATA ioctl calls */ #define IOCATAGMAXCHANNEL _IOR('a', 1, int) #define IOCATAREINIT _IOW('a', 2, int) #define IOCATAATTACH _IOW('a', 3, int) #define IOCATADETACH _IOW('a', 4, int) #define IOCATADEVICES _IOWR('a', 5, struct ata_ioc_devices) /* ATAPI request sense structure */ struct atapi_sense { u_int8_t error; /* current or deferred errors */ #define ATA_SENSE_VALID 0x80 u_int8_t segment; /* segment number */ u_int8_t key; /* sense key */ #define ATA_SENSE_KEY_MASK 0x0f /* sense key mask */ #define ATA_SENSE_NO_SENSE 0x00 /* no specific sense key info */ #define ATA_SENSE_RECOVERED_ERROR 0x01 /* command OK, data recovered */ #define ATA_SENSE_NOT_READY 0x02 /* no access to drive */ #define ATA_SENSE_MEDIUM_ERROR 0x03 /* non-recovered data error */ #define ATA_SENSE_HARDWARE_ERROR 0x04 /* non-recoverable HW failure */ #define ATA_SENSE_ILLEGAL_REQUEST 0x05 /* invalid command param(s) */ #define ATA_SENSE_UNIT_ATTENTION 0x06 /* media changed */ #define ATA_SENSE_DATA_PROTECT 0x07 /* write protect */ #define ATA_SENSE_BLANK_CHECK 0x08 /* blank check */ #define ATA_SENSE_VENDOR_SPECIFIC 0x09 /* vendor specific skey */ #define ATA_SENSE_COPY_ABORTED 0x0a /* copy aborted */ #define ATA_SENSE_ABORTED_COMMAND 0x0b /* command aborted, try again */ #define ATA_SENSE_EQUAL 0x0c /* equal */ #define ATA_SENSE_VOLUME_OVERFLOW 0x0d /* volume overflow */ #define ATA_SENSE_MISCOMPARE 0x0e /* data don't match the medium */ #define ATA_SENSE_RESERVED 0x0f #define ATA_SENSE_ILI 0x20 #define ATA_SENSE_EOM 0x40 #define ATA_SENSE_FILEMARK 0x80 u_int32_t cmd_info; /* cmd information */ u_int8_t sense_length; /* additional sense len (n-7) */ u_int32_t cmd_specific_info; /* additional cmd spec info */ u_int8_t asc; /* additional sense code */ u_int8_t ascq; /* additional sense code qual */ u_int8_t replaceable_unit_code; /* replaceable unit code */ u_int8_t specific; /* sense key specific */ #define ATA_SENSE_SPEC_VALID 0x80 #define ATA_SENSE_SPEC_MASK 0x7f u_int8_t specific1; /* sense key specific */ u_int8_t specific2; /* sense key specific */ } __packed; + +/* + * SET FEATURES subcommands + */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * These values go in LBA bits 3:0.
+ */ +#define ATA_SF_EPC_RESTORE 0x00 /* Restore Power Condition Settings */ +#define ATA_SF_EPC_GOTO 0x01 /* Go To Power Condition */ +#define ATA_SF_EPC_SET_TIMER 0x02 /* Set Power Condition Timer */ +#define ATA_SF_EPC_SET_STATE 0x03 /* Set Power Condition State */ +#define ATA_SF_EPC_ENABLE 0x04 /* Enable the EPC feature set */ +#define ATA_SF_EPC_DISABLE 0x05 /* Disable the EPC feature set */ +#define ATA_SF_EPC_SET_SOURCE 0x06 /* Set EPC Power Source */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Power Condition ID field + * These values go in the count register. + */ +#define ATA_EPC_STANDBY_Z 0x00 /* Substate of PM2:Standby */ +#define ATA_EPC_STANDBY_Y 0x01 /* Substate of PM2:Standby */ +#define ATA_EPC_IDLE_A 0x81 /* Substate of PM1:Idle */ +#define ATA_EPC_IDLE_B 0x82 /* Substate of PM1:Idle */ +#define ATA_EPC_IDLE_C 0x83 /* Substate of PM1:Idle */ +#define ATA_EPC_ALL 0xff /* All supported power conditions */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Restore Power Conditions Settings subcommand + * These values go in the LBA register. + */ +#define ATA_SF_EPC_RST_DFLT 0x40 /* 1=Rst from Default, 0= from Saved */ +#define ATA_SF_EPC_RST_SAVE 0x10 /* 1=Save on completion */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Go To Power Condition subcommand + * These values go in the LBA register. + */ +#define ATA_SF_EPC_GOTO_DELAY 0x02000000 /* Delayed entry bit */ +#define ATA_SF_EPC_GOTO_HOLD 0x01000000 /* Hold Power Cond bit */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Set Power Condition Timer subcommand + * These values go in the LBA register. + */ +#define ATA_SF_EPC_TIMER_MASK 0x00ffff00 /* Timer field */ +#define ATA_SF_EPC_TIMER_SHIFT 8 +#define ATA_SF_EPC_TIMER_SEC 0x00000080 /* Timer units, 1=sec, 0=.1s */ +#define ATA_SF_EPC_TIMER_EN 0x00000020 /* Enable/disable cond. */ +#define ATA_SF_EPC_TIMER_SAVE 0x00000010 /* Save settings on comp. */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Set Power Condition State subcommand + * These values go in the LBA register. + */ +#define ATA_SF_EPC_SETCON_EN 0x00000020 /* Enable power cond. */ +#define ATA_SF_EPC_SETCON_SAVE 0x00000010 /* Save settings on comp */ + +/* + * SET FEATURES command + * Extended Power Conditions subcommand -- ATA_SF_EPC (0x4A) + * Set EPC Power Source subcommand + * These values go in the count register.
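To show how the new identify bits and EPC defines compose, here is a hedged sketch that checks the word 119 EPC support bit from struct ata_params and then builds a SET FEATURES / Set Power Condition Timer request enabling a 30 second Idle_b timer. The register-image struct is illustrative only, not a real driver API.

#include <errno.h>
#include <stdint.h>

/* Illustrative taskfile image; the field layout is a simplification. */
struct example_ata_regs {
	uint8_t		command;
	uint8_t		features;
	uint8_t		count;
	uint32_t	lba;
};

/* Sketch: enable a 30 second Idle_b timer via SET FEATURES / EPC. */
static int
example_epc_set_timer(const struct ata_params *parm,
    struct example_ata_regs *r)
{

	if ((parm->support2 & ATA_SUPPORT_EPC) == 0)
		return (EOPNOTSUPP);		/* word 119, EPC bit */
	r->command = ATA_SETFEATURES;
	r->features = ATA_SF_EPC;		/* subcommand 0x4A */
	r->count = ATA_EPC_IDLE_B;		/* power condition ID */
	r->lba = ATA_SF_EPC_SET_TIMER |		/* LBA bits 3:0 */
	    (30 << ATA_SF_EPC_TIMER_SHIFT) |	/* timer value */
	    ATA_SF_EPC_TIMER_SEC |		/* units: 1=sec, 0=100ms */
	    ATA_SF_EPC_TIMER_EN;		/* enable the condition */
	return (0);
}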
+ */ +#define ATA_SF_EPC_SRC_UNKNOWN 0x0000 /* Unknown source */ +#define ATA_SF_EPC_SRC_BAT 0x0001 /* battery source */ +#define ATA_SF_EPC_SRC_NOT_BAT 0x0002 /* not battery source */ + +#define ATA_LOG_DIRECTORY 0x00 /* Directory of all logs */ +#define ATA_POWER_COND_LOG 0x08 /* Power Conditions Log */ +#define ATA_PCL_IDLE 0x00 /* Idle Power Conditions Page */ +#define ATA_PCL_STANDBY 0x01 /* Standby Power Conditions Page */ +#define ATA_IDENTIFY_DATA_LOG 0x30 /* Identify Device Data Log */ +#define ATA_IDL_PAGE_LIST 0x00 /* List of supported pages */ +#define ATA_IDL_IDENTIFY_DATA 0x01 /* Copy of Identify Device data */ +#define ATA_IDL_CAPACITY 0x02 /* Capacity */ +#define ATA_IDL_SUP_CAP 0x03 /* Supported Capabilities */ +#define ATA_IDL_CUR_SETTINGS 0x04 /* Current Settings */ +#define ATA_IDL_ATA_STRINGS 0x05 /* ATA Strings */ +#define ATA_IDL_SECURITY 0x06 /* Security */ +#define ATA_IDL_PARALLEL_ATA 0x07 /* Parallel ATA */ +#define ATA_IDL_SERIAL_ATA 0x08 /* Serial ATA */ +#define ATA_IDL_ZDI 0x09 /* Zoned Device Information */ + +struct ata_gp_log_dir { + uint8_t header[2]; +#define ATA_GP_LOG_DIR_VERSION 0x0001 + uint8_t num_pages[255*2]; /* Number of log pages at address */ +}; + +/* + * ATA Power Conditions log descriptor + */ +struct ata_power_cond_log_desc { + uint8_t reserved1; + uint8_t flags; +#define ATA_PCL_COND_SUPPORTED 0x80 +#define ATA_PCL_COND_SAVEABLE 0x40 +#define ATA_PCL_COND_CHANGEABLE 0x20 +#define ATA_PCL_DEFAULT_TIMER_EN 0x10 +#define ATA_PCL_SAVED_TIMER_EN 0x08 +#define ATA_PCL_CURRENT_TIMER_EN 0x04 +#define ATA_PCL_HOLD_PC_NOT_SUP 0x02 + uint8_t reserved2[2]; + uint8_t default_timer[4]; + uint8_t saved_timer[4]; + uint8_t current_timer[4]; + uint8_t nom_time_to_active[4]; + uint8_t min_timer[4]; + uint8_t max_timer[4]; + uint8_t num_transitions_to_pc[4]; + uint8_t hours_in_pc[4]; + uint8_t reserved3[28]; +}; + +/* + * ATA Power Conditions Log (0x08), Idle power conditions page (0x00) + */ +struct ata_power_cond_log_idle { + struct ata_power_cond_log_desc idle_a_desc; + struct ata_power_cond_log_desc idle_b_desc; + struct ata_power_cond_log_desc idle_c_desc; + uint8_t reserved[320]; +}; + +/* + * ATA Power Conditions Log (0x08), Standby power conditions page (0x01) + */ +struct ata_power_cond_log_standby { + uint8_t reserved[384]; + struct ata_power_cond_log_desc standby_y_desc; + struct ata_power_cond_log_desc standby_z_desc; +}; + +/* + * ATA IDENTIFY DEVICE data log (0x30) page 0x00 + * List of Supported IDENTIFY DEVICE data pages. + */ +struct ata_identify_log_pages { + uint8_t header[8]; +#define ATA_IDLOG_REVISION 0x0000000000000001 + uint8_t entry_count; + uint8_t entries[503]; +}; + +/* + * ATA IDENTIFY DEVICE data log (0x30) + * Capacity (Page 0x02).
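[Editorial aside, not part of the commit: the timer fields in struct ata_power_cond_log_desc above are raw 4-byte arrays. A hedged sketch of decoding one descriptor follows; the little-endian byte order and 100 ms timer unit are assumptions taken from the ACS specification, not statements from this change, and the helper names are invented.]

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian 32-bit value from a log field (assumption). */
static uint32_t
pcl_le32(const uint8_t *p)
{
	return (p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) |
	    ((uint32_t)p[3] << 24));
}

/* Print the state of one power condition descriptor defined above. */
static void
pcl_print_desc(const char *name, const struct ata_power_cond_log_desc *d)
{
	if ((d->flags & ATA_PCL_COND_SUPPORTED) == 0) {
		printf("%s: not supported\n", name);
		return;
	}
	printf("%s: current timer %u (%s, %s)\n", name,
	    pcl_le32(d->current_timer),
	    (d->flags & ATA_PCL_CURRENT_TIMER_EN) ? "enabled" : "disabled",
	    (d->flags & ATA_PCL_COND_CHANGEABLE) ? "changeable" : "fixed");
}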
+ */ +struct ata_identify_log_capacity { + uint8_t header[8]; +#define ATA_CAP_HEADER_VALID 0x8000000000000000 +#define ATA_CAP_PAGE_NUM_MASK 0x0000000000ff0000 +#define ATA_CAP_PAGE_NUM_SHIFT 16 +#define ATA_CAP_REV_MASK 0x00000000000000ff + uint8_t capacity[8]; +#define ATA_CAP_CAPACITY_VALID 0x8000000000000000 +#define ATA_CAP_ACCESSIBLE_CAP 0x0000ffffffffffff + uint8_t phys_logical_sect_size[8]; +#define ATA_CAP_PL_VALID 0x8000000000000000 +#define ATA_CAP_LTOP_REL_SUP 0x4000000000000000 +#define ATA_CAP_LOG_SECT_SUP 0x2000000000000000 +#define ATA_CAP_ALIGN_ERR_MASK 0x0000000000300000 +#define ATA_CAP_LTOP_MASK 0x00000000000f0000 +#define ATA_CAP_LOG_SECT_OFF 0x000000000000ffff + uint8_t logical_sect_size[8]; +#define ATA_CAP_LOG_SECT_VALID 0x8000000000000000 +#define ATA_CAP_LOG_SECT_SIZE 0x00000000ffffffff + uint8_t nominal_buffer_size[8]; +#define ATA_CAP_NOM_BUF_VALID 0x8000000000000000 +#define ATA_CAP_NOM_BUF_SIZE 0x7fffffffffffffff + uint8_t reserved[472]; +}; + +/* + * ATA IDENTIFY DEVICE data log (0x30) + * Supported Capabilities (Page 0x03). + */ + +struct ata_identify_log_sup_cap { + uint8_t header[8]; +#define ATA_SUP_CAP_HEADER_VALID 0x8000000000000000 +#define ATA_SUP_CAP_PAGE_NUM_MASK 0x0000000000ff0000 +#define ATA_SUP_CAP_PAGE_NUM_SHIFT 16 +#define ATA_SUP_CAP_REV_MASK 0x00000000000000ff + uint8_t sup_cap[8]; +#define ATA_SUP_CAP_VALID 0x8000000000000000 +#define ATA_SC_SET_SECT_CONFIG_SUP 0x0002000000000000 /* Set Sect Conf*/ +#define ATA_SC_ZERO_EXT_SUP 0x0001000000000000 /* Zero EXT */ +#define ATA_SC_SUCC_NCQ_SENSE_SUP 0x0000800000000000 /* Succ. NCQ Sns */ +#define ATA_SC_DLC_SUP 0x0000400000000000 /* DLC */ +#define ATA_SC_RQSN_DEV_FAULT_SUP 0x0000200000000000 /* Req Sns Dev Flt*/ +#define ATA_SC_DSN_SUP 0x0000100000000000 /* DSN */ +#define ATA_SC_LP_STANDBY_SUP 0x0000080000000000 /* LP Standby */ +#define ATA_SC_SET_EPC_PS_SUP 0x0000040000000000 /* Set EPC PS */ +#define ATA_SC_AMAX_ADDR_SUP 0x0000020000000000 /* AMAX Addr */ +#define ATA_SC_DRAT_SUP 0x0000008000000000 /* DRAT */ +#define ATA_SC_LPS_MISALGN_SUP 0x0000004000000000 /* LPS Misalign */ +#define ATA_SC_RB_DMA_SUP 0x0000001000000000 /* Read Buf DMA */ +#define ATA_SC_WB_DMA_SUP 0x0000000800000000 /* Write Buf DMA */ +#define ATA_SC_DNLD_MC_DMA_SUP 0x0000000200000000 /* DL MCode DMA */ +#define ATA_SC_28BIT_SUP 0x0000000100000000 /* 28-bit */ +#define ATA_SC_RZAT_SUP 0x0000000080000000 /* RZAT */ +#define ATA_SC_NOP_SUP 0x0000000020000000 /* NOP */ +#define ATA_SC_READ_BUFFER_SUP 0x0000000010000000 /* Read Buffer */ +#define ATA_SC_WRITE_BUFFER_SUP 0x0000000008000000 /* Write Buffer */ +#define ATA_SC_READ_LOOK_AHEAD_SUP 0x0000000002000000 /* Read Look-Ahead*/ +#define ATA_SC_VOLATILE_WC_SUP 0x0000000001000000 /* Volatile WC */ +#define ATA_SC_SMART_SUP 0x0000000000800000 /* SMART */ +#define ATA_SC_FLUSH_CACHE_EXT_SUP 0x0000000000400000 /* Flush Cache Ext */ +#define ATA_SC_48BIT_SUP 0x0000000000100000 /* 48-Bit */ +#define ATA_SC_SPINUP_SUP 0x0000000000040000 /* Spin-Up */ +#define ATA_SC_PUIS_SUP 0x0000000000020000 /* PUIS */ +#define ATA_SC_APM_SUP 0x0000000000010000 /* APM */ +#define ATA_SC_DL_MICROCODE_SUP 0x0000000000004000 /* DL Microcode */ +#define ATA_SC_UNLOAD_SUP 0x0000000000002000 /* Unload */ +#define ATA_SC_WRITE_FUA_EXT_SUP 0x0000000000001000 /* Write FUA EXT */ +#define ATA_SC_GPL_SUP 0x0000000000000800 /* GPL */ +#define ATA_SC_STREAMING_SUP 0x0000000000000400 /* Streaming */ +#define ATA_SC_SMART_SELFTEST_SUP 0x0000000000000100 /* SMART self-test */ +#define 
ATA_SC_SMART_ERR_LOG_SUP 0x0000000000000080 /* SMART Err Log */ +#define ATA_SC_EPC_SUP 0x0000000000000040 /* EPC */ +#define ATA_SC_SENSE_SUP 0x0000000000000020 /* Sense data */ +#define ATA_SC_FREEFALL_SUP 0x0000000000000010 /* Free-Fall */ +#define ATA_SC_DM_MODE3_SUP 0x0000000000000008 /* DM Mode 3 */ +#define ATA_SC_GPL_DMA_SUP 0x0000000000000004 /* GPL DMA */ +#define ATA_SC_WRITE_UNCOR_SUP 0x0000000000000002 /* Write uncorr. */ +#define ATA_SC_WRV_SUP 0x0000000000000001 /* WRV */ + uint8_t download_code_cap[8]; +#define ATA_DL_CODE_VALID 0x8000000000000000 +#define ATA_DLC_DM_OFFSETS_DEFER_SUP 0x0000000400000000 +#define ATA_DLC_DM_IMMED_SUP 0x0000000200000000 +#define ATA_DLC_DM_OFF_IMMED_SUP 0x0000000100000000 +#define ATA_DLC_DM_MAX_XFER_SIZE_MASK 0x00000000ffff0000 +#define ATA_DLC_DM_MAX_XFER_SIZE_SHIFT 16 +#define ATA_DLC_DM_MIN_XFER_SIZE_MASK 0x000000000000ffff + uint8_t nom_media_rotation_rate[8]; +#define ATA_NOM_MEDIA_ROTATION_VALID 0x8000000000000000 +#define ATA_ROTATION_MASK 0x000000000000ffff + uint8_t form_factor[8]; +#define ATA_FORM_FACTOR_VALID 0x8000000000000000 +#define ATA_FF_MASK 0x000000000000000f +#define ATA_FF_NOT_REPORTED 0x0000000000000000 /* Not reported */ +#define ATA_FF_525_IN 0x0000000000000001 /* 5.25 inch */ +#define ATA_FF_35_IN 0x0000000000000002 /* 3.5 inch */ +#define ATA_FF_25_IN 0x0000000000000003 /* 2.5 inch */ +#define ATA_FF_18_IN 0x0000000000000004 /* 1.8 inch */ +#define ATA_FF_LT_18_IN 0x0000000000000005 /* < 1.8 inch */ +#define ATA_FF_MSATA 0x0000000000000006 /* mSATA */ +#define ATA_FF_M2 0x0000000000000007 /* M.2 */ +#define ATA_FF_MICROSSD 0x0000000000000008 /* MicroSSD */ +#define ATA_FF_CFAST 0x0000000000000009 /* CFast */ + uint8_t wrv_sec_cnt_mode3[8]; +#define ATA_WRV_MODE3_VALID 0x8000000000000000 +#define ATA_WRV_MODE3_COUNT 0x00000000ffffffff + uint8_t wrv_sec_cnt_mode2[8]; +#define ATA_WRV_MODE2_VALID 0x8000000000000000 +#define ATA_WRV_MODE2_COUNT 0x00000000ffffffff + uint8_t wwn[16]; + /* XXX KDM need to figure out how to handle 128-bit fields */ + uint8_t dsm[8]; +#define ATA_DSM_VALID 0x8000000000000000 +#define ATA_LB_MARKUP_SUP 0x000000000000ff00 +#define ATA_TRIM_SUP 0x0000000000000001 + uint8_t util_per_unit_time[16]; + /* XXX KDM need to figure out how to handle 128-bit fields */ + uint8_t util_usage_rate_sup[8]; +#define ATA_UTIL_USAGE_RATE_VALID 0x8000000000000000 +#define ATA_SETTING_RATE_SUP 0x0000000000800000 +#define ATA_SINCE_POWERON_SUP 0x0000000000000100 +#define ATA_POH_RATE_SUP 0x0000000000000010 +#define ATA_DATE_TIME_RATE_SUP 0x0000000000000001 + uint8_t zoned_cap[8]; +#define ATA_ZONED_VALID 0x8000000000000000 +#define ATA_ZONED_MASK 0x0000000000000003 + uint8_t sup_zac_cap[8]; +#define ATA_SUP_ZAC_CAP_VALID 0x8000000000000000 +#define ATA_ND_RWP_SUP 0x0000000000000010 /* Reset Write Ptr*/ +#define ATA_ND_FINISH_ZONE_SUP 0x0000000000000008 /* Finish Zone */ +#define ATA_ND_CLOSE_ZONE_SUP 0x0000000000000004 /* Close Zone */ +#define ATA_ND_OPEN_ZONE_SUP 0x0000000000000002 /* Open Zone */ +#define ATA_REPORT_ZONES_SUP 0x0000000000000001 /* Report Zones */ + uint8_t reserved[392]; +}; + +/* + * ATA Identify Device Data Log Zoned Device Information Page (0x09). + * Current as of ZAC r04a, August 25, 2015. 
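[Editorial aside, not part of the commit: once assembled into a qword, each 8-byte field in these Identify Device data log pages carries a validity flag in its top bit. A hedged sketch of testing for EPC support from the Supported Capabilities page above; little-endian storage is an assumption, and the helper names are invented.]

#include <stdint.h>

/* Assemble a little-endian 64-bit log field (assumption). */
static uint64_t
ata_log_qword(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return (v);
}

/*
 * Returns 1 if EPC is supported, 0 if not, -1 if the field is not valid.
 * Assumes the struct ata_identify_log_sup_cap definition above.
 */
static int
ata_epc_supported(const struct ata_identify_log_sup_cap *sup)
{
	uint64_t sup_cap = ata_log_qword(sup->sup_cap);

	if ((sup_cap & ATA_SUP_CAP_VALID) == 0)
		return (-1);
	return ((sup_cap & ATA_SC_EPC_SUP) != 0);
}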
+ */ +struct ata_zoned_info_log { + uint8_t header[8]; +#define ATA_ZDI_HEADER_VALID 0x8000000000000000 +#define ATA_ZDI_PAGE_NUM_MASK 0x0000000000ff0000 +#define ATA_ZDI_PAGE_NUM_SHIFT 16 +#define ATA_ZDI_REV_MASK 0x00000000000000ff + uint8_t zoned_cap[8]; +#define ATA_ZDI_CAP_VALID 0x8000000000000000 +#define ATA_ZDI_CAP_URSWRZ 0x0000000000000001 + uint8_t zoned_settings[8]; +#define ATA_ZDI_SETTINGS_VALID 0x8000000000000000 + uint8_t optimal_seq_zones[8]; +#define ATA_ZDI_OPT_SEQ_VALID 0x8000000000000000 +#define ATA_ZDI_OPT_SEQ_MASK 0x00000000ffffffff + uint8_t optimal_nonseq_zones[8]; +#define ATA_ZDI_OPT_NS_VALID 0x8000000000000000 +#define ATA_ZDI_OPT_NS_MASK 0x00000000ffffffff + uint8_t max_seq_req_zones[8]; +#define ATA_ZDI_MAX_SEQ_VALID 0x8000000000000000 +#define ATA_ZDI_MAX_SEQ_MASK 0x00000000ffffffff + uint8_t version_info[8]; +#define ATA_ZDI_VER_VALID 0x8000000000000000 +#define ATA_ZDI_VER_ZAC_SUP 0x0100000000000000 +#define ATA_ZDI_VER_ZAC_MASK 0x00000000000000ff + uint8_t reserved[456]; +}; struct ata_ioc_request { union { struct { u_int8_t command; u_int8_t feature; u_int64_t lba; u_int16_t count; } ata; struct { char ccb[16]; struct atapi_sense sense; } atapi; } u; caddr_t data; int count; int flags; #define ATA_CMD_CONTROL 0x01 #define ATA_CMD_READ 0x02 #define ATA_CMD_WRITE 0x04 #define ATA_CMD_ATAPI 0x08 int timeout; int error; }; struct ata_security_password { u_int16_t ctrl; #define ATA_SECURITY_PASSWORD_USER 0x0000 #define ATA_SECURITY_PASSWORD_MASTER 0x0001 #define ATA_SECURITY_ERASE_NORMAL 0x0000 #define ATA_SECURITY_ERASE_ENHANCED 0x0002 #define ATA_SECURITY_LEVEL_HIGH 0x0000 #define ATA_SECURITY_LEVEL_MAXIMUM 0x0100 u_int8_t password[32]; u_int16_t revision; u_int16_t reserved[238]; }; /* pr device ATA ioctl calls */ #define IOCATAREQUEST _IOWR('a', 100, struct ata_ioc_request) #define IOCATAGPARM _IOR('a', 101, struct ata_params) #define IOCATAGMODE _IOR('a', 102, int) #define IOCATASMODE _IOW('a', 103, int) #define IOCATAGSPINDOWN _IOR('a', 104, int) #define IOCATASSPINDOWN _IOW('a', 105, int) struct ata_ioc_raid_config { int lun; int type; #define AR_JBOD 0x0001 #define AR_SPAN 0x0002 #define AR_RAID0 0x0004 #define AR_RAID1 0x0008 #define AR_RAID01 0x0010 #define AR_RAID3 0x0020 #define AR_RAID4 0x0040 #define AR_RAID5 0x0080 int interleave; int status; #define AR_READY 1 #define AR_DEGRADED 2 #define AR_REBUILDING 4 int progress; int total_disks; int disks[16]; }; struct ata_ioc_raid_status { int lun; int type; int interleave; int status; int progress; int total_disks; struct { int state; #define AR_DISK_ONLINE 0x01 #define AR_DISK_PRESENT 0x02 #define AR_DISK_SPARE 0x04 int lun; } disks[16]; }; /* ATA RAID ioctl calls */ #define IOCATARAIDCREATE _IOWR('a', 200, struct ata_ioc_raid_config) #define IOCATARAIDDELETE _IOW('a', 201, int) #define IOCATARAIDSTATUS _IOWR('a', 202, struct ata_ioc_raid_status) #define IOCATARAIDADDSPARE _IOW('a', 203, struct ata_ioc_raid_config) #define IOCATARAIDREBUILD _IOW('a', 204, int) #endif /* _SYS_ATA_H_ */ Index: head/sys/sys/bio.h =================================================================== --- head/sys/sys/bio.h (revision 300206) +++ head/sys/sys/bio.h (revision 300207) @@ -1,159 +1,162 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. 
and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)buf.h 8.9 (Berkeley) 3/30/95 * $FreeBSD$ */ #ifndef _SYS_BIO_H_ #define _SYS_BIO_H_ #include +#include /* bio_cmd */ #define BIO_READ 0x01 /* Read I/O data */ #define BIO_WRITE 0x02 /* Write I/O data */ -#define BIO_DELETE 0x04 /* TRIM or free blocks, i.e. mark as unused */ -#define BIO_GETATTR 0x08 /* Get GEOM attributes of object */ -#define BIO_FLUSH 0x10 /* Commit outstanding I/O now */ -#define BIO_CMD0 0x20 /* Available for local hacks */ -#define BIO_CMD1 0x40 /* Available for local hacks */ -#define BIO_CMD2 0x80 /* Available for local hacks */ +#define BIO_DELETE 0x03 /* TRIM or free blocks, i.e. mark as unused */ +#define BIO_GETATTR 0x04 /* Get GEOM attributes of object */ +#define BIO_FLUSH 0x05 /* Commit outstanding I/O now */ +#define BIO_CMD0 0x06 /* Available for local hacks */ +#define BIO_CMD1 0x07 /* Available for local hacks */ +#define BIO_CMD2 0x08 /* Available for local hacks */ +#define BIO_ZONE 0x09 /* Zone command */ /* bio_flags */ #define BIO_ERROR 0x01 /* An error occurred processing this bio. */ #define BIO_DONE 0x02 /* This bio is finished. */ #define BIO_ONQUEUE 0x04 /* This bio is in a queue & not yet taken. */ /* * This bio must be executed after all previous bios in the queue have been * executed, and before any successive bios can be executed. */ #define BIO_ORDERED 0x08 #define BIO_UNMAPPED 0x10 #define BIO_TRANSIENT_MAPPING 0x20 #define BIO_VLIST 0x40 #ifdef _KERNEL struct disk; struct bio; struct vm_map; /* Empty classifier tag, to prevent further classification. */ #define BIO_NOTCLASSIFIED (void *)(~0UL) typedef void bio_task_t(void *); /* * The bio structure describes an I/O operation in the kernel. */ struct bio { uint16_t bio_cmd; /* I/O operation. */ uint16_t bio_flags; /* General flags. */ uint16_t bio_cflags; /* Private use by the consumer. */ uint16_t bio_pflags; /* Private use by the provider. */ struct cdev *bio_dev; /* Device to do I/O on. 
*/ struct disk *bio_disk; /* Valid below geom_disk.c only */ off_t bio_offset; /* Offset into file. */ long bio_bcount; /* Valid bytes in buffer. */ caddr_t bio_data; /* Memory, superblocks, indirect etc. */ struct vm_page **bio_ma; /* Or unmapped. */ int bio_ma_offset; /* Offset in the first page of bio_ma. */ int bio_ma_n; /* Number of pages in bio_ma. */ int bio_error; /* Errno for BIO_ERROR. */ long bio_resid; /* Remaining I/O in bytes. */ void (*bio_done)(struct bio *); void *bio_driver1; /* Private use by the provider. */ void *bio_driver2; /* Private use by the provider. */ void *bio_caller1; /* Private use by the consumer. */ void *bio_caller2; /* Private use by the consumer. */ TAILQ_ENTRY(bio) bio_queue; /* Disksort queue. */ const char *bio_attribute; /* Attribute for BIO_[GS]ETATTR */ + struct disk_zone_args bio_zone;/* Used for BIO_ZONE */ struct g_consumer *bio_from; /* GEOM linkage */ struct g_provider *bio_to; /* GEOM linkage */ off_t bio_length; /* Like bio_bcount */ off_t bio_completed; /* Inverse of bio_resid */ u_int bio_children; /* Number of spawned bios */ u_int bio_inbed; /* Children safely home by now */ struct bio *bio_parent; /* Pointer to parent */ struct bintime bio_t0; /* Time request started */ bio_task_t *bio_task; /* Task_queue handler */ void *bio_task_arg; /* Argument to above */ void *bio_classifier1; /* Classifier tag. */ void *bio_classifier2; /* Classifier tag. */ #ifdef DIAGNOSTIC void *_bio_caller1; void *_bio_caller2; uint8_t _bio_cflags; #endif /* XXX: these go away when bio chaining is introduced */ daddr_t bio_pblkno; /* physical block number */ }; struct uio; struct devstat; struct bio_queue_head { TAILQ_HEAD(bio_queue, bio) queue; off_t last_offset; struct bio *insert_point; }; extern struct vm_map *bio_transient_map; extern int bio_transient_maxcnt; void biodone(struct bio *bp); void biofinish(struct bio *bp, struct devstat *stat, int error); int biowait(struct bio *bp, const char *wchan); void bioq_disksort(struct bio_queue_head *ap, struct bio *bp); struct bio *bioq_first(struct bio_queue_head *head); struct bio *bioq_takefirst(struct bio_queue_head *head); void bioq_flush(struct bio_queue_head *head, struct devstat *stp, int error); void bioq_init(struct bio_queue_head *head); void bioq_insert_head(struct bio_queue_head *head, struct bio *bp); void bioq_insert_tail(struct bio_queue_head *head, struct bio *bp); void bioq_remove(struct bio_queue_head *head, struct bio *bp); void bio_taskqueue(struct bio *bp, bio_task_t *fund, void *arg); int physio(struct cdev *dev, struct uio *uio, int ioflag); #define physread physio #define physwrite physio #endif /* _KERNEL */ #endif /* !_SYS_BIO_H_ */ Index: head/sys/sys/disk.h =================================================================== --- head/sys/sys/disk.h (revision 300206) +++ head/sys/sys/disk.h (revision 300207) @@ -1,139 +1,142 @@ /*- * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ * */ #ifndef _SYS_DISK_H_ #define _SYS_DISK_H_ #include #include +#include #ifdef _KERNEL #ifndef _SYS_CONF_H_ #include /* XXX: temporary to avoid breakage */ #endif void disk_err(struct bio *bp, const char *what, int blkdone, int nl); #endif #define DIOCGSECTORSIZE _IOR('d', 128, u_int) /* * Get the sector size of the device in bytes. The sector size is the * smallest unit of data which can be transferred from this device. * Usually this is a power of 2 but it might not be (i.e. CDROM audio). */ #define DIOCGMEDIASIZE _IOR('d', 129, off_t) /* Get media size in bytes */ /* * Get the size of the entire device in bytes. This should be a * multiple of the sector size. */ #define DIOCGFWSECTORS _IOR('d', 130, u_int) /* Get firmware's sectorcount */ /* * Get the firmware's notion of number of sectors per track. This * value is mostly used for compatibility with various ill designed * disk label formats. Don't use it unless you have to. */ #define DIOCGFWHEADS _IOR('d', 131, u_int) /* Get firmware's headcount */ /* * Get the firmwares notion of number of heads per cylinder. This * value is mostly used for compatibility with various ill designed * disk label formats. Don't use it unless you have to. */ #define DIOCSKERNELDUMP _IOW('d', 133, u_int) /* Set/Clear kernel dumps */ /* * Enable/Disable (the argument is boolean) the device for kernel * core dumps. */ #define DIOCGFRONTSTUFF _IOR('d', 134, off_t) /* * Many disk formats have some amount of space reserved at the * start of the disk to hold bootblocks, various disklabels and * similar stuff. This ioctl returns the number of such bytes * which may apply to the device. */ #define DIOCGFLUSH _IO('d', 135) /* Flush write cache */ /* * Flush write cache of the device. */ #define DIOCGDELETE _IOW('d', 136, off_t[2]) /* Delete data */ /* * Mark data on the device as unused. */ #define DISK_IDENT_SIZE 256 #define DIOCGIDENT _IOR('d', 137, char[DISK_IDENT_SIZE]) /*- * Get the ident of the given provider. Ident is (most of the time) * a uniqe and fixed provider's identifier. Ident's properties are as * follow: * - ident value is preserved between reboots, * - provider can be detached/attached and ident is preserved, * - provider's name can change - ident can't, * - ident value should not be based on on-disk metadata; in other * words copying whole data from one disk to another should not * yield the same ident for the other disk, * - there could be more than one provider with the same ident, but * only if they point at exactly the same physical storage, this is * the case for multipathing for example, * - GEOM classes that consumes single providers and provide single * providers, like geli, gbde, should just attach class name to the * ident of the underlying provider, * - ident is an ASCII string (is printable), * - ident is optional and applications can't relay on its presence. */ #define DIOCGPROVIDERNAME _IOR('d', 138, char[MAXPATHLEN]) /* * Store the provider name, given a device path, in a buffer. The buffer * must be at least MAXPATHLEN bytes long. */ #define DIOCGSTRIPESIZE _IOR('d', 139, off_t) /* Get stripe size in bytes */ /* * Get the size of the device's optimal access block in bytes. * This should be a multiple of the sector size. */ #define DIOCGSTRIPEOFFSET _IOR('d', 140, off_t) /* Get stripe offset in bytes */ /* * Get the offset of the first device's optimal access block in bytes. 
* This should be a multiple of the sector size. */ #define DIOCGPHYSPATH _IOR('d', 141, char[MAXPATHLEN]) /* * Get a string defining the physical path for a given provider. * This has similar rules to ident, but is intended to uniquely * identify the physical location of the device, not the current * occupant of that location. */ struct diocgattr_arg { char name[64]; int len; union { char str[DISK_IDENT_SIZE]; off_t off; int i; } value; }; #define DIOCGATTR _IOWR('d', 142, struct diocgattr_arg) + +#define DIOCZONECMD _IOWR('d', 143, struct disk_zone_args) #endif /* _SYS_DISK_H_ */ Index: head/sys/sys/disk_zone.h =================================================================== --- head/sys/sys/disk_zone.h (nonexistent) +++ head/sys/sys/disk_zone.h (revision 300207) @@ -0,0 +1,184 @@ +/*- + * Copyright (c) 2015 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + * Authors: Ken Merry (Spectra Logic Corporation) + * + * $FreeBSD$ + */ + +#ifndef _SYS_DISK_ZONE_H_ +#define _SYS_DISK_ZONE_H_ + +/* + * Interface for Zone-based disks. This allows managing devices that + * conform to the SCSI Zoned Block Commands (ZBC) and ATA Zoned ATA Command + * Set (ZAC) specifications. Devices using these command sets are + * currently (October 2015) hard drives using Shingled Magnetic Recording + * (SMR). + */ + +/* + * There are currently three types of zoned devices: + * + * Drive Managed: + * Drive Managed drives look and act just like a standard random access + * block device, but underneath, the drive reads and writes the bulk of + * its capacity using SMR zones. Sequential writes will yield better + * performance, but writing sequentially is not required. + * + * Host Aware: + * Host Aware drives expose the underlying zone layout via SCSI or ATA + * commands and allow the host to manage the zone conditions. The host + * is not required to manage the zones on the drive, though. Sequential + * writes will yield better performance in Sequential Write Preferred + * zones, but the host can write randomly in those zones. 
+ * + * Host Managed: + * Host Managed drives expose the underlying zone layout via SCSI or ATA + * commands. The host is required to access the zones according to the + * rules described by the zone layout. Any commands that violate the + * rules will be returned with an error. + */ +struct disk_zone_disk_params { + uint32_t zone_mode; +#define DISK_ZONE_MODE_NONE 0x00 +#define DISK_ZONE_MODE_HOST_AWARE 0x01 +#define DISK_ZONE_MODE_DRIVE_MANAGED 0x02 +#define DISK_ZONE_MODE_HOST_MANAGED 0x04 + uint64_t flags; +#define DISK_ZONE_DISK_URSWRZ 0x001 +#define DISK_ZONE_OPT_SEQ_SET 0x002 +#define DISK_ZONE_OPT_NONSEQ_SET 0x004 +#define DISK_ZONE_MAX_SEQ_SET 0x008 +#define DISK_ZONE_RZ_SUP 0x010 +#define DISK_ZONE_OPEN_SUP 0x020 +#define DISK_ZONE_CLOSE_SUP 0x040 +#define DISK_ZONE_FINISH_SUP 0x080 +#define DISK_ZONE_RWP_SUP 0x100 +#define DISK_ZONE_CMD_SUP_MASK 0x1f0 + uint64_t optimal_seq_zones; + uint64_t optimal_nonseq_zones; + uint64_t max_seq_zones; +}; + +/* + * Used for reset write pointer, open, close and finish. + */ +struct disk_zone_rwp { + uint64_t id; + uint8_t flags; +#define DISK_ZONE_RWP_FLAG_NONE 0x00 +#define DISK_ZONE_RWP_FLAG_ALL 0x01 +}; + +/* + * Report Zones header. All of these values are passed out. + */ +struct disk_zone_rep_header { + uint8_t same; +#define DISK_ZONE_SAME_ALL_DIFFERENT 0x0 /* Lengths and types vary */ +#define DISK_ZONE_SAME_ALL_SAME 0x1 /* Lengths and types the same */ +#define DISK_ZONE_SAME_LAST_DIFFERENT 0x2 /* Types same, last len varies */ +#define DISK_ZONE_SAME_TYPES_DIFFERENT 0x3 /* Types vary, length the same */ + uint64_t maximum_lba; + /* + * XXX KDM padding space may not be a good idea inside the bio. + */ + uint8_t reserved[64]; +}; + +/* + * Report Zones entry. Note that the zone types, conditions, and flags + * are mapped directly from the SCSI/ATA flag values. Any additional + * SCSI/ATA zone types or conditions or flags that are defined in the + * future could result in additional values that are not yet defined here. 
+ */ +struct disk_zone_rep_entry { + uint8_t zone_type; +#define DISK_ZONE_TYPE_CONVENTIONAL 0x01 +#define DISK_ZONE_TYPE_SEQ_REQUIRED 0x02 /* Host Managed */ +#define DISK_ZONE_TYPE_SEQ_PREFERRED 0x03 /* Host Aware */ + uint8_t zone_condition; +#define DISK_ZONE_COND_NOT_WP 0x00 +#define DISK_ZONE_COND_EMPTY 0x01 +#define DISK_ZONE_COND_IMPLICIT_OPEN 0x02 +#define DISK_ZONE_COND_EXPLICIT_OPEN 0x03 +#define DISK_ZONE_COND_CLOSED 0x04 +#define DISK_ZONE_COND_READONLY 0x0D +#define DISK_ZONE_COND_FULL 0x0E +#define DISK_ZONE_COND_OFFLINE 0x0F + uint8_t zone_flags; +#define DISK_ZONE_FLAG_RESET 0x01 /* Zone needs RWP */ +#define DISK_ZONE_FLAG_NON_SEQ 0x02 /* Zone accessed nonseq */ + uint64_t zone_length; + uint64_t zone_start_lba; + uint64_t write_pointer_lba; + /* XXX KDM padding space may not be a good idea inside the bio */ + uint8_t reserved[32]; +}; + +struct disk_zone_report { + uint64_t starting_id; /* Passed In */ + uint8_t rep_options; /* Passed In */ +#define DISK_ZONE_REP_ALL 0x00 +#define DISK_ZONE_REP_EMPTY 0x01 +#define DISK_ZONE_REP_IMP_OPEN 0x02 +#define DISK_ZONE_REP_EXP_OPEN 0x03 +#define DISK_ZONE_REP_CLOSED 0x04 +#define DISK_ZONE_REP_FULL 0x05 +#define DISK_ZONE_REP_READONLY 0x06 +#define DISK_ZONE_REP_OFFLINE 0x07 +#define DISK_ZONE_REP_RWP 0x10 +#define DISK_ZONE_REP_NON_SEQ 0x11 +#define DISK_ZONE_REP_NON_WP 0x3F + struct disk_zone_rep_header header; + uint32_t entries_allocated; /* Passed In */ + uint32_t entries_filled; /* Passed Out */ + uint32_t entries_available; /* Passed Out */ + struct disk_zone_rep_entry *entries; +}; + +union disk_zone_params { + struct disk_zone_disk_params disk_params; + struct disk_zone_rwp rwp; + struct disk_zone_report report; +}; + +struct disk_zone_args { + uint8_t zone_cmd; +#define DISK_ZONE_OPEN 0x00 +#define DISK_ZONE_CLOSE 0x01 +#define DISK_ZONE_FINISH 0x02 +#define DISK_ZONE_REPORT_ZONES 0x03 +#define DISK_ZONE_RWP 0x04 +#define DISK_ZONE_GET_PARAMS 0x05 + union disk_zone_params zone_params; +}; + +#endif /* _SYS_DISK_ZONE_H_ */ Property changes on: head/sys/sys/disk_zone.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/sys/sys/param.h =================================================================== --- head/sys/sys/param.h (revision 300206) +++ head/sys/sys/param.h (revision 300207) @@ -1,363 +1,363 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4.
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * head/en_US.ISO8859-1/books/porters-handbook/versions/chapter.xml * * scheme is: Rxx * 'R' is in the range 0 to 4 if this is a release branch or * x.0-CURRENT before RELENG_*_0 is created, otherwise 'R' is * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1100109 /* Master, propagated to newvers */ +#define __FreeBSD_version 1100110 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. * * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. However, be aware that as presence of this * macro is still not widespread (e.g. older FreeBSD versions, 3rd party * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in * external applications without also checking for __FreeBSD__ as an * alternative. */ #undef __FreeBSD_kernel__ #define __FreeBSD_kernel__ #ifdef _KERNEL #define P_OSREL_SIGWAIT 700000 #define P_OSREL_SIGSEGV 700004 #define P_OSREL_MAP_ANON 800104 #define P_OSREL_MAP_FSTRICT 1100036 #define P_OSREL_SHUTDOWN_ENOTCONN 1100077 #define P_OSREL_MAJOR(x) ((x) / 100000) #endif #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. * * MAXCOMLEN should be >= sizeof(ac_comm) (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP PATH_MAX /* max interpreter file name length */ #define MAXLOGNAME 33 /* max login name length (incl. 
NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS (NGROUPS_MAX+1) /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 63 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #ifndef LOCORE #include #include #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #endif #ifndef _KERNEL /* Signals. */ #include #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without effecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. * * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache. This must * be >= MAXBSIZE and can be set differently for different * architectures by defining it in . * Making this larger allows NFS to do larger reads/writes. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). * The default value here can be overridden on a per-architecture * basis by defining it in . This should * probably be done to increase its value, when MAXBCACHEBUF is * defined as a larger value in . * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. 
*/ #define MAXBSIZE 65536 /* must be power of 2 */ #ifndef MAXBCACHEBUF #define MAXBCACHEBUF MAXBSIZE /* must be a power of 2 >= MAXBSIZE */ #endif #ifndef BKVASIZE #define BKVASIZE 16384 /* must be power of 2 */ #endif #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define nitems(x) (sizeof((x)) / sizeof((x)[0])) #define rounddown(x, y) (((x)/(y))*(y)) #define rounddown2(x, y) ((x)&(~((y)-1))) /* if y is power of two */ #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef lint #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* lint */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). * * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) /* * Old spelling of __containerof(). */ #define member2struct(s, m, x) \ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m))) /* * Access a variable length array that has been declared as a fixed * length array. 
*/ #define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset]) #endif /* _SYS_PARAM_H_ */ Index: head/usr.sbin/Makefile =================================================================== --- head/usr.sbin/Makefile (revision 300206) +++ head/usr.sbin/Makefile (revision 300207) @@ -1,216 +1,217 @@ # From: @(#)Makefile 5.20 (Berkeley) 6/12/93 # $FreeBSD$ .include SUBDIR= adduser \ arp \ binmiscctl \ bsdconfig \ camdd \ cdcontrol \ chkgrp \ chown \ chroot \ ckdist \ clear_locks \ crashinfo \ cron \ ctladm \ ctld \ daemon \ dconschat \ devctl \ devinfo \ digictl \ diskinfo \ dumpcis \ extattr \ extattrctl \ fifolog \ fstyp \ fwcontrol \ getfmac \ getpmac \ gstat \ i2c \ ifmcstat \ iostat \ iovctl \ kldxref \ mailwrapper \ makefs \ memcontrol \ mergemaster \ mfiutil \ mixer \ mlxcontrol \ mountd \ mount_smbfs \ mpsutil \ mptutil \ mtest \ newsyslog \ nfscbd \ nfsd \ nfsdumpstate \ nfsrevoke \ nfsuserd \ nmtree \ nologin \ pciconf \ periodic \ powerd \ procctl \ pstat \ pw \ pwd_mkdb \ quot \ rarpd \ rmt \ rpcbind \ rpc.lockd \ rpc.statd \ rpc.umntall \ rtprio \ service \ services_mkdb \ sesutil \ setfib \ setfmac \ setpmac \ smbmsg \ snapinfo \ spray \ syslogd \ sysrc \ tcpdrop \ tcpdump \ traceroute \ trpt \ tzsetup \ uefisign \ ugidfw \ vigr \ vipw \ wake \ watch \ watchdogd \ - zic + zic \ + zonectl # NB: keep these sorted by MK_* knobs SUBDIR.${MK_ACCT}+= accton SUBDIR.${MK_ACCT}+= sa SUBDIR.${MK_AMD}+= amd SUBDIR.${MK_AUDIT}+= audit SUBDIR.${MK_AUDIT}+= auditd .if ${MK_OPENSSL} != "no" SUBDIR.${MK_AUDIT}+= auditdistd .endif SUBDIR.${MK_AUDIT}+= auditreduce SUBDIR.${MK_AUDIT}+= praudit SUBDIR.${MK_AUTHPF}+= authpf SUBDIR.${MK_AUTOFS}+= autofs SUBDIR.${MK_BLUETOOTH}+= bluetooth SUBDIR.${MK_BOOTPARAMD}+= bootparamd SUBDIR.${MK_BSDINSTALL}+= bsdinstall SUBDIR.${MK_BSNMP}+= bsnmpd SUBDIR.${MK_CTM}+= ctm SUBDIR.${MK_FLOPPY}+= fdcontrol SUBDIR.${MK_FLOPPY}+= fdformat SUBDIR.${MK_FLOPPY}+= fdread SUBDIR.${MK_FLOPPY}+= fdwrite SUBDIR.${MK_FMTREE}+= fmtree SUBDIR.${MK_FREEBSD_UPDATE}+= freebsd-update SUBDIR.${MK_GSSAPI}+= gssd SUBDIR.${MK_GPIO}+= gpioctl SUBDIR.${MK_INET6}+= ip6addrctl SUBDIR.${MK_INET6}+= mld6query SUBDIR.${MK_INET6}+= ndp SUBDIR.${MK_INET6}+= rip6query SUBDIR.${MK_INET6}+= route6d SUBDIR.${MK_INET6}+= rrenumd SUBDIR.${MK_INET6}+= rtadvctl SUBDIR.${MK_INET6}+= rtadvd SUBDIR.${MK_INET6}+= rtsold SUBDIR.${MK_INET6}+= traceroute6 SUBDIR.${MK_INETD}+= inetd SUBDIR.${MK_IPFW}+= ipfwpcap SUBDIR.${MK_ISCSI}+= iscsid SUBDIR.${MK_JAIL}+= jail SUBDIR.${MK_JAIL}+= jexec SUBDIR.${MK_JAIL}+= jls # XXX MK_SYSCONS SUBDIR.${MK_LEGACY_CONSOLE}+= kbdcontrol SUBDIR.${MK_LEGACY_CONSOLE}+= kbdmap SUBDIR.${MK_LEGACY_CONSOLE}+= moused SUBDIR.${MK_LEGACY_CONSOLE}+= vidcontrol .if ${MK_LIBTHR} != "no" || ${MK_LIBPTHREAD} != "no" SUBDIR.${MK_PPP}+= pppctl SUBDIR.${MK_NS_CACHING}+= nscd .endif SUBDIR.${MK_LPR}+= lpr SUBDIR.${MK_MAN_UTILS}+= manctl SUBDIR.${MK_NAND}+= nandsim SUBDIR.${MK_NAND}+= nandtool SUBDIR.${MK_NETGRAPH}+= flowctl SUBDIR.${MK_NETGRAPH}+= lmcconfig SUBDIR.${MK_NETGRAPH}+= ngctl SUBDIR.${MK_NETGRAPH}+= nghook SUBDIR.${MK_NIS}+= rpc.yppasswdd SUBDIR.${MK_NIS}+= rpc.ypupdated SUBDIR.${MK_NIS}+= rpc.ypxfrd SUBDIR.${MK_NIS}+= ypbind SUBDIR.${MK_NIS}+= ypldap SUBDIR.${MK_NIS}+= yp_mkdb SUBDIR.${MK_NIS}+= yppoll SUBDIR.${MK_NIS}+= yppush SUBDIR.${MK_NIS}+= ypserv SUBDIR.${MK_NIS}+= ypset SUBDIR.${MK_NTP}+= ntp SUBDIR.${MK_OPENSSL}+= keyserv SUBDIR.${MK_PC_SYSINSTALL}+= pc-sysinstall SUBDIR.${MK_PF}+= ftp-proxy SUBDIR.${MK_PKGBOOTSTRAP}+= pkg SUBDIR.${MK_PMC}+= pmcannotate 
SUBDIR.${MK_PMC}+= pmccontrol SUBDIR.${MK_PMC}+= pmcstat SUBDIR.${MK_PORTSNAP}+= portsnap SUBDIR.${MK_PPP}+= ppp SUBDIR.${MK_QUOTAS}+= edquota SUBDIR.${MK_QUOTAS}+= quotaon SUBDIR.${MK_QUOTAS}+= repquota SUBDIR.${MK_RCMDS}+= rwhod SUBDIR.${MK_RCS}+= etcupdate SUBDIR.${MK_SENDMAIL}+= editmap SUBDIR.${MK_SENDMAIL}+= mailstats SUBDIR.${MK_SENDMAIL}+= makemap SUBDIR.${MK_SENDMAIL}+= praliases SUBDIR.${MK_SENDMAIL}+= sendmail SUBDIR.${MK_TCP_WRAPPERS}+= tcpdchk SUBDIR.${MK_TCP_WRAPPERS}+= tcpdmatch SUBDIR.${MK_TIMED}+= timed SUBDIR.${MK_TOOLCHAIN}+= config SUBDIR.${MK_TOOLCHAIN}+= crunch SUBDIR.${MK_UNBOUND}+= unbound SUBDIR.${MK_USB}+= uathload SUBDIR.${MK_USB}+= uhsoctl SUBDIR.${MK_USB}+= usbconfig SUBDIR.${MK_USB}+= usbdump SUBDIR.${MK_UTMPX}+= ac SUBDIR.${MK_UTMPX}+= lastlogin SUBDIR.${MK_UTMPX}+= utx SUBDIR.${MK_WIRELESS}+= ancontrol SUBDIR.${MK_WIRELESS}+= wlandebug SUBDIR.${MK_WIRELESS}+= wpa SUBDIR.${MK_TESTS}+= tests .include SUBDIR:= ${SUBDIR:O} SUBDIR_PARALLEL= .include Index: head/usr.sbin/diskinfo/diskinfo.c =================================================================== --- head/usr.sbin/diskinfo/diskinfo.c (revision 300206) +++ head/usr.sbin/diskinfo/diskinfo.c (revision 300207) @@ -1,388 +1,434 @@ /*- * Copyright (c) 2003 Poul-Henning Kamp + * Copyright (c) 2015 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The names of the authors may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include static void usage(void) { fprintf(stderr, "usage: diskinfo [-ctv] disk ...\n"); exit (1); } static int opt_c, opt_t, opt_v; static void speeddisk(int fd, off_t mediasize, u_int sectorsize); static void commandtime(int fd, off_t mediasize, u_int sectorsize); +static int zonecheck(int fd, uint32_t *zone_mode, char *zone_str, + size_t zone_str_len); int main(int argc, char **argv) { int i, ch, fd, error, exitval = 0; char buf[BUFSIZ], ident[DISK_IDENT_SIZE], physpath[MAXPATHLEN]; + char zone_desc[64]; off_t mediasize, stripesize, stripeoffset; - u_int sectorsize, fwsectors, fwheads; + u_int sectorsize, fwsectors, fwheads, zoned = 0; + uint32_t zone_mode; while ((ch = getopt(argc, argv, "ctv")) != -1) { switch (ch) { case 'c': opt_c = 1; opt_v = 1; break; case 't': opt_t = 1; opt_v = 1; break; case 'v': opt_v = 1; break; default: usage(); } } argc -= optind; argv += optind; if (argc < 1) usage(); for (i = 0; i < argc; i++) { fd = open(argv[i], O_RDONLY); if (fd < 0 && errno == ENOENT && *argv[i] != '/') { sprintf(buf, "%s%s", _PATH_DEV, argv[i]); fd = open(buf, O_RDONLY); } if (fd < 0) { warn("%s", argv[i]); exitval = 1; goto out; } error = ioctl(fd, DIOCGMEDIASIZE, &mediasize); if (error) { warnx("%s: ioctl(DIOCGMEDIASIZE) failed, probably not a disk.", argv[i]); exitval = 1; goto out; } error = ioctl(fd, DIOCGSECTORSIZE, §orsize); if (error) { warnx("%s: ioctl(DIOCGSECTORSIZE) failed, probably not a disk.", argv[i]); exitval = 1; goto out; } error = ioctl(fd, DIOCGFWSECTORS, &fwsectors); if (error) fwsectors = 0; error = ioctl(fd, DIOCGFWHEADS, &fwheads); if (error) fwheads = 0; error = ioctl(fd, DIOCGSTRIPESIZE, &stripesize); if (error) stripesize = 0; error = ioctl(fd, DIOCGSTRIPEOFFSET, &stripeoffset); if (error) stripeoffset = 0; + error = zonecheck(fd, &zone_mode, zone_desc, sizeof(zone_desc)); + if (error == 0) + zoned = 1; if (!opt_v) { printf("%s", argv[i]); printf("\t%u", sectorsize); printf("\t%jd", (intmax_t)mediasize); printf("\t%jd", (intmax_t)mediasize/sectorsize); printf("\t%jd", (intmax_t)stripesize); printf("\t%jd", (intmax_t)stripeoffset); if (fwsectors != 0 && fwheads != 0) { printf("\t%jd", (intmax_t)mediasize / (fwsectors * fwheads * sectorsize)); printf("\t%u", fwheads); printf("\t%u", fwsectors); } } else { humanize_number(buf, 5, (int64_t)mediasize, "", HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL); printf("%s\n", argv[i]); printf("\t%-12u\t# sectorsize\n", sectorsize); printf("\t%-12jd\t# mediasize in bytes (%s)\n", (intmax_t)mediasize, buf); printf("\t%-12jd\t# mediasize in sectors\n", (intmax_t)mediasize/sectorsize); printf("\t%-12jd\t# stripesize\n", stripesize); printf("\t%-12jd\t# stripeoffset\n", stripeoffset); if (fwsectors != 0 && fwheads != 0) { printf("\t%-12jd\t# Cylinders according to firmware.\n", (intmax_t)mediasize / (fwsectors * fwheads * sectorsize)); printf("\t%-12u\t# Heads according to firmware.\n", fwheads); printf("\t%-12u\t# Sectors according to firmware.\n", fwsectors); } if (ioctl(fd, DIOCGIDENT, ident) == 0) printf("\t%-12s\t# Disk ident.\n", ident); if (ioctl(fd, DIOCGPHYSPATH, physpath) == 0) printf("\t%-12s\t# Physical path\n", physpath); + if (zoned != 0) + printf("\t%-12s\t# Zone Mode\n", zone_desc); } printf("\n"); if (opt_c) commandtime(fd, mediasize, sectorsize); if (opt_t) speeddisk(fd, mediasize, sectorsize); out: close(fd); } exit (exitval); } static char sector[65536]; static char mega[1024 * 
1024]; static void rdsect(int fd, off_t blockno, u_int sectorsize) { int error; lseek(fd, (off_t)blockno * sectorsize, SEEK_SET); error = read(fd, sector, sectorsize); if (error == -1) err(1, "read"); if (error != (int)sectorsize) errx(1, "disk too small for test."); } static void rdmega(int fd) { int error; error = read(fd, mega, sizeof(mega)); if (error == -1) err(1, "read"); if (error != sizeof(mega)) errx(1, "disk too small for test."); } static struct timeval tv1, tv2; static void T0(void) { fflush(stdout); sync(); sleep(1); sync(); sync(); gettimeofday(&tv1, NULL); } static void TN(int count) { double dt; gettimeofday(&tv2, NULL); dt = (tv2.tv_usec - tv1.tv_usec) / 1e6; dt += (tv2.tv_sec - tv1.tv_sec); printf("%5d iter in %10.6f sec = %8.3f msec\n", count, dt, dt * 1000.0 / count); } static void TR(double count) { double dt; gettimeofday(&tv2, NULL); dt = (tv2.tv_usec - tv1.tv_usec) / 1e6; dt += (tv2.tv_sec - tv1.tv_sec); printf("%8.0f kbytes in %10.6f sec = %8.0f kbytes/sec\n", count, dt, count / dt); } static void speeddisk(int fd, off_t mediasize, u_int sectorsize) { int bulk, i; off_t b0, b1, sectorcount, step; sectorcount = mediasize / sectorsize; step = 1ULL << (flsll(sectorcount / (4 * 200)) - 1); if (step > 16384) step = 16384; bulk = mediasize / (1024 * 1024); if (bulk > 100) bulk = 100; printf("Seek times:\n"); printf("\tFull stroke:\t"); b0 = 0; b1 = sectorcount - step; T0(); for (i = 0; i < 125; i++) { rdsect(fd, b0, sectorsize); b0 += step; rdsect(fd, b1, sectorsize); b1 -= step; } TN(250); printf("\tHalf stroke:\t"); b0 = sectorcount / 4; b1 = b0 + sectorcount / 2; T0(); for (i = 0; i < 125; i++) { rdsect(fd, b0, sectorsize); b0 += step; rdsect(fd, b1, sectorsize); b1 += step; } TN(250); printf("\tQuarter stroke:\t"); b0 = sectorcount / 4; b1 = b0 + sectorcount / 4; T0(); for (i = 0; i < 250; i++) { rdsect(fd, b0, sectorsize); b0 += step; rdsect(fd, b1, sectorsize); b1 += step; } TN(500); printf("\tShort forward:\t"); b0 = sectorcount / 2; T0(); for (i = 0; i < 400; i++) { rdsect(fd, b0, sectorsize); b0 += step; } TN(400); printf("\tShort backward:\t"); b0 = sectorcount / 2; T0(); for (i = 0; i < 400; i++) { rdsect(fd, b0, sectorsize); b0 -= step; } TN(400); printf("\tSeq outer:\t"); b0 = 0; T0(); for (i = 0; i < 2048; i++) { rdsect(fd, b0, sectorsize); b0++; } TN(2048); printf("\tSeq inner:\t"); b0 = sectorcount - 2048; T0(); for (i = 0; i < 2048; i++) { rdsect(fd, b0, sectorsize); b0++; } TN(2048); printf("Transfer rates:\n"); printf("\toutside: "); rdsect(fd, 0, sectorsize); T0(); for (i = 0; i < bulk; i++) { rdmega(fd); } TR(bulk * 1024); printf("\tmiddle: "); b0 = sectorcount / 2 - bulk * (1024*1024 / sectorsize) / 2 - 1; rdsect(fd, b0, sectorsize); T0(); for (i = 0; i < bulk; i++) { rdmega(fd); } TR(bulk * 1024); printf("\tinside: "); b0 = sectorcount - bulk * (1024*1024 / sectorsize) - 1; rdsect(fd, b0, sectorsize); T0(); for (i = 0; i < bulk; i++) { rdmega(fd); } TR(bulk * 1024); printf("\n"); return; } static void commandtime(int fd, off_t mediasize, u_int sectorsize) { double dtmega, dtsector; int i; printf("I/O command overhead:\n"); i = mediasize; rdsect(fd, 0, sectorsize); T0(); for (i = 0; i < 10; i++) rdmega(fd); gettimeofday(&tv2, NULL); dtmega = (tv2.tv_usec - tv1.tv_usec) / 1e6; dtmega += (tv2.tv_sec - tv1.tv_sec); printf("\ttime to read 10MB block %10.6f sec\t= %8.3f msec/sector\n", dtmega, dtmega*100/2048); rdsect(fd, 0, sectorsize); T0(); for (i = 0; i < 20480; i++) rdsect(fd, 0, sectorsize); gettimeofday(&tv2, NULL); dtsector = (tv2.tv_usec - 
tv1.tv_usec) / 1e6; dtsector += (tv2.tv_sec - tv1.tv_sec); printf("\ttime to read 20480 sectors %10.6f sec\t= %8.3f msec/sector\n", dtsector, dtsector*100/2048); printf("\tcalculated command overhead\t\t\t= %8.3f msec/sector\n", (dtsector - dtmega)*100/2048); printf("\n"); return; +} + +static int +zonecheck(int fd, uint32_t *zone_mode, char *zone_str, size_t zone_str_len) +{ + struct disk_zone_args zone_args; + int error; + + bzero(&zone_args, sizeof(zone_args)); + + zone_args.zone_cmd = DISK_ZONE_GET_PARAMS; + error = ioctl(fd, DIOCZONECMD, &zone_args); + + if (error == 0) { + *zone_mode = zone_args.zone_params.disk_params.zone_mode; + + switch (*zone_mode) { + case DISK_ZONE_MODE_NONE: + snprintf(zone_str, zone_str_len, "Not_Zoned"); + break; + case DISK_ZONE_MODE_HOST_AWARE: + snprintf(zone_str, zone_str_len, "Host_Aware"); + break; + case DISK_ZONE_MODE_DRIVE_MANAGED: + snprintf(zone_str, zone_str_len, "Drive_Managed"); + break; + case DISK_ZONE_MODE_HOST_MANAGED: + snprintf(zone_str, zone_str_len, "Host_Managed"); + break; + default: + snprintf(zone_str, zone_str_len, "Unknown_zone_mode_%u", + *zone_mode); + break; + } + } + return (error); } Index: head/usr.sbin/zonectl/Makefile =================================================================== --- head/usr.sbin/zonectl/Makefile (nonexistent) +++ head/usr.sbin/zonectl/Makefile (revision 300207) @@ -0,0 +1,10 @@ +# $FreeBSD$ + +PROG= zonectl +SRCS= zonectl.c +SDIR= ${.CURDIR}/../../sys +LIBADD= cam sbuf util +MAN= zonectl.8 +CFLAGS+=-g -O0 + +.include Property changes on: head/usr.sbin/zonectl/Makefile ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/usr.sbin/zonectl/zonectl.8 =================================================================== --- head/usr.sbin/zonectl/zonectl.8 (nonexistent) +++ head/usr.sbin/zonectl/zonectl.8 (revision 300207) @@ -0,0 +1,236 @@ +.\" +.\" Copyright (c) 2015 Spectra Logic Corporation +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions, and the following disclaimer, +.\" without modification. +.\" 2. Redistributions in binary form must reproduce at minimum a disclaimer +.\" substantially similar to the "NO WARRANTY" disclaimer below +.\" ("Disclaimer") and any redistribution must be conditioned upon +.\" including a substantially similar Disclaimer requirement for further +.\" binary redistribution. +.\" +.\" NO WARRANTY +.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +.\" "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +.\" LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR +.\" A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +.\" HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +.\" STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +.\" POSSIBILITY OF SUCH DAMAGES. +.\" +.\" Authors: Ken Merry (Spectra Logic Corporation) +.\" +.\" $FreeBSD$ +.\" +.Dd May 18, 2016 +.Dt ZONECTL 8 +.Os +.Sh NAME +.Nm zonectl +.Nd Shingled Magnetic Recording Zone Control utility +.Sh SYNOPSIS +.Nm +.Aq Fl d Ar dev +.Aq Fl c Ar cmd +.Op Fl a +.Op Fl l Ar LBA +.Op Fl o Ar rep_opts +.Op Fl P Ar print_opts +.Sh DESCRIPTION +The +.Nm +utility manages +.Tn SCSI +and +.Tn ATA +Zoned Block devices, that is, devices that conform to the +.Tn SCSI +Zoned Block Commands (ZBC) and +.Tn ATA +Zoned ATA Command Set (ZAC) +specifications. +Devices using these command sets are usually hard drives using Shingled +Magnetic Recording (SMR). +There are three types of SMR drives: +.Bl -tag -width 13n +.It Drive Managed +Drive Managed drives look and act just like a standard random access block +device, but underneath, the drive reads and writes the bulk of its capacity +using SMR zones. +Sequential writes will yield better performance, but writing sequentially +is not required. +.It Host Aware +Host Aware drives expose the underlying zone layout via +.Tn SCSI +or +.Tn ATA +commands and allow the host to manage the zone conditions. +However, the host is not required to manage the zones on the drive. +Sequential writes will yield better performance in Sequential Write +Preferred zones, but the host can write randomly in those zones. +.It Host Managed +Host Managed drives expose the underlying zone layout via +.Tn SCSI +or +.Tn ATA +commands. +The host is required to access the zones according to the rules described +by the zone layout. +Commands that violate the rules will fail with an error. +.El +.Pp +SMR drives are divided into zones (typically 256MB each) +that fall into three general categories: +.Bl -tag -width 20n +.It Conventional +These are also known as Non Write Pointer zones. +These zones can be randomly written without an unexpected performance penalty. +.It Sequential Preferred +These zones should be written sequentially starting at the write pointer +for the zone. +They may be written randomly. +Writes that do not conform to the zone layout may be significantly slower +than expected. +.It Sequential Required +These zones must be written sequentially. +If they are not written sequentially, starting at the write pointer, the +command will fail. +.El +.Pp +The options are as follows: +.Bl -tag -width 12n +.It Fl c Ar cmd +Specify the zone subcommand: +.Bl -tag -width 6n +.It params +Display device parameters, including the type of device (Drive Managed, +Host Aware, Host Managed, Not Zoned), the zone commands supported, and +how many open zones it supports. +.It rz +Issue the Report Zones command. +All zones are returned by default. +Specify report options with +.Fl o +and printing options with +.Fl P . +Specify the starting LBA with +.Fl l . +Note that +.Dq reportzones +is also accepted as a command argument. +.It open +Explicitly open the zone specified by the starting LBA. +.It close +Close the zone specified by the starting LBA.
+.It finish +Finish the zone specified by the starting LBA. +.It rwp +Reset the write pointer for the zone specified by the starting LBA. +.El +.It Fl a +For the Open, Close, Finish, and Reset Write Pointer operations, apply the +operation to all zones on the drive. +.It Fl l Ar lba +Specify the starting LBA. +For the Report Zones command, this tells the drive to report starting with +the zone that starts at the given LBA. +For the other commands, this allows the user to identify the zone requested +by its starting LBA. +The LBA may be specified in decimal, hexadecimal, or octal notation. +.It Fl o Ar rep_opt +For the Report Zones command, specify a subset of zones to report. +.Bl -tag -width 8n +.It all +Report all zones. +This is the default. +.It empty +Report only empty zones. +.It imp_open +Report zones that are implicitly open. +This means that the host has sent a write to the zone without explicitly +opening the zone. +.It exp_open +Report zones that are explicitly open. +.It closed +Report zones that have been closed by the host. +.It full +Report zones that are full. +.It ro +Report zones that are in the read-only state. +Note that +.Dq readonly +is also accepted as an argument. +.It offline +Report zones that are in the offline state. +.It reset +Report zones that the device recommends should have their write pointers reset. +.It nonseq +Report zones that have the Non Sequential Resources Active flag set. +These are zones that are Sequential Write Preferred, but have been written +non-sequentially. +.It nonwp +Report Non Write Pointer zones, also known as Conventional zones. +.El +.It Fl P Ar print_opt +Specify a printing option for Report Zones: +.Bl -tag -width 7n +.It normal +Normal Report Zones output. +This is the default. +The summary and column headings are printed, fields are separated by spaces, +and the fields themselves may contain spaces. +.It summary +Print only the summary: the number of zones, the maximum LBA (the LBA of the +last logical block on the drive), and the value of the +.Dq same +field. +The +.Dq same +field describes whether the zones on the drive are all identical, all +different, or the same except for the last zone, and so on. +.It script +Print the zones in a script-friendly format. +The summary and column headings are omitted, the fields are separated by +commas, and the fields do not contain spaces. +The fields contain underscores where spaces would normally be used. +.El +.El +.Sh EXAMPLES +.Bd -literal -offset indent +zonectl -d /dev/da5 -c params +.Ed +.Pp +Display basic zoning information for disk da5. +.Pp +.Bd -literal -offset indent +zonectl -d /dev/da5 -c rz +.Ed +.Pp +Issue the Report Zones command to disk da5, and print out all +zones on the drive in the default format. +.Pp +.Bd -literal -offset indent +zonectl -d /dev/da5 -c rz -o reset -P script +.Ed +.Pp +Issue the Report Zones command to disk da5, and print out all +of the zones that have the Reset Write Pointer Recommended bit set to true. +Print the zones in a script-friendly form. +.Pp +.Bd -literal -offset indent +zonectl -d /dev/da5 -c rwp -l 0x2c80000 +.Ed +.Pp +Issue the Reset Write Pointer command to disk da5 for the zone +that starts at LBA 0x2c80000.
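+.\" The following example is an editor's addition, not part of the original
+.\" page; it exercises the documented "open" subcommand against the same
+.\" hypothetical disk and zone start LBA used in the examples above.
+.Pp
+.Bd -literal -offset indent
+zonectl -d /dev/da5 -c open -l 0x2c80000
+.Ed
+.Pp
+Explicitly open the zone on disk da5 that starts at LBA 0x2c80000.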
+.Sh AUTHORS +.An Kenneth Merry Aq ken@FreeBSD.org Property changes on: head/usr.sbin/zonectl/zonectl.8 ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: head/usr.sbin/zonectl/zonectl.c =================================================================== --- head/usr.sbin/zonectl/zonectl.c (nonexistent) +++ head/usr.sbin/zonectl/zonectl.c (revision 300207) @@ -0,0 +1,591 @@ +/*- + * Copyright (c) 2015, 2016 Spectra Logic Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES.
+ * + * Authors: Ken Merry (Spectra Logic Corporation) + */ + +#include <sys/cdefs.h> +__FBSDID("$FreeBSD$"); + +#include <sys/ioctl.h> +#include <sys/stdint.h> +#include <sys/types.h> +#include <sys/disk.h> +#include <sys/disk_zone.h> +#include <stdio.h> +#include <stdlib.h> +#include <inttypes.h> +#include <unistd.h> +#include <string.h> +#include <strings.h> +#include <fcntl.h> +#include <err.h> + +#include <cam/cam.h> +#include <cam/cam_ccb.h> +#include <cam/scsi/scsi_all.h> +#include <camlib.h> + +static struct scsi_nv zone_cmd_map[] = { + { "rz", DISK_ZONE_REPORT_ZONES }, + { "reportzones", DISK_ZONE_REPORT_ZONES }, + { "close", DISK_ZONE_CLOSE }, + { "finish", DISK_ZONE_FINISH }, + { "open", DISK_ZONE_OPEN }, + { "rwp", DISK_ZONE_RWP }, + { "params", DISK_ZONE_GET_PARAMS } +}; + +static struct scsi_nv zone_rep_opts[] = { + { "all", DISK_ZONE_REP_ALL }, + { "empty", DISK_ZONE_REP_EMPTY }, + { "imp_open", DISK_ZONE_REP_IMP_OPEN }, + { "exp_open", DISK_ZONE_REP_EXP_OPEN }, + { "closed", DISK_ZONE_REP_CLOSED }, + { "full", DISK_ZONE_REP_FULL }, + { "readonly", DISK_ZONE_REP_READONLY }, + { "ro", DISK_ZONE_REP_READONLY }, + { "offline", DISK_ZONE_REP_OFFLINE }, + { "reset", DISK_ZONE_REP_RWP }, + { "rwp", DISK_ZONE_REP_RWP }, + { "nonseq", DISK_ZONE_REP_NON_SEQ }, + { "nonwp", DISK_ZONE_REP_NON_WP } +}; + + +typedef enum { + ZONE_OF_NORMAL = 0x00, + ZONE_OF_SUMMARY = 0x01, + ZONE_OF_SCRIPT = 0x02 +} zone_output_flags; + +static struct scsi_nv zone_print_opts[] = { + { "normal", ZONE_OF_NORMAL }, + { "summary", ZONE_OF_SUMMARY }, + { "script", ZONE_OF_SCRIPT } +}; + +static struct scsi_nv zone_cmd_desc_table[] = { + {"Report Zones", DISK_ZONE_RZ_SUP }, + {"Open", DISK_ZONE_OPEN_SUP }, + {"Close", DISK_ZONE_CLOSE_SUP }, + {"Finish", DISK_ZONE_FINISH_SUP }, + {"Reset Write Pointer", DISK_ZONE_RWP_SUP } +}; + +typedef enum { + ZONE_PRINT_OK, + ZONE_PRINT_MORE_DATA, + ZONE_PRINT_ERROR +} zone_print_status; + +typedef enum { + ZONE_FW_START, + ZONE_FW_LEN, + ZONE_FW_WP, + ZONE_FW_TYPE, + ZONE_FW_COND, + ZONE_FW_SEQ, + ZONE_FW_RESET, + ZONE_NUM_FIELDS +} zone_field_widths; + + +static void usage(int error); +static void zonectl_print_params(struct disk_zone_disk_params *params); +zone_print_status zonectl_print_rz(struct disk_zone_report *report, + zone_output_flags out_flags, int first_pass); + +static void +usage(int error) +{ + fprintf(error ? stderr : stdout, +"usage: zonectl <-d dev> <-c cmd> [-a] [-o rep_opts] [-l lba] [-P print_opts]\n" + ); +} + +static void +zonectl_print_params(struct disk_zone_disk_params *params) +{ + unsigned int i; + int first; + + printf("Zone Mode: "); + switch (params->zone_mode) { + case DISK_ZONE_MODE_NONE: + printf("None"); + break; + case DISK_ZONE_MODE_HOST_AWARE: + printf("Host Aware"); + break; + case DISK_ZONE_MODE_DRIVE_MANAGED: + printf("Drive Managed"); + break; + case DISK_ZONE_MODE_HOST_MANAGED: + printf("Host Managed"); + break; + default: + printf("Unknown mode %#x", params->zone_mode); + break; + } + printf("\n"); + + first = 1; + printf("Command support: "); + for (i = 0; i < sizeof(zone_cmd_desc_table) / + sizeof(zone_cmd_desc_table[0]); i++) { + if (params->flags & zone_cmd_desc_table[i].value) { + if (first == 0) + printf(", "); + else + first = 0; + printf("%s", zone_cmd_desc_table[i].name); + } + } + if (first == 1) + printf("None"); + printf("\n"); + + printf("Unrestricted Read in Sequential Write Required Zone " + "(URSWRZ): %s\n", (params->flags & DISK_ZONE_DISK_URSWRZ) ?
+ "Yes" : "No"); + + printf("Optimal Number of Open Sequential Write Preferred Zones: "); + if (params->flags & DISK_ZONE_OPT_SEQ_SET) + if (params->optimal_seq_zones == SVPD_ZBDC_OPT_SEQ_NR) + printf("Not Reported"); + else + printf("%ju", (uintmax_t)params->optimal_seq_zones); + else + printf("Not Set"); + printf("\n"); + + + printf("Optimal Number of Non-Sequentially Written Sequential Write " + "Preferred Zones: "); + if (params->flags & DISK_ZONE_OPT_NONSEQ_SET) + if (params->optimal_nonseq_zones == SVPD_ZBDC_OPT_NONSEQ_NR) + printf("Not Reported"); + else + printf("%ju",(uintmax_t)params->optimal_nonseq_zones); + else + printf("Not Set"); + printf("\n"); + + printf("Maximum Number of Open Sequential Write Required Zones: "); + if (params->flags & DISK_ZONE_MAX_SEQ_SET) + if (params->max_seq_zones == SVPD_ZBDC_MAX_SEQ_UNLIMITED) + printf("Unlimited"); + else + printf("%ju", (uintmax_t)params->max_seq_zones); + else + printf("Not Set"); + printf("\n"); +} + +zone_print_status +zonectl_print_rz(struct disk_zone_report *report, zone_output_flags out_flags, + int first_pass) +{ + zone_print_status status = ZONE_PRINT_OK; + struct disk_zone_rep_header *header = &report->header; + int field_widths[ZONE_NUM_FIELDS]; + struct disk_zone_rep_entry *entry; + uint64_t next_lba = 0; + char tmpstr[80]; + char word_sep; + int more_data = 0; + uint32_t i; + + field_widths[ZONE_FW_START] = 11; + field_widths[ZONE_FW_LEN] = 6; + field_widths[ZONE_FW_WP] = 11; + field_widths[ZONE_FW_TYPE] = 13; + field_widths[ZONE_FW_COND] = 13; + field_widths[ZONE_FW_SEQ] = 14; + field_widths[ZONE_FW_RESET] = 16; + + if ((report->entries_available - report->entries_filled) > 0) { + more_data = 1; + status = ZONE_PRINT_MORE_DATA; + } + + if (out_flags == ZONE_OF_SCRIPT) + word_sep = '_'; + else + word_sep = ' '; + + if ((out_flags != ZONE_OF_SCRIPT) + && (first_pass != 0)) { + printf("%u zones, Maximum LBA %#jx (%ju)\n", + report->entries_available, + (uintmax_t)header->maximum_lba, + (uintmax_t)header->maximum_lba); + + switch (header->same) { + case DISK_ZONE_SAME_ALL_DIFFERENT: + printf("Zone lengths and types may vary\n"); + break; + case DISK_ZONE_SAME_ALL_SAME: + printf("Zone lengths and types are all the same\n"); + break; + case DISK_ZONE_SAME_LAST_DIFFERENT: + printf("Zone types are the same, last zone length " + "differs\n"); + break; + case DISK_ZONE_SAME_TYPES_DIFFERENT: + printf("Zone lengths are the same, types vary\n"); + break; + default: + printf("Unknown SAME field value %#x\n",header->same); + break; + } + } + if (out_flags == ZONE_OF_SUMMARY) { + status = ZONE_PRINT_OK; + goto bailout; + } + + if ((out_flags == ZONE_OF_NORMAL) + && (first_pass != 0)) { + printf("%*s %*s %*s %*s %*s %*s %*s\n", + field_widths[ZONE_FW_START], "Start LBA", + field_widths[ZONE_FW_LEN], "Length", + field_widths[ZONE_FW_WP], "WP LBA", + field_widths[ZONE_FW_TYPE], "Zone Type", + field_widths[ZONE_FW_COND], "Condition", + field_widths[ZONE_FW_SEQ], "Sequential", + field_widths[ZONE_FW_RESET], "Reset"); + } + + for (i = 0; i < report->entries_filled; i++) { + entry = &report->entries[i]; + + printf("%#*jx, %*ju, %#*jx, ", field_widths[ZONE_FW_START], + (uintmax_t)entry->zone_start_lba, + field_widths[ZONE_FW_LEN], + (uintmax_t)entry->zone_length, field_widths[ZONE_FW_WP], + (uintmax_t)entry->write_pointer_lba); + + switch (entry->zone_type) { + case DISK_ZONE_TYPE_CONVENTIONAL: + snprintf(tmpstr, sizeof(tmpstr), "Conventional"); + break; + case DISK_ZONE_TYPE_SEQ_PREFERRED: + case DISK_ZONE_TYPE_SEQ_REQUIRED: + 
snprintf(tmpstr, sizeof(tmpstr), "Seq%c%s", + word_sep, (entry->zone_type == + DISK_ZONE_TYPE_SEQ_PREFERRED) ? "Preferred" : + "Required"); + break; + default: + snprintf(tmpstr, sizeof(tmpstr), "Zone%ctype%c%#x", + word_sep, word_sep, entry->zone_type); + break; + } + printf("%*s, ", field_widths[ZONE_FW_TYPE], tmpstr); + + switch (entry->zone_condition) { + case DISK_ZONE_COND_NOT_WP: + snprintf(tmpstr, sizeof(tmpstr), "NWP"); + break; + case DISK_ZONE_COND_EMPTY: + snprintf(tmpstr, sizeof(tmpstr), "Empty"); + break; + case DISK_ZONE_COND_IMPLICIT_OPEN: + snprintf(tmpstr, sizeof(tmpstr), "Implicit%cOpen", + word_sep); + break; + case DISK_ZONE_COND_EXPLICIT_OPEN: + snprintf(tmpstr, sizeof(tmpstr), "Explicit%cOpen", + word_sep); + break; + case DISK_ZONE_COND_CLOSED: + snprintf(tmpstr, sizeof(tmpstr), "Closed"); + break; + case DISK_ZONE_COND_READONLY: + snprintf(tmpstr, sizeof(tmpstr), "Readonly"); + break; + case DISK_ZONE_COND_FULL: + snprintf(tmpstr, sizeof(tmpstr), "Full"); + break; + case DISK_ZONE_COND_OFFLINE: + snprintf(tmpstr, sizeof(tmpstr), "Offline"); + break; + default: + snprintf(tmpstr, sizeof(tmpstr), "%#x", + entry->zone_condition); + break; + } + + printf("%*s, ", field_widths[ZONE_FW_COND], tmpstr); + + if (entry->zone_flags & DISK_ZONE_FLAG_NON_SEQ) + snprintf(tmpstr, sizeof(tmpstr), "Non%cSequential", + word_sep); + else + snprintf(tmpstr, sizeof(tmpstr), "Sequential"); + + printf("%*s, ", field_widths[ZONE_FW_SEQ], tmpstr); + + if (entry->zone_flags & DISK_ZONE_FLAG_RESET) + snprintf(tmpstr, sizeof(tmpstr), "Reset%cNeeded", + word_sep); + else + snprintf(tmpstr, sizeof(tmpstr), "No%cReset%cNeeded", + word_sep, word_sep); + + printf("%*s\n", field_widths[ZONE_FW_RESET], tmpstr); + + next_lba = entry->zone_start_lba + entry->zone_length; + } +bailout: + report->starting_id = next_lba; + + return (status); +} + +int +main(int argc, char **argv) +{ + int c; + int all_zones = 0; + int error = 0; + int action = -1, rep_option = -1; + int fd = -1; + uint64_t lba = 0; + zone_output_flags out_flags = ZONE_OF_NORMAL; + char *filename = NULL; + struct disk_zone_args zone_args; + struct disk_zone_rep_entry *entries = NULL; + uint32_t num_entries = 16384; + zone_print_status zp_status; + int first_pass = 1; + size_t entry_alloc_size; + int open_flags = O_RDONLY; + + while ((c = getopt(argc, argv, "ac:d:hl:o:P:?")) != -1) { + switch (c) { + case 'a': + all_zones = 1; + break; + case 'c': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_cmd_map, + (sizeof(zone_cmd_map) / sizeof(zone_cmd_map[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + action = zone_cmd_map[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? 
+ "ambiguous" : "invalid", "zone command", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'd': + filename = strdup(optarg); + if (filename == NULL) + err(1, "Unable to allocate memory for " + "filename"); + break; + case 'l': { + char *endptr; + + lba = strtoull(optarg, &endptr, 0); + if (*endptr != '\0') { + warnx("%s: invalid lba argument %s", __func__, + optarg); + error = 1; + goto bailout; + } + break; + } + case 'o': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_rep_opts, + (sizeof(zone_rep_opts) / + sizeof(zone_rep_opts[0])), + optarg, &entry_num, SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + rep_option = zone_rep_opts[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "report zones", + optarg); + error = 1; + goto bailout; + } + break; + } + case 'P': { + scsi_nv_status status; + int entry_num; + + status = scsi_get_nv(zone_print_opts, + (sizeof(zone_print_opts) / + sizeof(zone_print_opts[0])), optarg, &entry_num, + SCSI_NV_FLAG_IG_CASE); + if (status == SCSI_NV_FOUND) + out_flags = zone_print_opts[entry_num].value; + else { + warnx("%s: %s: %s option %s", __func__, + (status == SCSI_NV_AMBIGUOUS) ? + "ambiguous" : "invalid", "print", + optarg); + error = 1; + goto bailout; + } + break; + } + default: + error = 1; + case 'h': /*FALLTHROUGH*/ + usage(error); + goto bailout; + break; /*NOTREACHED*/ + } + } + + if (filename == NULL) { + warnx("You must specify a device with -d"); + error = 1; + } + if (action == -1) { + warnx("You must specify an action with -c"); + error = 1; + } + + if (error != 0) { + usage(error); + goto bailout; + } + + bzero(&zone_args, sizeof(zone_args)); + + zone_args.zone_cmd = action; + + switch (action) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: + open_flags = O_RDWR; + zone_args.zone_params.rwp.id = lba; + if (all_zones != 0) + zone_args.zone_params.rwp.flags |= + DISK_ZONE_RWP_FLAG_ALL; + break; + case DISK_ZONE_REPORT_ZONES: { + entry_alloc_size = num_entries * + sizeof(struct disk_zone_rep_entry); + entries = malloc(entry_alloc_size); + if (entries == NULL) { + warn("Could not allocate %zu bytes", + entry_alloc_size); + error = 1; + goto bailout; + } + zone_args.zone_params.report.entries_allocated = num_entries; + zone_args.zone_params.report.entries = entries; + zone_args.zone_params.report.starting_id = lba; + if (rep_option != -1) + zone_args.zone_params.report.rep_options = rep_option; + break; + } + case DISK_ZONE_GET_PARAMS: + break; + default: + warnx("Unknown action %d", action); + error = 1; + goto bailout; + break; /*NOTREACHED*/ + } + + fd = open(filename, open_flags); + if (fd == -1) { + warn("Unable to open device %s", filename); + error = 1; + goto bailout; + } +next_chunk: + error = ioctl(fd, DIOCZONECMD, &zone_args); + if (error == -1) { + warn("DIOCZONECMD ioctl failed"); + error = 1; + goto bailout; + } + + switch (action) { + case DISK_ZONE_OPEN: + case DISK_ZONE_CLOSE: + case DISK_ZONE_FINISH: + case DISK_ZONE_RWP: + break; + case DISK_ZONE_REPORT_ZONES: + zp_status = zonectl_print_rz(&zone_args.zone_params.report, + out_flags, first_pass); + if (zp_status == ZONE_PRINT_MORE_DATA) { + first_pass = 0; + bzero(entries, entry_alloc_size); + zone_args.zone_params.report.entries_filled = 0; + goto next_chunk; + } else if (zp_status == ZONE_PRINT_ERROR) + error = 1; + break; + case DISK_ZONE_GET_PARAMS: + 
zonectl_print_params(&zone_args.zone_params.disk_params); + break; + default: + warnx("Unknown action %d", action); + error = 1; + goto bailout; + break; /*NOTREACHED*/ + } +bailout: + free(entries); + + if (fd != -1) + close(fd); + exit (error); +} Property changes on: head/usr.sbin/zonectl/zonectl.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property
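For reference, the following is a minimal, self-contained sketch (an editor's addition, not part of this commit) of how a userland program can consume the DIOCZONECMD interface exercised above: it issues DISK_ZONE_GET_PARAMS and prints the drive's zone mode, mirroring the zonecheck() helper added to diskinfo and the params path in zonectl. The <sys/disk_zone.h> header name is an assumption inferred from the disk_zone_args structures used in this diff; the ioctl, command constant, and structure fields all appear verbatim in the code above.

#include <sys/ioctl.h>
#include <sys/disk.h>
#include <sys/disk_zone.h>	/* assumed home of struct disk_zone_args */
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <strings.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	struct disk_zone_args zone_args;
	int fd;

	if (argc != 2)
		errx(1, "usage: zonemode <device>");

	fd = open(argv[1], O_RDONLY);
	if (fd == -1)
		err(1, "open %s", argv[1]);

	/* Ask the disk driver for its zoning parameters. */
	bzero(&zone_args, sizeof(zone_args));
	zone_args.zone_cmd = DISK_ZONE_GET_PARAMS;
	if (ioctl(fd, DIOCZONECMD, &zone_args) == -1)
		err(1, "DIOCZONECMD");

	/* Same classification the new diskinfo zonecheck() performs. */
	switch (zone_args.zone_params.disk_params.zone_mode) {
	case DISK_ZONE_MODE_NONE:
		printf("Not Zoned\n");
		break;
	case DISK_ZONE_MODE_HOST_AWARE:
		printf("Host Aware\n");
		break;
	case DISK_ZONE_MODE_DRIVE_MANAGED:
		printf("Drive Managed\n");
		break;
	case DISK_ZONE_MODE_HOST_MANAGED:
		printf("Host Managed\n");
		break;
	default:
		printf("Unknown zone mode\n");
		break;
	}

	close(fd);
	return (0);
}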