diff --git a/share/man/man4/smartpqi.4 b/share/man/man4/smartpqi.4 index fbe435ca3a7f..3e61ba85cc1a 100644 --- a/share/man/man4/smartpqi.4 +++ b/share/man/man4/smartpqi.4 @@ -1,103 +1,96 @@ -.\" Copyright (c) 2018 Murthy Bhat -.\" All rights reserved. +.\" Copyright (C) 2019-2023, Microchip Technology Inc. and its subsidiaries +.\" Copyright (C) 2016-2018, Microsemi Corporation +.\" Copyright (C) 2016, PMC-Sierra, Inc. +.\" Written by John Hall .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.\" $FreeBSD$ stable/10/share/man/man4/smartpqi.4 195614 2017-01-11 08:10:18Z jkim $ -.Dd April 6, 2018 +.\" $Id$ +.Dd $Mdocdate$ .Dt SMARTPQI 4 .Os .Sh NAME .Nm smartpqi -.Nd Microsemi smartpqi SCSI driver for PQI controllers +.Nd "Microchip Smart Storage SCSI driver" .Sh SYNOPSIS -To compile this driver into the kernel, -place the following lines in your -kernel configuration file: +To compile this driver into the kernel, place these lines in the kernel +configuration file: .Bd -ragged -offset indent .Cd device pci .Cd device scbus .Cd device smartpqi .Ed .Pp -Alternatively, to load the driver as a -module at boot time, place the following line in +The driver can be loaded as a module at boot time by placing this line in .Xr loader.conf 5 : .Bd -literal -offset indent smartpqi_load="YES" .Ed .Sh DESCRIPTION The .Nm -SCSI driver provides support for the new generation of PQI controllers from -Microsemi. -The -.Nm -driver is the first SCSI driver to implement the PQI queuing model. -.Pp -The -.Nm -driver will replace the aacraid driver for Adaptec Series 9 controllers. -.Pp -The -.Pa /dev/smartpqi? -device nodes provide access to the management interface of the controller. -One node exists per installed card. +driver provides support for Microchip Technology Inc. / Adaptec SmartRaid and +SmartHBA SATA/SAS/NVME PCIe controllers .Sh HARDWARE Controllers supported by the .Nm -driver include: +driver include, but not limited to: .Pp .Bl -bullet -compact .It HPE Gen10 Smart Array Controller Family .It -OEM Controllers based on the Microsemi Chipset +Adaptec SmartRaid and SmartHBA Controllers +.It +OEM Controllers based on the Microchip Technology Inc. 
SmartROC +and SmartIOC Chipsets .El .Sh FILES -.Bl -tag -width /boot/kernel/aac.ko -compact +.Bl -tag -width /boot/kernel/smartpqi.ko -compact .It Pa /dev/smartpqi? smartpqi management interface .El +.Sh NOTES +.Ss Configuration +To configure a Microchip Smart Storage controller, +refer to the User Guide for the controller, +which can be found by searching for the specific controller at +https://www.microchip.com/design-centers/storage .Sh SEE ALSO .Xr kld 4 , .Xr linux 4 , .Xr pass 4 , .Xr scsi 4 , .Xr xpt 4 , .Xr loader.conf 5 , .Xr camcontrol 8 , .Xr kldload 8 -.Rs -.%T "Microsemi Website" -.%U https://www.microsemi.com/ -.Re .Sh HISTORY The .Nm driver first appeared in .Fx 11.1 . .Sh AUTHORS -.An Murthy Bhat -.Aq murthy.bhat@microsemi.com +.An John Hall +.Aq john.hall@microchip.com .Sh BUGS The controller is not actually paused on suspend/resume. diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64 index 5ad0447f847d..e1499b19bb12 100644 --- a/sys/conf/files.amd64 +++ b/sys/conf/files.amd64 @@ -1,475 +1,476 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # # common files stuff between i386 and amd64 include "conf/files.x86" # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # # elf-vdso.so.o standard \ dependency "$S/amd64/amd64/sigtramp.S assym.inc $S/conf/vdso_amd64.ldscript $S/tools/amd64_vdso.sh" \ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \ no-implicit-rule before-depend \ clean "elf-vdso.so.o elf-vdso.so.1 vdso_offsets.h sigtramp.pico" # elf-vdso32.so.o optional compat_freebsd32 \ dependency "$S/amd64/ia32/ia32_sigtramp.S ia32_assym.h $S/conf/vdso_amd64_ia32.ldscript $S/tools/amd64_ia32_vdso.sh" \ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \ no-implicit-rule before-depend \ clean "elf-vdso32.so.o elf-vdso32.so.1 vdso_ia32_offsets.h ia32_sigtramp.pico" # ia32_genassym.o standard \ dependency "$S/compat/ia32/ia32_genassym.c offset.inc" \ compile-with "${CC} ${CFLAGS:N-flto:N-fno-common} -fcommon -c ${.IMPSRC}" \ no-obj no-implicit-rule \ clean "ia32_genassym.o" # ia32_assym.h standard \ dependency "$S/kern/genassym.sh ia32_genassym.o" \ compile-with "env NM='${NM}' NMFLAGS='${NMFLAGS}' sh $S/kern/genassym.sh ia32_genassym.o > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "ia32_assym.h" # amd64/acpica/acpi_machdep.c optional acpi amd64/acpica/acpi_wakeup.c optional acpi acpi_wakecode.o optional acpi \ dependency "$S/amd64/acpica/acpi_wakecode.S assym.inc" \ compile-with "${NORMAL_S}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.o" acpi_wakecode.bin optional acpi \ dependency "acpi_wakecode.o" \ compile-with "${OBJCOPY} -S -O binary acpi_wakecode.o ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.bin" acpi_wakecode.h optional acpi \ dependency "acpi_wakecode.bin" \ compile-with "file2c -sx 'static char wakecode[] = {' '};' < acpi_wakecode.bin > ${.TARGET}" \ no-obj no-implicit-rule before-depend \ clean "acpi_wakecode.h" acpi_wakedata.h optional acpi \ dependency "acpi_wakecode.o" \ compile-with '${NM} -n --defined-only acpi_wakecode.o | while read offset dummy what; do echo 
"#define $${what} 0x$${offset}"; done > ${.TARGET}' \ no-obj no-implicit-rule before-depend \ clean "acpi_wakedata.h" # #amd64/amd64/apic_vector.S standard amd64/amd64/bios.c standard amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/copyout.c standard amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb amd64/amd64/db_trace.c optional ddb amd64/amd64/efirt_machdep.c optional efirt amd64/amd64/efirt_support.S optional efirt amd64/amd64/elf_machdep.c standard amd64/amd64/exception.S standard amd64/amd64/exec_machdep.c standard amd64/amd64/fpu.c standard amd64/amd64/gdb_machdep.c optional gdb amd64/amd64/initcpu.c standard amd64/amd64/io.c optional io amd64/amd64/locore.S standard no-obj amd64/amd64/xen-locore.S optional xenhvm \ compile-with "${NORMAL_S} -g0" \ no-ctfconvert amd64/amd64/machdep.c standard amd64/amd64/mem.c optional mem amd64/amd64/minidump_machdep.c standard amd64/amd64/mp_machdep.c optional smp amd64/amd64/mpboot.S optional smp amd64/amd64/pmap.c standard amd64/amd64/ptrace_machdep.c standard amd64/amd64/support.S standard amd64/amd64/sys_machdep.c standard amd64/amd64/trap.c standard amd64/amd64/uio_machdep.c standard amd64/amd64/uma_machdep.c standard amd64/amd64/vm_machdep.c standard amd64/pci/pci_cfgreg.c optional pci cddl/dev/dtrace/amd64/dtrace_asm.S optional dtrace compile-with "${DTRACE_S}" cddl/dev/dtrace/amd64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" crypto/aesni/aeskeys_amd64.S optional aesni crypto/des/des_enc.c optional netsmb crypto/openssl/amd64/aes-gcm-avx512.S optional ossl crypto/openssl/amd64/aesni-x86_64.S optional ossl crypto/openssl/amd64/aesni-gcm-x86_64.S optional ossl crypto/openssl/amd64/chacha-x86_64.S optional ossl crypto/openssl/amd64/ghash-x86_64.S optional ossl crypto/openssl/amd64/poly1305-x86_64.S optional ossl crypto/openssl/amd64/sha1-x86_64.S optional ossl crypto/openssl/amd64/sha256-x86_64.S optional ossl crypto/openssl/amd64/sha512-x86_64.S optional ossl crypto/openssl/amd64/ossl_aes_gcm.c optional ossl dev/acpi_support/acpi_wmi_if.m standard dev/agp/agp_amd64.c optional agp dev/agp/agp_i810.c optional agp dev/agp/agp_via.c optional agp dev/amdgpio/amdgpio.c optional amdgpio dev/axgbe/if_axgbe_pci.c optional axp dev/axgbe/xgbe-desc.c optional axp dev/axgbe/xgbe-dev.c optional axp dev/axgbe/xgbe-drv.c optional axp dev/axgbe/xgbe-mdio.c optional axp dev/axgbe/xgbe-sysctl.c optional axp dev/axgbe/xgbe-txrx.c optional axp dev/axgbe/xgbe_osdep.c optional axp dev/axgbe/xgbe-i2c.c optional axp dev/axgbe/xgbe-phy-v2.c optional axp dev/enic/enic_res.c optional enic dev/enic/enic_txrx.c optional enic dev/enic/if_enic.c optional enic dev/enic/vnic_cq.c optional enic dev/enic/vnic_dev.c optional enic dev/enic/vnic_intr.c optional enic dev/enic/vnic_rq.c optional enic dev/enic/vnic_wq.c optional enic dev/ftgpio/ftgpio.c optional ftgpio superio dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv dev/iavf/if_iavf_iflib.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_lib.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_osdep.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_txrx_iflib.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_common.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_adminq.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" 
dev/iavf/iavf_vc_common.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/iavf/iavf_vc_iflib.c optional iavf pci \ compile-with "${NORMAL_C} -I$S/dev/iavf" dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_vlan_mode.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fw_logging.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fwlog.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_rdma.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/irdma_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/ice_ddp_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01031e00 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.30.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.30.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/ioat/ioat.c optional ioat pci dev/ioat/ioat_test.c optional ioat pci dev/ixl/if_ixl.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_main.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iflib.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_qmgr.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_iov.c optional ixl pci pci_iov \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_pf_i2c.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/ixl_txrx.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_osdep.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_lan_hmc.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_hmc.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_common.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_nvm.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_adminq.c optional ixl pci \ 
compile-with "${NORMAL_C} -I$S/dev/ixl" dev/ixl/i40e_dcb.c optional ixl pci \ compile-with "${NORMAL_C} -I$S/dev/ixl" dev/nctgpio/nctgpio.c optional nctgpio dev/ncthwm/ncthwm.c optional ncthwm superio dev/nfe/if_nfe.c optional nfe pci dev/ntb/if_ntb/if_ntb.c optional if_ntb dev/ntb/ntb_transport.c optional ntb_transport | if_ntb dev/ntb/ntb.c optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_if.m optional ntb | ntb_transport | if_ntb | ntb_hw_amd | ntb_hw_intel | ntb_hw_plx | ntb_hw dev/ntb/ntb_hw/ntb_hw_amd.c optional ntb_hw_amd | ntb_hw dev/ntb/ntb_hw/ntb_hw_intel.c optional ntb_hw_intel | ntb_hw dev/ntb/ntb_hw/ntb_hw_plx.c optional ntb_hw_plx | ntb_hw dev/ntb/test/ntb_tool.c optional ntb_tool dev/nvram/nvram.c optional nvram isa dev/random/ivy.c optional rdrand_rng !random_loadable dev/random/nehemiah.c optional padlock_rng !random_loadable dev/qlxge/qls_dbg.c optional qlxge pci dev/qlxge/qls_dump.c optional qlxge pci dev/qlxge/qls_hw.c optional qlxge pci dev/qlxge/qls_ioctl.c optional qlxge pci dev/qlxge/qls_isr.c optional qlxge pci dev/qlxge/qls_os.c optional qlxge pci dev/qlxgb/qla_dbg.c optional qlxgb pci dev/qlxgb/qla_hw.c optional qlxgb pci dev/qlxgb/qla_ioctl.c optional qlxgb pci dev/qlxgb/qla_isr.c optional qlxgb pci dev/qlxgb/qla_misc.c optional qlxgb pci dev/qlxgb/qla_os.c optional qlxgb pci dev/qlxgbe/ql_dbg.c optional qlxgbe pci dev/qlxgbe/ql_hw.c optional qlxgbe pci dev/qlxgbe/ql_ioctl.c optional qlxgbe pci dev/qlxgbe/ql_isr.c optional qlxgbe pci dev/qlxgbe/ql_misc.c optional qlxgbe pci dev/qlxgbe/ql_os.c optional qlxgbe pci dev/qlxgbe/ql_reset.c optional qlxgbe pci dev/qlxgbe/ql_fw.c optional qlxgbe pci dev/qlxgbe/ql_boot.c optional qlxgbe pci dev/qlxgbe/ql_minidump.c optional qlxgbe pci dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_spq.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_sriov.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_vf.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_ll2.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_iwarp.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_rdma.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_roce.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/ecore_ooo.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_rdma.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci \ compile-with "${LINUXKPI_C}" dev/sfxge/common/ef10_ev.c optional sfxge 
pci dev/sfxge/common/ef10_filter.c optional sfxge pci dev/sfxge/common/ef10_image.c optional sfxge pci dev/sfxge/common/ef10_intr.c optional sfxge pci dev/sfxge/common/ef10_mac.c optional sfxge pci dev/sfxge/common/ef10_mcdi.c optional sfxge pci dev/sfxge/common/ef10_nic.c optional sfxge pci dev/sfxge/common/ef10_nvram.c optional sfxge pci dev/sfxge/common/ef10_phy.c optional sfxge pci dev/sfxge/common/ef10_rx.c optional sfxge pci dev/sfxge/common/ef10_tx.c optional sfxge pci dev/sfxge/common/ef10_vpd.c optional sfxge pci dev/sfxge/common/efx_bootcfg.c optional sfxge pci dev/sfxge/common/efx_crc32.c optional sfxge pci dev/sfxge/common/efx_ev.c optional sfxge pci dev/sfxge/common/efx_filter.c optional sfxge pci dev/sfxge/common/efx_hash.c optional sfxge pci dev/sfxge/common/efx_intr.c optional sfxge pci dev/sfxge/common/efx_lic.c optional sfxge pci dev/sfxge/common/efx_mac.c optional sfxge pci dev/sfxge/common/efx_mcdi.c optional sfxge pci dev/sfxge/common/efx_mon.c optional sfxge pci dev/sfxge/common/efx_nic.c optional sfxge pci dev/sfxge/common/efx_nvram.c optional sfxge pci dev/sfxge/common/efx_phy.c optional sfxge pci dev/sfxge/common/efx_port.c optional sfxge pci dev/sfxge/common/efx_rx.c optional sfxge pci dev/sfxge/common/efx_sram.c optional sfxge pci dev/sfxge/common/efx_tunnel.c optional sfxge pci dev/sfxge/common/efx_tx.c optional sfxge pci dev/sfxge/common/efx_vpd.c optional sfxge pci dev/sfxge/common/hunt_nic.c optional sfxge pci dev/sfxge/common/mcdi_mon.c optional sfxge pci dev/sfxge/common/medford_nic.c optional sfxge pci dev/sfxge/common/medford2_nic.c optional sfxge pci dev/sfxge/common/siena_mac.c optional sfxge pci dev/sfxge/common/siena_mcdi.c optional sfxge pci dev/sfxge/common/siena_nic.c optional sfxge pci dev/sfxge/common/siena_nvram.c optional sfxge pci dev/sfxge/common/siena_phy.c optional sfxge pci dev/sfxge/common/siena_sram.c optional sfxge pci dev/sfxge/common/siena_vpd.c optional sfxge pci dev/sfxge/sfxge.c optional sfxge pci dev/sfxge/sfxge_dma.c optional sfxge pci dev/sfxge/sfxge_ev.c optional sfxge pci dev/sfxge/sfxge_intr.c optional sfxge pci dev/sfxge/sfxge_mcdi.c optional sfxge pci dev/sfxge/sfxge_nvram.c optional sfxge pci dev/sfxge/sfxge_port.c optional sfxge pci dev/sfxge/sfxge_rx.c optional sfxge pci dev/sfxge/sfxge_tx.c optional sfxge pci dev/smartpqi/smartpqi_cam.c optional smartpqi dev/smartpqi/smartpqi_cmd.c optional smartpqi dev/smartpqi/smartpqi_discovery.c optional smartpqi dev/smartpqi/smartpqi_event.c optional smartpqi +dev/smartpqi/smartpqi_features.c optional smartpqi dev/smartpqi/smartpqi_helper.c optional smartpqi dev/smartpqi/smartpqi_init.c optional smartpqi dev/smartpqi/smartpqi_intr.c optional smartpqi dev/smartpqi/smartpqi_ioctl.c optional smartpqi dev/smartpqi/smartpqi_main.c optional smartpqi dev/smartpqi/smartpqi_mem.c optional smartpqi dev/smartpqi/smartpqi_misc.c optional smartpqi dev/smartpqi/smartpqi_queue.c optional smartpqi dev/smartpqi/smartpqi_request.c optional smartpqi dev/smartpqi/smartpqi_response.c optional smartpqi dev/smartpqi/smartpqi_sis.c optional smartpqi dev/smartpqi/smartpqi_tag.c optional smartpqi dev/speaker/spkr.c optional speaker dev/sume/if_sume.c optional sume dev/superio/superio.c optional superio isa dev/syscons/apm/apm_saver.c optional apm_saver apm dev/syscons/scvesactl.c optional sc vga vesa dev/syscons/scvgarndr.c optional sc vga dev/tpm/tpm.c optional tpm dev/tpm/tpm20.c optional tpm dev/tpm/tpm_crb.c optional tpm acpi dev/tpm/tpm_tis.c optional tpm acpi dev/tpm/tpm_acpi.c optional tpm acpi 
dev/tpm/tpm_isa.c optional tpm isa dev/uart/uart_cpu_x86.c optional uart dev/viawd/viawd.c optional viawd dev/vmd/vmd.c optional vmd | vmd_bus dev/wbwd/wbwd.c optional wbwd dev/wdatwd/wdatwd.c optional wdatwd dev/p2sb/p2sb.c optional p2sb pci dev/p2sb/lewisburg_gpiocm.c optional lbggpiocm p2sb dev/p2sb/lewisburg_gpio.c optional lbggpio lbggpiocm isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/imgact_aout.c optional compat_aout kern/link_elf_obj.c standard # # IA32 binary support # #amd64/ia32/ia32_exception.S optional compat_freebsd32 amd64/ia32/ia32_reg.c optional compat_freebsd32 amd64/ia32/ia32_signal.c optional compat_freebsd32 amd64/ia32/ia32_syscall.c optional compat_freebsd32 amd64/ia32/ia32_misc.c optional compat_freebsd32 compat/ia32/ia32_sysvec.c optional compat_freebsd32 # # x86 real mode BIOS emulator, required by dpms/pci/vesa # compat/x86bios/x86bios.c optional x86bios | dpms | pci | vesa contrib/x86emu/x86emu.c optional x86bios | dpms | pci | vesa # Common files where we currently configure the system differently, but perhaps shouldn't # config(8) doesn't have a way to force standard options, so we've been inconsistent # about marking non-optional things 'standard'. x86/acpica/madt.c optional acpi x86/isa/atpic.c optional atpic isa x86/isa/elcr.c optional atpic isa | mptable x86/isa/isa.c standard x86/isa/isa_dma.c standard x86/pci/pci_early_quirks.c optional pci x86/x86/io_apic.c standard x86/x86/local_apic.c standard x86/x86/mptable.c optional mptable x86/x86/mptable_pci.c optional mptable pci x86/x86/msi.c optional pci x86/xen/pv.c optional xenhvm # zfs blake3 hash support contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_avx2.S optional zfs compile-with "${ZFS_S}" contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_avx512.S optional zfs compile-with "${ZFS_S}" contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse2.S optional zfs compile-with "${ZFS_S}" contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse41.S optional zfs compile-with "${ZFS_S}" # zfs sha2 hash support zfs-sha256-x86_64.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha256-x86_64.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha256-x86_64.S" \ no-implicit-rule \ clean "zfs-sha256-x86_64.o" zfs-sha512-x86_64.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha512-x86_64.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha512-x86_64.S" \ no-implicit-rule \ clean "zfs-sha512-x86_64.o" # zfs checksums / zcommon contrib/openzfs/module/zcommon/zfs_fletcher_avx512.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_intel.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zcommon/zfs_fletcher_sse.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx2.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx512bw.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_avx512f.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_sse2.c optional zfs compile-with "${ZFS_C}" contrib/openzfs/module/zfs/vdev_raidz_math_ssse3.c optional zfs compile-with "${ZFS_C}" # Clock calibration subroutine; uses floating-point arithmetic subr_clockcalib.o standard \ dependency "$S/kern/subr_clockcalib.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc} 
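 * Reports a parallel SCSI (SPI) transport speaking SPC-4, with disconnects
 * and tagged queueing flagged as enabled.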
${WERROR} -mmmx -msse -msse2 ${.IMPSRC}" \ no-implicit-rule \ clean "subr_clockcalib.o" diff --git a/sys/dev/smartpqi/smartpqi_cam.c b/sys/dev/smartpqi/smartpqi_cam.c index 96e1dc10729e..ffdd9fd7da79 100644 --- a/sys/dev/smartpqi/smartpqi_cam.c +++ b/sys/dev/smartpqi/smartpqi_cam.c @@ -1,1353 +1,1391 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * CAM interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Set cam sim properties of the smartpqi adapter. */ static void update_sim_properties(struct cam_sim *sim, struct ccb_pathinq *cpi) { pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; cpi->hba_eng_cnt = 0; cpi->max_lun = PQI_MAX_MULTILUN; - cpi->max_target = 1088; + cpi->max_target = MAX_TARGET_DEVICES; cpi->maxio = (softs->pqi_cap.max_sg_elem - 1) * PAGE_SIZE; cpi->initiator_id = 255; - strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); - strlcpy(cpi->hba_vid, "Microsemi", HBA_IDLEN); - strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN-1); + cpi->sim_vid[sizeof(cpi->sim_vid)-1] = '\0'; + strncpy(cpi->hba_vid, "Microsemi", HBA_IDLEN-1); + cpi->hba_vid[sizeof(cpi->hba_vid)-1] = '\0'; + strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN-1); + cpi->dev_name[sizeof(cpi->dev_name)-1] = '\0'; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 1200000; /* Base bus speed in KB/sec */ cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC4; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->ccb_h.status = CAM_REQ_CMP; cpi->hba_vendor = pci_get_vendor(dev); cpi->hba_device = pci_get_device(dev); cpi->hba_subvendor = pci_get_subvendor(dev); cpi->hba_subdevice = pci_get_subdevice(dev); DBG_FUNC("OUT\n"); } /* - * Get transport settings of the smartpqi adapter + * Get transport settings of the smartpqi adapter. 
*/ static void get_transport_settings(struct pqisrc_softstate *softs, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; DBG_FUNC("IN\n"); - + cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC4; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; sas->valid = CTS_SAS_VALID_SPEED; cts->ccb_h.status = CAM_REQ_CMP; DBG_FUNC("OUT\n"); } /* * Add the target to CAM layer and rescan, when a new device is found */ void os_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { union ccb *ccb; + uint64_t lun; DBG_FUNC("IN\n"); - if(softs->os_specific.sim_registered) { + lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun; + if(softs->os_specific.sim_registered) { if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { DBG_ERR("rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), - device->target, device->lun) != CAM_REQ_CMP) { + device->target, lun) != CAM_REQ_CMP) { DBG_ERR("rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); } DBG_FUNC("OUT\n"); } /* * Remove the device from CAM layer when deleted or hot removed */ void os_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { - struct cam_path *tmppath; + struct cam_path *tmppath = NULL; + uint64_t lun; DBG_FUNC("IN\n"); - + + lun = (device->is_multi_lun) ? CAM_LUN_WILDCARD : device->lun; if(softs->os_specific.sim_registered) { - if (xpt_create_path(&tmppath, NULL, + if (xpt_create_path(&tmppath, NULL, cam_sim_path(softs->os_specific.sim), - device->target, device->lun) != CAM_REQ_CMP) { - DBG_ERR("unable to create path for async event"); + device->target, lun) != CAM_REQ_CMP) { + DBG_ERR("unable to create path for async event\n"); return; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); - softs->device_list[device->target][device->lun] = NULL; + /* softs->device_list[device->target][device->lun] = NULL; */ + int index = pqisrc_find_device_list_index(softs,device); + if (index >= 0 && index < PQI_MAX_DEVICES) + softs->dev_list[index] = NULL; pqisrc_free_device(softs, device); } DBG_FUNC("OUT\n"); } /* * Function to release the frozen simq */ static void pqi_release_camq(rcb_t *rcb) { pqisrc_softstate_t *softs; struct ccb_scsiio *csio; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; softs = rcb->softs; DBG_FUNC("IN\n"); if (softs->os_specific.pqi_flags & PQI_FLAG_BUSY) { softs->os_specific.pqi_flags &= ~PQI_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } DBG_FUNC("OUT\n"); } static void pqi_synch_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN rcb = %p\n", rcb); if (!(rcb->cm_flags & PQI_CMD_MAPPED)) return; if (rcb->bcount != 0 ) { - if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) + if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, - rcb->cm_datamap, - BUS_DMASYNC_POSTREAD); - if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) + rcb->cm_datamap,BUS_DMASYNC_POSTREAD); + if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, - 
rcb->cm_datamap, - BUS_DMASYNC_POSTWRITE); + rcb->cm_datamap,BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(softs->os_specific.pqi_buffer_dmat, - rcb->cm_datamap); + rcb->cm_datamap); } rcb->cm_flags &= ~PQI_CMD_MAPPED; if(rcb->sgt && rcb->nseg) os_mem_free(rcb->softs, (void*)rcb->sgt, - rcb->nseg*sizeof(sgt_t)); + rcb->nseg*sizeof(sgt_t)); DBG_IO("OUT\n"); } /* * Function to dma-unmap the completed request */ static inline void pqi_unmap_request(rcb_t *rcb) { DBG_IO("IN rcb = %p\n", rcb); pqi_synch_request(rcb); pqisrc_put_tag(&rcb->softs->taglist, rcb->tag); DBG_IO("OUT\n"); } /* * Construct meaningful LD name for volume here. */ static void smartpqi_fix_ld_inquiry(pqisrc_softstate_t *softs, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq = NULL; uint8_t *cdb = NULL; pqi_scsi_dev_t *device = NULL; DBG_FUNC("IN\n"); if (pqisrc_ctrl_offline(softs)) return; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? (uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; + if(cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; - device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; + /* device = softs->device_list[csio->ccb_h.target_id][csio->ccb_h.target_lun]; */ + int target = csio->ccb_h.target_id; + int lun = csio->ccb_h.target_lun; + int index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun); + if (index != INVALID_ELEM) + device = softs->dev_list[index]; /* Let the disks be probed and dealt with via CAM. Only for LD let it fall through and inquiry be tweaked */ - if (!device || !pqisrc_is_logical_device(device) || - (device->devtype != DISK_DEVICE) || + if( !device || !pqisrc_is_logical_device(device) || + (device->devtype != DISK_DEVICE) || pqisrc_is_external_raid_device(device)) { return; } strncpy(inq->vendor, device->vendor, - SID_VENDOR_SIZE); + SID_VENDOR_SIZE-1); + inq->vendor[sizeof(inq->vendor)-1] = '\0'; strncpy(inq->product, pqisrc_raidlevel_to_string(device->raid_level), - SID_PRODUCT_SIZE); + SID_PRODUCT_SIZE-1); + inq->product[sizeof(inq->product)-1] = '\0'; strncpy(inq->revision, device->volume_offline?"OFF":"OK", - SID_REVISION_SIZE); + SID_REVISION_SIZE-1); + inq->revision[sizeof(inq->revision)-1] = '\0'; } DBG_FUNC("OUT\n"); } static void pqi_complete_scsi_io(struct ccb_scsiio *csio, rcb_t *rcb) { uint32_t release_tag; pqisrc_softstate_t *softs = rcb->softs; DBG_IO("IN scsi io = %p\n", csio); pqi_synch_request(rcb); smartpqi_fix_ld_inquiry(rcb->softs, csio); pqi_release_camq(rcb); release_tag = rcb->tag; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, release_tag); xpt_done((union ccb *)csio); DBG_FUNC("OUT\n"); } /* * Handle completion of a command - pass results back through the CCB */ void os_io_response_success(rcb_t *rcb) { struct ccb_scsiio *csio; DBG_IO("IN rcb = %p\n", rcb); if (rcb == NULL) panic("rcb is null"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; csio->ccb_h.status = CAM_REQ_CMP; pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } static void copy_sense_data_to_csio(struct ccb_scsiio *csio, uint8_t *sense_data, uint16_t sense_data_len) { DBG_IO("IN csio = %p\n", csio); memset(&csio->sense_data, 0, csio->sense_len); sense_data_len = (sense_data_len > csio->sense_len) ? 
csio->sense_len : sense_data_len; if (sense_data) memcpy(&csio->sense_data, sense_data, sense_data_len); if (csio->sense_len > sense_data_len) csio->sense_resid = csio->sense_len - sense_data_len; else csio->sense_resid = 0; DBG_IO("OUT\n"); } /* * Error response handling for raid IO */ void os_raid_response_error(rcb_t *rcb, raid_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; csio->ccb_h.status = CAM_REQ_CMP_ERR; if (!err_info || !rcb->dvp) { DBG_ERR("couldn't be accessed! error info = %p, rcb->dvp = %p\n", err_info, rcb->dvp); goto error_out; } csio->scsi_status = err_info->status; if (csio->ccb_h.func_code == XPT_SCSI_IO) { /* * Handle specific SCSI status values. */ switch(csio->scsi_status) { case PQI_RAID_STATUS_QUEUE_FULL: csio->ccb_h.status = CAM_REQ_CMP; DBG_ERR("Queue Full error\n"); break; /* check condition, sense data included */ case PQI_RAID_STATUS_CHECK_CONDITION: { uint16_t sense_data_len = LE_16(err_info->sense_data_len); uint8_t *sense_data = NULL; if (sense_data_len) sense_data = err_info->data; + copy_sense_data_to_csio(csio, sense_data, sense_data_len); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR - | CAM_AUTOSNS_VALID - | CAM_REQ_CMP_ERR; + | CAM_AUTOSNS_VALID + | CAM_REQ_CMP_ERR; } break; case PQI_RAID_DATA_IN_OUT_UNDERFLOW: { uint32_t resid = 0; resid = rcb->bcount-err_info->data_out_transferred; csio->resid = resid; csio->ccb_h.status = CAM_REQ_CMP; } break; default: csio->ccb_h.status = CAM_REQ_CMP; break; } } error_out: pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } /* * Error response handling for aio. */ void os_aio_response_error(rcb_t *rcb, aio_path_error_info_elem_t *err_info) { struct ccb_scsiio *csio; pqisrc_softstate_t *softs; DBG_IO("IN\n"); if (rcb == NULL) panic("rcb is null"); - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; csio = (struct ccb_scsiio *)&rcb->cm_ccb->csio; if (csio == NULL) panic("csio is null"); softs = rcb->softs; if (!err_info || !rcb->dvp) { csio->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("couldn't be accessed! 
error info = %p, rcb->dvp = %p\n", err_info, rcb->dvp); goto error_out; } switch (err_info->service_resp) { case PQI_AIO_SERV_RESPONSE_COMPLETE: csio->ccb_h.status = err_info->status; break; case PQI_AIO_SERV_RESPONSE_FAILURE: switch(err_info->status) { case PQI_AIO_STATUS_IO_ABORTED: csio->ccb_h.status = CAM_REQ_ABORTED; DBG_WARN_BTL(rcb->dvp, "IO aborted\n"); break; case PQI_AIO_STATUS_UNDERRUN: csio->ccb_h.status = CAM_REQ_CMP; csio->resid = LE_32(err_info->resd_count); break; case PQI_AIO_STATUS_OVERRUN: csio->ccb_h.status = CAM_REQ_CMP; break; case PQI_AIO_STATUS_AIO_PATH_DISABLED: DBG_WARN_BTL(rcb->dvp,"AIO Path Disabled\n"); /* Timed out TMF response comes here */ if (rcb->tm_req) { rcb->req_pending = false; - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; DBG_ERR("AIO Disabled for TMF\n"); return; } rcb->dvp->aio_enabled = false; rcb->dvp->offload_enabled = false; csio->ccb_h.status |= CAM_REQUEUE_REQ; break; case PQI_AIO_STATUS_IO_ERROR: case PQI_AIO_STATUS_IO_NO_DEVICE: case PQI_AIO_STATUS_INVALID_DEVICE: default: DBG_WARN_BTL(rcb->dvp,"IO Error/Invalid/No device\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } break; case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n", (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_COMPLETE) ? "COMPLETE" : "SUCCEEDED"); - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; rcb->req_pending = false; return; case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: DBG_ERR("PQI_AIO_SERV_RESPONSE_TMF %s\n", (err_info->service_resp == PQI_AIO_SERV_RESPONSE_TMF_REJECTED) ? "REJECTED" : "INCORRECT LUN"); - rcb->status = REQUEST_FAILED; + rcb->status = PQI_STATUS_TIMEOUT; rcb->req_pending = false; return; default: DBG_WARN_BTL(rcb->dvp,"Scsi Status Error\n"); csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; } if(err_info->data_pres == DATA_PRESENT_SENSE_DATA ) { csio->scsi_status = PQI_AIO_STATUS_CHECK_CONDITION; uint8_t *sense_data = NULL; unsigned sense_data_len = LE_16(err_info->data_len); if (sense_data_len) sense_data = err_info->data; DBG_INFO("SCSI_STATUS_CHECK_COND sense size %u\n", sense_data_len); copy_sense_data_to_csio(csio, sense_data, sense_data_len); csio->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; } error_out: pqi_complete_scsi_io(csio, rcb); DBG_IO("OUT\n"); } static void pqi_freeze_ccb(union ccb *ccb) { if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } } /* * Command-mapping helper function - populate this command's s/g table. 
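 * Invoked by busdma with the physical segment list: the segments are copied
 * into a newly allocated sgt_t array on the rcb, the DMA map is pre-synced
 * according to the CAM data direction, and the request is handed to
 * pqisrc_build_send_io().  On failure the CCB completes with
 * CAM_RESRC_UNAVAIL and the device queue is frozen.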
*/ static void pqi_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { rcb_t *rcb = (rcb_t *)arg; pqisrc_softstate_t *softs = rcb->softs; union ccb *ccb; - if (error || nseg > softs->pqi_cap.max_sg_elem) { - DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%d)\n", + if (error || nseg > softs->pqi_cap.max_sg_elem) + { + DBG_ERR_BTL(rcb->dvp, "map failed err = %d or nseg(%d) > sgelem(%u)\n", error, nseg, softs->pqi_cap.max_sg_elem); goto error_io; } rcb->sgt = os_mem_alloc(softs, nseg * sizeof(sgt_t)); if (!rcb->sgt) { DBG_ERR_BTL(rcb->dvp, "os_mem_alloc() failed; nseg = %d\n", nseg); goto error_io; } rcb->nseg = nseg; for (int i = 0; i < nseg; i++) { rcb->sgt[i].addr = segs[i].ds_addr; rcb->sgt[i].len = segs[i].ds_len; rcb->sgt[i].flags = 0; } - if (rcb->data_dir == SOP_DATA_DIR_FROM_DEVICE) - bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, - rcb->cm_datamap, BUS_DMASYNC_PREREAD); - if (rcb->data_dir == SOP_DATA_DIR_TO_DEVICE) - bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, - rcb->cm_datamap, BUS_DMASYNC_PREWRITE); + if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) + bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, + rcb->cm_datamap, BUS_DMASYNC_PREREAD); + if ((rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) + bus_dmamap_sync(softs->os_specific.pqi_buffer_dmat, + rcb->cm_datamap, BUS_DMASYNC_PREWRITE); /* Call IO functions depending on pd or ld */ - rcb->status = REQUEST_PENDING; + rcb->status = PQI_STATUS_FAILURE; error = pqisrc_build_send_io(softs, rcb); if (error) { rcb->req_pending = false; DBG_ERR_BTL(rcb->dvp, "Build IO failed, error = %d\n", error); } else { /* Successfully IO was submitted to the device. */ return; } error_io: ccb = rcb->cm_ccb; ccb->ccb_h.status = CAM_RESRC_UNAVAIL; pqi_freeze_ccb(ccb); pqi_unmap_request(rcb); xpt_done(ccb); return; } /* * Function to dma-map the request buffer */ static int pqi_map_request(rcb_t *rcb) { pqisrc_softstate_t *softs = rcb->softs; int bsd_status = BSD_SUCCESS; union ccb *ccb = rcb->cm_ccb; DBG_FUNC("IN\n"); /* check that mapping is necessary */ if (rcb->cm_flags & PQI_CMD_MAPPED) return BSD_SUCCESS; rcb->cm_flags |= PQI_CMD_MAPPED; if (rcb->bcount) { bsd_status = bus_dmamap_load_ccb(softs->os_specific.pqi_buffer_dmat, rcb->cm_datamap, ccb, pqi_request_map_helper, rcb, 0); if (bsd_status != BSD_SUCCESS && bsd_status != EINPROGRESS) { - DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %d\n", + DBG_ERR_BTL(rcb->dvp, "bus_dmamap_load_ccb failed, return status = %d transfer length = %u\n", bsd_status, rcb->bcount); return bsd_status; } } else { /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. 
*/ /* Call IO functions depending on pd or ld */ - rcb->status = REQUEST_PENDING; + rcb->status = PQI_STATUS_FAILURE; if (pqisrc_build_send_io(softs, rcb) != PQI_STATUS_SUCCESS) { bsd_status = EIO; } } DBG_FUNC("OUT error = %d\n", bsd_status); return bsd_status; } /* * Function to clear the request control block */ void os_reset_rcb(rcb_t *rcb) { rcb->error_info = NULL; rcb->req = NULL; rcb->status = -1; rcb->tag = INVALID_ELEM; rcb->dvp = NULL; rcb->cdbp = NULL; rcb->softs = NULL; rcb->cm_flags = 0; rcb->cm_data = NULL; rcb->bcount = 0; rcb->nseg = 0; rcb->sgt = NULL; rcb->cm_ccb = NULL; rcb->encrypt_enable = false; rcb->ioaccel_handle = 0; rcb->resp_qid = 0; rcb->req_pending = false; rcb->tm_req = false; } /* * Callback function for the lun rescan */ static void smartpqi_lunrescan_cb(struct cam_periph *periph, union ccb *ccb) { xpt_free_path(ccb->ccb_h.path); xpt_free_ccb(ccb); } /* * Function to rescan the lun */ static void smartpqi_lun_rescan(struct pqisrc_softstate *softs, int target, int lun) { union ccb *ccb = NULL; cam_status status = 0; struct cam_path *path = NULL; DBG_FUNC("IN\n"); ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("Unable to alloc ccb for lun rescan\n"); return; } status = xpt_create_path(&path, NULL, cam_sim_path(softs->os_specific.sim), target, lun); if (status != CAM_REQ_CMP) { DBG_ERR("xpt_create_path status(%d) != CAM_REQ_CMP \n", status); xpt_free_ccb(ccb); return; } - bzero(ccb, sizeof(union ccb)); + memset(ccb, 0, sizeof(union ccb)); xpt_setup_ccb(&ccb->ccb_h, path, 5); ccb->ccb_h.func_code = XPT_SCAN_LUN; ccb->ccb_h.cbfcnp = smartpqi_lunrescan_cb; ccb->crcn.flags = CAM_FLAG_NONE; xpt_action(ccb); DBG_FUNC("OUT\n"); } /* * Function to rescan the lun under each target */ void smartpqi_target_rescan(struct pqisrc_softstate *softs) { - int target = 0, lun = 0; + pqi_scsi_dev_t *device; + int index; DBG_FUNC("IN\n"); - for(target = 0; target < PQI_MAX_DEVICES; target++){ - for(lun = 0; lun < PQI_MAX_MULTILUN; lun++){ - if(softs->device_list[target][lun]){ - smartpqi_lun_rescan(softs, target, lun); - } + for(index = 0; index < PQI_MAX_DEVICES; index++){ + /* if(softs->device_list[target][lun]){ */ + if(softs->dev_list[index] != NULL) { + device = softs->dev_list[index]; + DBG_INFO("calling smartpqi_lun_rescan with TL = %d:%d\n",device->target,device->lun); + smartpqi_lun_rescan(softs, device->target, device->lun); } } DBG_FUNC("OUT\n"); } /* * Set the mode of tagged command queueing for the current task. 
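 * CAM tag_action values map to SOP task attributes as follows:
 *   MSG_HEAD_OF_Q_TAG -> SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE
 *   MSG_ORDERED_Q_TAG -> SOP_TASK_ATTRIBUTE_ORDERED
 *   MSG_SIMPLE_Q_TAG (default) -> SOP_TASK_ATTRIBUTE_SIMPLE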
*/ uint8_t os_get_task_attr(rcb_t *rcb) { union ccb *ccb = rcb->cm_ccb; uint8_t tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE; break; case MSG_ORDERED_Q_TAG: tag_action = SOP_TASK_ATTRIBUTE_ORDERED; break; case MSG_SIMPLE_Q_TAG: default: tag_action = SOP_TASK_ATTRIBUTE_SIMPLE; break; } return tag_action; } /* * Complete all outstanding commands */ void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *softs) { int tag = 0; - pqi_scsi_dev_t *dvp = NULL; + pqi_scsi_dev_t *dvp = NULL; DBG_FUNC("IN\n"); for (tag = 1; tag <= softs->max_outstanding_io; tag++) { rcb_t *prcb = &softs->rcb[tag]; dvp = prcb->dvp; if(prcb->req_pending && prcb->cm_ccb ) { prcb->req_pending = false; prcb->cm_ccb->ccb_h.status = CAM_REQ_ABORTED | CAM_REQ_CMP; pqi_complete_scsi_io(&prcb->cm_ccb->csio, prcb); if (dvp) pqisrc_decrement_device_active_io(softs, dvp); - } } DBG_FUNC("OUT\n"); } /* * IO handling functionality entry point */ static int pqisrc_io_start(struct cam_sim *sim, union ccb *ccb) { rcb_t *rcb; - uint32_t tag, no_transfer = 0; + uint32_t tag; pqisrc_softstate_t *softs = (struct pqisrc_softstate *) cam_sim_softc(sim); int32_t error; pqi_scsi_dev_t *dvp; + int target, lun, index; DBG_FUNC("IN\n"); - if (softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL) { + /* if( softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun] == NULL ) { */ + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + index = pqisrc_find_btl_list_index(softs,softs->bus_id,target,lun); + + if (index == INVALID_ELEM) { + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + DBG_INFO("Invalid index/device!!!, Device BTL %u:%d:%d\n", softs->bus_id, target, lun); + return ENXIO; + } + + if( softs->dev_list[index] == NULL ) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not there\n", ccb->ccb_h.target_id); return ENXIO; } - dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; + /* DBG_INFO("starting IO on BTL = %d:%d:%d index = %d\n",softs->bus_id,target,lun,index); */ + + /* dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */ + dvp = softs->dev_list[index]; /* Check controller state */ if (IN_PQI_RESET(softs)) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET | CAM_BUSY | CAM_REQ_INPROG; DBG_WARN("Device = %d BUSY/IN_RESET\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device state */ if (pqisrc_ctrl_offline(softs) || DEV_GONE(dvp)) { ccb->ccb_h.status = CAM_DEV_NOT_THERE | CAM_REQ_CMP; DBG_WARN("Device = %d GONE/OFFLINE\n", ccb->ccb_h.target_id); return ENXIO; } /* Check device reset */ if (DEVICE_RESET(dvp)) { ccb->ccb_h.status = CAM_SCSI_BUSY | CAM_REQ_INPROG | CAM_BUSY; DBG_WARN("Device %d reset returned busy\n", ccb->ccb_h.target_id); return EBUSY; } if (dvp->expose_device == false) { ccb->ccb_h.status = CAM_DEV_NOT_THERE; DBG_INFO("Device = %d not exposed\n", ccb->ccb_h.target_id); return ENXIO; } tag = pqisrc_get_tag(&softs->taglist); - if (tag == INVALID_ELEM) { + if( tag == INVALID_ELEM ) { DBG_ERR("Get Tag failed\n"); xpt_freeze_simq(softs->os_specific.sim, 1); softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; ccb->ccb_h.status |= (CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ); return EIO; } - DBG_IO("tag = %d &softs->taglist : %p\n", tag, &softs->taglist); + DBG_IO("tag = %u &softs->taglist : %p\n", tag, &softs->taglist); rcb = &softs->rcb[tag]; os_reset_rcb(rcb); rcb->tag = tag; rcb->softs = softs; rcb->cmdlen = ccb->csio.cdb_len; 
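	/*
	 * Attach the rcb to the CCB and point it at the device found above in
	 * the flat dev_list[] (indexed via pqisrc_find_btl_list_index(), which
	 * replaces the old device_list[target][lun] table), then record the CAM
	 * data buffer and transfer length before mapping the request.
	 */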
ccb->ccb_h.sim_priv.entries[0].ptr = rcb; - switch (ccb->ccb_h.flags & CAM_DIR_MASK) { - case CAM_DIR_IN: - rcb->data_dir = SOP_DATA_DIR_FROM_DEVICE; - break; - case CAM_DIR_OUT: - rcb->data_dir = SOP_DATA_DIR_TO_DEVICE; - break; - case CAM_DIR_NONE: - no_transfer = 1; - break; - default: - DBG_ERR("Unknown Dir\n"); - break; - } rcb->cm_ccb = ccb; - rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; + /* rcb->dvp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */ + rcb->dvp = softs->dev_list[index]; + + rcb->cm_data = (void *)ccb->csio.data_ptr; + rcb->bcount = ccb->csio.dxfer_len; - if (!no_transfer) { - rcb->cm_data = (void *)ccb->csio.data_ptr; - rcb->bcount = ccb->csio.dxfer_len; - } else { - rcb->cm_data = NULL; - rcb->bcount = 0; - } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). */ if ((error = pqi_map_request(rcb)) != BSD_SUCCESS) { xpt_freeze_simq(softs->os_specific.sim, 1); if (error == EINPROGRESS) { /* Release simq in the completion */ softs->os_specific.pqi_flags |= PQI_FLAG_BUSY; error = BSD_SUCCESS; } else { rcb->req_pending = false; ccb->ccb_h.status |= CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; DBG_WARN("Requeue req error = %d target = %d\n", error, ccb->ccb_h.target_id); pqi_unmap_request(rcb); error = EIO; } } DBG_FUNC("OUT error = %d\n", error); return error; } static inline int pqi_tmf_status_to_bsd_tmf_status(int pqi_status, rcb_t *rcb) { if (PQI_STATUS_SUCCESS == pqi_status && - REQUEST_SUCCESS == rcb->status) + PQI_STATUS_SUCCESS == rcb->status) return BSD_SUCCESS; else return EIO; } /* * Abort a task, task management functionality */ static int pqisrc_scsi_abort_task(pqisrc_softstate_t *softs, union ccb *ccb) { - struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; + struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *prcb = ccb->ccb_h.sim_priv.entries[0].ptr; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; - if (!rcb->dvp) { + if (rcb->dvp == NULL) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, prcb, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK); if ((rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb)) == BSD_SUCCESS) ccb->ccb_h.status = CAM_REQ_ABORTED; error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Abort a taskset, task management functionality */ static int pqisrc_scsi_abort_task_set(pqisrc_softstate_t *softs, union ccb *ccb) { struct ccb_hdr *ccb_h = &ccb->ccb_h; rcb_t *rcb = NULL; uint32_t tag; int rval; DBG_FUNC("IN\n"); tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; + rcb->cm_ccb = ccb; - if (!rcb->dvp) { + if (rcb->dvp == NULL) { DBG_ERR("dvp is null, tmf type : 0x%x\n", ccb_h->func_code); rval = ENXIO; goto error_tmf; } rcb->tm_req = true; rval = pqisrc_send_tmf(softs, rcb->dvp, rcb, NULL, SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); error_tmf: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * Target reset task management functionality */ static int pqisrc_target_reset( pqisrc_softstate_t *softs, union ccb *ccb) { + + /* pqi_scsi_dev_t *devp = 
softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; */ struct ccb_hdr *ccb_h = &ccb->ccb_h; - pqi_scsi_dev_t *devp = softs->device_list[ccb->ccb_h.target_id][ccb->ccb_h.target_lun]; rcb_t *rcb = NULL; uint32_t tag; int rval; + int bus, target, lun; + int index; + DBG_FUNC("IN\n"); + bus = softs->bus_id; + target = ccb->ccb_h.target_id; + lun = ccb->ccb_h.target_lun; + + index = pqisrc_find_btl_list_index(softs,bus,target,lun); + if (index == INVALID_ELEM) { + DBG_ERR("device not found at BTL %d:%d:%d\n",bus,target,lun); + return (-1); + } + + pqi_scsi_dev_t *devp = softs->dev_list[index]; if (devp == NULL) { DBG_ERR("bad target %d, tmf type : 0x%x\n", ccb_h->target_id, ccb_h->func_code); - return ENXIO; + return (-1); } tag = pqisrc_get_tag(&softs->taglist); rcb = &softs->rcb[tag]; rcb->tag = tag; - - devp->reset_in_progress = true; + rcb->cm_ccb = ccb; rcb->tm_req = true; rval = pqisrc_send_tmf(softs, devp, rcb, NULL, SOP_TASK_MANAGEMENT_LUN_RESET); rval = pqi_tmf_status_to_bsd_tmf_status(rval, rcb); + devp->reset_in_progress = false; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); DBG_FUNC("OUT rval = %d\n", rval); return rval; } /* * cam entry point of the smartpqi module. */ static void smartpqi_cam_action(struct cam_sim *sim, union ccb *ccb) { struct pqisrc_softstate *softs = cam_sim_softc(sim); struct ccb_hdr *ccb_h = &ccb->ccb_h; DBG_FUNC("IN\n"); switch (ccb_h->func_code) { case XPT_SCSI_IO: { if(!pqisrc_io_start(sim, ccb)) { return; } break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQ_INVALID; break; } cam_calc_geometry(ccg, /* extended */ 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_PATH_INQ: { update_sim_properties(sim, &ccb->cpi); ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: get_transport_settings(softs, &ccb->cts); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_ABORT: if(pqisrc_scsi_abort_task(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); DBG_ERR("Abort task failed on %d\n", ccb->ccb_h.target_id); return; } break; case XPT_TERM_IO: if (pqisrc_scsi_abort_task_set(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Abort task set failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } break; case XPT_RESET_DEV: if(pqisrc_target_reset(softs, ccb)) { ccb->ccb_h.status = CAM_REQ_CMP_ERR; DBG_ERR("Target reset failed on %d\n", ccb->ccb_h.target_id); xpt_done(ccb); return; } else { ccb->ccb_h.status = CAM_REQ_CMP; } break; case XPT_RESET_BUS: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; return; default: DBG_WARN("UNSUPPORTED FUNC CODE\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } xpt_done(ccb); DBG_FUNC("OUT\n"); } /* * Function to poll the response, when interrupts are unavailable * This also serves supporting crash dump. 
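 * Each response queue is drained directly via pqisrc_process_response_queue().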
*/ static void smartpqi_poll(struct cam_sim *sim) { struct pqisrc_softstate *softs = cam_sim_softc(sim); int i; for (i = 1; i < softs->intr_count; i++ ) pqisrc_process_response_queue(softs, i); } /* * Function to adjust the queue depth of a device */ void smartpqi_adjust_queue_depth(struct cam_path *path, uint32_t queue_depth) { struct ccb_relsim crs; - DBG_INFO("IN\n"); + DBG_FUNC("IN\n"); - memset(&crs, 0, sizeof(crs)); + memset(&crs, 0, sizeof(struct ccb_relsim)); xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = queue_depth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed stat=%d\n", crs.ccb_h.status); } - DBG_INFO("OUT\n"); + DBG_FUNC("OUT\n"); } /* * Function to register async callback for setting queue depth */ static void smartpqi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct pqisrc_softstate *softs; softs = (struct pqisrc_softstate*)callback_arg; DBG_FUNC("IN\n"); switch (code) { case AC_FOUND_DEVICE: { struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } uint32_t t_id = cgd->ccb_h.target_id; - if (t_id <= (PQI_CTLR_INDEX - 1)) { + /* if (t_id <= (PQI_CTLR_INDEX - 1)) { */ + if (t_id >= PQI_CTLR_INDEX) { if (softs != NULL) { - pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; - if (dvp == NULL) { - DBG_ERR("Target is null, target id=%d\n", t_id); - break; + /* pqi_scsi_dev_t *dvp = softs->device_list[t_id][cgd->ccb_h.target_lun]; */ + int lun = cgd->ccb_h.target_lun; + int index = pqisrc_find_btl_list_index(softs,softs->bus_id,t_id,lun); + if (index != INVALID_ELEM) { + pqi_scsi_dev_t *dvp = softs->dev_list[index]; + if (dvp == NULL) { + DBG_ERR("Target is null, target id=%u\n", t_id); + break; + } + smartpqi_adjust_queue_depth(path, dvp->queue_depth); } - smartpqi_adjust_queue_depth(path, - dvp->queue_depth); } } break; } default: break; } DBG_FUNC("OUT\n"); } /* * Function to register sim with CAM layer for smartpqi driver */ int register_sim(struct pqisrc_softstate *softs, int card_index) { int max_transactions; union ccb *ccb = NULL; - int error; + cam_status status = 0; struct ccb_setasync csa; struct cam_sim *sim; DBG_FUNC("IN\n"); max_transactions = softs->max_io_for_scsi_ml; softs->os_specific.devq = cam_simq_alloc(max_transactions); if (softs->os_specific.devq == NULL) { DBG_ERR("cam_simq_alloc failed txns = %d\n", max_transactions); return ENOMEM; } sim = cam_sim_alloc(smartpqi_cam_action, \ smartpqi_poll, "smartpqi", softs, \ card_index, &softs->os_specific.cam_lock, \ 1, max_transactions, softs->os_specific.devq); if (sim == NULL) { DBG_ERR("cam_sim_alloc failed txns = %d\n", max_transactions); cam_simq_free(softs->os_specific.devq); return ENOMEM; } softs->os_specific.sim = sim; mtx_lock(&softs->os_specific.cam_lock); - error = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); - if (error != CAM_SUCCESS) { - DBG_ERR("xpt_bus_register failed errno %d\n", error); + status = xpt_bus_register(sim, softs->os_specific.pqi_dev, 0); + if (status != CAM_SUCCESS) { + DBG_ERR("xpt_bus_register failed status=%d\n", status); cam_sim_free(softs->os_specific.sim, FALSE); cam_simq_free(softs->os_specific.devq); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } softs->os_specific.sim_registered = TRUE; ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { DBG_ERR("xpt_create_path failed\n"); return ENXIO; } if 
(xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(softs->os_specific.sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { DBG_ERR("xpt_create_path failed\n"); xpt_free_ccb(ccb); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); cam_sim_free(softs->os_specific.sim, TRUE); mtx_unlock(&softs->os_specific.cam_lock); return ENXIO; } /* - * Callback to set the queue depth per target which is + * Callback to set the queue depth per target which is * derived from the FW. - */ + */ softs->os_specific.path = ccb->ccb_h.path; - memset(&csa, 0, sizeof(csa)); + memset(&csa, 0, sizeof(struct ccb_setasync)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { - DBG_ERR("Unable to register smartpqi_aysnc handler: %d!\n", + DBG_ERR("Unable to register smartpqi_async handler: %d!\n", csa.ccb_h.status); } mtx_unlock(&softs->os_specific.cam_lock); - DBG_INFO("OUT\n"); + DBG_FUNC("OUT\n"); return BSD_SUCCESS; } /* * Function to deregister smartpqi sim from cam layer */ void deregister_sim(struct pqisrc_softstate *softs) { struct ccb_setasync csa; - + DBG_FUNC("IN\n"); if (softs->os_specific.mtx_init) { mtx_lock(&softs->os_specific.cam_lock); } - - memset(&csa, 0, sizeof(csa)); + memset(&csa, 0, sizeof(struct ccb_setasync)); xpt_setup_ccb(&csa.ccb_h, softs->os_specific.path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = smartpqi_async; csa.callback_arg = softs; xpt_action((union ccb *)&csa); xpt_free_path(softs->os_specific.path); if (softs->os_specific.sim) { xpt_release_simq(softs->os_specific.sim, 0); xpt_bus_deregister(cam_sim_path(softs->os_specific.sim)); softs->os_specific.sim_registered = FALSE; cam_sim_free(softs->os_specific.sim, FALSE); softs->os_specific.sim = NULL; } if (softs->os_specific.mtx_init) { mtx_unlock(&softs->os_specific.cam_lock); } if (softs->os_specific.devq != NULL) { cam_simq_free(softs->os_specific.devq); } if (softs->os_specific.mtx_init) { mtx_destroy(&softs->os_specific.cam_lock); softs->os_specific.mtx_init = FALSE; } mtx_destroy(&softs->os_specific.map_lock); DBG_FUNC("OUT\n"); } void os_rescan_target(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { - struct cam_path *tmppath; + struct cam_path *tmppath = NULL; - DBG_FUNC("IN\n"); + DBG_FUNC("IN\n"); - if(softs->os_specific.sim_registered) { - if (xpt_create_path(&tmppath, NULL, - cam_sim_path(softs->os_specific.sim), - device->target, device->lun) != CAM_REQ_CMP) { - DBG_ERR("unable to create path for async event!!! Bus: %d Target: %d Lun: %d\n", - device->bus, device->target, device->lun); - return; - } - xpt_async(AC_INQ_CHANGED, tmppath, NULL); - xpt_free_path(tmppath); - } + if(softs->os_specific.sim_registered) { + if (xpt_create_path(&tmppath, NULL, + cam_sim_path(softs->os_specific.sim), + device->target, device->lun) != CAM_REQ_CMP) { + DBG_ERR("unable to create path for async event!!!
Bus: %d Target: %d Lun: %d\n", + device->bus, device->target, device->lun); + return; + } + xpt_async(AC_INQ_CHANGED, tmppath, NULL); + xpt_free_path(tmppath); + } - device->scsi_rescan = false; + device->scsi_rescan = false; - DBG_FUNC("OUT\n"); + DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_cmd.c index f5820647fed4..8486ac12df79 100644 --- a/sys/dev/smartpqi/smartpqi_cmd.c +++ b/sys/dev/smartpqi/smartpqi_cmd.c @@ -1,74 +1,78 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" /* * Function to submit the request to the adapter. */ int pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req) { char *slot = NULL; uint32_t offset; iu_header_t *hdr = (iu_header_t *)req; + /* TODO: Could the IU copy be done with a fixed size?
*/ uint32_t iu_len = hdr->iu_length + 4 ; /* header size */ int i = 0; DBG_FUNC("IN\n"); + /* The code below assumes we only take 1 element (no spanning) */ + ASSERT(iu_len <= ib_q->elem_size); + PQI_LOCK(&ib_q->lock); /* Check queue full */ if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) { DBG_WARN("OUT Q full\n"); PQI_UNLOCK(&ib_q->lock); return PQI_STATUS_QFULL; } /* Get the slot */ offset = ib_q->pi_local * ib_q->elem_size; slot = ib_q->array_virt_addr + offset; /* Copy the IU */ memcpy(slot, req, iu_len); - DBG_INFO("IU : \n"); + DBG_IO("IU : \n"); for(i = 0; i< iu_len; i++) - DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i))); + DBG_IO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i))); /* Update the local PI */ ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem; - DBG_INFO("ib_q->pi_local : %x IU size : %d\n", + DBG_IO("ib_q->pi_local : %x IU size : %d\n", ib_q->pi_local, hdr->iu_length); - DBG_INFO("*ib_q->ci_virt_addr: %x\n", + DBG_IO("*ib_q->ci_virt_addr: %x\n", *(ib_q->ci_virt_addr)); /* Inform the fw about the new IU */ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); PQI_UNLOCK(&ib_q->lock); DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } diff --git a/sys/dev/smartpqi/smartpqi_defines.h b/sys/dev/smartpqi/smartpqi_defines.h index 20a9fc841140..bb0bb2b709aa 100644 --- a/sys/dev/smartpqi/smartpqi_defines.h +++ b/sys/dev/smartpqi/smartpqi_defines.h @@ -1,1168 +1,1333 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _PQI_DEFINES_H #define _PQI_DEFINES_H -#define PQI_STATUS_FAILURE -1 -#define PQI_STATUS_TIMEOUT -2 -#define PQI_STATUS_QFULL -3 -#define PQI_STATUS_SUCCESS 0 +#define SIS_POLL_WAIT +#define DEVICE_HINT + +#ifndef CT_ASSERT +/* If the OS hasn't specified a preferred compile time assert, create one */ +#if !defined(__C_ASSERT__) + #define CT_ASSERT(e) extern char __assert_test_case[1 - (2*(!(e)))] +#else + #define CT_ASSERT(e) typedef char __C_ASSERT__[(e)?1:-1] +#endif +#endif +#define PQI_STATUS_FAILURE -1 +#define PQI_STATUS_TIMEOUT -2 +#define PQI_STATUS_QFULL -3 +#define PQI_STATUS_SUCCESS 0 + +#define BITS_PER_BYTE 8 +#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 +#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE 1 +#define PQI_REQUEST_HEADER_LENGTH 4 /* Maximum timeout for internal command completion */ -#define TIMEOUT_INFINITE ((uint32_t) (-1)) -#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE +#define TIMEOUT_INFINITE ((uint32_t) (-1)) +#define PQISRC_CMD_TIMEOUT TIMEOUT_INFINITE #define PQISRC_PASSTHROUGH_CMD_TIMEOUT PQISRC_CMD_TIMEOUT /* Delay in milli seconds */ -#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000) +#define PQISRC_TMF_TIMEOUT (OS_TMF_TIMEOUT_SEC * 1000) /* Delay in micro seconds */ -#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */ +#define PQISRC_PENDING_IO_TIMEOUT_USEC 30000000 /* 30 seconds */ /* If want to disable atomic operations on device active io, then set to zero */ -#define PQISRC_DEVICE_IO_COUNTER 1 +#define PQISRC_DEVICE_IO_COUNTER 1 + +/* #define SHARE_EVENT_QUEUE_FOR_IO 1 */ -#define INVALID_ELEM 0xffff +#define INVALID_ELEM 0xffff #ifndef MIN -#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define MIN(a,b) ((a) < (b) ? (a) : (b)) #endif #ifndef MAX -#define MAX(a,b) ((a) > (b) ? (a) : (b)) +#define MAX(a,b) ((a) > (b) ? 
(a) : (b)) +#endif + +/* defines for stream detection */ +#define TICKS ticks + +#ifndef INT_MAX +#define INT_MAX 0x7FFFFFFF #endif -#define PQISRC_ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y)) -#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) +#define PQISRC_ROUND_UP(x, y) (((x) + (y) - 1) / (y) * (y)) +#define PQISRC_ROUND_DOWN(x, y) (((x) / (y)) * (y)) +#define PQISRC_DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) + +#if !defined(offsetofend) +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) +#endif #define ALIGN_BOUNDARY(a, n) { \ if (a % n) \ a = a + (n - a % n); \ } /* Busy wait timeout on a condition */ #define COND_BUSYWAIT(cond, timeout /* in millisecond */) { \ if (!(cond)) { \ while (timeout) { \ OS_BUSYWAIT(1000); \ if (cond) \ break; \ timeout--; \ } \ } \ } /* Wait timeout on a condition*/ #define COND_WAIT(cond, timeout /* in millisecond */) { \ if (!(cond)) { \ while (timeout) { \ OS_SLEEP(1000); \ if (cond) \ break; \ timeout--; \ } \ } \ } #define FILL_QUEUE_ARRAY_ADDR(q,virt,dma) { \ q->array_virt_addr = virt; \ q->array_dma_addr = dma; \ } #define true 1 #define false 0 enum INTR_TYPE { LOCK_INTR, LOCK_SLEEP }; -#define LOCKNAME_SIZE 32 +#define LOCKNAME_SIZE 32 #define INTR_TYPE_NONE 0x0 #define INTR_TYPE_FIXED 0x1 #define INTR_TYPE_MSI 0x2 #define INTR_TYPE_MSIX 0x4 #define SIS_ENABLE_MSIX 0x40 #define SIS_ENABLE_INTX 0x80 #define PQISRC_LEGACY_INTX_MASK 0x1 -#define DMA_TO_VIRT(mem) ((mem)->virt_addr) -#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff) -#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32) - +#define DMA_TO_VIRT(mem) ((mem)->virt_addr) +#define DMA_PHYS_LOW(mem) (((mem)->dma_addr) & 0x00000000ffffffff) +#define DMA_PHYS_HIGH(mem) ((((mem)->dma_addr) & 0xffffffff00000000) >> 32) -typedef enum REQUEST_STATUS { - REQUEST_SUCCESS = 0, - REQUEST_PENDING = -1, - REQUEST_FAILED = -2, -}REQUEST_STATUS_T; typedef enum IO_PATH { + UNKNOWN_PATH, AIO_PATH, RAID_PATH }IO_PATH_T; typedef enum device_type { DISK_DEVICE, TAPE_DEVICE, ROM_DEVICE = 5, SES_DEVICE, CONTROLLER_DEVICE, MEDIUM_CHANGER_DEVICE, RAID_DEVICE = 0x0c, ENCLOSURE_DEVICE, ZBC_DEVICE = 0x14 } device_type_t; typedef enum controller_state { PQI_UP_RUNNING, PQI_BUS_RESET, }controller_state_t; #define PQISRC_MAX_MSIX_SUPPORTED 64 /* SIS Specific */ #define PQISRC_INIT_STRUCT_REVISION 9 #define PQISRC_SECTOR_SIZE 512 #define PQISRC_BLK_SIZE PQISRC_SECTOR_SIZE #define PQISRC_DEFAULT_DMA_ALIGN 4 #define PQISRC_DMA_ALIGN_MASK (PQISRC_DEFAULT_DMA_ALIGN - 1) #define PQISRC_ERR_BUF_DMA_ALIGN 32 #define PQISRC_ERR_BUF_ELEM_SIZE MAX(sizeof(raid_path_error_info_elem_t),sizeof(aio_path_error_info_elem_t)) #define PQISRC_INIT_STRUCT_DMA_ALIGN 16 #define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19 #define SIS_CMD_GET_COMM_PREFERRED_SETTINGS 0x26 #define SIS_CMD_GET_PQI_CAPABILITIES 0x3000 #define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b #define SIS_SUPPORT_EXT_OPT 0x00800000 #define SIS_SUPPORT_PQI 0x00000004 #define SIS_SUPPORT_PQI_RESET_QUIESCE 0x00000008 #define SIS_PQI_RESET_QUIESCE 0x1000000 #define SIS_STATUS_OK_TIMEOUT 120000 /* in milli sec, 5 sec */ #define SIS_CMD_COMPLETE_TIMEOUT 30000 /* in milli sec, 30 secs */ #define SIS_POLL_START_WAIT_TIME 20000 /* in micro sec, 20 milli sec */ #define SIS_DB_BIT_CLEAR_TIMEOUT_CNT 120000 /* 500usec * 120000 = 60 sec */ #define SIS_ENABLE_TIMEOUT 3000 #define REENABLE_SIS 0x1 #define TRIGGER_NMI_SIS 0x800000 /*SIS Register status defines */ #define PQI_CTRL_KERNEL_UP_AND_RUNNING 0x80 #define 
PQI_CTRL_KERNEL_PANIC 0x100 -#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF -#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000 -#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */ -#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */ +#define SIS_CTL_TO_HOST_DB_DISABLE_ALL 0xFFFFFFFF +#define SIS_CTL_TO_HOST_DB_CLEAR 0x00001000 +#define SIS_CMD_SUBMIT 0x00000200 /* Bit 9 */ +#define SIS_CMD_COMPLETE 0x00001000 /* Bit 12 */ #define SIS_CMD_STATUS_SUCCESS 0x1 /* PQI specific */ /* defines */ -#define PQISRC_PQI_REG_OFFSET 0x4000 -#define PQISRC_MAX_OUTSTANDING_REQ 4096 -#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16 -#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16 - - +#define PQISRC_PQI_REG_OFFSET 0x4000 -#define PQI_MIN_OP_IB_QUEUE_ID 1 -#define PQI_OP_EVENT_QUEUE_ID 1 -#define PQI_MIN_OP_OB_QUEUE_ID 2 -#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128 +/* Number of Queues this driver compile can potentially support */ +#define PQISRC_MAX_SUPPORTED_OP_IB_Q 128 #define PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q (PQISRC_MAX_SUPPORTED_OP_IB_Q / 2) #define PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q (PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q) -#define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q) -#define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ -#define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2 -#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64 -#define PQISRC_OP_MAX_IBQ_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */ -#define PQISRC_OP_MIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */ -#define PQISRC_OP_OBQ_ELEM_SIZE 1 /* 16 bytes */ -#define PQISRC_ADMIN_IBQ_ELEM_SIZE 2 /* 2 * 16 = 32 bytes */ -#define PQISRC_INTR_COALSC_GRAN 0 -#define PQISRC_PROTO_BIT_MASK 0 -#define PQISRC_SGL_SUPPORTED_BIT_MASK 0 - -#define PQISRC_NUM_EVENT_Q_ELEM 32 -#define PQISRC_EVENT_Q_ELEM_SIZE 32 +#define PQISRC_MAX_SUPPORTED_OP_OB_Q 64 + + +/* PQI Capability maxes (from controller) */ +#define PQISRC_MAX_ELEMENTS 8192 +#define PQISRC_OP_MIN_ELEM_SIZE 1 /* 16 bytes */ +#define PQISRC_OP_MAX_ELEM_SIZE 8 /* 8 * 16 = 128 bytes */ +#define PQISRC_MAX_SPANNING_IU_LENGTH 1152 +#define PQISRC_MAX_OUTSTANDING_REQ 4096 +/* #define PQISRC_MAX_OP_IB_QUEUE_ELEM_NUM (PQISRC_MAX_OUTSTANDING_REQ / PQISRC_MAX_SUPPORTED_OP_IB_Q) */ +/* #define PQISRC_MAX_OP_OB_QUEUE_ELEM_NUM PQISRC_MAX_OUTSTANDING_REQ */ +/* #define PQISRC_MIN_OP_OB_QUEUE_ELEM_NUM 2 */ + +#ifdef DEVICE_HINT +#define PQISRC_MIN_OUTSTANDING_REQ (PQI_RESERVED_IO_SLOTS_CNT + OS_MIN_OUTSTANDING_REQ) +#endif + + +/* Queue IDs Enumeration */ +#define PQI_ADMIN_IB_QUEUE_ID 0 +#define PQI_ADMIN_OB_QUEUE_ID 0 +#define PQI_MIN_OP_IB_QUEUE_ID 1 +#define PQI_OP_EVENT_QUEUE_ID 1 +#define PQI_MIN_OP_OB_QUEUE_ID 2 + + +/* PQI IU Element Sizes */ +#define PQISRC_ADMIN_IBQ_ELEM_SIZE_BYTES 64 +#define PQISRC_ADMIN_OBQ_ELEM_SIZE_BYTES 64 +#define PQISRC_OP_IBQ_ELEM_SIZE_BYTES 128 +#define PQISRC_OP_OBQ_ELEM_SIZE_BYTES 16 +#define PQISRC_EVENT_Q_ELEM_SIZE_BYTES 32 + + +/* Number of elements this driver compile will potentially use */ +#define PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM 16 +#define PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM 16 +#define PQISRC_MAX_EVENT_QUEUE_ELEM_NUM 32 +#define PQISRC_MAX_SPANNING_ELEMS 9 + +/* setting maximums for adv aio */ +#define PQISRC_MAX_AIO_RAID5_OR_6_WRITE (8*1024) /* 8 KiB */ +#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV 0x0000 /* No Limit */ +#define PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV 0x0000 /* No Limit */ +#define PQISRC_MAX_AIO_NVME_CRYPTO (32*1024) /* 32 KiB */ +#define PQISRC_MAX_AIO_NO_LIMIT 0x0000 /* No Limit */ +#define PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO 0x0000 /* 
No Limit */ +#define PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO (32*1024) + +#define SENSE_FEATURES_CRYPTO_OFFSET offsetof(bmic_sense_feature_page_io_aio_subpage_t, max_aio_rw_xfer_crypto_sas_sata) +#define MINIMUM_AIO_SUBPAGE_LENGTH \ + (offsetofend(bmic_sense_feature_page_io_aio_subpage_t, \ + max_aio_write_raid1_10_3drv) - \ + (sizeof(((bmic_sense_feature_page_io_aio_subpage_t *)0)->header))) + +/* Not used or useful yet */ +/* #define PQISRC_INTR_COALSC_GRAN 0 */ +/* #define PQISRC_PROTO_BIT_MASK 0 */ +/* #define PQISRC_SGL_SUPPORTED_BIT_MASK 0 */ + +#define PQISRC_MAX_SUPPORTED_MIRRORS 3 /* PQI Registers state status */ #define PQI_RESET_ACTION_RESET 0x1 #define PQI_RESET_ACTION_COMPLETED 0x2 #define PQI_RESET_TYPE_NO_RESET 0x0 #define PQI_RESET_TYPE_SOFT_RESET 0x1 #define PQI_RESET_TYPE_FIRM_RESET 0x2 #define PQI_RESET_TYPE_HARD_RESET 0x3 #define PQI_RESET_POLL_INTERVAL 100000 /*100 msec*/ enum pqisrc_ctrl_mode{ CTRL_SIS_MODE = 0, CTRL_PQI_MODE }; /* PQI device performing internal initialization (e.g., POST). */ #define PQI_DEV_STATE_POWER_ON_AND_RESET 0x0 /* Upon entry to this state PQI device initialization begins. */ #define PQI_DEV_STATE_PQI_STATUS_AVAILABLE 0x1 /* PQI device Standard registers are available to the driver. */ #define PQI_DEV_STATE_ALL_REGISTERS_READY 0x2 /* PQI device is initialized and ready to process any PCI transactions. */ #define PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY 0x3 /* The PQI Device Error register indicates the error. */ #define PQI_DEV_STATE_ERROR 0x4 #define PQI_DEV_STATE_AT_INIT ( PQI_DEV_STATE_PQI_STATUS_AVAILABLE | \ PQI_DEV_STATE_ALL_REGISTERS_READY | \ PQI_DEV_STATE_ADMIN_QUEUE_PAIR_READY ) #define PQISRC_PQI_DEVICE_SIGNATURE "PQI DREG" -#define PQI_ADMINQ_ELEM_ARRAY_ALIGN 64 -#define PQI_ADMINQ_CI_PI_ALIGN 64 -#define PQI_OPQ_ELEM_ARRAY_ALIGN 64 -#define PQI_OPQ_CI_PI_ALIGN 4 -#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */ -#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */ + +#define PQI_ADDR_ALIGN_MASK_4K 0xFFF/* lsb 12 bits */ +#define PQI_ADDR_ALIGN_MASK_1K 0x3FF/* lsb 10 bits */ +#define PQI_ADDR_ALIGN_MASK_64 0x3F /* lsb 6 bits */ +#define PQI_ADDR_ALIGN_MASK_4 0x3 /* lsb 2 bits */ +#define PQI_ADDR_ALIGN 4096 +#define PQI_ADDR_ALIGN_MASK PQI_ADDR_ALIGN_MASK_4K + + +#define PQI_FORCE_IQ_ELEMENTS 32 /* 4096/128 = 32 (see PQISRC_OP_IBQ_ELEM_SIZE_BYTES) */ +#define PQI_FORCE_OQ_ELEMENTS 256 /* 4096/16 = 256 (see PQISRC_OP_OBQ_ELEM_SIZE_BYTES) */ + +#define PQI_CI_PI_ALIGN 64 +#define PQI_CI_PI_ALIGN_MASK PQI_ADDR_ALIGN_MASK_64 #define PQISRC_PQIMODE_READY_TIMEOUT (30 * 1000 ) /* 30 secs */ #define PQISRC_MODE_READY_POLL_INTERVAL 1000 /* 1 msec */ #define PRINT_PQI_SIGNATURE(sign) { int i = 0; \ char si[9]; \ for(i=0;i<8;i++) \ si[i] = *((char *)&(sign)+i); \ si[i] = '\0'; \ DBG_INFO("Signature is %s",si); \ } #define PQI_CONF_TABLE_MAX_LEN ((uint16_t)~0) #define PQI_CONF_TABLE_SIGNATURE "CFGTABLE" /* PQI configuration table section IDs */ #define PQI_CONF_TABLE_ALL_SECTIONS (-1) -#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0 +#define PQI_CONF_TABLE_SECTION_GENERAL_INFO 0 #define PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES 1 #define PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA 2 -#define PQI_CONF_TABLE_SECTION_DEBUG 3 -#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4 - - -#define PQI_FIRMWARE_FEATURE_OFA 0 -#define PQI_FIRMWARE_FEATURE_SMP 1 -#define PQI_FIRMWARE_FEATURE_MAX_KNOWN 2 -#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_0 3 -#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_1_10 4 -#define PQI_FIRMWARE_FEATURE_AIO_READ_RAID_5_50 5 -#define 
PQI_FIRMWARE_FEATURE_AIO_READ_RAID_6_60 6 -#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_0 7 -#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_1_10 8 -#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_5_50 9 -#define PQI_FIRMWARE_FEATURE_AIO_WRITE_RAID_6_60 10 -#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 -#define PQI_FIRMWARE_FEATURE_SATA_WWN_FOR_DEV_UNIQUE_ID 12 -#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT 13 -#define PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT 14 -#define PQI_FIRMWARE_FEATURE_MAXIMUM 14 - -#define CTRLR_HEARTBEAT_CNT(softs) \ - LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off)) +#define PQI_CONF_TABLE_SECTION_DEBUG 3 +#define PQI_CONF_TABLE_SECTION_HEARTBEAT 4 +#define PQI_CONF_TABLE_SOFT_RESET 5 + +/* PQI feature bits as defined in PQI_SPEC.doc */ +#define PQI_FIRMWARE_FEATURE_OFA 0 +#define PQI_FIRMWARE_FEATURE_SMP 1 +#define PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE 2 +#define PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS 3 +#define PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS 4 +#define PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS 5 +#define PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS 6 +#define PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS 7 +#define PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS 8 +#define PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS 9 +#define PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS 10 +#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 +#define PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN 12 +#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13 +#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14 +#define PQI_FIRMWARE_FEATURE_MAXIMUM 14 +#define PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID 16 + +#define CTRLR_HEARTBEAT_CNT(softs) LE_64(PCI_MEM_GET64(softs, softs->heartbeat_counter_abs_addr, softs->heartbeat_counter_off)) #define PQI_HEARTBEAT_TIMEOUT_SEC (10) /* 10 sec interval */ #define PQI_HOST_WELLNESS_TIMEOUT_SEC (24*3600) /* pqi-2r00a table 36 */ -#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000) +#define PQI_ADMIN_QUEUE_MSIX_DISABLE (0x80000000) #define PQI_ADMIN_QUEUE_MSIX_ENABLE (0 << 31) #define PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR 0x01 #define PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR 0x02 #define PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE 0x00 #define PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT 1000 /* in miLLI sec, 1 sec, 100 ms is standard */ #define PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT 100 /* 100 ms is standard */ #define PQISRC_ADMIN_CMD_RESP_TIMEOUT 3000 /* 3 sec */ #define PQISRC_RAIDPATH_CMD_TIMEOUT 30000 /* 30 sec */ #define REPORT_PQI_DEV_CAP_DATA_BUF_SIZE sizeof(pqi_dev_cap_t) #define REPORT_MANUFACTURER_INFO_DATA_BUF_SIZE 0x80 /* Data buffer size specified in bytes 0-1 of data buffer. 128 bytes. */ /* PQI IUs */ /* Admin IU request length not including header. */ #define PQI_STANDARD_IU_LENGTH 0x003C /* 60 bytes. 
*/ #define PQI_IU_TYPE_GENERAL_ADMIN_REQUEST 0x60 #define PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE 0xe0 /* PQI / Vendor specific IU */ #define PQI_FUNCTION_REPORT_DEV_CAP 0x00 #define PQI_REQUEST_IU_RAID_TASK_MANAGEMENT 0x13 #define PQI_IU_TYPE_RAID_PATH_IO_REQUEST 0x14 #define PQI_IU_TYPE_AIO_PATH_IO_REQUEST 0x15 #define PQI_REQUEST_IU_AIO_TASK_MANAGEMENT 0x16 +#define PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST 0x18 +#define PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST 0x19 +#define PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST 0x1A +#define PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT 0x20 #define PQI_REQUEST_IU_GENERAL_ADMIN 0x60 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG 0x72 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG 0x73 #define PQI_REQUEST_IU_VENDOR_GENERAL 0x75 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 #define PQI_RESPONSE_IU_TASK_MANAGEMENT 0x93 #define PQI_RESPONSE_IU_GENERAL_ADMIN 0xe0 #define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS 0xf0 #define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS 0xf1 #define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR 0xf2 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR 0xf3 #define PQI_RESPONSE_IU_AIO_PATH_IS_OFF 0xf4 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 #define PQI_RESPONSE_IU_VENDOR_GENERAL 0xf7 #define PQI_REQUEST_HEADER_LENGTH 4 #define PQI_FUNCTION_CREATE_OPERATIONAL_IQ 0x10 #define PQI_FUNCTION_CREATE_OPERATIONAL_OQ 0x11 #define PQI_FUNCTION_DELETE_OPERATIONAL_IQ 0x12 #define PQI_FUNCTION_DELETE_OPERATIONAL_OQ 0x13 #define PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP 0x14 #define PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO 1 -#define PQI_DEFAULT_IB_QUEUE 0 +#define PQI_DEFAULT_IB_QUEUE 0 #define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE 0 -#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0 -#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1 +#define PQI_VENDOR_RESPONSE_IU_SUCCESS 0 +#define PQI_VENDOR_RESPONSE_IU_UNSUCCESS 1 #define PQI_VENDOR_RESPONSE_IU_INVALID_PARAM 2 -#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3 +#define PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC 3 /* Interface macros */ #define GET_FW_STATUS(softs) \ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad3_fw_status, LEGACY_SIS_OMR)) #define SIS_IS_KERNEL_PANIC(softs) \ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC) #define SIS_IS_KERNEL_UP(softs) \ (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_UP_AND_RUNNING) #define PQI_GET_CTRL_MODE(softs) \ (PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0)) -#define PQI_SAVE_CTRL_MODE(softs, mode) \ - PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode) +#define PQI_SAVE_CTRL_MODE(softs, mode) { \ + PCI_MEM_PUT32(softs, &softs->ioa_reg->scratchpad0, LEGACY_SIS_SCR0, mode); \ + OS_SLEEP(1000); \ + } -#define PQISRC_MAX_TARGETID 1024 -#define PQISRC_MAX_TARGETLUN 64 +#define LEGACY_SIS_SCR_REG_LENGTH 4 +#define LEGACY_SIS_SCR1 LEGACY_SIS_SCR0 + LEGACY_SIS_SCR_REG_LENGTH +#define PQI_GET_CTRL_TYPE(softs) \ + ((PCI_MEM_GET32(softs, &softs->ioa_reg->scratchpad1, LEGACY_SIS_SCR1)) \ + & 0x0000FFFF) + +/* smart raid-hba pqi functional spec, scratchpad register 1 spec */ +#define PQI_CTRL_PRODUCT_ID_GEN1 0x0000 +#define PQI_CTRL_PRODUCT_ID_GEN2_REV_A 0x0007 +#define PQI_CTRL_PRODUCT_ID_GEN2_REV_B 0x0107 + +#define PQISRC_MAX_TARGETID 1024 +#define PQISRC_MAX_TARGETLUN 64 /* Vendor specific IU Type for Event config Cmds */ -#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72 -#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73 -#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 +#define PQI_REQUEST_IU_REPORT_EVENT_CONFIG 0x72 +#define PQI_REQUEST_IU_SET_EVENT_CONFIG 0x73 +#define 
PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT 0xf6 + #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT 0x81 #define PQI_MANAGEMENT_CMD_RESP_TIMEOUT 3000 #define PQISRC_EVENT_ACK_RESP_TIMEOUT 1000 /* Supported Event types by controller */ #define PQI_NUM_SUPPORTED_EVENTS 6 #define PQI_EVENT_TYPE_HOTPLUG 0x1 #define PQI_EVENT_TYPE_HARDWARE 0x2 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE 0x4 #define PQI_EVENT_TYPE_LOGICAL_DEVICE 0x5 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE 0xfd #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE 0xfe /* for indexing into the pending_events[] field of struct pqisrc_softstate */ #define PQI_EVENT_HOTPLUG 0 #define PQI_EVENT_HARDWARE 1 #define PQI_EVENT_PHYSICAL_DEVICE 2 #define PQI_EVENT_LOGICAL_DEVICE 3 #define PQI_EVENT_AIO_STATE_CHANGE 4 #define PQI_EVENT_AIO_CONFIG_CHANGE 5 /* Device flags */ -#define PQISRC_DFLAG_VALID (1 << 0) -#define PQISRC_DFLAG_CONFIGURING (1 << 1) +#define PQISRC_DFLAG_VALID (1 << 0) +#define PQISRC_DFLAG_CONFIGURING (1 << 1) -#define MAX_EMBEDDED_SG_IN_FIRST_IU 4 -#define MAX_EMBEDDED_SG_IN_IU 8 +#define MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT 4 +#define MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO 3 +#define MAX_EMBEDDED_SG_IN_IU 8 #define SG_FLAG_LAST 0x40000000 #define SG_FLAG_CHAIN 0x80000000 -#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET) -#define DEV_GONE(dev) (!dev || (dev->invalid == true)) -#define IS_AIO_PATH(dev) (dev->aio_enabled) -#define IS_RAID_PATH(dev) (!dev->aio_enabled) +#define IN_PQI_RESET(softs) (softs->ctlr_state & PQI_BUS_RESET) +#define DEV_GONE(dev) (!dev || (dev->invalid == true)) +#define IS_AIO_PATH(dev) (dev->aio_enabled) +#define IS_RAID_PATH(dev) (!dev->aio_enabled) -#define DEVICE_RESET(dvp) (dvp->reset_in_progress) +#define DEVICE_RESET(dvp) (dvp->reset_in_progress) /* SOP data direction flags */ -#define SOP_DATA_DIR_NONE 0x00 -#define SOP_DATA_DIR_FROM_DEVICE 0x01 -#define SOP_DATA_DIR_TO_DEVICE 0x02 -#define SOP_DATA_DIR_BIDIRECTIONAL 0x03 -#define SOP_PARTIAL_DATA_BUFFER 0x04 +#define SOP_DATA_DIR_UNKNOWN 0xFF +#define SOP_DATA_DIR_NONE 0x00 +#define SOP_DATA_DIR_FROM_DEVICE 0x01 +#define SOP_DATA_DIR_TO_DEVICE 0x02 +#define SOP_DATA_DIR_BIDIRECTIONAL 0x03 +#define SOP_PARTIAL_DATA_BUFFER 0x04 -#define PQISRC_DMA_VALID (1 << 0) -#define PQISRC_CMD_NO_INTR (1 << 1) +#define PQISRC_DMA_VALID (1 << 0) +#define PQISRC_CMD_NO_INTR (1 << 1) -#define SOP_TASK_ATTRIBUTE_SIMPLE 0 +#define SOP_TASK_ATTRIBUTE_SIMPLE 0 #define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE 1 -#define SOP_TASK_ATTRIBUTE_ORDERED 2 -#define SOP_TASK_ATTRIBUTE_ACA 4 +#define SOP_TASK_ATTRIBUTE_ORDERED 2 +#define SOP_TASK_ATTRIBUTE_ACA 4 -#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0 -#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4 +#define SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE 0x0 +#define SOP_TASK_MANAGEMENT_FUNCTION_REJECTED 0x4 #define SOP_TASK_MANAGEMENT_FUNCTION_FAILED 0x5 -#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8 +#define SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED 0x8 #define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK 0x01 #define SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK_SET 0x02 #define SOP_TASK_MANAGEMENT_LUN_RESET 0x8 /* Additional CDB bytes */ #define PQI_ADDITIONAL_CDB_BYTES_0 0 /* 16 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_4 1 /* 20 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_8 2 /* 24 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_12 3 /* 28 byte CDB */ #define PQI_ADDITIONAL_CDB_BYTES_16 4 /* 32 byte CDB */ #define PQI_PROTOCOL_SOP 0x0 #define PQI_AIO_STATUS_GOOD 0x0 #define PQI_AIO_STATUS_CHECK_CONDITION 0x2 #define 
PQI_AIO_STATUS_CONDITION_MET 0x4 #define PQI_AIO_STATUS_DEVICE_BUSY 0x8 #define PQI_AIO_STATUS_INT_GOOD 0x10 #define PQI_AIO_STATUS_INT_COND_MET 0x14 #define PQI_AIO_STATUS_RESERV_CONFLICT 0x18 #define PQI_AIO_STATUS_CMD_TERMINATED 0x22 #define PQI_AIO_STATUS_QUEUE_FULL 0x28 #define PQI_AIO_STATUS_TASK_ABORTED 0x40 #define PQI_AIO_STATUS_UNDERRUN 0x51 #define PQI_AIO_STATUS_OVERRUN 0x75 /* Status when Target Failure */ #define PQI_AIO_STATUS_IO_ERROR 0x1 #define PQI_AIO_STATUS_IO_ABORTED 0x2 #define PQI_AIO_STATUS_IO_NO_DEVICE 0x3 #define PQI_AIO_STATUS_INVALID_DEVICE 0x4 #define PQI_AIO_STATUS_AIO_PATH_DISABLED 0xe /* Service Response */ #define PQI_AIO_SERV_RESPONSE_COMPLETE 0 #define PQI_AIO_SERV_RESPONSE_FAILURE 1 #define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE 2 #define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED 3 #define PQI_AIO_SERV_RESPONSE_TMF_REJECTED 4 #define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN 5 #define PQI_TMF_WAIT_DELAY 10000000 /* 10 seconds */ #define PQI_RAID_STATUS_GOOD PQI_AIO_STATUS_GOOD #define PQI_RAID_STATUS_CHECK_CONDITION PQI_AIO_STATUS_CHECK_CONDITION #define PQI_RAID_STATUS_CONDITION_MET PQI_AIO_STATUS_CONDITION_MET #define PQI_RAID_STATUS_DEVICE_BUSY PQI_AIO_STATUS_DEVICE_BUSY #define PQI_RAID_STATUS_INT_GOOD PQI_AIO_STATUS_INT_GOOD #define PQI_RAID_STATUS_INT_COND_MET PQI_AIO_STATUS_INT_COND_MET #define PQI_RAID_STATUS_RESERV_CONFLICT PQI_AIO_STATUS_RESERV_CONFLICT #define PQI_RAID_STATUS_CMD_TERMINATED PQI_AIO_STATUS_CMD_TERMINATED #define PQI_RAID_STATUS_QUEUE_FULL PQI_AIO_STATUS_QUEUE_FULL #define PQI_RAID_STATUS_TASK_ABORTED PQI_AIO_STATUS_TASK_ABORTED #define PQI_RAID_STATUS_UNDERRUN PQI_AIO_STATUS_UNDERRUN #define PQI_RAID_STATUS_OVERRUN PQI_AIO_STATUS_OVERRUN +#define NUM_STREAMS_PER_LUN 8 + /* VPD inquiry pages */ #define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */ -#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ +#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ #define SA_VPD_PHYS_DEVICE_ID 0xc0 /* vendor-specific page */ #define SA_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ #define SA_VPD_LV_IOACCEL_STATUS 0xc2 /* vendor-specific page */ -#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */ +#define SA_VPD_LV_STATUS 0xc3 /* vendor-specific page */ #define VPD_PAGE (1 << 8) /* logical volume states */ #define SA_LV_OK 0x0 #define SA_LV_FAILED 0x1 #define SA_LV_NOT_CONFIGURED 0x2 #define SA_LV_DEGRADED 0x3 #define SA_LV_READY_FOR_RECOVERY 0x4 #define SA_LV_UNDERGOING_RECOVERY 0x5 #define SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED 0x6 #define SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM 0x7 #define SA_LV_HARDWARE_OVERHEATING 0x8 #define SA_LV_HARDWARE_HAS_OVERHEATED 0x9 #define SA_LV_UNDERGOING_EXPANSION 0xA #define SA_LV_NOT_AVAILABLE 0xb #define SA_LV_QUEUED_FOR_EXPANSION 0xc #define SA_LV_DISABLED_SCSI_ID_CONFLICT 0xd #define SA_LV_EJECTED 0xe #define SA_LV_UNDERGOING_ERASE 0xf #define SA_LV_UNDERGOING_RPI 0x12 #define SA_LV_PENDING_RPI 0x13 #define SA_LV_ENCRYPTED_NO_KEY 0x14 #define SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER 0x15 #define SA_LV_UNDERGOING_ENCRYPTION 0x16 #define SA_LV_UNDERGOING_ENCRYPTION_REKEYING 0x17 #define SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER 0x18 #define SA_LV_PENDING_ENCRYPTION 0x19 #define SA_LV_PENDING_ENCRYPTION_REKEYING 0x1a #define SA_LV_STATUS_VPD_UNSUPPORTED 0xff /* constants for flags field of ciss_vpd_logical_volume_status */ #define SA_LV_FLAGS_NO_HOST_IO 0x1 /* volume not available for */ /* * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands */ #define 
PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 /* 0 = no limit */ #define PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 0 #define PQI_LOG_EXT_QUEUE_DEPTH_ENABLED 0x20 #define PQI_LOG_EXT_QUEUE_ENABLE 0x56 #define MAX_RAW_M256_QDEPTH 32512 #define MAX_RAW_M16_QDEPTH 2032 #define PQI_PTRAID_UPDATE_ON_RESCAN_LUNS 0x80000000 -#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" +#define RAID_CTLR_LUNID ((uint8_t *) "\0\0\0\0\0\0\0\0") + +/* SCSI Cmds @todo: move SCMD_READ_6, etc. into library */ +#define SCSI_INQUIRY 0x12 +#define SCSI_MODE_SENSE 0x1a +#define SCSI_REPORT_LUNS 0xa0 +#define SCSI_LOG_SENSE 0x4d +#define SCSI_ATA_PASSTHRU16 0x85 -#define SA_CACHE_FLUSH 0x1 #define PQISRC_INQUIRY_TIMEOUT 30 + #define SA_INQUIRY 0x12 #define SA_REPORT_LOG 0xc2 /* Report Logical LUNs */ #define SA_REPORT_PHYS 0xc3 /* Report Physical LUNs */ #define SA_CISS_READ 0xc0 #define SA_GET_RAID_MAP 0xc8 #define SCSI_SENSE_RESPONSE_70 0x70 #define SCSI_SENSE_RESPONSE_71 0x71 #define SCSI_SENSE_RESPONSE_72 0x72 #define SCSI_SENSE_RESPONSE_73 0x73 -#define SA_REPORT_LOG_EXTENDED 0x1 -#define SA_REPORT_PHYS_EXTENDED 0x2 - -#define SA_CACHE_FLUSH_BUF_LEN 4 +#define SA_REPORT_LOG_EXTENDED 0x1 +#define SA_REPORT_PHYS_EXTENDED 0x2 #define GET_SCSI_SNO(cmd) (cmd->cmdId.serialNumber) #define REPORT_LUN_DEV_FLAG_AIO_ENABLED 0x8 #define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) #define RAID_MAP_MAX_ENTRIES 1024 #define RAID_MAP_ENCRYPTION_ENABLED 0x1 #define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH 27 #define ASC_LUN_NOT_READY 0x4 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x4 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x2 #define OBDR_SIG_OFFSET 43 #define OBDR_TAPE_SIG "$DR-10" #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) #define IOACCEL_STATUS_BYTE 4 #define OFFLOAD_CONFIGURED_BIT 0x1 #define OFFLOAD_ENABLED_BIT 0x2 #define PQI_RAID_DATA_IN_OUT_GOOD 0x0 #define PQI_RAID_DATA_IN_OUT_UNDERFLOW 0x1 #define PQI_RAID_DATA_IN_OUT_BUFFER_ERROR 0x40 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW 0x41 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA 0x42 #define PQI_RAID_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE 0x43 #define PQI_RAID_DATA_IN_OUT_PCIE_FABRIC_ERROR 0x60 #define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT 0x61 #define PQI_RAID_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED 0x62 #define PQI_RAID_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ 0x63 #define PQI_RAID_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED 0x64 #define PQI_RAID_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST 0x65 #define PQI_RAID_DATA_IN_OUT_PCIE_ACS_VIOLATION 0x66 #define PQI_RAID_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED 0x67 #define PQI_RAID_DATA_IN_OUT_ERROR 0xf0 #define PQI_RAID_DATA_IN_OUT_PROTOCOL_ERROR 0xf1 #define PQI_RAID_DATA_IN_OUT_HARDWARE_ERROR 0xf2 #define PQI_RAID_DATA_IN_OUT_UNSOLICITED_ABORT 0xf3 #define PQI_RAID_DATA_IN_OUT_ABORTED 0xf4 #define PQI_RAID_DATA_IN_OUT_TIMEOUT 0xf5 #define PQI_PHYSICAL_DEVICE_BUS 0 #define PQI_RAID_VOLUME_BUS 1 #define PQI_HBA_BUS 2 #define PQI_EXTERNAL_RAID_VOLUME_BUS 3 #define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS #define TEST_UNIT_READY 0x00 #define SCSI_VPD_HEADER_LENGTH 64 #define PQI_MAX_MULTILUN 256 #define PQI_MAX_LOGICALS 64 #define PQI_MAX_PHYSICALS 1024 #define PQI_MAX_DEVICES (PQI_MAX_LOGICALS + PQI_MAX_PHYSICALS + 1) /* 1 for controller device entry */ #define PQI_MAX_EXT_TARGETS 32 -#define PQI_CTLR_INDEX (PQI_MAX_DEVICES - 1) +#define PQI_CTLR_INDEX 0 #define PQI_PD_INDEX(t) (t + PQI_MAX_LOGICALS) #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define 
MAX_TARGET_DEVICES 1024 +#define MAX_TARGET_BIT 1025 +#define SLOT_AVAILABLE false +#define SLOT_TAKEN true #define PQI_NO_MEM 2 typedef enum pqisrc_device_status { DEVICE_NOT_FOUND, DEVICE_CHANGED, DEVICE_UNCHANGED, + DEVICE_IN_REMOVE, } device_status_t; #define SA_RAID_0 0 #define SA_RAID_4 1 #define SA_RAID_1 2 /* also used for RAID 10 */ #define SA_RAID_5 3 /* also used for RAID 50 */ #define SA_RAID_51 4 #define SA_RAID_6 5 /* also used for RAID 60 */ #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ #define SA_RAID_MAX SA_RAID_ADM #define SA_RAID_UNKNOWN 0xff #define BIT0 (1 << 0) #define BIT1 (1 << 1) #define BIT2 (1 << 2) #define BIT3 (1 << 3) #define BITS_PER_BYTE 8 + + +/* Vendor Specific (BMIC) Op Code */ +#define BMIC_READ 0x26 +#define BMIC_WRITE 0x27 +#define IS_BMIC_OPCODE(opcode) (opcode == BMIC_READ || opcode == BMIC_WRITE) /* BMIC commands */ -#define BMIC_IDENTIFY_CONTROLLER 0x11 +#define BMIC_IDENTIFY_CONTROLLER 0x11 #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 -#define BMIC_READ 0x26 -#define BMIC_WRITE 0x27 +#define BMIC_SENSE_FEATURE 0x61 #define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64 #define BMIC_SENSE_SUBSYSTEM_INFORMATION 0x66 -#define BMIC_CACHE_FLUSH 0xc2 -#define BMIC_FLASH_FIRMWARE 0xf7 -#define BMIC_WRITE_HOST_WELLNESS 0xa5 -#define BMIC_SET_DIAGS_OPTIONS 0xf4 -#define BMIC_SENSE_DIAGS_OPTIONS 0xf5 +#define BMIC_WRITE_HOST_WELLNESS 0xa5 +#define BMIC_CACHE_FLUSH 0xc2 +#define BMIC_SET_DIAGS_OPTIONS 0xf4 +#define BMIC_SENSE_DIAGS_OPTIONS 0xf5 +#define BMIC_FLASH_FIRMWARE 0xf7 +/* Sense Feature Pages/Subpages */ +#define IO_SENSE_FEATURES_PAGE 0x08 +#define SENSE_FEATURES_AIO_SUBPAGE 0x02 #define MASKED_DEVICE(lunid) ((lunid)[3] & 0xC0) #define BMIC_GET_LEVEL_2_BUS(lunid) ((lunid)[7] & 0x3F) #define BMIC_GET_LEVEL_TWO_TARGET(lunid) ((lunid)[6]) #define BMIC_GET_DRIVE_NUMBER(lunid) \ (((BMIC_GET_LEVEL_2_BUS((lunid)) - 1) << 8) + \ BMIC_GET_LEVEL_TWO_TARGET((lunid))) #define NON_DISK_PHYS_DEV(rle) \ (((reportlun_ext_entry_t *)(rle))->device_flags & 0x1) #define NO_TIMEOUT ((unsigned long) -1) #define BMIC_DEVICE_TYPE_SATA 0x1 /* No of IO slots required for internal requests */ #define PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS 3 #define PQI_RESERVED_IO_SLOTS_TMF 1 #define PQI_RESERVED_IO_SLOTS_CNT (PQI_NUM_SUPPORTED_EVENTS + \ PQI_RESERVED_IO_SLOTS_TMF + \ PQI_RESERVED_IO_SLOTS_SYNC_REQUESTS) +/* Defines for counter flags */ +#define COUNTER_FLAG_CLEAR_COUNTS 0x0001 +#define COUNTER_FLAG_ONLY_NON_ZERO 0x0002 + /* Defines for print flags */ -#define PRINT_FLAG_HDR_COLUMN 0x0001 +#define PRINT_FLAG_HDR_COLUMN 0x0001 + +/* Function-specific debug flags */ +#if 0 +#define DEBUG_AIO /* show AIO eligibility, IU, etc. (very spammy!) */ +#define DEBUG_AIO_LOCATOR /* show AIO row/column etc. calc. 
*/ +#define DEBUG_RAID_MAP /* show AIO raid map content from FW */ +#endif static inline uint16_t GET_LE16(const uint8_t *p) { return p[0] | p[1] << 8; } static inline uint32_t GET_LE32(const uint8_t *p) { return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; } static inline uint64_t GET_LE64(const uint8_t *p) { return (((uint64_t)GET_LE32(p + 4) << 32) | GET_LE32(p)); } static inline uint16_t GET_BE16(const uint8_t *p) { return p[0] << 8 | p[1]; } static inline uint32_t GET_BE32(const uint8_t *p) { return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; } static inline uint64_t GET_BE64(const uint8_t *p) { return (((uint64_t)GET_BE32(p) << 32) | GET_BE32(p + 4)); } static inline void PUT_BE16(uint16_t val, uint8_t *p) { *p++ = val >> 8; *p++ = val; } static inline void PUT_BE32(uint32_t val, uint8_t *p) { PUT_BE16(val >> 16, p); PUT_BE16(val, p + 2); } static inline void PUT_BE64(uint64_t val, uint8_t *p) { PUT_BE32(val >> 32, p); PUT_BE32(val, p + 4); } -#define OS_FREEBSD -#define SIS_POLL_WAIT + +/* Calculates percentage of val vs total, i.e. 20 out of 100 --> 20% */ +static inline uint64_t CALC_PERCENT_TOTAL(uint64_t val, uint64_t total) +{ + uint64_t percent = 0; + if (total) + percent = (val * 100) / total; + return percent; +} + +/* Calculates percentage of a vs b, i.e. 50 vs 100 -> 50/150 -> 33% */ +#define CALC_PERCENT_VS(a, b) (CALC_PERCENT_TOTAL(a, (a+b))) + +#define STREAM_DETECTION "stream_disable" +#define SATA_UNIQUE_WWN "sata_unique_wwn_disable" +#define AIO_RAID1_WRITE_BYPASS "aio_raid1_write_disable" +#define AIO_RAID5_WRITE_BYPASS "aio_raid5_write_disable" +#define AIO_RAID6_WRITE_BYPASS "aio_raid6_write_disable" +#define ADAPTER_QUEUE_DEPTH "queue_depth" +#define SCATTER_GATHER_COUNT "sg_count" +#define QUEUE_COUNT "queue_count" #define OS_ATTRIBUTE_PACKED __attribute__((__packed__)) #define OS_ATTRIBUTE_ALIGNED(n) __attribute__((aligned(n))) /* Management Interface */ #define CCISS_IOC_MAGIC 'C' #define SMARTPQI_IOCTL_BASE 'M' #define CCISS_GETDRIVVER _IOWR(SMARTPQI_IOCTL_BASE, 0, driver_info) #define CCISS_GETPCIINFO _IOWR(SMARTPQI_IOCTL_BASE, 1, pqi_pci_info_t) #define SMARTPQI_PASS_THRU _IOWR(SMARTPQI_IOCTL_BASE, 2, IOCTL_Command_struct) #define CCISS_PASSTHRU _IOWR('C', 210, IOCTL_Command_struct) #define CCISS_REGNEWD _IO(CCISS_IOC_MAGIC, 14) /*IOCTL pci_info structure */ typedef struct pqi_pci_info { unsigned char bus; unsigned char dev_fn; unsigned short domain; uint32_t board_id; uint32_t chip_id; }pqi_pci_info_t; typedef struct _driver_info { unsigned char major_version; unsigned long minor_version; unsigned char release_version; unsigned long build_revision; unsigned long max_targets; unsigned long max_io; unsigned long max_transfer_length; }driver_info, *pdriver_info; typedef uint8_t *passthru_buf_type_t; - -#define PQISRC_OS_VERSION 1 -#define PQISRC_FEATURE_VERSION 4014 -#define PQISRC_PATCH_VERSION 0 -#define PQISRC_BUILD_VERSION 105 +#define PQISRC_DRIVER_MAJOR __FreeBSD__ +#define PQISRC_DRIVER_MINOR 4410 +#define PQISRC_DRIVER_RELEASE 0 +#define PQISRC_DRIVER_REVISION 2005 #define STR(s) # s -#define PQISRC_VERSION(a, b, c, d) STR(a.b.c.d) -#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_OS_VERSION, \ - PQISRC_FEATURE_VERSION, \ - PQISRC_PATCH_VERSION, \ - PQISRC_BUILD_VERSION) - +#define PQISRC_VERSION(a, b, c, d) STR(a.b.c-d) +#define PQISRC_DRIVER_VERSION PQISRC_VERSION(PQISRC_DRIVER_MAJOR, \ + PQISRC_DRIVER_MINOR, \ + PQISRC_DRIVER_RELEASE, \ + PQISRC_DRIVER_REVISION) + /* End Management interface */ #ifdef ASSERT #undef ASSERT #endif /* 
*os_atomic64_cas-- * *Atomically read, compare, and conditionally write. *i.e. compare and swap. *retval True On Success *retval False On Failure * */ static inline boolean_t os_atomic64_cas(volatile uint64_t* var, uint64_t old_val, uint64_t new_val) { return (atomic_cmpset_64(var, old_val, new_val)); } #define ASSERT(cond) {\ if (!(cond)) { \ printf("Assertion failed at file %s line %d\n",__FILE__,__LINE__); \ } \ } /* Atomic */ typedef volatile uint64_t OS_ATOMIC64_T; #define OS_ATOMIC64_READ(p) atomic_load_acq_64(p) #define OS_ATOMIC64_INIT(p,val) atomic_store_rel_64(p, val) /* 64-bit post atomic increment and decrement operations on value in pointer.*/ #define OS_ATOMIC64_DEC(p) (atomic_fetchadd_64(p, -1) - 1) #define OS_ATOMIC64_INC(p) (atomic_fetchadd_64(p, 1) + 1) #define PQI_MAX_MSIX 64 /* vectors */ #define PQI_MSI_CTX_SIZE sizeof(pqi_intr_ctx)+1 #define IS_POLLING_REQUIRED(softs) if (cold) {\ pqisrc_process_event_intr_src(softs, 0);\ pqisrc_process_response_queue(softs, 1);\ } #define OS_GET_TASK_ATTR(rcb) os_get_task_attr(rcb) #define OS_FW_HEARTBEAT_TIMER_INTERVAL (5) typedef struct PCI_ACC_HANDLE { bus_space_tag_t pqi_btag; bus_space_handle_t pqi_bhandle; } PCI_ACC_HANDLE_T; /* * Legacy SIS Register definitions for the Adaptec PMC SRC/SRCv/smartraid adapters. */ /* accessible via BAR0 */ #define LEGACY_SIS_IOAR 0x18 /* IOA->host interrupt register */ #define LEGACY_SIS_IDBR 0x20 /* inbound doorbell register */ #define LEGACY_SIS_IISR 0x24 /* inbound interrupt status register */ #define LEGACY_SIS_OIMR 0x34 /* outbound interrupt mask register */ #define LEGACY_SIS_ODBR_R 0x9c /* outbound doorbell register read */ #define LEGACY_SIS_ODBR_C 0xa0 /* outbound doorbell register clear */ #define LEGACY_SIS_SCR0 0xb0 /* scratchpad 0 */ #define LEGACY_SIS_OMR 0xbc /* outbound message register */ #define LEGACY_SIS_IQUE64_L 0xc0 /* inbound queue address 64-bit (low) */ #define LEGACY_SIS_IQUE64_H 0xc4 /* inbound queue address 64-bit (high)*/ #define LEGACY_SIS_ODBR_MSI 0xc8 /* MSI register for sync./AIF */ #define LEGACY_SIS_IQN_L 0xd0 /* inbound queue native mode (low) */ #define LEGACY_SIS_IQN_H 0xd4 /* inbound queue native mode (high)*/ #define LEGACY_SIS_MAILBOX 0x7fc60 /* mailbox (20 bytes) */ #define LEGACY_SIS_SRCV_MAILBOX 0x1000 /* mailbox (20 bytes) */ #define LEGACY_SIS_SRCV_OFFSET_MAILBOX_7 0x101C /* mailbox 7 register offset */ - #define LEGACY_SIS_ODR_SHIFT 12 /* outbound doorbell shift */ #define LEGACY_SIS_IDR_SHIFT 9 /* inbound doorbell shift */ /* * PQI Register definitions for the smartraid adapters */ /* accessible via BAR0 */ #define PQI_SIGNATURE 0x4000 #define PQI_ADMINQ_CONFIG 0x4008 #define PQI_ADMINQ_CAP 0x4010 -#define PQI_LEGACY_INTR_STATUS 0x4018 +#define PQI_LEGACY_INTR_STATUS 0x4018 #define PQI_LEGACY_INTR_MASK_SET 0x401C #define PQI_LEGACY_INTR_MASK_CLR 0x4020 #define PQI_DEV_STATUS 0x4040 #define PQI_ADMIN_IBQ_PI_OFFSET 0x4048 #define PQI_ADMIN_OBQ_CI_OFFSET 0x4050 #define PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR 0x4058 #define PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR 0x4060 #define PQI_ADMIN_IBQ_CI_ADDR 0x4068 #define PQI_ADMIN_OBQ_PI_ADDR 0x4070 #define PQI_ADMINQ_PARAM 0x4078 #define PQI_DEV_ERR 0x4080 #define PQI_DEV_ERR_DETAILS 0x4088 #define PQI_DEV_RESET 0x4090 #define PQI_POWER_ACTION 0x4094 /* Busy wait micro seconds */ #define OS_BUSYWAIT(x) DELAY(x) #define OS_SLEEP(timeout) \ DELAY(timeout); - + /* TMF request timeout is 600 Sec */ #define OS_TMF_TIMEOUT_SEC (10 * 60) #define LE_16(x) htole16(x) #define LE_32(x) htole32(x) #define LE_64(x) htole64(x) #define 
BE_16(x) htobe16(x) #define BE_32(x) htobe32(x) #define BE_64(x) htobe64(x) #define PQI_HWIF_SRCV 0 #define PQI_HWIF_UNKNOWN -1 #define SMART_STATE_SUSPEND (1<<0) #define SMART_STATE_UNUSED0 (1<<1) #define SMART_STATE_INTERRUPTS_ON (1<<2) #define SMART_STATE_AIF_SLEEPER (1<<3) #define SMART_STATE_RESET (1<<4) #define PQI_FLAG_BUSY (1<<0) #define PQI_MSI_ENABLED (1<<1) #define PQI_SIM_REGISTERED (1<<2) #define PQI_MTX_INIT (1<<3) #define PQI_CMD_MAPPED (1<<2) /* Interrupt context to get oq_id */ typedef struct pqi_intr_ctx { int oq_id; device_t pqi_dev; }pqi_intr_ctx_t; typedef uint8_t os_dev_info_t; typedef struct OS_SPECIFIC { - device_t pqi_dev; + device_t pqi_dev; struct resource *pqi_regs_res0; /* reg. if. window */ int pqi_regs_rid0; /* resource ID */ bus_dma_tag_t pqi_parent_dmat; /* parent DMA tag */ bus_dma_tag_t pqi_buffer_dmat; /* controller hardware interface */ - int pqi_hwif; + int pqi_hwif; struct resource *pqi_irq[PQI_MAX_MSIX]; /* interrupt */ int pqi_irq_rid[PQI_MAX_MSIX]; void *intrcookie[PQI_MAX_MSIX]; bool intr_registered[PQI_MAX_MSIX]; bool msi_enabled; /* MSI/MSI-X enabled */ pqi_intr_ctx_t *msi_ctx; int oq_id; int pqi_state; uint32_t pqi_flags; struct mtx cam_lock; struct mtx map_lock; int mtx_init; int sim_registered; struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; struct task event_task; struct cdev *cdev; struct callout wellness_periodic; /* periodic event handling */ struct callout heartbeat_timeout_id; /* heart beat event handling */ } OS_SPECIFIC_T; -typedef bus_addr_t dma_addr_t; + +typedef struct device_hints { + uint8_t stream_status: 1; + uint8_t sata_unique_wwn_status: 1; + uint8_t aio_raid1_write_status: 1; + uint8_t aio_raid5_write_status: 1; + uint8_t aio_raid6_write_status: 1; + uint32_t queue_depth; + uint32_t sg_segments; + uint32_t cpu_count; +} device_hint; + +typedef bus_addr_t dma_addr_t; /* Register access macros */ #define PCI_MEM_GET32( _softs, _absaddr, _offset ) \ - bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset) + bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ + _softs->pci_mem_handle.pqi_bhandle, _offset) #if defined(__i386__) #define PCI_MEM_GET64( _softs, _absaddr, _offset ) ({ \ (uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset) + \ - ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \ + _softs->pci_mem_handle.pqi_bhandle, _offset) + \ + ((uint64_t)bus_space_read_4(_softs->pci_mem_handle.pqi_btag, \ + _softs->pci_mem_handle.pqi_bhandle, _offset + 4) << 32); \ }) #else #define PCI_MEM_GET64(_softs, _absaddr, _offset ) \ - bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset) + bus_space_read_8(_softs->pci_mem_handle.pqi_btag, \ + _softs->pci_mem_handle.pqi_bhandle, _offset) #endif #define PCI_MEM_PUT32( _softs, _absaddr, _offset, _val ) \ - bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset, _val) + bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ + _softs->pci_mem_handle.pqi_bhandle, _offset, _val) #if defined(__i386__) #define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \ + _softs->pci_mem_handle.pqi_bhandle, _offset, _val); \ bus_space_write_4(_softs->pci_mem_handle.pqi_btag, \ _softs->pci_mem_handle.pqi_bhandle, _offset + 4, 
_val >> 32); #else #define PCI_MEM_PUT64( _softs, _absaddr, _offset, _val ) \ - bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \ - _softs->pci_mem_handle.pqi_bhandle, _offset, _val) + bus_space_write_8(_softs->pci_mem_handle.pqi_btag, \ + _softs->pci_mem_handle.pqi_bhandle, _offset, _val) #endif #define PCI_MEM_GET_BUF(_softs, _absaddr, _offset, buf, size) \ bus_space_read_region_1(_softs->pci_mem_handle.pqi_btag,\ _softs->pci_mem_handle.pqi_bhandle, _offset, buf, size) - + /* Lock */ typedef struct mtx OS_LOCK_T; typedef struct sema OS_SEMA_LOCK_T; #define OS_PQILOCK_T OS_LOCK_T #define OS_ACQUIRE_SPINLOCK(_lock) mtx_lock_spin(_lock) #define OS_RELEASE_SPINLOCK(_lock) mtx_unlock_spin(_lock) #define OS_INIT_PQILOCK(_softs,_lock,_lockname) os_init_spinlock(_softs,_lock,_lockname) #define OS_UNINIT_PQILOCK(_lock) os_uninit_spinlock(_lock) #define PQI_LOCK(_lock) OS_ACQUIRE_SPINLOCK(_lock) #define PQI_UNLOCK(_lock) OS_RELEASE_SPINLOCK(_lock) -#define OS_GET_CDBP(rcb) \ - ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes) +#define OS_GET_CDBP(rcb) ((rcb->cm_ccb->ccb_h.flags & CAM_CDB_POINTER) ? rcb->cm_ccb->csio.cdb_io.cdb_ptr : rcb->cm_ccb->csio.cdb_io.cdb_bytes) #define GET_SCSI_BUFFLEN(rcb) (rcb->cm_ccb->csio.dxfer_len) #define IS_OS_SCSICMD(rcb) (rcb && !rcb->tm_req && rcb->cm_ccb) #define OS_GET_IO_QINDEX(softs,rcb) curcpu % softs->num_op_obq #define OS_GET_IO_RESP_QID(softs,rcb) (softs->op_ob_q[(OS_GET_IO_QINDEX(softs,rcb))].q_id) #define OS_GET_IO_REQ_QINDEX(softs,rcb) OS_GET_IO_QINDEX(softs,rcb) #define OS_GET_TMF_RESP_QID OS_GET_IO_RESP_QID #define OS_GET_TMF_REQ_QINDEX OS_GET_IO_REQ_QINDEX /* check request type */ #define is_internal_req(rcb) (!(rcb->cm_ccb)) #define os_io_memcpy(dest, src, len) memcpy(dest, src, len) /* sg elements addr, len, flags */ #define OS_GET_IO_SG_COUNT(rcb) rcb->nseg #define OS_GET_IO_SG_ADDR(rcb,i) rcb->sgt[i].addr #define OS_GET_IO_SG_LEN(rcb,i) rcb->sgt[i].len /* scsi commands used in pqilib for RAID bypass*/ #define SCMD_READ_6 READ_6 #define SCMD_WRITE_6 WRITE_6 #define SCMD_READ_10 READ_10 #define SCMD_WRITE_10 WRITE_10 #define SCMD_READ_12 READ_12 #define SCMD_WRITE_12 WRITE_12 #define SCMD_READ_16 READ_16 #define SCMD_WRITE_16 WRITE_16 /* FreeBSD status macros */ -#define BSD_SUCCESS 0 +#define BSD_SUCCESS 0 +#define DEVICE_HINT_SUCCESS 0 +/* Min outstanding commands that driver can register with CAM layer.*/ +#define OS_MIN_OUTSTANDING_REQ 6 +#define BSD_MIN_SG_SEGMENTS 16 + +#define DISABLE_ERR_RESP_VERBOSE 1 /* Debug facility */ #define PQISRC_FLAGS_MASK 0x0000ffff #define PQISRC_FLAGS_INIT 0x00000001 #define PQISRC_FLAGS_INFO 0x00000002 #define PQISRC_FLAGS_FUNC 0x00000004 #define PQISRC_FLAGS_TRACEIO 0x00000008 #define PQISRC_FLAGS_DISC 0x00000010 #define PQISRC_FLAGS_WARN 0x00000020 #define PQISRC_FLAGS_ERROR 0x00000040 #define PQISRC_FLAGS_NOTE 0x00000080 #define PQISRC_LOG_LEVEL (PQISRC_FLAGS_WARN | PQISRC_FLAGS_ERROR | PQISRC_FLAGS_NOTE) static int logging_level = PQISRC_LOG_LEVEL; #define DBG_INIT(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_INIT) { \ printf("[INIT]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_INFO(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_INFO) { \ printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_FUNC(fmt,args...) 
\ do { \ if (logging_level & PQISRC_FLAGS_FUNC) { \ printf("[FUNC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); -#define DBG_TRACEIO(fmt,args...) \ +#define DBG_DISC(fmt,args...) \ do { \ - if (logging_level & PQISRC_FLAGS_TRACEIO) { \ - printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ + if (logging_level & PQISRC_FLAGS_DISC) { \ + printf("[DISC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); -#define DBG_DISC(fmt,args...) \ +#define DBG_TRACEIO(fmt,args...) \ do { \ - if (logging_level & PQISRC_FLAGS_DISC) { \ - printf("[DISC]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ + if (logging_level & PQISRC_FLAGS_TRACEIO) { \ + printf("[TRACEIO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_WARN(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_WARN) { \ printf("[WARN]:[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args);\ } \ }while(0); #define DBG_ERR(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_ERROR) { \ printf("[ERROR]::[%u:%u.%u][CPU %d][%s][%d]:"fmt,softs->bus_id,softs->device_id,softs->func_id,curcpu,__func__,__LINE__,##args); \ } \ }while(0); +#define DBG_ERR_NO_SOFTS(fmt,args...) \ + do { \ + if (logging_level & PQISRC_FLAGS_ERROR) { \ + printf("[ERROR]::[CPU %d][%s][%d]:"fmt,curcpu,__func__,__LINE__,##args); \ + } \ + }while(0); #define DBG_IO(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_TRACEIO) { \ printf("[IO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_ERR_BTL(device,fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_ERROR) { \ - printf("[ERROR]::[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \ + printf("[ERROR]::[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args); \ } \ }while(0); #define DBG_WARN_BTL(device,fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_WARN) { \ - printf("[WARN]:[%u:%u.%u][%u,%u,%u][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\ + printf("[WARN]:[%u:%u.%u][%d,%d,%d][CPU %d][%s][%d]:"fmt, softs->bus_id, softs->device_id, softs->func_id, device->bus, device->target, device->lun,curcpu,__func__,__LINE__,##args);\ } \ }while(0); #define DBG_NOTE(fmt,args...) \ do { \ if (logging_level & PQISRC_FLAGS_NOTE) { \ - printf("[INFO]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ + printf("[NOTE]:[ %s ] [ %d ]"fmt,__func__,__LINE__,##args); \ } \ }while(0); #endif /* _PQI_DEFINES_H */ diff --git a/sys/dev/smartpqi/smartpqi_discovery.c b/sys/dev/smartpqi/smartpqi_discovery.c index 48243d8fc882..7e084a81cd89 100644 --- a/sys/dev/smartpqi/smartpqi_discovery.c +++ b/sys/dev/smartpqi/smartpqi_discovery.c @@ -1,2042 +1,2292 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" -#define MAX_RETRIES 3 -#define PQISRC_INQUIRY_TIMEOUT 30 - -/* Validate the scsi sense response code */ -static inline -boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr) +/* + * Populate the controller's advanced aio features via BMIC cmd. + */ +int +pqisrc_QuerySenseFeatures(pqisrc_softstate_t *softs) { - DBG_FUNC("IN\n"); + bmic_sense_feature_aio_buffer_t *features; + int ret; + pqisrc_raid_req_t request; - if (!sshdr) - return false; + /* Initializing defaults for AIO support subpage */ + softs->max_aio_write_raid5_6 = + PQISRC_MAX_AIO_RAID5_OR_6_WRITE; + softs->max_aio_write_raid1_10_2drv = + PQISRC_MAX_AIO_RAID1_OR_10_WRITE_2DRV; + softs->max_aio_write_raid1_10_3drv = + PQISRC_MAX_AIO_RAID1_OR_10_WRITE_3DRV; + softs->max_aio_rw_xfer_crypto_nvme = + PQISRC_MAX_AIO_RW_XFER_NVME_CRYPTO; + softs->max_aio_rw_xfer_crypto_sas_sata = + PQISRC_MAX_AIO_RW_XFER_SAS_SATA_CRYPTO; + +#ifdef DEVICE_HINT + softs->enable_stream_detection = softs->hint.stream_status; +#endif - DBG_FUNC("OUT\n"); + /* Implement SENSE_FEATURE BMIC to populate AIO limits */ + features = os_mem_alloc(softs, sizeof(*features)); + if (!features) { + DBG_ERR("Failed to allocate memory for sense aio features.\n"); + goto err; + } + memset(features, 0, sizeof(*features)); + + memset(&request, 0, sizeof(request)); + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_READ; + request.cmd.cdb[2] = IO_SENSE_FEATURES_PAGE; + request.cmd.cdb[3] = SENSE_FEATURES_AIO_SUBPAGE; + request.cmd.bmic_cdb.cmd = BMIC_SENSE_FEATURE; + request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*features)); + ret = pqisrc_prepare_send_ctrlr_request(softs, &request, + features, sizeof(*features)); - return (sshdr->response_code & 0x70) == 0x70; + if (ret) + goto free_err; + + /* If AIO subpage was valid, use values from that page */ + if (features->aio_subpage.header.total_length >= + MINIMUM_AIO_SUBPAGE_LENGTH) { + DBG_INIT("AIO support subpage valid. total_length = 0x%0x.\n", + features->aio_subpage.header.total_length); + softs->adv_aio_capable = true; + + /* AIO transfer limits are reported in kbytes, so x 1024. + * Values of 0 mean 'no limit'. + */ + + softs->max_aio_write_raid5_6 = + (features->aio_subpage.max_aio_write_raid5_6 == 0) ? + PQISRC_MAX_AIO_NO_LIMIT : + features->aio_subpage.max_aio_write_raid5_6 * 1024; + softs->max_aio_write_raid1_10_2drv = + (features->aio_subpage.max_aio_write_raid1_10_2drv + == 0) ? 
PQISRC_MAX_AIO_NO_LIMIT : + features->aio_subpage.max_aio_write_raid1_10_2drv + * 1024; + softs->max_aio_write_raid1_10_3drv = + (features->aio_subpage.max_aio_write_raid1_10_3drv + == 0) ? PQISRC_MAX_AIO_NO_LIMIT : + features->aio_subpage.max_aio_write_raid1_10_3drv + * 1024; + softs->max_aio_rw_xfer_crypto_nvme = + (features->aio_subpage.max_aio_rw_xfer_crypto_nvme + == 0) ? PQISRC_MAX_AIO_NO_LIMIT : + features->aio_subpage.max_aio_rw_xfer_crypto_nvme + * 1024; + softs->max_aio_rw_xfer_crypto_sas_sata = + (features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata + == 0) ? PQISRC_MAX_AIO_NO_LIMIT : + features->aio_subpage.max_aio_rw_xfer_crypto_sas_sata + * 1024; + + DBG_INIT("softs->max_aio_write_raid5_6: 0x%x\n", + softs->max_aio_write_raid5_6); + DBG_INIT("softs->max_aio_write_raid1_10_2drv: 0x%x\n", + softs->max_aio_write_raid1_10_2drv); + DBG_INIT("softs->max_aio_write_raid1_10_3drv: 0x%x\n", + softs->max_aio_write_raid1_10_3drv); + DBG_INIT("softs->max_aio_rw_xfer_crypto_nvme: 0x%x\n", + softs->max_aio_rw_xfer_crypto_nvme); + DBG_INIT("softs->max_aio_rw_xfer_crypto_sas_sata: 0x%x\n", + softs->max_aio_rw_xfer_crypto_sas_sata); + + } else { + DBG_WARN("Problem getting AIO support subpage settings. " + "Disabling advanced AIO writes.\n"); + softs->adv_aio_capable = false; + } + + + os_mem_free(softs, features, sizeof(*features)); + return ret; +free_err: + os_mem_free(softs, features, sizeof(*features)); +err: + return PQI_STATUS_FAILURE; } /* - * Initialize target ID pool for HBA/PDs . + * Initialize target ID pool for exposed physical devices . */ void -pqisrc_init_targetid_pool(pqisrc_softstate_t *softs) +pqisrc_init_bitmap(pqisrc_softstate_t *softs) { - int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1; + memset(&softs->bit_map, SLOT_AVAILABLE, sizeof(softs->bit_map)); +} - for(i = 0; i < PQI_MAX_PHYSICALS; i++) { - softs->tid_pool.tid[i] = tid--; +void +pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target) +{ + if((target == PQI_CTLR_INDEX) || (target == INVALID_ELEM)) { + DBG_ERR("Invalid target ID\n"); + return; } - softs->tid_pool.index = i - 1; + DBG_DISC("Giving back target %d\n", target); + softs->bit_map.bit_vector[target] = SLOT_AVAILABLE; } +/* Use bit map to find availible targets */ int -pqisrc_alloc_tid(pqisrc_softstate_t *softs) +pqisrc_find_avail_target(pqisrc_softstate_t *softs) { - if(softs->tid_pool.index <= -1) { - DBG_ERR("Target ID exhausted\n"); - return INVALID_ELEM; + int avail_target; + for(avail_target = 1; avail_target < MAX_TARGET_BIT; avail_target++) { + if(softs->bit_map.bit_vector[avail_target] == SLOT_AVAILABLE){ + softs->bit_map.bit_vector[avail_target] = SLOT_TAKEN; + DBG_DISC("Avail_target is %d\n", avail_target); + return avail_target; + } } - - return softs->tid_pool.tid[softs->tid_pool.index--]; + DBG_ERR("No available targets\n"); + return INVALID_ELEM; } -void -pqisrc_free_tid(pqisrc_softstate_t *softs, int tid) +/* Subroutine used to set Bus-Target-Lun for the requested device */ +static inline void +pqisrc_set_btl(pqi_scsi_dev_t *device, int bus, int target, int lun) { - if(softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) { - DBG_ERR("Target ID queue is full\n"); - return; - } + DBG_FUNC("IN\n"); + + device->bus = bus; + device->target = target; + device->lun = lun; - softs->tid_pool.index++; - softs->tid_pool.tid[softs->tid_pool.index] = tid; + DBG_FUNC("OUT\n"); } -/* Update scsi sense info to a local buffer*/ +/* Add all exposed physical devices, logical devices, controller devices, PT RAID +* devices and multi-lun devices */ 
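/*
 * Editorial sketch (not part of this patch): the target-ID handling introduced
 * above replaces the old tid_pool stack with a simple first-fit slot map.
 * pqisrc_find_avail_target() scans for the lowest free slot (index 0 stays
 * reserved for the controller) and pqisrc_remove_target_bit() gives a slot
 * back when a device goes away; pqisrc_add_softs_entry() below draws from the
 * same pool when it assigns targets to exposed physical devices.  A minimal
 * standalone model of that allocator, using hypothetical names and a fixed
 * table size standing in for MAX_TARGET_BIT/SLOT_AVAILABLE/SLOT_TAKEN, is:
 */
#include <string.h>

#define SKETCH_MAX_TARGETS	64	/* assumed size, stands in for MAX_TARGET_BIT */
#define SKETCH_SLOT_FREE	0	/* stands in for SLOT_AVAILABLE */
#define SKETCH_SLOT_USED	1	/* stands in for SLOT_TAKEN */

static unsigned char sketch_slots[SKETCH_MAX_TARGETS];

/* Mark every slot free, as pqisrc_init_bitmap() does with memset(). */
static void
sketch_init_targets(void)
{
	memset(sketch_slots, SKETCH_SLOT_FREE, sizeof(sketch_slots));
}

/* First-fit allocation; slot 0 is never handed out. */
static int
sketch_alloc_target(void)
{
	int t;

	for (t = 1; t < SKETCH_MAX_TARGETS; t++) {
		if (sketch_slots[t] == SKETCH_SLOT_FREE) {
			sketch_slots[t] = SKETCH_SLOT_USED;
			return (t);
		}
	}
	return (-1);			/* no target available */
}

/* Give a slot back when the corresponding device is removed. */
static void
sketch_free_target(int t)
{
	if (t > 0 && t < SKETCH_MAX_TARGETS)
		sketch_slots[t] = SKETCH_SLOT_FREE;
}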
boolean_t -pqisrc_update_scsi_sense(const uint8_t *buff, int len, - struct sense_header_scsi *header) +pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, + uint8_t *scsi3addr) { + /* Add physical devices with targets that need + * targets */ + int j; + int tid = 0; + unsigned char addr1[8], addr2[8]; + pqi_scsi_dev_t *temp_device; + + /* If controller device, add it to list because its lun/bus/target + * values are already set */ + if(pqisrc_is_hba_lunid(scsi3addr)) + goto add_device_to_dev_list; + + /* If exposed physical device give it a target then add it + * to the dev list */ + if(!pqisrc_is_logical_device(device)) { + tid = pqisrc_find_avail_target(softs); + if(INVALID_ELEM != tid){ + pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0); + goto add_device_to_dev_list; + } + } - DBG_FUNC("IN\n"); - - if (!buff || !len) - return false; + /* If external raid device , assign target from the target pool. + * If a non-zero lun device, search through the list & find the + * device which has same target (byte 2 of LUN address). + * Assign the same target for this new lun. */ + if (pqisrc_is_external_raid_device(device)) { + memcpy(addr1, device->scsi3addr, 8); + for(j = 0; j < PQI_MAX_DEVICES; j++) { + if(softs->dev_list[j] == NULL) + continue; + temp_device = softs->dev_list[j]; + memcpy(addr2, temp_device->scsi3addr, 8); + if (addr1[2] == addr2[2]) { + pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS, + temp_device->target,device->scsi3addr[0]); + goto add_device_to_dev_list; + } + } + tid = pqisrc_find_avail_target(softs); + if(INVALID_ELEM != tid){ + pqisrc_set_btl(device, PQI_EXTERNAL_RAID_VOLUME_BUS, tid, device->scsi3addr[0]); + goto add_device_to_dev_list; + } + } - memset(header, 0, sizeof(struct sense_header_scsi)); + /* If logical device, add it to list because its lun/bus/target + * values are already set */ + if(pqisrc_is_logical_device(device) && !pqisrc_is_external_raid_device(device)) + goto add_device_to_dev_list; + + /* This is a non-zero lun of a multi-lun device. + * Search through our list and find the device which + * has the same 8 byte LUN address, except with bytes 4 and 5. + * Assign the same bus and target for this new LUN. + * Use the logical unit number from the firmware. */ + memcpy(addr1, device->scsi3addr, 8); + addr1[4] = 0; + addr1[5] = 0; + for(j = 0; j < PQI_MAX_DEVICES; j++) { + if(softs->dev_list[j] == NULL) + continue; + temp_device = softs->dev_list[j]; + memcpy(addr2, temp_device->scsi3addr, 8); + addr2[4] = 0; + addr2[5] = 0; + /* If addresses are the same, except for bytes 4 and 5 + * then the passed-in device is an additional lun of a + * previously added multi-lun device. Use the same target + * id as that previous device. 
Otherwise, use the new + * target id */ + if(memcmp(addr1, addr2, 8) == 0) { + pqisrc_set_btl(device, temp_device->bus, + temp_device->target, temp_device->scsi3addr[4]); + goto add_device_to_dev_list; + } + } + DBG_ERR("The device is not a physical, lun or ptraid device" + "B %d: T %d: L %d\n", device->bus, device->target, + device->lun ); + return false; + +add_device_to_dev_list: + /* Actually add the device to the driver list + * softs->dev_list */ + softs->num_devs++; + for(j = 0; j < PQI_MAX_DEVICES; j++) { + if(softs->dev_list[j]) + continue; + softs->dev_list[j] = device; + break; + } + DBG_NOTE("Added device [%d of %d]: B %d: T %d: L %d\n", + j, softs->num_devs, device->bus, device->target, + device->lun); + return true; +} - header->response_code = (buff[0] & 0x7f); +/* Return a given index for a specific bus, target, lun within the +* softs dev_list (This function is specifically for freebsd)*/ +int +pqisrc_find_btl_list_index(pqisrc_softstate_t *softs, + int bus, int target, int lun) +{ - if (!pqisrc_scsi_sense_valid(header)) - return false; + int index; + pqi_scsi_dev_t *temp_device; + for(index = 0; index < PQI_MAX_DEVICES; index++) { + if(softs->dev_list[index] == NULL) + continue; + temp_device = softs->dev_list[index]; + /* Match the devices then return the location + * of that device for further use*/ + if(bus == softs->bus_id && + target == temp_device->target && + lun == temp_device->lun){ + DBG_DISC("Returning device list index %d\n", index); + return index; - if (header->response_code >= 0x72) { - /* descriptor format */ - if (len > 1) - header->sense_key = (buff[1] & 0xf); - if (len > 2) - header->asc = buff[2]; - if (len > 3) - header->ascq = buff[3]; - if (len > 7) - header->additional_length = buff[7]; - } else { - /* fixed format */ - if (len > 2) - header->sense_key = (buff[2] & 0xf); - if (len > 7) { - len = (len < (buff[7] + 8)) ? 
- len : (buff[7] + 8); - if (len > 12) - header->asc = buff[12]; - if (len > 13) - header->ascq = buff[13]; + } + if ((temp_device->is_physical_device) && (target == temp_device->target) + && (temp_device->is_multi_lun)) { + return index; } } - - DBG_FUNC("OUT\n"); - - return true; + return INVALID_ELEM; } -/* - * Function used to build the internal raid request and analyze the response - */ +/* Return a given index for a specific device within the +* softs dev_list */ int -pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request, - void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr, - raid_path_error_info_elem_t *error_info) +pqisrc_find_device_list_index(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { - uint8_t *cdb; - int ret = PQI_STATUS_SUCCESS; - uint32_t tag = 0; - struct dma_mem device_mem; - sgt_t *sgd; + int index; + pqi_scsi_dev_t *temp_device; + for(index = 0; index < PQI_MAX_DEVICES; index++) { + if(softs->dev_list[index] == NULL) + continue; + temp_device = softs->dev_list[index]; + /* Match the devices then return the location + * of that device for further use*/ + if(device->bus == temp_device->bus && + device->target == temp_device->target + && device->lun == temp_device->lun){ + DBG_DISC("Returning device list index %d\n", index); + return index; - ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; - ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; + } + } + return INVALID_ELEM; +} - rcb_t *rcb = NULL; +/* Delete a given device from the softs dev_list*/ +int +pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) +{ - DBG_FUNC("IN\n"); + int index; + index = pqisrc_find_device_list_index(softs, device); + if (0 <= index && index < MAX_TARGET_BIT) { + softs->dev_list[index] = NULL; + softs->num_devs--; + DBG_NOTE("Removing device : B %d: T %d: L %d positioned at %d\n", + device->bus, device->target, device->lun, softs->num_devs); + return PQI_STATUS_SUCCESS; + } + if (index == INVALID_ELEM) { + DBG_NOTE("Invalid device, either it was already removed " + "or never added\n"); + return PQI_STATUS_FAILURE; + } + DBG_ERR("This is a bogus device\n"); + return PQI_STATUS_FAILURE; +} - memset(&device_mem, 0, sizeof(struct dma_mem)); +int +pqisrc_simple_dma_alloc(pqisrc_softstate_t *softs, struct dma_mem *device_mem, + size_t datasize, sgt_t *sgd) +{ + int ret = PQI_STATUS_SUCCESS; + + memset(device_mem, 0, sizeof(struct dma_mem)); /* for TUR datasize: 0 buff: NULL */ if (datasize) { - device_mem.tag = "device_mem"; - device_mem.size = datasize; - device_mem.align = PQISRC_DEFAULT_DMA_ALIGN; - ret = os_dma_mem_alloc(softs, &device_mem); + os_strlcpy(device_mem->tag, "device_mem", sizeof(device_mem->tag)); + device_mem->size = datasize; + device_mem->align = PQISRC_DEFAULT_DMA_ALIGN; + + ret = os_dma_mem_alloc(softs, device_mem); if (ret) { DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret); return ret; } - sgd = (sgt_t *)&request->sg_descriptors[0]; + ASSERT(device_mem->size == datasize); - sgd->addr = device_mem.dma_addr; + sgd->addr = device_mem->dma_addr; sgd->len = datasize; sgd->flags = SG_FLAG_LAST; } + return ret; +} + +/* + * Function used to build the internal raid request and analyze the response + */ +static int +pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, struct dma_mem device_mem, + pqisrc_raid_req_t *request, void *buff, + size_t datasize, uint8_t cmd, uint8_t *scsi3addr, + raid_path_error_info_elem_t *error_info) +{ + + 
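	/*
	 * Editorial note (not part of this patch), summarizing the flow of the
	 * code below: fill in the RAID path IU header and LUN address, pull a
	 * free tag from the tag list, hook the internal-response callbacks up
	 * to the rcb, submit the request to the default operational IB queue,
	 * and wait for completion with a PQISRC_CMD_TIMEOUT bound.  After
	 * completion any read data is copied back out of the DMA buffer; an
	 * underflow reported in the error info is still treated as success.
	 * Before returning, the rcb is reset and the tag is put back on the
	 * tag list (unless no tag was ever obtained).
	 */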
uint32_t tag = 0; + int ret = PQI_STATUS_SUCCESS; + + ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; + ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; + + rcb_t *rcb = NULL; + /* Build raid path request */ request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t, sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH); request->buffer_length = LE_32(datasize); memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; - cdb = request->cdb; - - switch (cmd) { - case SA_INQUIRY: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = SA_INQUIRY; - if (vpd_page & VPD_PAGE) { - cdb[1] = 0x1; - cdb[2] = (uint8_t)vpd_page; - } - cdb[4] = (uint8_t)datasize; - if (softs->timeout_in_passthrough) { - request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT; - } - break; - case SA_REPORT_LOG: - case SA_REPORT_PHYS: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = cmd; - if (cmd == SA_REPORT_PHYS) - cdb[1] = SA_REPORT_PHYS_EXTENDED; - else - cdb[1] = SA_REPORT_LOG_EXTENDED; - cdb[8] = (uint8_t)((datasize) >> 8); - cdb[9] = (uint8_t)datasize; - break; - case PQI_LOG_EXT_QUEUE_ENABLE: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = SA_REPORT_LOG; - cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED); - cdb[8] = (uint8_t)((datasize) >> 8); - cdb[9] = (uint8_t)datasize; - break; - case TEST_UNIT_READY: - request->data_direction = SOP_DATA_DIR_NONE; - break; - case SA_GET_RAID_MAP: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = SA_CISS_READ; - cdb[1] = cmd; - cdb[8] = (uint8_t)((datasize) >> 8); - cdb[9] = (uint8_t)datasize; - break; - case SA_CACHE_FLUSH: - request->data_direction = SOP_DATA_DIR_FROM_DEVICE; - memcpy(device_mem.virt_addr, buff, datasize); - cdb[0] = BMIC_WRITE; - cdb[6] = BMIC_CACHE_FLUSH; - cdb[7] = (uint8_t)((datasize) << 8); - cdb[8] = (uint8_t)((datasize) >> 8); - break; - case BMIC_IDENTIFY_CONTROLLER: - case BMIC_IDENTIFY_PHYSICAL_DEVICE: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = BMIC_READ; - cdb[6] = cmd; - cdb[7] = (uint8_t)((datasize) << 8); - cdb[8] = (uint8_t)((datasize) >> 8); - break; - case BMIC_WRITE_HOST_WELLNESS: - request->data_direction = SOP_DATA_DIR_FROM_DEVICE; - memcpy(device_mem.virt_addr, buff, datasize); - cdb[0] = BMIC_WRITE; - cdb[6] = cmd; - cdb[7] = (uint8_t)((datasize) << 8); - cdb[8] = (uint8_t)((datasize) >> 8); - break; - case BMIC_SENSE_SUBSYSTEM_INFORMATION: - request->data_direction = SOP_DATA_DIR_TO_DEVICE; - cdb[0] = BMIC_READ; - cdb[6] = cmd; - cdb[7] = (uint8_t)((datasize) << 8); - cdb[8] = (uint8_t)((datasize) >> 8); - break; - default: - DBG_ERR("unknown command 0x%x", cmd); - ret = PQI_STATUS_FAILURE; - return ret; - } - tag = pqisrc_get_tag(&softs->taglist); if (INVALID_ELEM == tag) { DBG_ERR("Tag not available\n"); ret = PQI_STATUS_FAILURE; goto err_notag; } ((pqisrc_raid_req_t *)request)->request_id = tag; ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id; ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id; rcb = &softs->rcb[tag]; rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success; rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error; rcb->req_pending = true; rcb->tag = tag; /* Submit Command */ ret = pqisrc_submit_cmnd(softs, ib_q, request); if (ret != 
PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command\n"); goto err_out; } ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd); goto err_out; } if (datasize) { if (buff) { memcpy(buff, device_mem.virt_addr, datasize); } os_dma_mem_free(softs, &device_mem); } ret = rcb->status; if (ret) { if(error_info) { memcpy(error_info, rcb->error_info, sizeof(*error_info)); if (error_info->data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) { ret = PQI_STATUS_SUCCESS; } else{ - DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x," + DBG_WARN("Bus=%u Target=%u, Cmd=0x%x," "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), cmd, ret); ret = PQI_STATUS_FAILURE; } } } else { if(error_info) { ret = PQI_STATUS_SUCCESS; memset(error_info, 0, sizeof(*error_info)); } } os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id); DBG_FUNC("OUT\n"); return ret; err_out: DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), cmd, ret); os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id); err_notag: if (datasize) os_dma_mem_free(softs, &device_mem); DBG_FUNC("FAILED \n"); return ret; } -/* common function used to send report physical and logical luns cmnds*/ +/* Use this if you need to specify specific target or if you want error info */ +int +pqisrc_prepare_send_raid(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request, + void *buff, size_t datasize, uint8_t *scsi3addr, + raid_path_error_info_elem_t *error_info) +{ + struct dma_mem device_mem; + int ret = PQI_STATUS_SUCCESS; + uint8_t cmd = IS_BMIC_OPCODE(request->cmd.cdb[0]) ? 
request->cmd.cdb[6] : request->cmd.cdb[0]; + + ret = pqisrc_simple_dma_alloc(softs, &device_mem, datasize, request->sg_descriptors); + if (PQI_STATUS_SUCCESS != ret){ + DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret); + return ret; + } + + /* If we are sending out data, copy it over to dma buf */ + if (datasize && buff && request->data_direction == SOP_DATA_DIR_FROM_DEVICE) + memcpy(device_mem.virt_addr, buff, datasize); + + ret = pqisrc_build_send_raid_request(softs, device_mem, request, buff, datasize, + cmd, scsi3addr, error_info); + + return ret; +} + +/* Use this to target controller and don't care about error info */ +int +pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request, + void *buff, size_t datasize) +{ + raid_path_error_info_elem_t error_info; /* will be thrown away */ + uint8_t *scsi3addr = RAID_CTLR_LUNID; + + return pqisrc_prepare_send_raid(softs, request, buff, datasize, scsi3addr, &error_info); +} + +/* common function used to send report physical and logical luns cmds */ static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd, void *buff, size_t buf_len) { int ret; pqisrc_raid_req_t request; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); - ret = pqisrc_build_send_raid_request(softs, &request, buff, - buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); + + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + + switch (cmd) { + case SA_REPORT_LOG: + request.cmd.cdb[0] = SA_REPORT_LOG; + request.cmd.cdb[1] = SA_REPORT_LOG_EXTENDED; + break; + case SA_REPORT_PHYS: + request.cmd.cdb[0] = SA_REPORT_PHYS; + request.cmd.cdb[1] = SA_REPORT_PHYS_EXTENDED; + break; + /* @todo: 0x56 does not exist, this is kludgy, need to pass in options */ + case PQI_LOG_EXT_QUEUE_ENABLE: + request.cmd.cdb[0] = SA_REPORT_LOG; + request.cmd.cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED); + break; + } + + request.cmd.cdb[8] = (uint8_t)((buf_len) >> 8); + request.cmd.cdb[9] = (uint8_t)buf_len; + + ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len); DBG_FUNC("OUT\n"); return ret; } /* subroutine used to get physical and logical luns of the device */ int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd, reportlun_data_ext_t **buff, size_t *data_length) { int ret; size_t list_len; size_t data_len; size_t new_lun_list_length; reportlun_data_ext_t *lun_data; reportlun_header_t report_lun_header; DBG_FUNC("IN\n"); ret = pqisrc_report_luns(softs, cmd, &report_lun_header, sizeof(report_lun_header)); if (ret) { DBG_ERR("failed return code: %d\n", ret); return ret; } list_len = BE_32(report_lun_header.list_length); retry: data_len = sizeof(reportlun_header_t) + list_len; *data_length = data_len; lun_data = os_mem_alloc(softs, data_len); if (!lun_data) { DBG_ERR("failed to allocate memory for lun_data\n"); return PQI_STATUS_FAILURE; } if (list_len == 0) { DBG_DISC("list_len is 0\n"); memcpy(lun_data, &report_lun_header, sizeof(report_lun_header)); goto out; } ret = pqisrc_report_luns(softs, cmd, lun_data, data_len); if (ret) { DBG_ERR("error\n"); goto error; } new_lun_list_length = BE_32(lun_data->header.list_length); if (new_lun_list_length > list_len) { list_len = new_lun_list_length; os_mem_free(softs, (void *)lun_data, data_len); goto retry; } out: *buff = lun_data; DBG_FUNC("OUT\n"); return 0; error: os_mem_free(softs, (void *)lun_data, data_len); DBG_ERR("FAILED\n"); return ret; } /* * Function used to grab queue depth ext lun data for logical devices */ 
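/*
 * Editorial sketch (not part of this patch): pqisrc_get_physical_logical_luns()
 * above, and pqisrc_get_queue_lun_list() below, use the same two-pass REPORT
 * LUNS pattern: issue the command once with a header-sized buffer to learn the
 * list length, allocate a buffer that large, reissue the command, and retry
 * with a bigger buffer if the reported length grew in between.  A standalone
 * model of that retry loop, with a hypothetical fetch callback standing in for
 * pqisrc_report_luns(), looks like this:
 */
#include <stdlib.h>

/* Hypothetical fetch: fills 'buf' with up to 'len' bytes and returns the total
 * number of bytes the device currently has available. */
typedef size_t (*sketch_fetch_fn)(void *buf, size_t len);

/* Returns a malloc'd buffer holding the full list, or NULL on failure;
 * '*out_len' receives the buffer size that was finally used. */
static void *
sketch_fetch_grow(sketch_fetch_fn fetch, size_t hdr_len, size_t *out_len)
{
	size_t need, got;
	void *buf, *bigger;

	buf = malloc(hdr_len);
	if (buf == NULL)
		return (NULL);

	/* First pass: header only, just to learn the total length. */
	need = fetch(buf, hdr_len);
	if (need < hdr_len)
		need = hdr_len;

	/* Grow and reissue until the whole list fits. */
	for (;;) {
		bigger = realloc(buf, need);
		if (bigger == NULL) {
			free(buf);
			return (NULL);
		}
		buf = bigger;
		got = fetch(buf, need);
		if (got <= need)
			break;		/* list did not grow; done */
		need = got;		/* list grew between calls; go again */
	}
	*out_len = need;
	return (buf);
}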
static int pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd, reportlun_queue_depth_data_t **buff, size_t *data_length) { int ret; size_t list_len; size_t data_len; size_t new_lun_list_length; reportlun_queue_depth_data_t *lun_data; reportlun_header_t report_lun_header; DBG_FUNC("IN\n"); ret = pqisrc_report_luns(softs, cmd, &report_lun_header, sizeof(report_lun_header)); if (ret) { DBG_ERR("failed return code: %d\n", ret); return ret; } list_len = BE_32(report_lun_header.list_length); retry: data_len = sizeof(reportlun_header_t) + list_len; *data_length = data_len; lun_data = os_mem_alloc(softs, data_len); if (!lun_data) { DBG_ERR("failed to allocate memory for lun_data\n"); return PQI_STATUS_FAILURE; } if (list_len == 0) { - DBG_INFO("list_len is 0\n"); + DBG_DISC("list_len is 0\n"); memcpy(lun_data, &report_lun_header, sizeof(report_lun_header)); goto out; } ret = pqisrc_report_luns(softs, cmd, lun_data, data_len); if (ret) { DBG_ERR("error\n"); goto error; } new_lun_list_length = BE_32(lun_data->header.list_length); if (new_lun_list_length > list_len) { list_len = new_lun_list_length; os_mem_free(softs, (void *)lun_data, data_len); goto retry; } out: *buff = lun_data; DBG_FUNC("OUT\n"); return 0; error: os_mem_free(softs, (void *)lun_data, data_len); DBG_ERR("FAILED\n"); return ret; } /* * Function used to get physical and logical device list */ static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs, reportlun_data_ext_t **physical_dev_list, reportlun_data_ext_t **logical_dev_list, reportlun_queue_depth_data_t **queue_dev_list, size_t *queue_data_length, size_t *phys_data_length, size_t *log_data_length) { int ret = PQI_STATUS_SUCCESS; size_t logical_list_length; size_t logdev_data_length; size_t data_length; reportlun_data_ext_t *local_logdev_list; reportlun_data_ext_t *logdev_data; reportlun_header_t report_lun_header; DBG_FUNC("IN\n"); ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length); if (ret) { DBG_ERR("report physical LUNs failed"); return ret; } ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length); if (ret) { DBG_ERR("report logical LUNs failed"); return ret; } +#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG + /* Save the report_log_dev buffer for deciding rescan requirement from OS driver*/ + if(softs->log_dev_data_length != *log_data_length) { + if(softs->log_dev_list) + os_mem_free(softs, softs->log_dev_list, softs->log_dev_data_length); + softs->log_dev_list = os_mem_alloc(softs, *log_data_length); + } + memcpy(softs->log_dev_list, *logical_dev_list, *log_data_length); + softs->log_dev_data_length = *log_data_length; +#endif + ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length); - if (ret) { - DBG_ERR("report logical LUNs failed"); - return ret; - } + if (ret) { + DBG_ERR("report logical LUNs failed"); + return ret; + } logdev_data = *logical_dev_list; if (logdev_data) { logical_list_length = BE_32(logdev_data->header.list_length); } else { memset(&report_lun_header, 0, sizeof(report_lun_header)); logdev_data = (reportlun_data_ext_t *)&report_lun_header; logical_list_length = 0; } logdev_data_length = sizeof(reportlun_header_t) + logical_list_length; /* Adding LOGICAL device entry for controller */ local_logdev_list = os_mem_alloc(softs, logdev_data_length + sizeof(reportlun_ext_entry_t)); if (!local_logdev_list) { data_length = *log_data_length; os_mem_free(softs, (char *)*logical_dev_list, data_length); 
*logical_dev_list = NULL; return PQI_STATUS_FAILURE; } memcpy(local_logdev_list, logdev_data, logdev_data_length); memset((uint8_t *)local_logdev_list + logdev_data_length, 0, sizeof(reportlun_ext_entry_t)); local_logdev_list->header.list_length = BE_32(logical_list_length + sizeof(reportlun_ext_entry_t)); data_length = *log_data_length; os_mem_free(softs, (char *)*logical_dev_list, data_length); *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t); *logical_dev_list = local_logdev_list; DBG_FUNC("OUT\n"); return ret; } -/* Subroutine used to set Bus-Target-Lun for the requested device */ -static inline void -pqisrc_set_btl(pqi_scsi_dev_t *device, - int bus, int target, int lun) -{ - DBG_FUNC("IN\n"); - - device->bus = bus; - device->target = target; - device->lun = lun; - - DBG_FUNC("OUT\n"); -} - -inline -boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device) +inline boolean_t +pqisrc_is_external_raid_device(pqi_scsi_dev_t *device) { return device->is_external_raid_device; } -static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr) +static inline boolean_t +pqisrc_is_external_raid_addr(uint8_t *scsi3addr) { return scsi3addr[2] != 0; } /* Function used to assign Bus-Target-Lun for the requested device */ static void -pqisrc_assign_btl(pqi_scsi_dev_t *device) +pqisrc_assign_btl(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint8_t *scsi3addr; uint32_t lunid; uint32_t bus; uint32_t target; uint32_t lun; DBG_FUNC("IN\n"); scsi3addr = device->scsi3addr; lunid = GET_LE32(scsi3addr); if (pqisrc_is_hba_lunid(scsi3addr)) { - /* The specified device is the controller. */ - pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1); + /* The specified device is the controller. */ + pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff)); device->target_lun_valid = true; return; } + /* When the specified device is a logical volume, + * physicals will be given targets in pqisrc update + * device list in pqisrc scan devices. 
*/ if (pqisrc_is_logical_device(device)) { - if (pqisrc_is_external_raid_device(device)) { - DBG_DISC("External Raid Device!!!"); - bus = PQI_EXTERNAL_RAID_VOLUME_BUS; - target = (lunid >> 16) & 0x3fff; - lun = lunid & 0xff; - } else { bus = PQI_RAID_VOLUME_BUS; lun = (lunid & 0x3fff) + 1; target = 0; - } - pqisrc_set_btl(device, bus, target, lun); - device->target_lun_valid = true; - return; + pqisrc_set_btl(device, bus, target, lun); + device->target_lun_valid = true; + return; } DBG_FUNC("OUT\n"); } /* Build and send the internal INQUIRY command to particular device */ int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs, uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len) { int ret = PQI_STATUS_SUCCESS; pqisrc_raid_req_t request; raid_path_error_info_elem_t error_info; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); - ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len, - SA_INQUIRY, vpd_page, scsi3addr, &error_info); - - DBG_FUNC("OUT\n"); - return ret; -} - -#if 0 -/* Function used to parse the sense information from response */ -static void -pqisrc_fetch_sense_info(const uint8_t *sense_data, - unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq) -{ - struct sense_header_scsi header; - DBG_FUNC("IN\n"); - - *sense_key = 0; - *ascq = 0; - *asc = 0; + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + request.cmd.cdb[0] = SA_INQUIRY; + if (vpd_page & VPD_PAGE) { + request.cmd.cdb[1] = 0x1; + request.cmd.cdb[2] = (uint8_t)vpd_page; + } + ASSERT(buf_len < 256); + request.cmd.cdb[4] = (uint8_t)buf_len; - if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) { - *sense_key = header.sense_key; - *asc = header.asc; - *ascq = header.ascq; + if (softs->timeout_in_passthrough) { + request.timeout_in_sec = PQISRC_INQUIRY_TIMEOUT; } - DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq); + pqisrc_prepare_send_raid(softs, &request, buff, buf_len, scsi3addr, &error_info); DBG_FUNC("OUT\n"); + return ret; } -#endif /* Determine logical volume status from vpd buffer.*/ static void pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { int ret; uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED; uint8_t vpd_size = sizeof(vpd_volume_status); uint8_t offline = true; size_t page_length; vpd_volume_status *vpd; DBG_FUNC("IN\n"); vpd = os_mem_alloc(softs, vpd_size); if (vpd == NULL) goto out; /* Get the size of the VPD return buff. 
*/ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS, (uint8_t *)vpd, vpd_size); if (ret) { DBG_WARN("Inquiry returned failed status\n"); goto out; } if (vpd->page_code != SA_VPD_LV_STATUS) { DBG_WARN("Returned invalid buffer\n"); goto out; } page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length; if (page_length < vpd_size) goto out; status = vpd->volume_status; offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO)!=0; out: device->volume_offline = offline; device->volume_status = status; os_mem_free(softs, (char *)vpd, vpd_size); DBG_FUNC("OUT\n"); return; } + /* Validate the RAID map parameters */ static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map) { char *error_msg; uint32_t raidmap_size; uint32_t r5or6_blocks_per_row; +/* unsigned phys_dev_num; */ DBG_FUNC("IN\n"); raidmap_size = LE_32(raid_map->structure_size); if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) { error_msg = "RAID map too small\n"; goto error; } #if 0 phys_dev_num = LE_16(raid_map->layout_map_count) * (LE_16(raid_map->data_disks_per_row) + LE_16(raid_map->metadata_disks_per_row)); #endif if (device->raid_level == SA_RAID_1) { if (LE_16(raid_map->layout_map_count) != 2) { error_msg = "invalid RAID-1 map\n"; goto error; } } else if (device->raid_level == SA_RAID_ADM) { if (LE_16(raid_map->layout_map_count) != 3) { error_msg = "invalid RAID-1(triple) map\n"; goto error; } } else if ((device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_6) && LE_16(raid_map->layout_map_count) > 1) { /* RAID 50/60 */ r5or6_blocks_per_row = LE_16(raid_map->strip_size) * LE_16(raid_map->data_disks_per_row); if (r5or6_blocks_per_row == 0) { error_msg = "invalid RAID-5 or RAID-6 map\n"; goto error; } } DBG_FUNC("OUT\n"); return 0; error: DBG_NOTE("%s\n", error_msg); return PQI_STATUS_FAILURE; } /* Get device raidmap for the requested device */ static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { int ret = PQI_STATUS_SUCCESS; - int raidmap_size; + int raidmap_alloc_size = sizeof(pqisrc_raid_map_t); + int raidmap_reported_size; + int structure_size; + int ii; + int *next_offload_to_mirror; pqisrc_raid_req_t request; pqisrc_raid_map_t *raid_map; DBG_FUNC("IN\n"); - raid_map = os_mem_alloc(softs, sizeof(*raid_map)); - if (!raid_map) - return PQI_STATUS_FAILURE; - - memset(&request, 0, sizeof(request)); - ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map), - SA_GET_RAID_MAP, 0, device->scsi3addr, NULL); - - if (ret) { - DBG_ERR("error in build send raid req ret=%d\n", ret); - goto err_out; - } - - raidmap_size = LE_32(raid_map->structure_size); - if (raidmap_size > sizeof(*raid_map)) { - DBG_NOTE("Raid map is larger than 1024 entries, request once again"); - os_mem_free(softs, (char*)raid_map, sizeof(*raid_map)); - - raid_map = os_mem_alloc(softs, raidmap_size); + for (ii = 0; ii < 2; ii++) + { + raid_map = os_mem_alloc(softs, raidmap_alloc_size); if (!raid_map) return PQI_STATUS_FAILURE; + memset(&request, 0, sizeof(request)); + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + request.cmd.cdb[0] = SA_CISS_READ; + request.cmd.cdb[1] = SA_GET_RAID_MAP; + request.cmd.cdb[8] = (uint8_t)((raidmap_alloc_size) >> 8); + request.cmd.cdb[9] = (uint8_t)(raidmap_alloc_size); + + ret = pqisrc_prepare_send_raid(softs, &request, raid_map, raidmap_alloc_size, device->scsi3addr, NULL); - ret = pqisrc_build_send_raid_request(softs, &request, raid_map, 
raidmap_size, - SA_GET_RAID_MAP, 0, device->scsi3addr, NULL); if (ret) { DBG_ERR("error in build send raid req ret=%d\n", ret); goto err_out; } - if(LE_32(raid_map->structure_size) != raidmap_size) { - DBG_WARN("Expected raid map size %d bytes and got %d bytes\n", - raidmap_size,LE_32(raid_map->structure_size)); - goto err_out; - } + raidmap_reported_size = LE_32(raid_map->structure_size); + if (raidmap_reported_size <= raidmap_alloc_size) + break; + + DBG_NOTE("Raid map is larger than 1024 entries, request once again"); + os_mem_free(softs, (char*)raid_map, raidmap_alloc_size); + + raidmap_alloc_size = raidmap_reported_size; } ret = pqisrc_raid_map_validation(softs, device, raid_map); if (ret) { DBG_NOTE("error in raid map validation ret=%d\n", ret); goto err_out; } + structure_size = raid_map->data_disks_per_row * sizeof(*next_offload_to_mirror); + next_offload_to_mirror = os_mem_alloc(softs, structure_size); + if (!next_offload_to_mirror) { + ret = PQI_STATUS_FAILURE; + goto err_out; + } + device->raid_map = raid_map; + device->offload_to_mirror = next_offload_to_mirror; DBG_FUNC("OUT\n"); return 0; err_out: os_mem_free(softs, (char*)raid_map, sizeof(*raid_map)); DBG_FUNC("FAILED \n"); return ret; } /* Get device ioaccel_status to validate the type of device */ static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { int ret = PQI_STATUS_SUCCESS; uint8_t *buff; uint8_t ioaccel_status; DBG_FUNC("IN\n"); buff = os_mem_alloc(softs, 64); if (!buff) return; ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64); if (ret) { DBG_ERR("error in send scsi inquiry ret=%d\n", ret); goto err_out; } ioaccel_status = buff[IOACCEL_STATUS_BYTE]; device->offload_config = !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); if (device->offload_config) { device->offload_enabled_pending = !!(ioaccel_status & OFFLOAD_ENABLED_BIT); if (pqisrc_get_device_raidmap(softs, device)) device->offload_enabled_pending = false; } DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n", device->offload_config, device->offload_enabled_pending); err_out: os_mem_free(softs, (char*)buff, 64); DBG_FUNC("OUT\n"); } /* Get RAID level of requested device */ static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint8_t raid_level; uint8_t *buff; DBG_FUNC("IN\n"); raid_level = SA_RAID_UNKNOWN; buff = os_mem_alloc(softs, 64); if (buff) { int ret; ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64); if (ret == 0) { raid_level = buff[8]; if (raid_level > SA_RAID_MAX) raid_level = SA_RAID_UNKNOWN; } os_mem_free(softs, (char*)buff, 64); } device->raid_level = raid_level; DBG_DISC("RAID LEVEL: %x \n", raid_level); DBG_FUNC("OUT\n"); } /* Parse the inquiry response and determine the type of device */ static int pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { int ret = PQI_STATUS_SUCCESS; uint8_t *inq_buff; - int retry = MAX_RETRIES; + int retry = 3; DBG_FUNC("IN\n"); inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE); if (!inq_buff) return PQI_STATUS_FAILURE; while(retry--) { /* Send an inquiry to the device to see what it is. 
*/ ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff, OBDR_TAPE_INQ_SIZE); if (!ret) break; DBG_WARN("Retrying inquiry !!!\n"); } if(retry <= 0) goto err_out; pqisrc_sanitize_inquiry_string(&inq_buff[8], 8); pqisrc_sanitize_inquiry_string(&inq_buff[16], 16); device->devtype = inq_buff[0] & 0x1f; memcpy(device->vendor, &inq_buff[8], sizeof(device->vendor)); memcpy(device->model, &inq_buff[16], sizeof(device->model)); DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n", device->devtype, device->vendor, device->model); if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) { if (pqisrc_is_external_raid_device(device)) { device->raid_level = SA_RAID_UNKNOWN; device->volume_status = SA_LV_OK; device->volume_offline = false; } else { pqisrc_get_dev_raid_level(softs, device); pqisrc_get_dev_ioaccel_status(softs, device); pqisrc_get_dev_vol_status(softs, device); } } /* * Check if this is a One-Button-Disaster-Recovery device * by looking for "$DR-10" at offset 43 in the inquiry data. */ device->is_obdr_device = (device->devtype == ROM_DEVICE && memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG, OBDR_SIG_LEN) == 0); err_out: os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE); DBG_FUNC("OUT\n"); return ret; } /* * BMIC (Basic Management And Interface Commands) command * to get the controller identify params */ static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff) { int ret = PQI_STATUS_SUCCESS; pqisrc_raid_req_t request; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); - ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff), - BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); + + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_READ; + request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_CONTROLLER; + request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*buff)); + + ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, sizeof(*buff)); + DBG_FUNC("OUT\n"); return ret; } /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */ int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; bmic_ident_ctrl_t *identify_ctrl; DBG_FUNC("IN\n"); identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl)); if (!identify_ctrl) { DBG_ERR("failed to allocate memory for identify_ctrl\n"); return PQI_STATUS_FAILURE; } memset(identify_ctrl, 0, sizeof(*identify_ctrl)); ret = pqisrc_identify_ctrl(softs, identify_ctrl); if (ret) goto out; softs->fw_build_number = identify_ctrl->fw_build_number; memcpy(softs->fw_version, identify_ctrl->fw_version, sizeof(identify_ctrl->fw_version)); softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0'; snprintf(softs->fw_version + strlen(softs->fw_version), sizeof(softs->fw_version), "-%u", identify_ctrl->fw_build_number); out: os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl)); DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number); DBG_FUNC("OUT\n"); return ret; } /* BMIC command to determine scsi device identify params */ static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, bmic_ident_physdev_t *buff, int buf_len) { int ret = PQI_STATUS_SUCCESS; uint16_t bmic_device_index; pqisrc_raid_req_t request; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr); - request.cdb[2] = (uint8_t)bmic_device_index; - request.cdb[9] = 
(uint8_t)(bmic_device_index >> 8); - ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len, - BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); + request.data_direction = SOP_DATA_DIR_TO_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_READ; + request.cmd.bmic_cdb.cmd = BMIC_IDENTIFY_PHYSICAL_DEVICE; + request.cmd.bmic_cdb.xfer_len = BE_16(buf_len); + request.cmd.cdb[2] = (uint8_t)bmic_device_index; + request.cmd.cdb[9] = (uint8_t)(bmic_device_index >> 8); + + ret = pqisrc_prepare_send_ctrlr_request(softs, &request, buff, buf_len); + DBG_FUNC("OUT\n"); return ret; } /* * Function used to get the scsi device information using one of BMIC * BMIC_IDENTIFY_PHYSICAL_DEVICE */ static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, bmic_ident_physdev_t *id_phys) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); memset(id_phys, 0, sizeof(*id_phys)); ret= pqisrc_identify_physical_disk(softs, device, id_phys, sizeof(*id_phys)); if (ret) { device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; return; } device->queue_depth = LE_16(id_phys->current_queue_depth_limit); device->device_type = id_phys->device_type; device->active_path_index = id_phys->active_path_number; device->path_map = id_phys->redundant_path_present_map; memcpy(&device->box, &id_phys->alternate_paths_phys_box_on_port, sizeof(device->box)); memcpy(&device->phys_connector, &id_phys->alternate_paths_phys_connector, sizeof(device->phys_connector)); device->bay = id_phys->phys_bay_in_box; + if (id_phys->multi_lun_device_lun_count) { + device->is_multi_lun = true; + } DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n", device->device_type, device->queue_depth); DBG_FUNC("OUT\n"); } /* Function used to find the entry of the device in a list */ -static -device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs, +static device_status_t +pqisrc_scsi_find_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device) { pqi_scsi_dev_t *device; - int i,j; + int i; DBG_FUNC("IN\n"); for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr, - device->scsi3addr)) { - *same_device = device; - if (pqisrc_device_equal(device_to_find, device)) { - if (device_to_find->volume_offline) - return DEVICE_CHANGED; - return DEVICE_UNCHANGED; - } - return DEVICE_CHANGED; + device = softs->dev_list[i]; + if(device == NULL) + continue; + if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr, + device->scsi3addr)) { + *same_device = device; + if (device->in_remove == true) + return DEVICE_IN_REMOVE; + if (pqisrc_device_equal(device_to_find, device)) { + if (device_to_find->volume_offline) + return DEVICE_CHANGED; + return DEVICE_UNCHANGED; } + return DEVICE_CHANGED; } } DBG_FUNC("OUT\n"); return DEVICE_NOT_FOUND; } /* Update the newly added devices as existed device */ static void pqisrc_exist_device_update(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device) { DBG_FUNC("IN\n"); device_exist->expose_device = new_device->expose_device; memcpy(device_exist->vendor, new_device->vendor, sizeof(device_exist->vendor)); memcpy(device_exist->model, new_device->model, sizeof(device_exist->model)); device_exist->is_physical_device = new_device->is_physical_device; device_exist->is_external_raid_device = new_device->is_external_raid_device; - - if 
((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION || - device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) && - new_device->volume_status == SA_LV_OK) { + /* Whenever a logical device expansion happens, reprobe of + * all existing LDs will be triggered, which is resulting + * in updating the size to the os. */ + if ((softs->ld_rescan) && (pqisrc_is_logical_device(device_exist))) { device_exist->scsi_rescan = true; } device_exist->sas_address = new_device->sas_address; device_exist->raid_level = new_device->raid_level; device_exist->queue_depth = new_device->queue_depth; device_exist->ioaccel_handle = new_device->ioaccel_handle; device_exist->volume_status = new_device->volume_status; device_exist->active_path_index = new_device->active_path_index; device_exist->path_map = new_device->path_map; device_exist->bay = new_device->bay; memcpy(device_exist->box, new_device->box, sizeof(device_exist->box)); memcpy(device_exist->phys_connector, new_device->phys_connector, sizeof(device_exist->phys_connector)); device_exist->offload_config = new_device->offload_config; device_exist->offload_enabled_pending = new_device->offload_enabled_pending; - device_exist->offload_to_mirror = 0; + if (device_exist->offload_to_mirror) + os_mem_free(softs, + (int *) device_exist->offload_to_mirror, + sizeof(*(device_exist->offload_to_mirror))); + device_exist->offload_to_mirror = new_device->offload_to_mirror; if (device_exist->raid_map) os_mem_free(softs, - (char *)device_exist->raid_map, - sizeof(*device_exist->raid_map)); + (char *)device_exist->raid_map, + sizeof(*device_exist->raid_map)); device_exist->raid_map = new_device->raid_map; - /* To prevent this from being freed later. */ + /* To prevent these from being freed later. */ new_device->raid_map = NULL; - DBG_FUNC("OUT\n"); -} - -/* Validate the ioaccel_handle for a newly added device */ -static -pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel( - pqisrc_softstate_t *softs, uint32_t ioaccel_handle) -{ - pqi_scsi_dev_t *device; - int i,j; - DBG_FUNC("IN\n"); - for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - if (device->devtype != DISK_DEVICE) - continue; - if (pqisrc_is_logical_device(device)) - continue; - if (device->ioaccel_handle == ioaccel_handle) - return device; - } - } - DBG_FUNC("OUT\n"); - - return NULL; -} - -/* Get the scsi device queue depth */ -static void -pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs) -{ - unsigned i; - unsigned phys_dev_num; - unsigned num_raidmap_entries; - unsigned queue_depth; - pqisrc_raid_map_t *raid_map; - pqi_scsi_dev_t *device; - raidmap_data_t *dev_data; - pqi_scsi_dev_t *phys_disk; - unsigned j; - unsigned k; - - DBG_FUNC("IN\n"); - - for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - if (device->devtype != DISK_DEVICE) - continue; - if (!pqisrc_is_logical_device(device)) - continue; - if (pqisrc_is_external_raid_device(device)) - continue; - device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; - raid_map = device->raid_map; - if (!raid_map) - return; - dev_data = raid_map->dev_data; - phys_dev_num = LE_16(raid_map->layout_map_count) * - (LE_16(raid_map->data_disks_per_row) + - LE_16(raid_map->metadata_disks_per_row)); - num_raidmap_entries = phys_dev_num * - LE_16(raid_map->row_cnt); - - queue_depth = 0; - for (k = 0; k < 
num_raidmap_entries; k++) { - phys_disk = pqisrc_identify_device_via_ioaccel(softs, - dev_data[k].ioaccel_handle); - - if (!phys_disk) { - DBG_WARN( - "Failed to find physical disk handle for logical drive %016llx\n", - (unsigned long long)BE_64(device->scsi3addr[0])); - device->offload_enabled = false; - device->offload_enabled_pending = false; - if (raid_map) - os_mem_free(softs, (char *)raid_map, sizeof(*raid_map)); - device->raid_map = NULL; - return; - } - - queue_depth += phys_disk->queue_depth; - } - - device->queue_depth = queue_depth; - } /* end inner loop */ - }/* end outer loop */ + new_device->offload_to_mirror = NULL; DBG_FUNC("OUT\n"); } /* Function used to add a scsi device to OS scsi subsystem */ static int pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { DBG_FUNC("IN\n"); DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n", device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status); device->invalid = false; + device->schedule_rescan = false; + device->softs = softs; + device->in_remove = false; if(device->expose_device) { pqisrc_init_device_active_io(softs, device); /* TBD: Call OS upper layer function to add the device entry */ os_add_device(softs,device); } DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } /* Function used to remove a scsi device from OS scsi subsystem */ void pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { DBG_FUNC("IN\n"); DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n", device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status); - device->invalid = true; if (device->expose_device == false) { /*Masked physical devices are not been exposed to storage stack. *Hence, free the masked device resources such as *device memory, Target ID,etc., here. */ DBG_NOTE("Deallocated Masked Device Resources.\n"); + /* softs->device_list[device->target][device->lun] = NULL; */ pqisrc_free_device(softs,device); return; } /* Wait for device outstanding Io's */ pqisrc_wait_for_device_commands_to_complete(softs, device); /* Call OS upper layer function to remove the exposed device entry */ os_remove_device(softs,device); DBG_FUNC("OUT\n"); } + /* * When exposing new device to OS fails then adjst list according to the * mid scsi list */ static void pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { + int i; + unsigned char addr1[8], addr2[8]; + pqi_scsi_dev_t *temp_device; DBG_FUNC("IN\n"); if (!device) { DBG_ERR("softs = %p: device is NULL !!!\n", softs); return; } OS_ACQUIRE_SPINLOCK(&softs->devlist_lock); - softs->device_list[device->target][device->lun] = NULL; + uint8_t *scsi3addr; + /*For external raid device, there can be multiple luns + *with same target. 
So while freeing external raid device, + *free target only after removing all luns with same target.*/ + if (pqisrc_is_external_raid_device(device)) { + memcpy(addr1, device->scsi3addr, 8); + for(i = 0; i < PQI_MAX_DEVICES; i++) { + if(softs->dev_list[i] == NULL) + continue; + temp_device = softs->dev_list[i]; + memcpy(addr2, temp_device->scsi3addr, 8); + if(memcmp(addr1, addr2, 8) == 0) { + continue; + } + if (addr1[2] == addr2[2]) { + break; + } + } + if(i == PQI_MAX_DEVICES) { + pqisrc_remove_target_bit(softs, device->target); + } + } + + if(pqisrc_delete_softs_entry(softs, device) == PQI_STATUS_SUCCESS){ + scsi3addr = device->scsi3addr; + if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)){ + DBG_NOTE("About to remove target bit %d \n", device->target); + pqisrc_remove_target_bit(softs, device->target); + } + } OS_RELEASE_SPINLOCK(&softs->devlist_lock); pqisrc_device_mem_free(softs, device); DBG_FUNC("OUT\n"); } /* Debug routine used to display the RAID volume status of the device */ static void -pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) +pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { char *status; DBG_FUNC("IN\n"); switch (device->volume_status) { case SA_LV_OK: status = "Volume is online."; break; case SA_LV_UNDERGOING_ERASE: status = "Volume is undergoing background erase process."; break; case SA_LV_NOT_AVAILABLE: status = "Volume is waiting for transforming volume."; break; case SA_LV_UNDERGOING_RPI: status = "Volume is undergoing rapid parity initialization process."; break; case SA_LV_PENDING_RPI: status = "Volume is queued for rapid parity initialization process."; break; case SA_LV_ENCRYPTED_NO_KEY: status = "Volume is encrypted and cannot be accessed because key is not present."; break; case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode."; break; case SA_LV_UNDERGOING_ENCRYPTION: status = "Volume is undergoing encryption process."; break; case SA_LV_UNDERGOING_ENCRYPTION_REKEYING: status = "Volume is undergoing encryption re-keying process."; break; case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled."; break; case SA_LV_PENDING_ENCRYPTION: status = "Volume is pending migration to encrypted state, but process has not started."; break; case SA_LV_PENDING_ENCRYPTION_REKEYING: status = "Volume is encrypted and is pending encryption rekeying."; break; case SA_LV_STATUS_VPD_UNSUPPORTED: status = "Volume status is not available through vital product data pages."; break; case SA_LV_UNDERGOING_EXPANSION: status = "Volume undergoing expansion"; break; case SA_LV_QUEUED_FOR_EXPANSION: status = "Volume queued for expansion"; + break; case SA_LV_EJECTED: status = "Volume ejected"; break; case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED: status = "Volume has wrong physical drive replaced"; break; case SA_LV_DISABLED_SCSI_ID_CONFLICT: status = "Volume disabled scsi id conflict"; break; case SA_LV_HARDWARE_HAS_OVERHEATED: status = "Volume hardware has over heated"; break; case SA_LV_HARDWARE_OVERHEATING: status = "Volume hardware over heating"; break; case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: status = "Volume physical drive connection problem"; break; default: status = "Volume is in an unknown state."; break; } - DBG_DISC("scsi BTL %d:%d:%d %s\n", + DBG_NOTE("scsi BTL %d:%d:%d %s\n", device->bus, 
device->target, device->lun, status); DBG_FUNC("OUT\n"); } void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { DBG_FUNC("IN\n"); if (!device) return; if (device->raid_map) { os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t)); } + if (device->offload_to_mirror) { + os_mem_free(softs, (int *)device->offload_to_mirror, sizeof(*(device->offload_to_mirror))); + } os_mem_free(softs, (char *)device,sizeof(*device)); DBG_FUNC("OUT\n"); } /* OS should call this function to free the scsi device */ void -pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device) +pqisrc_free_device(pqisrc_softstate_t * softs, pqi_scsi_dev_t *device) { rcb_t *rcb; - int i; - + uint8_t *scsi3addr; + int i, index; + pqi_scsi_dev_t *temp_device; + unsigned char addr1[8], addr2[8]; /* Clear the "device" field in the rcb. * Response coming after device removal shouldn't access this field */ for(i = 1; i <= softs->max_outstanding_io; i++) { rcb = &softs->rcb[i]; if(rcb->dvp == device) { DBG_WARN("Pending requests for the removing device\n"); rcb->dvp = NULL; } } + /* Find the entry in device list for the freed device softs->dev_list[i]& + *make it NULL before freeing the device memory + */ + index = pqisrc_find_device_list_index(softs, device); OS_ACQUIRE_SPINLOCK(&softs->devlist_lock); - - if (!pqisrc_is_logical_device(device)) { - pqisrc_free_tid(softs,device->target); + scsi3addr = device->scsi3addr; + if (!pqisrc_is_logical_device(device) && !MASKED_DEVICE(scsi3addr)) { + DBG_NOTE("Giving back target %i \n", device->target); + pqisrc_remove_target_bit(softs, device->target); + } + /*For external raid device, there can be multiple luns + *with same target. So while freeing external raid device, + *free target only after removing all luns with same target.*/ + if (pqisrc_is_external_raid_device(device)) { + memcpy(addr1, device->scsi3addr, 8); + for(i = 0; i < PQI_MAX_DEVICES; i++) { + if(softs->dev_list[i] == NULL) + continue; + temp_device = softs->dev_list[i]; + memcpy(addr2, temp_device->scsi3addr, 8); + if(memcmp(addr1, addr2, 8) == 0) { + continue; + } + if (addr1[2] == addr2[2]) { + break; + } + } + if(i == PQI_MAX_DEVICES) { + pqisrc_remove_target_bit(softs, device->target); + } } - softs->device_list[device->target][device->lun] = NULL; - - pqisrc_device_mem_free(softs, device); + if (index >= 0 && index < PQI_MAX_DEVICES) + softs->dev_list[index] = NULL; + if (device->expose_device == true){ + pqisrc_delete_softs_entry(softs, device); + DBG_NOTE("Removed memory for device : B %d: T %d: L %d\n", + device->bus, device->target, device->lun); + pqisrc_device_mem_free(softs, device); + } OS_RELEASE_SPINLOCK(&softs->devlist_lock); } + /* Update the newly added devices to the device list */ static void pqisrc_update_device_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *new_device_list[], int num_new_devices) { int ret; int i; device_status_t dev_status; pqi_scsi_dev_t *device; pqi_scsi_dev_t *same_device; pqi_scsi_dev_t **added = NULL; pqi_scsi_dev_t **removed = NULL; int nadded = 0, nremoved = 0; - int j; - int tid = 0; - boolean_t driver_queue_depth_flag = false; + uint8_t *scsi3addr; DBG_FUNC("IN\n"); added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES); removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES); if (!added || !removed) { DBG_WARN("Out of memory \n"); goto free_and_out; } OS_ACQUIRE_SPINLOCK(&softs->devlist_lock); for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] 
== NULL) - continue; - device = softs->device_list[i][j]; - device->device_gone = true; - } + if(softs->dev_list[i] == NULL) + continue; + device = softs->dev_list[i]; + device->device_gone = true; } + + /* TODO:Remove later */ DBG_IO("Device list used an array\n"); for (i = 0; i < num_new_devices; i++) { device = new_device_list[i]; dev_status = pqisrc_scsi_find_entry(softs, device, &same_device); switch (dev_status) { case DEVICE_UNCHANGED: /* New Device present in existing device list */ device->new_device = false; same_device->device_gone = false; pqisrc_exist_device_update(softs, same_device, device); break; case DEVICE_NOT_FOUND: /* Device not found in existing list */ device->new_device = true; break; case DEVICE_CHANGED: /* Actual device gone need to add device to list*/ device->new_device = true; break; + case DEVICE_IN_REMOVE: + /*Older device with same target/lun is in removal stage*/ + /*New device will be added/scanned when same target/lun + * device_list[] gets removed from the OS target + * free call*/ + device->new_device = false; + same_device->schedule_rescan = true; + break; default: break; } } + /* Process all devices that have gone away. */ - for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) + for(i = 0; i < PQI_MAX_DEVICES; i++) { + device = softs->dev_list[i]; + if(device == NULL) + continue; + if (device->device_gone) { + if(device->in_remove == true) + { continue; - device = softs->device_list[i][j]; - if (device->device_gone) { - softs->device_list[device->target][device->lun] = NULL; - removed[nremoved] = device; - nremoved++; } + device->in_remove = true; + removed[nremoved] = device; + softs->num_devs--; + nremoved++; } } /* Process all new devices. */ for (i = 0, nadded = 0; i < num_new_devices; i++) { device = new_device_list[i]; if (!device->new_device) continue; if (device->volume_offline) continue; - /* physical device */ - if (!pqisrc_is_logical_device(device)) { - tid = pqisrc_alloc_tid(softs); - if(INVALID_ELEM != tid) - pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0); - } - - /* This is not expected. We may lose the reference to the old device entry. - * If the target & lun ids are same, it is supposed to detect as an existing - * device, and not as a new device - */ - if(softs->device_list[device->target][device->lun] != NULL) { - DBG_WARN("Overwriting T : %d L :%d\n",device->target,device->lun); + /* Find out which devices to add to the driver list + * in softs->dev_list */ + scsi3addr = device->scsi3addr; + if (device->expose_device || !MASKED_DEVICE(scsi3addr)){ + if(pqisrc_add_softs_entry(softs, device, scsi3addr)){ + /* To prevent this entry from being freed later. */ + new_device_list[i] = NULL; + added[nadded] = device; + nadded++; + } } - softs->device_list[device->target][device->lun] = device; - - DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device, - device->bus,device->target,device->lun); - /* To prevent this entry from being freed later. 
*/ - new_device_list[i] = NULL; - added[nadded] = device; - nadded++; } - for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - device->offload_enabled = device->offload_enabled_pending; + device = softs->dev_list[i]; + if(device == NULL) + continue; + if (device->offload_enabled != device->offload_enabled_pending) + { + DBG_NOTE("[%d:%d:%d]Changing AIO to %d (was %d)\n", + device->bus, device->target, device->lun, + device->offload_enabled_pending, + device->offload_enabled); } + device->offload_enabled = device->offload_enabled_pending; } OS_RELEASE_SPINLOCK(&softs->devlist_lock); for(i = 0; i < nremoved; i++) { device = removed[i]; if (device == NULL) continue; pqisrc_display_device_info(softs, "removed", device); pqisrc_remove_device(softs, device); - } + OS_ACQUIRE_SPINLOCK(&softs->devlist_lock); + for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - /* - * Notify the OS upper layer if the queue depth of any existing device has - * changed. - */ - if (device->queue_depth != - device->advertised_queue_depth) { - device->advertised_queue_depth = device->queue_depth; - /* TBD: Call OS upper layer function to change device Q depth */ - } - if (device->firmware_queue_depth_set == false) - driver_queue_depth_flag = true; - if (device->scsi_rescan) - os_rescan_target(softs, device); + if(softs->dev_list[i] == NULL) + continue; + device = softs->dev_list[i]; + if (device->in_remove) + continue; + /* + * If firmware queue depth is corrupt or not working + * use the PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH + * which is 0. That means there is no limit to the + * queue depth all the way up to the controller + * queue depth + */ + if (pqisrc_is_logical_device(device) && + device->firmware_queue_depth_set == false) + device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; + + if (device->scsi_rescan) { + os_rescan_target(softs, device); } } - /* - * If firmware queue depth is corrupt or not working - * use driver method to re-calculate the queue depth - * for all logical devices - */ - if (driver_queue_depth_flag) - pqisrc_update_log_dev_qdepth(softs); + softs->ld_rescan = false; + + OS_RELEASE_SPINLOCK(&softs->devlist_lock); for(i = 0; i < nadded; i++) { device = added[i]; if (device->expose_device) { ret = pqisrc_add_device(softs, device); if (ret) { DBG_WARN("scsi %d:%d:%d addition failed, device not added\n", - device->bus, device->target, - device->lun); + device->bus, device->target, device->lun); pqisrc_adjust_list(softs, device); continue; } } pqisrc_display_device_info(softs, "added", device); } /* Process all volumes that are offline. 
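 * These were skipped by the add loop above, so this pass only reports their
 * volume status and device info; nothing is entered into softs->dev_list[].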
*/ for (i = 0; i < num_new_devices; i++) { device = new_device_list[i]; if (!device) continue; if (!device->new_device) continue; if (device->volume_offline) { pqisrc_display_volume_status(softs, device); pqisrc_display_device_info(softs, "offline", device); } } + for (i = 0; i < PQI_MAX_DEVICES; i++) { + device = softs->dev_list[i]; + if(device == NULL) + continue; + DBG_DISC("Current device %d : B%d:T%d:L%d\n", + i, device->bus, device->target, + device->lun); + } + free_and_out: if (added) os_mem_free(softs, (char *)added, sizeof(*added) * PQI_MAX_DEVICES); if (removed) os_mem_free(softs, (char *)removed, sizeof(*removed) * PQI_MAX_DEVICES); DBG_FUNC("OUT\n"); } /* * Let the Adapter know about driver version using one of BMIC * BMIC_WRITE_HOST_WELLNESS */ int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs) { int rval = PQI_STATUS_SUCCESS; struct bmic_host_wellness_driver_version *host_wellness_driver_ver; size_t data_length; pqisrc_raid_req_t request; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); data_length = sizeof(*host_wellness_driver_ver); host_wellness_driver_ver = os_mem_alloc(softs, data_length); if (!host_wellness_driver_ver) { DBG_ERR("failed to allocate memory for host wellness driver_version\n"); return PQI_STATUS_FAILURE; } host_wellness_driver_ver->start_tag[0] = '<'; host_wellness_driver_ver->start_tag[1] = 'H'; host_wellness_driver_ver->start_tag[2] = 'W'; host_wellness_driver_ver->start_tag[3] = '>'; host_wellness_driver_ver->driver_version_tag[0] = 'D'; host_wellness_driver_ver->driver_version_tag[1] = 'V'; host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version)); strncpy(host_wellness_driver_ver->driver_version, softs->os_name, sizeof(host_wellness_driver_ver->driver_version)); if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version) ) { strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION, sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name)); } else { - DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n", - strlen(softs->os_name)); + DBG_DISC("OS name length(%u) is longer than buffer of driver_version\n", + (unsigned int)strlen(softs->os_name)); } host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0'; host_wellness_driver_ver->end_tag[0] = 'Z'; host_wellness_driver_ver->end_tag[1] = 'Z'; - rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length, - BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); + + request.data_direction = SOP_DATA_DIR_FROM_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_WRITE; + request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS; + request.cmd.bmic_cdb.xfer_len = BE_16(data_length); + + rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_driver_ver, data_length); os_mem_free(softs, (char *)host_wellness_driver_ver, data_length); DBG_FUNC("OUT"); return rval; } /* * Write current RTC time from host to the adapter using * BMIC_WRITE_HOST_WELLNESS */ int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs) { int rval = PQI_STATUS_SUCCESS; struct bmic_host_wellness_time *host_wellness_time; size_t data_length; pqisrc_raid_req_t request; DBG_FUNC("IN\n"); memset(&request, 0, sizeof(request)); data_length = sizeof(*host_wellness_time); host_wellness_time = os_mem_alloc(softs, data_length); if (!host_wellness_time) { DBG_ERR("failed to 
allocate memory for host wellness time structure\n"); return PQI_STATUS_FAILURE; } host_wellness_time->start_tag[0] = '<'; host_wellness_time->start_tag[1] = 'H'; host_wellness_time->start_tag[2] = 'W'; host_wellness_time->start_tag[3] = '>'; host_wellness_time->time_tag[0] = 'T'; host_wellness_time->time_tag[1] = 'D'; host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) - offsetof(struct bmic_host_wellness_time, century)); os_get_time(host_wellness_time); host_wellness_time->dont_write_tag[0] = 'D'; host_wellness_time->dont_write_tag[1] = 'W'; host_wellness_time->end_tag[0] = 'Z'; host_wellness_time->end_tag[1] = 'Z'; - rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length, - BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL); + + request.data_direction = SOP_DATA_DIR_FROM_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_WRITE; + request.cmd.bmic_cdb.cmd = BMIC_WRITE_HOST_WELLNESS; + request.cmd.bmic_cdb.xfer_len = BE_16(data_length); + + rval = pqisrc_prepare_send_ctrlr_request(softs, &request, host_wellness_time, data_length); os_mem_free(softs, (char *)host_wellness_time, data_length); DBG_FUNC("OUT"); return rval; } - +static void +pqisrc_get_device_vpd_info(pqisrc_softstate_t *softs, + bmic_ident_physdev_t *bmic_phy_info,pqi_scsi_dev_t *device) +{ + DBG_FUNC("IN\n"); + memcpy(&device->wwid, &bmic_phy_info->padding[79], sizeof(device->wwid)); + DBG_FUNC("OUT\n"); +} /* * Function used to perform a rescan of scsi devices * for any config change events */ int pqisrc_scan_devices(pqisrc_softstate_t *softs) { boolean_t is_physical_device; - int ret = PQI_STATUS_FAILURE; + int ret; int i; int new_dev_cnt; int phy_log_dev_cnt; size_t queue_log_data_length; uint8_t *scsi3addr; uint8_t multiplier; uint16_t qdepth; uint32_t physical_cnt; uint32_t logical_cnt; uint32_t logical_queue_cnt; uint32_t ndev_allocated = 0; size_t phys_data_length, log_data_length; reportlun_data_ext_t *physical_dev_list = NULL; reportlun_data_ext_t *logical_dev_list = NULL; reportlun_ext_entry_t *lun_ext_entry = NULL; reportlun_queue_depth_data_t *logical_queue_dev_list = NULL; bmic_ident_physdev_t *bmic_phy_info = NULL; pqi_scsi_dev_t **new_device_list = NULL; pqi_scsi_dev_t *device = NULL; - +#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG + int num_ext_raid_devices = 0; +#endif DBG_FUNC("IN\n"); ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list, &logical_queue_dev_list, &queue_log_data_length, &phys_data_length, &log_data_length); if (ret) goto err_out; physical_cnt = BE_32(physical_dev_list->header.list_length) / sizeof(physical_dev_list->lun_entries[0]); logical_cnt = BE_32(logical_dev_list->header.list_length) / sizeof(logical_dev_list->lun_entries[0]); logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length) / sizeof(logical_queue_dev_list->lun_entries[0]); - DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt); + DBG_DISC("physical_cnt %u logical_cnt %u queue_cnt %u\n", physical_cnt, logical_cnt, logical_queue_cnt); if (physical_cnt) { bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info)); if (bmic_phy_info == NULL) { ret = PQI_STATUS_FAILURE; DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret); goto err_out; } } phy_log_dev_cnt = physical_cnt + logical_cnt; new_device_list = os_mem_alloc(softs, sizeof(*new_device_list) * phy_log_dev_cnt); if (new_device_list == NULL) { ret = PQI_STATUS_FAILURE; 
DBG_ERR("failed to allocate memory for device list : %d\n", ret); goto err_out; } for (i = 0; i < phy_log_dev_cnt; i++) { new_device_list[i] = os_mem_alloc(softs, sizeof(*new_device_list[i])); if (new_device_list[i] == NULL) { ret = PQI_STATUS_FAILURE; DBG_ERR("failed to allocate memory for device list : %d\n", ret); ndev_allocated = i; goto err_out; } } ndev_allocated = phy_log_dev_cnt; new_dev_cnt = 0; for (i = 0; i < phy_log_dev_cnt; i++) { if (i < physical_cnt) { is_physical_device = true; lun_ext_entry = &physical_dev_list->lun_entries[i]; } else { is_physical_device = false; lun_ext_entry = &logical_dev_list->lun_entries[i - physical_cnt]; } scsi3addr = lun_ext_entry->lunid; /* Save the target sas adderess for external raid device */ if(lun_ext_entry->device_type == CONTROLLER_DEVICE) { +#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG + num_ext_raid_devices++; +#endif int target = lun_ext_entry->lunid[3] & 0x3f; softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid); } /* Skip masked physical non-disk devices. */ if (MASKED_DEVICE(scsi3addr) && is_physical_device && (lun_ext_entry->ioaccel_handle == 0)) continue; device = new_device_list[new_dev_cnt]; memset(device, 0, sizeof(*device)); memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); device->wwid = lun_ext_entry->wwid; device->is_physical_device = is_physical_device; if (!is_physical_device && logical_queue_cnt--) { device->is_external_raid_device = pqisrc_is_external_raid_addr(scsi3addr); /* The multiplier is the value we multiply the queue - * depth value with to get the actual queue depth. - * If multiplier is 1 multiply by 256 if - * multiplier 0 then multiply by 16 */ + * depth value with to get the actual queue depth. + * If multiplier is 1 multiply by 256 if + * multiplier 0 then multiply by 16 */ multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier; qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth; if (multiplier) { device->firmware_queue_depth_set = true; device->queue_depth = qdepth*256; } else { device->firmware_queue_depth_set = true; device->queue_depth = qdepth*16; } if (device->queue_depth > softs->adapterQDepth) { device->firmware_queue_depth_set = true; device->queue_depth = softs->adapterQDepth; } if ((multiplier == 1) && - (qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH)) + (qdepth >= MAX_RAW_M256_QDEPTH)) device->firmware_queue_depth_set = false; if ((multiplier == 0) && - (qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH)) + (qdepth >= MAX_RAW_M16_QDEPTH)) device->firmware_queue_depth_set = false; + } /* Get device type, vendor, model, device ID. */ ret = pqisrc_get_dev_data(softs, device); if (ret) { DBG_WARN("Inquiry failed, skipping device %016llx\n", (unsigned long long)BE_64(device->scsi3addr[0])); DBG_DISC("INQUIRY FAILED \n"); continue; } /* Set controller queue depth to what - * it was from the scsi midlayer */ + * it was from the scsi midlayer */ if (device->devtype == RAID_DEVICE) { device->firmware_queue_depth_set = true; device->queue_depth = softs->adapterQDepth; } - pqisrc_assign_btl(device); + pqisrc_assign_btl(softs, device); /* * Expose all devices except for physical devices that * are masked. 
*/ if (device->is_physical_device && MASKED_DEVICE(scsi3addr)) device->expose_device = false; else device->expose_device = true; if (device->is_physical_device && (lun_ext_entry->device_flags & REPORT_LUN_DEV_FLAG_AIO_ENABLED) && lun_ext_entry->ioaccel_handle) { device->aio_enabled = true; } switch (device->devtype) { case ROM_DEVICE: /* * We don't *really* support actual CD-ROM devices, * but we do support the HP "One Button Disaster * Recovery" tape drive which temporarily pretends to * be a CD-ROM drive. */ if (device->is_obdr_device) new_dev_cnt++; break; case DISK_DEVICE: case ZBC_DEVICE: if (device->is_physical_device) { device->ioaccel_handle = lun_ext_entry->ioaccel_handle; - device->sas_address = BE_64(lun_ext_entry->wwid); pqisrc_get_physical_device_info(softs, device, bmic_phy_info); + if ( (!softs->page83id_in_rpl) && (bmic_phy_info->device_type == BMIC_DEVICE_TYPE_SATA)) { + pqisrc_get_device_vpd_info(softs, bmic_phy_info, device); + } + device->sas_address = BE_64(device->wwid); } new_dev_cnt++; break; case ENCLOSURE_DEVICE: if (device->is_physical_device) { device->sas_address = BE_64(lun_ext_entry->wwid); } new_dev_cnt++; break; case TAPE_DEVICE: case MEDIUM_CHANGER_DEVICE: new_dev_cnt++; break; case RAID_DEVICE: /* * Only present the HBA controller itself as a RAID * controller. If it's a RAID controller other than * the HBA itself (an external RAID controller, MSA500 * or similar), don't present it. */ if (pqisrc_is_hba_lunid(scsi3addr)) new_dev_cnt++; break; case SES_DEVICE: case CONTROLLER_DEVICE: default: break; } } DBG_DISC("new_dev_cnt %d\n", new_dev_cnt); - +#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG + if(num_ext_raid_devices) + os_start_rescan_timer(softs); + else + os_stop_rescan_timer(softs); +#endif pqisrc_update_device_list(softs, new_device_list, new_dev_cnt); err_out: if (new_device_list) { for (i = 0; i < ndev_allocated; i++) { if (new_device_list[i]) { if(new_device_list[i]->raid_map) os_mem_free(softs, (char *)new_device_list[i]->raid_map, sizeof(pqisrc_raid_map_t)); os_mem_free(softs, (char*)new_device_list[i], sizeof(*new_device_list[i])); } } os_mem_free(softs, (char *)new_device_list, - sizeof(*new_device_list) * ndev_allocated); + sizeof(*new_device_list) * ndev_allocated); } if(physical_dev_list) os_mem_free(softs, (char *)physical_dev_list, phys_data_length); if(logical_dev_list) os_mem_free(softs, (char *)logical_dev_list, log_data_length); if(logical_queue_dev_list) os_mem_free(softs, (char*)logical_queue_dev_list, queue_log_data_length); if (bmic_phy_info) os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info)); DBG_FUNC("OUT \n"); return ret; } /* * Clean up memory allocated for devices. */ void pqisrc_cleanup_devices(pqisrc_softstate_t *softs) { - - int i = 0,j = 0; - pqi_scsi_dev_t *dvp = NULL; + int i = 0; + pqi_scsi_dev_t *device = NULL; DBG_FUNC("IN\n"); - - for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if (softs->device_list[i][j] == NULL) - continue; - dvp = softs->device_list[i][j]; - pqisrc_device_mem_free(softs, dvp); - } + for(i = 0; i < PQI_MAX_DEVICES; i++) { + if(softs->dev_list[i] == NULL) + continue; + device = softs->dev_list[i]; + pqisrc_device_mem_free(softs, device); } + DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c index cec8bbad3cbb..f000d9ce9db3 100644 --- a/sys/dev/smartpqi/smartpqi_event.c +++ b/sys/dev/smartpqi/smartpqi_event.c @@ -1,445 +1,506 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. 
and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include"smartpqi_includes.h" /* * Function to rescan the devices connected to adapter. */ int pqisrc_rescan_devices(pqisrc_softstate_t *softs) { int ret; DBG_FUNC("IN\n"); os_sema_lock(&softs->scan_lock); ret = pqisrc_scan_devices(softs); os_sema_unlock(&softs->scan_lock); DBG_FUNC("OUT\n"); return ret; } void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs) { os_sema_lock(&softs->scan_lock); os_sema_unlock(&softs->scan_lock); } /* * Subroutine to acknowledge the events processed by the driver to the adapter. */ static void pqisrc_acknowledge_event(pqisrc_softstate_t *softs, struct pqi_event *event) { + int ret; pqi_event_acknowledge_request_t request; ib_queue_t *ib_q = &softs->op_raid_ib_q[0]; int tmo = PQISRC_EVENT_ACK_RESP_TIMEOUT; memset(&request,0,sizeof(request)); DBG_FUNC("IN\n"); request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; request.header.iu_length = (sizeof(pqi_event_acknowledge_request_t) - PQI_REQUEST_HEADER_LENGTH); request.event_type = event->event_type; request.event_id = event->event_id; request.additional_event_id = event->additional_event_id; /* Submit Event Acknowledge */ - - pqisrc_submit_cmnd(softs, ib_q, &request); + ret = pqisrc_submit_cmnd(softs, ib_q, &request); + if (ret != PQI_STATUS_SUCCESS) { + DBG_ERR("Unable to submit acknowledge command\n"); + goto out; + } /* * We have to special-case this type of request because the firmware * does not generate an interrupt when this type of request completes. * Therefore, we have to poll until we see that the firmware has * consumed the request before we move on. */ COND_WAIT(((ib_q->pi_local) == *(ib_q->ci_virt_addr)), tmo); if (tmo <= 0) { DBG_ERR("wait for event acknowledge timed out\n"); DBG_ERR("tmo : %d\n",tmo); - } + } - DBG_FUNC(" OUT\n"); +out: + DBG_FUNC("OUT\n"); } /* * Acknowledge processed events to the adapter. 
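 * Runs from the OS event task queue: every entry in pending_events[] that is
 * still marked pending gets an acknowledge IU sent to the firmware, after
 * which a device rescan is performed to pick up the reported changes.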
*/ void pqisrc_ack_all_events(void *arg1) { int i; struct pqi_event *pending_event; pqisrc_softstate_t *softs = (pqisrc_softstate_t*)arg1; DBG_FUNC(" IN\n"); pending_event = &softs->pending_events[0]; for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { if (pending_event->pending == true) { pending_event->pending = false; pqisrc_acknowledge_event(softs, pending_event); } pending_event++; } /* Rescan devices except for heartbeat event */ if ((pqisrc_rescan_devices(softs)) != PQI_STATUS_SUCCESS) { DBG_ERR(" Failed to Re-Scan devices\n "); } DBG_FUNC(" OUT\n"); } /* * Get event index from event type to validate the type of event. */ static int pqisrc_event_type_to_event_index(unsigned event_type) { int index; switch (event_type) { case PQI_EVENT_TYPE_HOTPLUG: index = PQI_EVENT_HOTPLUG; break; case PQI_EVENT_TYPE_HARDWARE: index = PQI_EVENT_HARDWARE; break; case PQI_EVENT_TYPE_PHYSICAL_DEVICE: index = PQI_EVENT_PHYSICAL_DEVICE; break; case PQI_EVENT_TYPE_LOGICAL_DEVICE: index = PQI_EVENT_LOGICAL_DEVICE; break; case PQI_EVENT_TYPE_AIO_STATE_CHANGE: index = PQI_EVENT_AIO_STATE_CHANGE; break; case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE: index = PQI_EVENT_AIO_CONFIG_CHANGE; break; default: index = -1; break; } return index; } /* * Function used to process the events supported by the adapter. */ int pqisrc_process_event_intr_src(pqisrc_softstate_t *softs,int obq_id) { uint32_t obq_pi,obq_ci; pqi_event_response_t response; ob_queue_t *event_q; struct pqi_event *pending_event; boolean_t need_delayed_work = false; DBG_FUNC(" IN\n"); event_q = &softs->event_q; obq_ci = event_q->ci_local; obq_pi = *(event_q->pi_virt_addr); - DBG_INFO("Initial Event_q ci : %d Event_q pi : %d\n", obq_ci, obq_pi); while(1) { int event_index; - DBG_INFO("queue_id : %d ci : %d pi : %d\n",obq_id, obq_ci, obq_pi); + DBG_INFO("Event queue_id : %d, ci : %u, pi : %u\n",obq_id, obq_ci, obq_pi); if (obq_pi == obq_ci) break; need_delayed_work = true; /* Copy the response */ memcpy(&response, event_q->array_virt_addr + (obq_ci * event_q->elem_size), sizeof(pqi_event_response_t)); - DBG_INFO("response.header.iu_type : 0x%x \n", response.header.iu_type); - DBG_INFO("response.event_type : 0x%x \n", response.event_type); + DBG_INIT("event iu_type=0x%x event_type=0x%x\n", + response.header.iu_type, response.event_type); event_index = pqisrc_event_type_to_event_index(response.event_type); + if ( event_index == PQI_EVENT_LOGICAL_DEVICE) { + softs->ld_rescan = true; + } if (event_index >= 0) { if(response.request_acknowledge) { pending_event = &softs->pending_events[event_index]; pending_event->pending = true; pending_event->event_type = response.event_type; pending_event->event_id = response.event_id; pending_event->additional_event_id = response.additional_event_id; } } obq_ci = (obq_ci + 1) % event_q->num_elem; } /* Update CI */ event_q->ci_local = obq_ci; PCI_MEM_PUT32(softs, event_q->ci_register_abs, event_q->ci_register_offset, event_q->ci_local); /*Adding events to the task queue for acknowledging*/ if (need_delayed_work == true) { os_eventtaskqueue_enqueue(softs); } DBG_FUNC("OUT"); return PQI_STATUS_SUCCESS; } +/* + * Function used to build and send the vendor general request + * Used for configuring PQI feature bits between firmware and driver + */ +int +pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs, + struct pqi_vendor_general_request *request) +{ + int ret = PQI_STATUS_SUCCESS; + ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; + ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; + + rcb_t *rcb = NULL; + 
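+	/*
+	 * Flow: reserve a tag, point the response at the default operational
+	 * OB queue, mark the rcb pending, post the IU on the default raid IB
+	 * queue, then wait until the response clears req_pending (or
+	 * PQISRC_CMD_TIMEOUT expires) and return the status from the rcb.
+	 */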
+ /* Get the tag */ + request->request_id = pqisrc_get_tag(&softs->taglist); + if (INVALID_ELEM == request->request_id) { + DBG_ERR("Tag not available\n"); + ret = PQI_STATUS_FAILURE; + goto err_notag; + } + + request->response_id = ob_q->q_id; + + rcb = &softs->rcb[request->request_id]; + + rcb->req_pending = true; + rcb->tag = request->request_id; + + ret = pqisrc_submit_cmnd(softs, op_ib_q, request); + + if (ret != PQI_STATUS_SUCCESS) { + DBG_ERR("Unable to submit command\n"); + goto err_out; + } + + ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT); + if (ret != PQI_STATUS_SUCCESS) { + DBG_ERR("Management request timed out!\n"); + goto err_out; + } + + ret = rcb->status; + +err_out: + os_reset_rcb(rcb); + pqisrc_put_tag(&softs->taglist, request->request_id); +err_notag: + DBG_FUNC("OUT \n"); + return ret; +} + /* * Function used to send a general management request to adapter. */ int pqisrc_submit_management_req(pqisrc_softstate_t *softs, pqi_event_config_request_t *request) { int ret = PQI_STATUS_SUCCESS; ib_queue_t *op_ib_q = &softs->op_raid_ib_q[0]; rcb_t *rcb = NULL; DBG_FUNC(" IN\n"); /* Get the tag */ request->request_id = pqisrc_get_tag(&softs->taglist); if (INVALID_ELEM == request->request_id) { DBG_ERR("Tag not available\n"); ret = PQI_STATUS_FAILURE; goto err_out; } rcb = &softs->rcb[request->request_id]; rcb->req_pending = true; rcb->tag = request->request_id; + /* Submit command on operational raid ib queue */ ret = pqisrc_submit_cmnd(softs, op_ib_q, request); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR(" Unable to submit command\n"); goto err_cmd; } ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT); + if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Management request timed out !!\n"); goto err_cmd; } os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist,request->request_id); DBG_FUNC("OUT\n"); return ret; err_cmd: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist,request->request_id); err_out: DBG_FUNC(" failed OUT : %d\n", ret); return ret; } /* * Build and send the general management request. */ static int pqi_event_configure(pqisrc_softstate_t *softs , pqi_event_config_request_t *request, dma_mem_t *buff) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC(" IN\n"); request->header.comp_feature = 0x00; request->header.iu_length = sizeof(pqi_event_config_request_t) - PQI_REQUEST_HEADER_LENGTH; /* excluding IU header length */ /*Op OQ id where response to be delivered */ request->response_queue_id = softs->op_ob_q[0].q_id; request->buffer_length = buff->size; request->sg_desc.addr = buff->dma_addr; request->sg_desc.length = buff->size; request->sg_desc.zero = 0; request->sg_desc.type = SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT; /* submit management req IU*/ ret = pqisrc_submit_management_req(softs,request); if(ret) goto err_out; DBG_FUNC(" OUT\n"); return ret; err_out: DBG_FUNC("Failed OUT\n"); return ret; } /* * Prepare REPORT EVENT CONFIGURATION IU to request that * event configuration information be reported. 
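 * A DMA buffer sized for one pqi_event_config_t is allocated, the request is
 * posted through pqisrc_submit_management_req(), and the descriptors returned
 * by the firmware are cached in softs->event_config (capped at
 * PQI_MAX_EVENT_DESCRIPTORS) for the later SET EVENT CONFIGURATION step.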
*/ int pqisrc_report_event_config(pqisrc_softstate_t *softs) { int ret,i ; pqi_event_config_request_t request; pqi_event_config_t *event_config_p ; dma_mem_t buf_report_event ; /*bytes to be allocaed for report event config data-in buffer */ uint32_t alloc_size = sizeof(pqi_event_config_t) ; memset(&request, 0 , sizeof(request)); DBG_FUNC(" IN\n"); memset(&buf_report_event, 0, sizeof(struct dma_mem)); - buf_report_event.tag = "pqi_report_event_buf" ; + os_strlcpy(buf_report_event.tag, "pqi_report_event_buf", sizeof(buf_report_event.tag)); ; buf_report_event.size = alloc_size; buf_report_event.align = PQISRC_DEFAULT_DMA_ALIGN; /* allocate memory */ ret = os_dma_mem_alloc(softs, &buf_report_event); if (ret) { DBG_ERR("Failed to Allocate report event config buffer : %d\n", ret); goto err_out; } DBG_INFO("buf_report_event.dma_addr = %p \n",(void*)buf_report_event.dma_addr); DBG_INFO("buf_report_event.virt_addr = %p \n",(void*)buf_report_event.virt_addr); request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; /* Event configuration */ ret=pqi_event_configure(softs,&request,&buf_report_event); if(ret) goto free_mem; event_config_p = (pqi_event_config_t*)buf_report_event.virt_addr; softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors, PQI_MAX_EVENT_DESCRIPTORS) ; for (i=0; i < softs->event_config.num_event_descriptors ;i++){ softs->event_config.descriptors[i].event_type = event_config_p->descriptors[i].event_type; } /* free the allocated memory*/ os_dma_mem_free(softs, &buf_report_event); DBG_FUNC(" OUT\n"); return ret; free_mem: os_dma_mem_free(softs, &buf_report_event); err_out: DBG_FUNC("Failed OUT\n"); return PQI_STATUS_FAILURE; } /* * Prepare SET EVENT CONFIGURATION IU to request that * event configuration parameters be set. */ int pqisrc_set_event_config(pqisrc_softstate_t *softs) { int ret,i; pqi_event_config_request_t request; pqi_event_config_t *event_config_p; dma_mem_t buf_set_event; /*bytes to be allocaed for set event config data-out buffer */ uint32_t alloc_size = sizeof(pqi_event_config_t); memset(&request, 0 , sizeof(request)); DBG_FUNC(" IN\n"); memset(&buf_set_event, 0, sizeof(struct dma_mem)); - buf_set_event.tag = "pqi_set_event_buf"; + os_strlcpy(buf_set_event.tag, "pqi_set_event_buf", sizeof(buf_set_event.tag)); buf_set_event.size = alloc_size; buf_set_event.align = PQISRC_DEFAULT_DMA_ALIGN; /* allocate memory */ ret = os_dma_mem_alloc(softs, &buf_set_event); if (ret) { DBG_ERR("Failed to Allocate set event config buffer : %d\n", ret); goto err_out; } DBG_INFO("buf_set_event.dma_addr = %p\n",(void*)buf_set_event.dma_addr); DBG_INFO("buf_set_event.virt_addr = %p\n",(void*)buf_set_event.virt_addr); request.header.iu_type = PQI_REQUEST_IU_SET_EVENT_CONFIG; request.iu_specific.global_event_oq_id = softs->event_q.q_id; /*pointer to data-out buffer*/ event_config_p = (pqi_event_config_t *)buf_set_event.virt_addr; event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors; for (i=0; i < softs->event_config.num_event_descriptors ; i++){ event_config_p->descriptors[i].event_type = softs->event_config.descriptors[i].event_type; if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1) event_config_p->descriptors[i].oq_id = softs->event_q.q_id; else event_config_p->descriptors[i].oq_id = 0; /* Not supported this event. 
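   (pqisrc_event_type_to_event_index() has no index for this type, so no
   event output queue is assigned to it)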
*/ } /* Event configuration */ ret = pqi_event_configure(softs,&request,&buf_set_event); if(ret) goto free_mem; os_dma_mem_free(softs, &buf_set_event); DBG_FUNC(" OUT\n"); return ret; free_mem: os_dma_mem_free(softs, &buf_set_event); err_out: DBG_FUNC("Failed OUT\n"); return PQI_STATUS_FAILURE; } diff --git a/sys/dev/smartpqi/smartpqi_features.c b/sys/dev/smartpqi/smartpqi_features.c new file mode 100644 index 000000000000..2a53dbe654b1 --- /dev/null +++ b/sys/dev/smartpqi/smartpqi_features.c @@ -0,0 +1,520 @@ +/*- + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +#include "smartpqi_includes.h" + +/* + * Checks a firmware feature status, given bit position. + */ +static inline boolean_t +pqi_is_firmware_feature_supported( + struct pqi_config_table_firmware_features *firmware_features, + unsigned int bit_position) +{ + unsigned int byte_index; + + byte_index = bit_position / BITS_PER_BYTE; + + if (byte_index >= firmware_features->num_elements) { + DBG_ERR_NO_SOFTS("Invalid byte index for bit position %u\n", + bit_position); + return false; + } + + return (firmware_features->features_supported[byte_index] & + (1 << (bit_position % BITS_PER_BYTE))) ? true : false; +} + +/* + * Counts down into the enabled section of firmware + * features and reports current enabled status, given + * bit position. + */ +static inline boolean_t +pqi_is_firmware_feature_enabled( + struct pqi_config_table_firmware_features *firmware_features, + uint8_t *firmware_features_iomem_addr, + unsigned int bit_position) +{ + unsigned int byte_index; + uint8_t *features_enabled_iomem_addr; + + byte_index = (bit_position / BITS_PER_BYTE) + + (firmware_features->num_elements * 2); + + features_enabled_iomem_addr = firmware_features_iomem_addr + + offsetof(struct pqi_config_table_firmware_features, + features_supported) + byte_index; + + return (*features_enabled_iomem_addr & + (1 << (bit_position % BITS_PER_BYTE))) ? true : false; +} + +/* + * Sets the given bit position for the driver to request the indicated + * firmware feature be enabled. 
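+ *
+ * Layout of features_supported[] with num_elements = n:
+ *   bytes [0,  n)  - bits supported by the firmware (read-only)
+ *   bytes [n, 2n)  - bits requested by the driver (set here)
+ *   bytes [2n, 3n) - bits actually enabled, read back after the config
+ *                    table update (see pqi_is_firmware_feature_enabled)
+ * e.g. requesting feature bit 13 sets bit (13 % 8) of byte (13 / 8) + n.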
+ */ +static inline void +pqi_request_firmware_feature( + struct pqi_config_table_firmware_features *firmware_features, + unsigned int bit_position) +{ + unsigned int byte_index; + + /* byte_index adjusted to index into requested start bits */ + byte_index = (bit_position / BITS_PER_BYTE) + + firmware_features->num_elements; + + /* setting requested bits of local firmware_features */ + firmware_features->features_supported[byte_index] |= + (1 << (bit_position % BITS_PER_BYTE)); +} + +/* + * Creates and sends the request for firmware to update the config + * table. + */ +static int +pqi_config_table_update(pqisrc_softstate_t *softs, + uint16_t first_section, uint16_t last_section) +{ + struct pqi_vendor_general_request request; + int ret; + + memset(&request, 0, sizeof(request)); + + request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; + request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH; + request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE; + request.data.config_table_update.first_section = first_section; + request.data.config_table_update.last_section = last_section; + + ret = pqisrc_build_send_vendor_request(softs, &request); + + if (ret != PQI_STATUS_SUCCESS) { + DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret); + } + + return ret; +} + +/* + * Copies requested features bits into firmware config table, + * checks for support, and returns status of updating the config table. + */ +static int +pqi_enable_firmware_features(pqisrc_softstate_t *softs, + struct pqi_config_table_firmware_features *firmware_features, + uint8_t *firmware_features_abs_addr) +{ + uint8_t *features_requested; + uint8_t *features_requested_abs_addr; + uint16_t *host_max_known_feature_iomem_addr; + uint16_t pqi_max_feature = PQI_FIRMWARE_FEATURE_MAXIMUM; + + features_requested = firmware_features->features_supported + + firmware_features->num_elements; + + features_requested_abs_addr = firmware_features_abs_addr + + (features_requested - (uint8_t*)firmware_features); + /* + * NOTE: This memcpy is writing to a BAR-mapped address + * which may not be safe for all OSes without proper API + */ + memcpy(features_requested_abs_addr, features_requested, + firmware_features->num_elements); + + if (pqi_is_firmware_feature_supported(firmware_features, + PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { + host_max_known_feature_iomem_addr = + (uint16_t*)(features_requested_abs_addr + + (firmware_features->num_elements * 2) + sizeof(uint16_t)); + /* + * NOTE: This writes to a BAR-mapped address + * which may not be safe for all OSes without proper API + */ + *host_max_known_feature_iomem_addr = pqi_max_feature; + } + + return pqi_config_table_update(softs, + PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES, + PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES); +} + +typedef struct pqi_firmware_feature pqi_firmware_feature_t; +typedef void (*feature_status_fn)(pqisrc_softstate_t *softs, + pqi_firmware_feature_t *firmware_feature); + +struct pqi_firmware_feature { + char *feature_name; + unsigned int feature_bit; + boolean_t supported; + boolean_t enabled; + feature_status_fn feature_status; +}; + +static void +pqi_firmware_feature_status(pqisrc_softstate_t *softs, + struct pqi_firmware_feature *firmware_feature) +{ + if (!firmware_feature->supported) { + DBG_NOTE("%s not supported by controller\n", + firmware_feature->feature_name); + return; + } + + if (firmware_feature->enabled) { + DBG_NOTE("%s enabled\n", firmware_feature->feature_name); + return; + } + + DBG_NOTE("failed to enable 
%s\n", firmware_feature->feature_name); +} + +static void +pqi_ctrl_update_feature_flags(pqisrc_softstate_t *softs, + struct pqi_firmware_feature *firmware_feature) +{ + switch (firmware_feature->feature_bit) { + case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: + softs->aio_raid1_write_bypass = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: + softs->aio_raid5_write_bypass = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: + softs->aio_raid6_write_bypass = firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: + softs->timeout_in_passthrough = true; + break; + case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: + softs->timeout_in_tmf = true; + break; + case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN: + break; + case PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID: + softs->page83id_in_rpl = true; + break; + default: + DBG_NOTE("Nothing to do\n"); + return; + break; + } + /* for any valid feature, also go update the feature status. */ + pqi_firmware_feature_status(softs, firmware_feature); +} + + +static inline void +pqi_firmware_feature_update(pqisrc_softstate_t *softs, + struct pqi_firmware_feature *firmware_feature) +{ + if (firmware_feature->feature_status) + firmware_feature->feature_status(softs, firmware_feature); +} + +/* Defines PQI features that driver wishes to support */ +static struct pqi_firmware_feature pqi_firmware_features[] = { +#if 0 + { + .feature_name = "Online Firmware Activation", + .feature_bit = PQI_FIRMWARE_FEATURE_OFA, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "Serial Management Protocol", + .feature_bit = PQI_FIRMWARE_FEATURE_SMP, + .feature_status = pqi_firmware_feature_status, + }, +#endif + { + .feature_name = "SATA WWN Unique ID", + .feature_bit = PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "TMF IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "Support for RPL WWID filled by Page83 identifier", + .feature_bit = PQI_FIRMWARE_FEATURE_PAGE83_IDENTIFIER_FOR_RPL_WWID, + .feature_status = pqi_ctrl_update_feature_flags, + }, + /* Features independent of Maximum Known Feature should be added + before Maximum Known Feature*/ + { + .feature_name = "Maximum Known Feature", + .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 5 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 6 Read Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 0 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "RAID 1 Write Bypass", + .feature_bit = 
PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 5 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID 6 Write Bypass", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, + .feature_status = pqi_ctrl_update_feature_flags, + }, +#if 0 + { + .feature_name = "New Soft Reset Handshake", + .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, + .feature_status = pqi_ctrl_update_feature_flags, + }, +#endif + +}; + +static void +pqi_process_firmware_features(pqisrc_softstate_t *softs, + void *features, void *firmware_features_abs_addr) +{ + int rc; + struct pqi_config_table_firmware_features *firmware_features = features; + unsigned int i; + unsigned int num_features_supported; + + /* Iterates through local PQI feature support list to + see if the controller also supports the feature */ + for (i = 0, num_features_supported = 0; + i < ARRAY_SIZE(pqi_firmware_features); i++) { + /*Check if SATA_WWN_FOR_DEV_UNIQUE_ID feature enabled by setting module + parameter if not avoid checking for the feature*/ + if ((pqi_firmware_features[i].feature_bit == + PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN) && + (!softs->sata_unique_wwn)) { + continue; + } + if (pqi_is_firmware_feature_supported(firmware_features, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].supported = true; + num_features_supported++; + } else { + DBG_WARN("Feature %s is not supported by firmware\n", + pqi_firmware_features[i].feature_name); + pqi_firmware_feature_update(softs, + &pqi_firmware_features[i]); + + /* if max known feature bit isn't supported, + * then no other feature bits are supported. + */ + if (pqi_firmware_features[i].feature_bit == + PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE) + break; + } + } + + DBG_INFO("Num joint features supported : %u \n", num_features_supported); + + if (num_features_supported == 0) + return; + + /* request driver features that are also on firmware-supported list */ + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; +#ifdef DEVICE_HINT + if (check_device_hint_status(softs, pqi_firmware_features[i].feature_bit)) + continue; +#endif + pqi_request_firmware_feature(firmware_features, + pqi_firmware_features[i].feature_bit); + } + + /* enable the features that were successfully requested. */ + rc = pqi_enable_firmware_features(softs, firmware_features, + firmware_features_abs_addr); + if (rc) { + DBG_ERR("failed to enable firmware features in PQI configuration table\n"); + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + pqi_firmware_feature_update(softs, + &pqi_firmware_features[i]); + } + return; + } + + /* report the features that were successfully enabled. 
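+	 * The enabled bits are read back from the BAR-mapped copy of the config
+	 * table (pqi_is_firmware_feature_enabled); the local snapshot was taken
+	 * before the CONFIG_TABLE_UPDATE request, so it cannot reflect them.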
*/ + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + if (pqi_is_firmware_feature_enabled(firmware_features, + firmware_features_abs_addr, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].enabled = true; + } else { + DBG_WARN("Feature %s could not be enabled.\n", + pqi_firmware_features[i].feature_name); + } + pqi_firmware_feature_update(softs, + &pqi_firmware_features[i]); + } +} + +static void +pqi_init_firmware_features(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + pqi_firmware_features[i].supported = false; + pqi_firmware_features[i].enabled = false; + } +} + +static void +pqi_process_firmware_features_section(pqisrc_softstate_t *softs, + void *features, void *firmware_features_abs_addr) +{ + pqi_init_firmware_features(); + pqi_process_firmware_features(softs, features, firmware_features_abs_addr); +} + + +/* + * Get the PQI configuration table parameters. + * Currently using for heart-beat counter scratch-pad register. + */ +int +pqisrc_process_config_table(pqisrc_softstate_t *softs) +{ + int ret = PQI_STATUS_FAILURE; + uint32_t config_table_size; + uint32_t section_off; + uint8_t *config_table_abs_addr; + struct pqi_conf_table *conf_table; + struct pqi_conf_table_section_header *section_hdr; + + config_table_size = softs->pqi_cap.conf_tab_sz; + + if (config_table_size < sizeof(*conf_table) || + config_table_size > PQI_CONF_TABLE_MAX_LEN) { + DBG_ERR("Invalid PQI conf table length of %u\n", + config_table_size); + return ret; + } + + conf_table = os_mem_alloc(softs, config_table_size); + if (!conf_table) { + DBG_ERR("Failed to allocate memory for PQI conf table\n"); + return ret; + } + + config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr + + softs->pqi_cap.conf_tab_off); + + PCI_MEM_GET_BUF(softs, config_table_abs_addr, + softs->pqi_cap.conf_tab_off, + (uint8_t*)conf_table, config_table_size); + + if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE, + sizeof(conf_table->sign)) != 0) { + DBG_ERR("Invalid PQI config signature\n"); + goto out; + } + + section_off = LE_32(conf_table->first_section_off); + + while (section_off) { + + if (section_off+ sizeof(*section_hdr) >= config_table_size) { + DBG_INFO("Reached end of PQI config table. 
Breaking off.\n"); + break; + } + + section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off); + + switch (LE_16(section_hdr->section_id)) { + case PQI_CONF_TABLE_SECTION_GENERAL_INFO: + break; + case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES: + pqi_process_firmware_features_section(softs, section_hdr, (config_table_abs_addr + section_off)); + break; + case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA: + case PQI_CONF_TABLE_SECTION_DEBUG: + break; + case PQI_CONF_TABLE_SECTION_HEARTBEAT: + softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off + + section_off + + offsetof(struct pqi_conf_table_heartbeat, heartbeat_counter); + softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr + + softs->heartbeat_counter_off); + ret = PQI_STATUS_SUCCESS; + break; + case PQI_CONF_TABLE_SOFT_RESET: + break; + default: + DBG_NOTE("unrecognized PQI config table section ID: 0x%x\n", + LE_16(section_hdr->section_id)); + break; + } + section_off = LE_16(section_hdr->next_section_off); + } +out: + os_mem_free(softs, (void *)conf_table,config_table_size); + return ret; +} diff --git a/sys/dev/smartpqi/smartpqi_helper.c b/sys/dev/smartpqi/smartpqi_helper.c index 7161a3fc49b9..68f105001fa0 100644 --- a/sys/dev/smartpqi/smartpqi_helper.c +++ b/sys/dev/smartpqi/smartpqi_helper.c @@ -1,531 +1,459 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" -/* read and modify controller diagnostic option - PQI_PTRAID_UPDATE_ON_RESCAN_LUNS */ -void -pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *softs) -{ - int ret = PQI_STATUS_SUCCESS; - uint32_t diags_options = 0; - pqisrc_raid_req_t request; - - DBG_NOTE("IN\n"); - - memset(&request, 0, sizeof(request)); - /* read diags options of controller */ - ret = pqisrc_build_send_raid_request(softs, &request, - (void*)&diags_options, - sizeof(diags_options), - BMIC_SENSE_DIAGS_OPTIONS, - 0, (uint8_t *)RAID_CTLR_LUNID, NULL); - if (ret != PQI_STATUS_SUCCESS) { - DBG_WARN("Request failed for BMIC Sense Diags Option command." 
- "ret:%d\n",ret); - return; - } - DBG_NOTE("diags options data after read: %#x\n",diags_options); - diags_options |= PQI_PTRAID_UPDATE_ON_RESCAN_LUNS; - DBG_NOTE("diags options data to write: %#x\n",diags_options); - memset(&request, 0, sizeof(request)); - /* write specified diags options to controller */ - ret = pqisrc_build_send_raid_request(softs, &request, - (void*)&diags_options, - sizeof(diags_options), - BMIC_SET_DIAGS_OPTIONS, - 0, (uint8_t *)RAID_CTLR_LUNID, NULL); - if (ret != PQI_STATUS_SUCCESS) - DBG_WARN("Request failed for BMIC Set Diags Option command." - "ret:%d\n",ret); -#if 0 - diags_options = 0; - memset(&request, 0, sizeof(request)); - ret = pqisrc_build_send_raid_request(softs, &request, - (void*)&diags_options, - sizeof(diags_options), - BMIC_SENSE_DIAGS_OPTIONS, - 0, (uint8_t *)RAID_CTLR_LUNID, NULL); - if (ret != PQI_STATUS_SUCCESS) - DBG_WARN("Request failed for BMIC Sense Diags Option command." - "ret:%d\n",ret); - DBG_NOTE("diags options after re-read: %#x\n",diags_options); -#endif - DBG_NOTE("OUT\n"); -} - /* * Function used to validate the adapter health. */ boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); DBG_FUNC("OUT\n"); return !softs->ctrl_online; } - /* Function used set/clear legacy INTx bit in Legacy Interrupt INTx * mask clear pqi register */ void pqisrc_configure_legacy_intx(pqisrc_softstate_t *softs, boolean_t enable_intx) { uint32_t intx_mask; - uint32_t *reg_addr __unused; - - DBG_FUNC("IN\n"); - if (enable_intx) - reg_addr = &softs->pqi_reg->legacy_intr_mask_clr; - else - reg_addr = &softs->pqi_reg->legacy_intr_mask_set; + DBG_FUNC("IN\n"); - intx_mask = PCI_MEM_GET32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR); + intx_mask = PCI_MEM_GET32(softs, 0, PQI_LEGACY_INTR_MASK_CLR); intx_mask |= PQISRC_LEGACY_INTX_MASK; - PCI_MEM_PUT32(softs, reg_addr, PQI_LEGACY_INTR_MASK_CLR ,intx_mask); + PCI_MEM_PUT32(softs, 0, PQI_LEGACY_INTR_MASK_CLR ,intx_mask); - DBG_FUNC("OUT\n"); + DBG_FUNC("OUT\n"); } /* * Function used to take exposed devices to OS as offline. */ void pqisrc_take_devices_offline(pqisrc_softstate_t *softs) { pqi_scsi_dev_t *device = NULL; - int i,j; + int i; DBG_FUNC("IN\n"); for(i = 0; i < PQI_MAX_DEVICES; i++) { - for(j = 0; j < PQI_MAX_MULTILUN; j++) { - if(softs->device_list[i][j] == NULL) - continue; - device = softs->device_list[i][j]; - pqisrc_remove_device(softs, device); - } + device = softs->dev_list[i]; + if(device == NULL) + continue; + pqisrc_remove_device(softs, device); } DBG_FUNC("OUT\n"); } /* * Function used to take adapter offline. */ void pqisrc_take_ctrl_offline(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); - softs->ctrl_online = false; - int lockupcode = 0; + softs->ctrl_online = false; + if (SIS_IS_KERNEL_PANIC(softs)) { - lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7); - DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode); - } - else { - pqisrc_trigger_nmi_sis(softs); - } + lockupcode = PCI_MEM_GET32(softs, &softs->ioa_reg->mb[7], LEGACY_SIS_SRCV_OFFSET_MAILBOX_7); + DBG_ERR("Controller FW is not running, Lockup code = %x\n", lockupcode); + } + else { + pqisrc_trigger_nmi_sis(softs); + } os_complete_outstanding_cmds_nodevice(softs); pqisrc_wait_for_rescan_complete(softs); pqisrc_take_devices_offline(softs); DBG_FUNC("OUT\n"); } /* * Timer handler for the adapter heart-beat. 
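 * Reads the heartbeat counter that the firmware increments in the PQI config
 * table; if it has not advanced since the previous tick the controller is
 * assumed hung, the timer is stopped and the controller and its devices are
 * taken offline.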
*/ void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *softs) { uint8_t take_offline = false; + uint64_t new_heartbeat; + static uint32_t running_ping_cnt = 0; DBG_FUNC("IN\n"); - if (CTRLR_HEARTBEAT_CNT(softs) == softs->prev_heartbeat_count) { + new_heartbeat = CTRLR_HEARTBEAT_CNT(softs); + DBG_IO("heartbeat old=%lx new=%lx\n", softs->prev_heartbeat_count, new_heartbeat); + + if (new_heartbeat == softs->prev_heartbeat_count) { take_offline = true; goto take_ctrl_offline; } - softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs); - DBG_INFO("CTRLR_HEARTBEAT_CNT(softs) = %lx \ - softs->prev_heartbeat_count = %lx\n", - CTRLR_HEARTBEAT_CNT(softs), softs->prev_heartbeat_count); + +#if 1 + /* print every 30 calls (should print once/minute) */ + running_ping_cnt++; + + if ((running_ping_cnt % 30) == 0) + print_all_counters(softs, COUNTER_FLAG_ONLY_NON_ZERO); +#endif + + softs->prev_heartbeat_count = new_heartbeat; take_ctrl_offline: if (take_offline){ DBG_ERR("controller is offline\n"); - pqisrc_take_ctrl_offline(softs); os_stop_heartbeat_timer(softs); + pqisrc_take_ctrl_offline(softs); } DBG_FUNC("OUT\n"); } /* * Conditional variable management routine for internal commands. */ int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb, uint32_t timeout_in_msec) { DBG_FUNC("IN\n"); int ret = PQI_STATUS_SUCCESS; /* 1 msec = 500 usec * 2 */ uint32_t loop_cnt = timeout_in_msec * 2; uint32_t i = 0; while (rcb->req_pending == true) { OS_SLEEP(500); /* Micro sec */ /* Polling needed for FreeBSD : since ithread routine is not scheduled * during bootup, we could use polling until interrupts are * enabled (using 'if (cold)'to check for the boot time before * interrupts are enabled). */ IS_POLLING_REQUIRED(softs); if ((timeout_in_msec != TIMEOUT_INFINITE) && (i++ == loop_cnt)) { DBG_ERR("ERR: Requested cmd timed out !!!\n"); ret = PQI_STATUS_TIMEOUT; rcb->timedout = true; break; } if (pqisrc_ctrl_offline(softs)) { DBG_ERR("Controller is Offline"); ret = PQI_STATUS_FAILURE; break; } } rcb->req_pending = true; DBG_FUNC("OUT\n"); return ret; } /* Function used to validate the device wwid. */ boolean_t pqisrc_device_equal(pqi_scsi_dev_t *dev1, pqi_scsi_dev_t *dev2) { return dev1->wwid == dev2->wwid; } /* Function used to validate the device scsi3addr. 
*/ boolean_t pqisrc_scsi3addr_equal(uint8_t *scsi3addr1, uint8_t *scsi3addr2) { return memcmp(scsi3addr1, scsi3addr2, 8) == 0; } /* Function used to validate hba_lunid */ boolean_t pqisrc_is_hba_lunid(uint8_t *scsi3addr) { - return pqisrc_scsi3addr_equal(scsi3addr, (uint8_t*)RAID_CTLR_LUNID); + return pqisrc_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); } /* Function used to validate type of device */ boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *device) { return !device->is_physical_device; } /* Function used to sanitize inquiry string */ void pqisrc_sanitize_inquiry_string(unsigned char *s, int len) { boolean_t terminated = false; DBG_FUNC("IN\n"); for (; len > 0; (--len, ++s)) { if (*s == 0) terminated = true; if (terminated || *s < 0x20 || *s > 0x7e) *s = ' '; } DBG_FUNC("OUT\n"); } static char *raid_levels[] = { "RAID 0", "RAID 4", "RAID 1(1+0)", "RAID 5", "RAID 5+1", - "RAID ADG", - "RAID 1(ADM)", + "RAID 6", + "RAID 1(Triple)", }; /* Get the RAID level from the index */ char * pqisrc_raidlevel_to_string(uint8_t raid_level) { DBG_FUNC("IN\n"); if (raid_level < ARRAY_SIZE(raid_levels)) return raid_levels[raid_level]; DBG_FUNC("OUT\n"); return " "; } /* Debug routine for displaying device info */ void pqisrc_display_device_info(pqisrc_softstate_t *softs, char *action, pqi_scsi_dev_t *device) { if (device->is_physical_device) { DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " "SSDSmartPathCap%c En%c Exp%c qd=%d\n", action, device->bus, device->target, device->lun, device->vendor, device->model, "Physical", device->offload_config ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? '+' : '-', device->queue_depth); } else if (device->devtype == RAID_DEVICE) { DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " "SSDSmartPathCap%c En%c Exp%c qd=%d\n", action, device->bus, device->target, device->lun, device->vendor, device->model, "Controller", device->offload_config ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? '+' : '-', device->queue_depth); } else if (device->devtype == CONTROLLER_DEVICE) { DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " "SSDSmartPathCap%c En%c Exp%c qd=%d\n", action, device->bus, device->target, device->lun, device->vendor, device->model, "External", device->offload_config ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? '+' : '-', device->queue_depth); } else { DBG_NOTE("%s scsi BTL %d:%d:%d: %.8s %.16s %-12s " "SSDSmartPathCap%c En%c Exp%c qd=%d devtype=%d\n", action, device->bus, device->target, device->lun, device->vendor, device->model, pqisrc_raidlevel_to_string(device->raid_level), device->offload_config ? '+' : '-', device->offload_enabled_pending ? '+' : '-', device->expose_device ? 
'+' : '-', device->queue_depth, device->devtype); pqisrc_raidlevel_to_string(device->raid_level); /* To use this function */ } } /* validate the structure sizes */ void check_struct_sizes(void) { ASSERT(sizeof(SCSI3Addr_struct)== 2); ASSERT(sizeof(PhysDevAddr_struct) == 8); ASSERT(sizeof(LogDevAddr_struct)== 8); ASSERT(sizeof(LUNAddr_struct)==8); ASSERT(sizeof(RequestBlock_struct) == 20); ASSERT(sizeof(MoreErrInfo_struct)== 8); ASSERT(sizeof(ErrorInfo_struct)== 48); /* Checking the size of IOCTL_Command_struct for both 64 bit and 32 bit system*/ ASSERT(sizeof(IOCTL_Command_struct)== 86 || sizeof(IOCTL_Command_struct)== 82); ASSERT(sizeof(struct bmic_host_wellness_driver_version)== 42); ASSERT(sizeof(struct bmic_host_wellness_time)== 20); ASSERT(sizeof(struct pqi_dev_adminq_cap)== 8); ASSERT(sizeof(struct admin_q_param)== 4); ASSERT(sizeof(struct pqi_registers)== 256); ASSERT(sizeof(struct ioa_registers)== 4128); ASSERT(sizeof(struct pqi_pref_settings)==4); ASSERT(sizeof(struct pqi_cap)== 20); ASSERT(sizeof(iu_header_t)== 4); ASSERT(sizeof(gen_adm_req_iu_t)== 64); ASSERT(sizeof(gen_adm_resp_iu_t)== 64); ASSERT(sizeof(op_q_params) == 9); ASSERT(sizeof(raid_path_error_info_elem_t)== 276); ASSERT(sizeof(aio_path_error_info_elem_t)== 276); ASSERT(sizeof(struct init_base_struct)== 24); ASSERT(sizeof(pqi_iu_layer_desc_t)== 16); ASSERT(sizeof(pqi_dev_cap_t)== 576); ASSERT(sizeof(pqi_aio_req_t)== 128); ASSERT(sizeof(pqisrc_raid_req_t)== 128); ASSERT(sizeof(pqi_raid_tmf_req_t)== 32); ASSERT(sizeof(pqi_aio_tmf_req_t)== 32); ASSERT(sizeof(struct pqi_io_response)== 16); ASSERT(sizeof(struct sense_header_scsi)== 8); ASSERT(sizeof(reportlun_header_t)==8); ASSERT(sizeof(reportlun_ext_entry_t)== 24); ASSERT(sizeof(reportlun_data_ext_t)== 32); ASSERT(sizeof(raidmap_data_t)==8); ASSERT(sizeof(pqisrc_raid_map_t)== 8256); ASSERT(sizeof(bmic_ident_ctrl_t)== 325); ASSERT(sizeof(bmic_ident_physdev_t)==2048); } +#if 0 uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint32_t i, active_io = 0; rcb_t* rcb; for(i = 1; i <= softs->max_outstanding_io; i++) { rcb = &softs->rcb[i]; if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) { active_io++; } } return active_io; } void check_device_pending_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { uint32_t tag = softs->max_outstanding_io, active_requests; - uint64_t timeout = 0, delay_in_usec = 1000; //In micro Seconds + uint64_t timeout = 0, delay_in_usec = 1000; /* In micro Seconds */ rcb_t* rcb; DBG_FUNC("IN\n"); active_requests = pqisrc_count_num_scsi_active_requests_on_dev(softs, device); DBG_WARN("Device Outstanding IO count = %u\n", active_requests); if(!active_requests) return; do { rcb = &softs->rcb[tag]; if(rcb && IS_OS_SCSICMD(rcb) && (rcb->dvp == device) && rcb->req_pending) { - OS_BUSYWAIT(delay_in_usec); + OS_SLEEP(delay_in_usec); timeout += delay_in_usec; } else tag--; if(timeout >= PQISRC_PENDING_IO_TIMEOUT_USEC) { DBG_WARN("timed out waiting for pending IO\n"); return; } } while(tag); - } - -inline uint64_t -pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) -{ -#if PQISRC_DEVICE_IO_COUNTER - /*Increment device active io count by one*/ - return OS_ATOMIC64_INC(&device->active_requests); #endif -} -inline uint64_t -pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) -{ -#if PQISRC_DEVICE_IO_COUNTER - /*Decrement device active io count by one*/ - return 
OS_ATOMIC64_DEC(&device->active_requests); -#endif -} +extern inline uint64_t +pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device); -inline void -pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) -{ -#if PQISRC_DEVICE_IO_COUNTER - /* Reset device count to Zero */ - OS_ATOMIC64_INIT(&device->active_requests, 0); -#endif -} +extern inline uint64_t +pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device); -inline uint64_t -pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) -{ -#if PQISRC_DEVICE_IO_COUNTER - /* read device active count*/ - return OS_ATOMIC64_READ(&device->active_requests); -#endif -} +extern inline void +pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device); + +extern inline uint64_t +pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device); void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { - uint64_t timeout_in_usec = 0, delay_in_usec = 1000; //In microseconds + uint64_t timeout_in_usec = 0, delay_in_usec = 1000; /* In microseconds */ DBG_FUNC("IN\n"); if(!softs->ctrl_online) return; #if PQISRC_DEVICE_IO_COUNTER - DBG_NOTE("Device Outstanding IO count = %ld\n", pqisrc_read_device_active_io(softs, device)); + DBG_WARN_BTL(device,"Device Outstanding IO count = %lu\n", pqisrc_read_device_active_io(softs, device)); while(pqisrc_read_device_active_io(softs, device)) { - OS_BUSYWAIT(delay_in_usec); // In microseconds + OS_BUSYWAIT(delay_in_usec); /* In microseconds */ if(!softs->ctrl_online) { DBG_WARN("Controller Offline was detected.\n"); } timeout_in_usec += delay_in_usec; if(timeout_in_usec >= PQISRC_PENDING_IO_TIMEOUT_USEC) { - DBG_WARN("timed out waiting for pending IO. DeviceOutStandingIo's=%ld\n", + DBG_WARN_BTL(device,"timed out waiting for pending IO. DeviceOutStandingIo's=%lu\n", pqisrc_read_device_active_io(softs, device)); return; } } #else check_device_pending_commands_to_complete(softs, device); #endif } diff --git a/sys/dev/smartpqi/smartpqi_cmd.c b/sys/dev/smartpqi/smartpqi_helper.h similarity index 50% copy from sys/dev/smartpqi/smartpqi_cmd.c copy to sys/dev/smartpqi/smartpqi_helper.h index f5820647fed4..cc7030de74ad 100644 --- a/sys/dev/smartpqi/smartpqi_cmd.c +++ b/sys/dev/smartpqi/smartpqi_helper.h @@ -1,74 +1,66 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#include "smartpqi_includes.h" +#ifndef _PQI_HELPER_H +#define _PQI_HELPER_H -/* - * Function to submit the request to the adapter. - */ -int -pqisrc_submit_cmnd(pqisrc_softstate_t *softs, ib_queue_t *ib_q, void *req) +inline uint64_t +pqisrc_increment_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) { - char *slot = NULL; - uint32_t offset; - iu_header_t *hdr = (iu_header_t *)req; - uint32_t iu_len = hdr->iu_length + 4 ; /* header size */ - int i = 0; - DBG_FUNC("IN\n"); - - PQI_LOCK(&ib_q->lock); - - /* Check queue full */ - if ((ib_q->pi_local + 1) % ib_q->num_elem == *(ib_q->ci_virt_addr)) { - DBG_WARN("OUT Q full\n"); - PQI_UNLOCK(&ib_q->lock); - return PQI_STATUS_QFULL; - } - - /* Get the slot */ - offset = ib_q->pi_local * ib_q->elem_size; - slot = ib_q->array_virt_addr + offset; +#if PQISRC_DEVICE_IO_COUNTER + /*Increment device active io count by one*/ + return OS_ATOMIC64_INC(&device->active_requests); +#endif +} - /* Copy the IU */ - memcpy(slot, req, iu_len); - DBG_INFO("IU : \n"); - for(i = 0; i< iu_len; i++) - DBG_INFO(" IU [ %d ] : %x\n", i, *((unsigned char *)(slot + i))); +inline uint64_t +pqisrc_decrement_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) +{ +#if PQISRC_DEVICE_IO_COUNTER + /*Decrement device active io count by one*/ + return OS_ATOMIC64_DEC(&device->active_requests); +#endif +} - /* Update the local PI */ - ib_q->pi_local = (ib_q->pi_local + 1) % ib_q->num_elem; - DBG_INFO("ib_q->pi_local : %x IU size : %d\n", - ib_q->pi_local, hdr->iu_length); - DBG_INFO("*ib_q->ci_virt_addr: %x\n", - *(ib_q->ci_virt_addr)); +inline void +pqisrc_init_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) +{ +#if PQISRC_DEVICE_IO_COUNTER + /* Reset device count to Zero */ + OS_ATOMIC64_INIT(&device->active_requests, 0); +#endif +} - /* Inform the fw about the new IU */ - PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); - PQI_UNLOCK(&ib_q->lock); - DBG_FUNC("OUT\n"); - return PQI_STATUS_SUCCESS; +inline uint64_t +pqisrc_read_device_active_io(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device) +{ +#if PQISRC_DEVICE_IO_COUNTER + /* read device active count*/ + return OS_ATOMIC64_READ(&device->active_requests); +#endif } +#endif /* _PQI_HELPER_H */ diff --git a/sys/dev/smartpqi/smartpqi_includes.h b/sys/dev/smartpqi/smartpqi_includes.h index a783580c2e8d..a8682e8a848d 100644 --- a/sys/dev/smartpqi/smartpqi_includes.h +++ b/sys/dev/smartpqi/smartpqi_includes.h @@ -1,87 +1,86 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _PQI_INCLUDES_H #define _PQI_INCLUDES_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include - #include "smartpqi_defines.h" #include "smartpqi_structures.h" #include "smartpqi_prototypes.h" #include "smartpqi_ioctl.h" +#include "smartpqi_helper.h" - -#endif // _PQI_INCLUDES_H +#endif /* _PQI_INCLUDES_H*/ diff --git a/sys/dev/smartpqi/smartpqi_init.c b/sys/dev/smartpqi/smartpqi_init.c index d28d76f14815..41c990a15909 100644 --- a/sys/dev/smartpqi/smartpqi_init.c +++ b/sys/dev/smartpqi/smartpqi_init.c @@ -1,1198 +1,971 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" -/* 5 mins timeout for quiesce */ -#define PQI_QUIESCE_TIMEOUT 300000 - /* * Request the adapter to get PQI capabilities supported. 
*/ static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; - + DBG_FUNC("IN\n"); gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; dma_mem_t pqi_cap_dma_buf; pqi_dev_cap_t *capability = NULL; pqi_iu_layer_desc_t *iu_layer_desc = NULL; /* Allocate Non DMA memory */ capability = os_mem_alloc(softs, sizeof(*capability)); if (!capability) { DBG_ERR("Failed to allocate memory for capability\n"); - ret = PQI_STATUS_FAILURE; goto err_out; } memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem)); - pqi_cap_dma_buf.tag = "pqi_cap_buf"; + os_strlcpy(pqi_cap_dma_buf.tag, "pqi_cap_buf", sizeof(pqi_cap_dma_buf.tag)); pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE; pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN; ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf); if (ret) { DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret); goto err_dma_alloc; } admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP; admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size; admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size; admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr; admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); if( PQI_STATUS_SUCCESS == ret) { memcpy(capability, pqi_cap_dma_buf.virt_addr, pqi_cap_dma_buf.size); } else { DBG_ERR("Failed to send admin req report pqi device capability\n"); goto err_admin_req; } softs->pqi_dev_cap.max_iqs = capability->max_iqs; softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements; softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len; softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len; softs->pqi_dev_cap.max_oqs = capability->max_oqs; softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements; softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len; softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity; iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP]; softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len; softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported; softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported; DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs); DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements); DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len); DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len); DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs); DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements); DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len); DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity); DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw); DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported); DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported); + /* Not expecting these to change, could cause problems if they do */ + ASSERT(softs->pqi_dev_cap.max_iq_elem_len == PQISRC_OP_MAX_ELEM_SIZE); + ASSERT(softs->pqi_dev_cap.min_iq_elem_len == PQISRC_OP_MIN_ELEM_SIZE); 
+ ASSERT(softs->max_ib_iu_length_per_fw == PQISRC_MAX_SPANNING_IU_LENGTH); + ASSERT(softs->ib_spanning_supported == true); + os_mem_free(softs, (void *)capability, REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); os_dma_mem_free(softs, &pqi_cap_dma_buf); DBG_FUNC("OUT\n"); return ret; err_admin_req: os_dma_mem_free(softs, &pqi_cap_dma_buf); err_dma_alloc: if (capability) os_mem_free(softs, (void *)capability, REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); err_out: DBG_FUNC("failed OUT\n"); return PQI_STATUS_FAILURE; } /* * Function used to deallocate the used rcb. */ void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count) { + uint32_t num_req; size_t size; int i; DBG_FUNC("IN\n"); num_req = softs->max_outstanding_io + 1; size = num_req * sizeof(rcb_t); for (i = 1; i < req_count; i++) os_dma_mem_free(softs, &softs->sg_dma_desc[i]); os_mem_free(softs, (void *)softs->rcb, size); softs->rcb = NULL; DBG_FUNC("OUT\n"); } /* * Allocate memory for rcb and SG descriptors. + * TODO : Sg list should be created separately */ static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; int i = 0; uint32_t num_req = 0; uint32_t sg_buf_size = 0; uint64_t alloc_size = 0; rcb_t *rcb = NULL; rcb_t *prcb = NULL; DBG_FUNC("IN\n"); /* Set maximum outstanding requests */ /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io * The rcb will be accessed by using the tag as index - * As 0 tag index is not used, we need to allocate one extra. + * As 0 tag index is not used, we need to allocate one extra. */ softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io; num_req = softs->max_outstanding_io + 1; - DBG_INIT("Max Outstanding IO reset to %d\n", num_req); + DBG_INIT("Max Outstanding IO reset to %u\n", num_req); alloc_size = num_req * sizeof(rcb_t); /* Allocate Non DMA memory */ rcb = os_mem_alloc(softs, alloc_size); if (!rcb) { DBG_ERR("Failed to allocate memory for rcb\n"); ret = PQI_STATUS_FAILURE; goto err_out; } softs->rcb = rcb; /* Allocate sg dma memory for sg chain */ sg_buf_size = softs->pqi_cap.max_sg_elem * sizeof(sgt_t); prcb = &softs->rcb[1]; /* Initialize rcb */ for(i=1; i < num_req; i++) { + /* TODO:Here tag is local variable */ char tag[15]; sprintf(tag, "sg_dma_buf%d", i); - softs->sg_dma_desc[i].tag = tag; + os_strlcpy(softs->sg_dma_desc[i].tag, tag, sizeof(softs->sg_dma_desc[i].tag)); softs->sg_dma_desc[i].size = sg_buf_size; softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN; ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]); if (ret) { DBG_ERR("Failed to Allocate sg desc %d\n", ret); ret = PQI_STATUS_FAILURE; goto error; } prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr); prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr); prcb ++; } DBG_FUNC("OUT\n"); return ret; error: pqisrc_free_rcb(softs, i); err_out: DBG_FUNC("failed OUT\n"); return ret; } /* * Function used to decide the operational queue configuration params * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support */ void pqisrc_decide_opq_config(pqisrc_softstate_t *softs) { uint16_t total_iq_elements; DBG_FUNC("IN\n"); DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d", softs->intr_count, softs->num_cpus_online); - + + /* TODO : Get the number of IB and OB queues from OS layer */ + if (softs->intr_count == 1 || softs->num_cpus_online == 1) { /* Share the event and Operational queue. 
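The rcb allocation above indexes the request-control-block array directly by tag, and tag 0 is never issued, which is why one extra slot is allocated. A minimal sketch of that layout with a stand-in structure (not the driver's rcb_t):

#include <stdlib.h>

struct rcb_stub {                 /* stand-in for the driver's rcb_t */
	int   tag;
	void *sg_chain_virt;
};

static struct rcb_stub *
alloc_rcb_array(unsigned int max_outstanding_io)
{
	unsigned int num_slots = max_outstanding_io + 1;   /* slot 0 stays unused */

	/* valid tags are 1 .. max_outstanding_io and index the array directly */
	return calloc(num_slots, sizeof(struct rcb_stub));
}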
*/ softs->num_op_obq = 1; softs->share_opq_and_eventq = true; } else { /* Note : One OBQ (OBQ0) reserved for event queue */ softs->num_op_obq = MIN(softs->num_cpus_online, softs->intr_count) - 1; softs->share_opq_and_eventq = false; } /* If the available interrupt count is more than one, - we dont need to share the interrupt for IO and event queue */ + we don’t need to share the interrupt for IO and event queue */ if (softs->intr_count > 1) softs->share_opq_and_eventq = false; - DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq); + DBG_INIT("softs->num_op_obq : %u\n",softs->num_op_obq); + + /* TODO : Reset the interrupt count based on number of queues*/ softs->num_op_raid_ibq = softs->num_op_obq; softs->num_op_aio_ibq = softs->num_op_raid_ibq; - softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16; - softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16; + softs->max_ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16; + softs->max_obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16; if (softs->max_ib_iu_length_per_fw == 256 && softs->ob_spanning_supported) { /* older f/w that doesn't actually support spanning. */ - softs->max_ib_iu_length = softs->ibq_elem_size; + softs->max_ib_iu_length = softs->max_ibq_elem_size; } else { /* max. inbound IU length is an multiple of our inbound element size. */ - softs->max_ib_iu_length = - (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) * - softs->ibq_elem_size; - + softs->max_ib_iu_length = PQISRC_ROUND_DOWN(softs->max_ib_iu_length_per_fw, + softs->max_ibq_elem_size); } + /* If Max. Outstanding IO came with Max. Spanning element count then, needed elements per IO are multiplication of Max.Outstanding IO and Max.Spanning element */ total_iq_elements = (softs->max_outstanding_io * - (softs->max_ib_iu_length / softs->ibq_elem_size)); + (softs->max_ib_iu_length / softs->max_ibq_elem_size)); softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq; softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq, softs->pqi_dev_cap.max_iq_elements); softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq; softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq, softs->pqi_dev_cap.max_oq_elements); - softs->max_sg_per_iu = ((softs->max_ib_iu_length - - softs->ibq_elem_size) / - sizeof(sgt_t)) + - MAX_EMBEDDED_SG_IN_FIRST_IU; + /* spanning elements should be 9 (1152/128) */ + softs->max_spanning_elems = softs->max_ib_iu_length/softs->max_ibq_elem_size; + ASSERT(softs->max_spanning_elems == PQISRC_MAX_SPANNING_ELEMS); + + /* max SGs should be 8 (128/16) */ + softs->max_sg_per_single_iu_element = softs->max_ibq_elem_size / sizeof(sgt_t); + ASSERT(softs->max_sg_per_single_iu_element == MAX_EMBEDDED_SG_IN_IU); - DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length); - DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq); - DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq); - DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu); + /* max SGs for spanning cmd should be 68*/ + softs->max_sg_per_spanning_cmd = (softs->max_spanning_elems - 1) * softs->max_sg_per_single_iu_element; + softs->max_sg_per_spanning_cmd += MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT; + + DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length); /* 1152 per FW advertisement */ + DBG_INIT("softs->num_elem_per_op_ibq: %u\n", softs->num_elem_per_op_ibq); /* 32 for xcal */ + DBG_INIT("softs->num_elem_per_op_obq: %u\n", softs->num_elem_per_op_obq); /* 256 for 
xcal */ + DBG_INIT("softs->max_spanning_elems: %d\n", softs->max_spanning_elems); /* 9 */ + DBG_INIT("softs->max_sg_per_spanning_cmd: %u\n", softs->max_sg_per_spanning_cmd); /* 68 until we add AIO writes */ DBG_FUNC("OUT\n"); } /* * Configure the operational queue parameters. */ int pqisrc_configure_op_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; /* Get the PQI capability, REPORT PQI DEVICE CAPABILITY request */ ret = pqisrc_report_pqi_capability(softs); if (ret) { DBG_ERR("Failed to send report pqi dev capability request : %d\n", ret); goto err_out; } /* Reserve required no of slots for internal requests */ softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT; /* Decide the Op queue configuration */ pqisrc_decide_opq_config(softs); DBG_FUNC("OUT\n"); return ret; err_out: DBG_FUNC("OUT failed\n"); return ret; } /* * Validate the PQI mode of adapter. */ int pqisrc_check_pqimode(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_FAILURE; int tmo = 0; uint64_t signature = 0; DBG_FUNC("IN\n"); /* Check the PQI device signature */ tmo = PQISRC_PQIMODE_READY_TIMEOUT; do { signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE)); if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE, sizeof(uint64_t)) == 0) { ret = PQI_STATUS_SUCCESS; break; } OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL); } while (tmo--); PRINT_PQI_SIGNATURE(signature); if (tmo <= 0) { DBG_ERR("PQI Signature is invalid\n"); ret = PQI_STATUS_TIMEOUT; goto err_out; } tmo = PQISRC_PQIMODE_READY_TIMEOUT; /* Check function and status code for the device */ COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo); if (!tmo) { DBG_ERR("PQI device is not in IDLE state\n"); ret = PQI_STATUS_TIMEOUT; goto err_out; } tmo = PQISRC_PQIMODE_READY_TIMEOUT; /* Check the PQI device status register */ COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) & PQI_DEV_STATE_AT_INIT, tmo); if (!tmo) { DBG_ERR("PQI Registers are not ready\n"); ret = PQI_STATUS_TIMEOUT; goto err_out; } DBG_FUNC("OUT\n"); return ret; err_out: DBG_FUNC("OUT failed\n"); return ret; } -/* PQI Feature processing */ -static int -pqisrc_config_table_update(struct pqisrc_softstate *softs, - uint16_t first_section, uint16_t last_section) -{ - pqi_vendor_general_request_t request; - int ret = PQI_STATUS_FAILURE; - - memset(&request, 0, sizeof(request)); - - request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; - request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH; - request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE; - request.data.config_table_update.first_section = first_section; - request.data.config_table_update.last_section = last_section; - - ret = pqisrc_build_send_vendor_request(softs, &request, NULL); - - if (ret != PQI_STATUS_SUCCESS) { - DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret); - return PQI_STATUS_FAILURE; - } - - return PQI_STATUS_SUCCESS; -} - -static inline -boolean_t pqi_is_firmware_feature_supported( - struct pqi_conf_table_firmware_features *firmware_feature_list, - unsigned int bit_position) -{ - unsigned int byte_index; - - byte_index = bit_position / BITS_PER_BYTE; - - if (byte_index >= firmware_feature_list->num_elements) - return false; - - return firmware_feature_list->features_supported[byte_index] & - (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; -} - -static inline -boolean_t pqi_is_firmware_feature_enabled( - struct pqi_conf_table_firmware_features *firmware_feature_list, - uint8_t *firmware_features_addr, unsigned int bit_position) -{ - unsigned int byte_index; - uint8_t *feature_enabled_addr; - - byte_index = (bit_position / BITS_PER_BYTE) + - (firmware_feature_list->num_elements * 2); - - feature_enabled_addr = firmware_features_addr + - offsetof(struct pqi_conf_table_firmware_features, - features_supported) + byte_index; - - return *feature_enabled_addr & - (1 << (bit_position % BITS_PER_BYTE)) ? true : false; -} - -static inline void -pqi_request_firmware_feature( - struct pqi_conf_table_firmware_features *firmware_feature_list, - unsigned int bit_position) -{ - unsigned int byte_index; - - byte_index = (bit_position / BITS_PER_BYTE) + - firmware_feature_list->num_elements; - - firmware_feature_list->features_supported[byte_index] |= - (1 << (bit_position % BITS_PER_BYTE)); -} - -/* Update PQI config table firmware features section and inform the firmware */ -static int -pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs, - struct pqi_conf_table_firmware_features *firmware_feature_list) -{ - uint8_t *request_feature_addr; - void *request_feature_abs_addr; - - request_feature_addr = firmware_feature_list->features_supported + - firmware_feature_list->num_elements; - request_feature_abs_addr = softs->fw_features_section_abs_addr + - (request_feature_addr - (uint8_t*)firmware_feature_list); - - os_io_memcpy(request_feature_abs_addr, request_feature_addr, - firmware_feature_list->num_elements); - - return pqisrc_config_table_update(softs, - PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES, - PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES); -} - -/* Check firmware has enabled the feature specified in the respective bit position. */ -inline boolean_t -pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs, - struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position) -{ - uint16_t byte_index; - uint8_t *features_enabled_abs_addr; - - byte_index = (bit_position / BITS_PER_BYTE) + - (firmware_feature_list->num_elements * 2); - - features_enabled_abs_addr = softs->fw_features_section_abs_addr + - offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index; - - return *features_enabled_abs_addr & - (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; -} - -static void -pqi_firmware_feature_status(struct pqisrc_softstate *softs, - struct pqi_firmware_feature *firmware_feature) -{ - switch(firmware_feature->feature_bit) { - case PQI_FIRMWARE_FEATURE_OFA: - break; - case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT: - softs->timeout_in_passthrough = true; - break; - case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT: - softs->timeout_in_tmf = true; - break; - default: - DBG_NOTE("Nothing to do \n"); - } -} - -/* Firmware features supported by the driver */ -static struct -pqi_firmware_feature pqi_firmware_features[] = { - { - .feature_name = "Support timeout for pass-through commands", - .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT, - .feature_status = pqi_firmware_feature_status, - }, - { - .feature_name = "Support timeout for LUN Reset TMF", - .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT, - .feature_status = pqi_firmware_feature_status, - } -}; - -static void -pqisrc_process_firmware_features(pqisrc_softstate_t *softs) -{ - int rc; - struct pqi_conf_table_firmware_features *firmware_feature_list; - unsigned int i; - unsigned int num_features_requested; - - firmware_feature_list = (struct pqi_conf_table_firmware_features*) - softs->fw_features_section_abs_addr; - - /* Check features and request those supported by firmware and driver.*/ - for (i = 0, num_features_requested = 0; - i < ARRAY_SIZE(pqi_firmware_features); i++) { - /* Firmware support it ? */ - if (pqi_is_firmware_feature_supported(firmware_feature_list, - pqi_firmware_features[i].feature_bit)) { - pqi_request_firmware_feature(firmware_feature_list, - pqi_firmware_features[i].feature_bit); - pqi_firmware_features[i].supported = true; - num_features_requested++; - DBG_NOTE("%s supported by driver, requesting firmware to enable it\n", - pqi_firmware_features[i].feature_name); - } else { - DBG_NOTE("%s supported by driver, but not by current firmware\n", - pqi_firmware_features[i].feature_name); - } - } - if (num_features_requested == 0) - return; - - rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list); - if (rc) { - DBG_ERR("Failed to update pqi config table\n"); - return; - } - - for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { - if (pqi_is_firmware_feature_enabled(firmware_feature_list, - softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) { - pqi_firmware_features[i].enabled = true; - DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name); - if(pqi_firmware_features[i].feature_status) - pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i])); - } - } -} - -/* - * Get the PQI configuration table parameters. - * Currently using for heart-beat counter scratch-pad register. 
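The firmware-feature helpers removed above all walk the same bitmap layout: after the element count, three back-to-back regions of num_elements bytes hold the "supported", "host requested" and "firmware enabled" bits, which is where the byte_index arithmetic with num_elements and num_elements * 2 comes from. A hedged standalone sketch of that addressing (the features pointer stands in for the features_supported array):

#include <stdbool.h>
#include <stdint.h>

#define BITS_PER_BYTE_SKETCH 8

static bool
feature_bit_set(const uint8_t *features, unsigned int num_elements,
    unsigned int region, unsigned int bit_position)
{
	unsigned int byte_index;

	if (bit_position / BITS_PER_BYTE_SKETCH >= num_elements)
		return false;                   /* bit beyond the advertised table */

	/* region 0 = supported, 1 = host requested, 2 = firmware enabled */
	byte_index = (bit_position / BITS_PER_BYTE_SKETCH) + (region * num_elements);

	return (features[byte_index] >> (bit_position % BITS_PER_BYTE_SKETCH)) & 1;
}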
- */ -int -pqisrc_process_config_table(pqisrc_softstate_t *softs) -{ - int ret = PQI_STATUS_FAILURE; - uint32_t config_table_size; - uint32_t section_off; - uint8_t *config_table_abs_addr __unused; - struct pqi_conf_table *conf_table; - struct pqi_conf_table_section_header *section_hdr; - - config_table_size = softs->pqi_cap.conf_tab_sz; - - if (config_table_size < sizeof(*conf_table) || - config_table_size > PQI_CONF_TABLE_MAX_LEN) { - DBG_ERR("Invalid PQI conf table length of %u\n", - config_table_size); - return ret; - } - - conf_table = os_mem_alloc(softs, config_table_size); - if (!conf_table) { - DBG_ERR("Failed to allocate memory for PQI conf table\n"); - return ret; - } - - if (config_table_size < sizeof(conf_table) || - config_table_size > PQI_CONF_TABLE_MAX_LEN) { - DBG_ERR("Invalid PQI conf table length of %u\n", - config_table_size); - goto out; - } - - config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr + - softs->pqi_cap.conf_tab_off); - - PCI_MEM_GET_BUF(softs, config_table_abs_addr, - softs->pqi_cap.conf_tab_off, - (uint8_t*)conf_table, config_table_size); - - - if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE, - sizeof(conf_table->sign)) != 0) { - DBG_ERR("Invalid PQI config signature\n"); - goto out; - } - - section_off = LE_32(conf_table->first_section_off); - - while (section_off) { - - if (section_off+ sizeof(*section_hdr) >= config_table_size) { - DBG_INFO("Reached end of PQI config table. Breaking off.\n"); - break; - } - - section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off); - - switch (LE_16(section_hdr->section_id)) { - case PQI_CONF_TABLE_SECTION_GENERAL_INFO: - case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA: - case PQI_CONF_TABLE_SECTION_DEBUG: - break; - case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES: - softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off; - softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off; - pqisrc_process_firmware_features(softs); - break; - case PQI_CONF_TABLE_SECTION_HEARTBEAT: - softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off + - section_off + - offsetof(struct pqi_conf_table_heartbeat, - heartbeat_counter); - softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr + - softs->heartbeat_counter_off); - ret = PQI_STATUS_SUCCESS; - break; - default: - DBG_INFO("unrecognized PQI config table section ID: 0x%x\n", - LE_16(section_hdr->section_id)); - break; - } - section_off = LE_16(section_hdr->next_section_off); - } -out: - os_mem_free(softs, (void *)conf_table,config_table_size); - return ret; -} - /* Wait for PQI reset completion for the adapter*/ int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; pqi_reset_reg_t reset_reg; int pqi_reset_timeout = 0; uint64_t val = 0; uint32_t max_timeout = 0; val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP); max_timeout = (val & 0xFFFF00000000) >> 32; DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout); while(1) { if (pqi_reset_timeout++ == max_timeout) { return PQI_STATUS_TIMEOUT; } OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */ reset_reg.all_bits = PCI_MEM_GET32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET); if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) break; } return ret; } /* * Function used to perform PQI hard reset. 
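pqisrc_wait_for_pqi_reset_completion() above derives its polling budget from bits 47:32 of the 64-bit admin-queue capability register, expressed in 100 ms units. An equivalent standalone extraction of that field:

#include <stdint.h>

static uint32_t
reset_timeout_100ms_units(uint64_t adminq_cap)
{
	/* same field as (val & 0xFFFF00000000) >> 32 in the loop above */
	return (uint32_t)((adminq_cap >> 32) & 0xFFFF);
}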
*/ int pqi_reset(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t val = 0; pqi_reset_reg_t pqi_reset_reg; DBG_FUNC("IN\n"); if (true == softs->ctrl_in_pqi_mode) { if (softs->pqi_reset_quiesce_allowed) { val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR); val |= SIS_PQI_RESET_QUIESCE; PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, LE_32(val)); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE); if (ret) { DBG_ERR("failed with error %d during quiesce\n", ret); return ret; } } pqi_reset_reg.all_bits = 0; pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET, LE_32(pqi_reset_reg.all_bits)); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ ret = pqisrc_wait_for_pqi_reset_completion(softs); if (ret) { DBG_ERR("PQI reset timed out: ret = %d!\n", ret); return ret; } } softs->ctrl_in_pqi_mode = false; DBG_FUNC("OUT\n"); return ret; } /* * Initialize the adapter with supported PQI configuration. */ int pqisrc_pqi_init(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); /* Check the PQI signature */ ret = pqisrc_check_pqimode(softs); if(ret) { DBG_ERR("failed to switch to pqi\n"); goto err_out; } PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE); softs->ctrl_in_pqi_mode = true; /* Get the No. of Online CPUs,NUMA/Processor config from OS */ ret = os_get_processor_config(softs); if (ret) { DBG_ERR("Failed to get processor config from OS %d\n", ret); goto err_out; } softs->intr_type = INTR_TYPE_NONE; /* Get the interrupt count, type, priority available from OS */ ret = os_get_intr_config(softs); if (ret) { DBG_ERR("Failed to get interrupt config from OS %d\n", ret); goto err_out; } /*Enable/Set Legacy INTx Interrupt mask clear pqi register, *if allocated interrupt is legacy type. 
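The pqi_reset() path above performs a quiesce handshake before the hard reset: set the quiesce bit in the host-to-IOA doorbell, pause roughly 1 ms so the posted write lands, then wait for firmware to clear the bit. A simplified, illustrative sketch of that handshake; the register pointer and sleep callback are stand-ins for the driver's accessors, and the real code delegates the wait to pqisrc_sis_wait_for_db_bit_to_clear():

#include <stdint.h>

static int
quiesce_handshake(volatile uint32_t *doorbell, uint32_t quiesce_bit,
    void (*sleep_ms)(unsigned int), unsigned int timeout_ms)
{
	*doorbell |= quiesce_bit;               /* request quiesce */
	sleep_ms(1);                            /* settle the posted write */

	while (*doorbell & quiesce_bit) {       /* firmware clears the bit when done */
		if (timeout_ms-- == 0)
			return -1;
		sleep_ms(1);
	}
	return 0;
}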
*/ if (INTR_TYPE_FIXED == softs->intr_type) { pqisrc_configure_legacy_intx(softs, true); sis_enable_intx(softs); } /* Create Admin Queue pair*/ ret = pqisrc_create_admin_queue(softs); if(ret) { DBG_ERR("Failed to configure admin queue\n"); goto err_admin_queue; } /* For creating event and IO operational queues we have to submit admin IU requests.So Allocate resources for submitting IUs */ /* Allocate the request container block (rcb) */ ret = pqisrc_allocate_rcb(softs); if (ret == PQI_STATUS_FAILURE) { DBG_ERR("Failed to allocate rcb \n"); goto err_rcb; } /* Allocate & initialize request id queue */ ret = pqisrc_init_taglist(softs,&softs->taglist, softs->max_outstanding_io); if (ret) { DBG_ERR("Failed to allocate memory for request id q : %d\n", ret); goto err_taglist; } ret = pqisrc_configure_op_queues(softs); if (ret) { DBG_ERR("Failed to configure op queue\n"); goto err_config_opq; } /* Create Operational queues */ ret = pqisrc_create_op_queues(softs); if(ret) { - DBG_ERR("Failed to create op queue\n"); - ret = PQI_STATUS_FAILURE; - goto err_create_opq; - } + DBG_ERR("Failed to create op queue\n"); + goto err_create_opq; + } softs->ctrl_online = true; DBG_FUNC("OUT\n"); return ret; err_create_opq: err_config_opq: pqisrc_destroy_taglist(softs,&softs->taglist); err_taglist: pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); err_rcb: pqisrc_destroy_admin_queue(softs); err_admin_queue: os_free_intr_config(softs); err_out: DBG_FUNC("OUT failed\n"); return PQI_STATUS_FAILURE; } +/* */ int pqisrc_force_sis(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; if (SIS_IS_KERNEL_PANIC(softs)) { - DBG_INIT("Controller FW is not running"); + DBG_ERR("Controller FW is not running\n"); return PQI_STATUS_FAILURE; } if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) { return ret; } if (SIS_IS_KERNEL_UP(softs)) { PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); return ret; } /* Disable interrupts ? 
*/ sis_disable_interrupt(softs); /* reset pqi, this will delete queues */ ret = pqi_reset(softs); if (ret) { return ret; } /* Re enable SIS */ ret = pqisrc_reenable_sis(softs); if (ret) { return ret; } PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE); return ret; } -static int +/* 5 mins timeout for quiesce */ +#define PQI_QUIESCE_TIMEOUT 300000 + +int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs) { + int count = 0; int ret = PQI_STATUS_SUCCESS; - DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem); + DBG_NOTE("softs->taglist.num_elem : %u",softs->taglist.num_elem); if (softs->taglist.num_elem == softs->max_outstanding_io) return ret; else { - DBG_WARN("%d commands pending\n", + DBG_WARN("%u commands pending\n", softs->max_outstanding_io - softs->taglist.num_elem); while(1) { /* Since heartbeat timer stopped ,check for firmware status*/ if (SIS_IS_KERNEL_PANIC(softs)) { DBG_ERR("Controller FW is not running\n"); return PQI_STATUS_FAILURE; } if (softs->taglist.num_elem != softs->max_outstanding_io) { /* Sleep for 1 msec */ OS_SLEEP(1000); count++; if(count % 1000 == 0) { DBG_WARN("Waited for %d seconds", count/1000); } if (count >= PQI_QUIESCE_TIMEOUT) { return PQI_STATUS_FAILURE; } continue; } break; } } return ret; } -static void +void pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs) { int tag = 0; rcb_t *rcb; for (tag = 1; tag <= softs->max_outstanding_io; tag++) { rcb = &softs->rcb[tag]; if(rcb->req_pending && is_internal_req(rcb)) { - rcb->status = REQUEST_FAILED; + rcb->status = PQI_STATUS_TIMEOUT; rcb->req_pending = false; } } } /* * Uninitialize the resources used during PQI initialization. */ void pqisrc_pqi_uninit(pqisrc_softstate_t *softs) { - int i, ret; + int ret; DBG_FUNC("IN\n"); /* Wait for any rescan to finish */ pqisrc_wait_for_rescan_complete(softs); /* Wait for commands to complete */ ret = pqisrc_wait_for_cmnd_complete(softs); /* disable and free the interrupt resources */ os_destroy_intr(softs); /* Complete all pending commands. 
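pqisrc_wait_for_cmnd_complete() above treats the tag free-list as the quiesce condition: once taglist.num_elem climbs back to max_outstanding_io, every command has completed; otherwise it sleeps 1 ms per pass and gives up after PQI_QUIESCE_TIMEOUT (300000 ms, i.e. five minutes). A condensed sketch of that loop (the firmware-panic check and the periodic progress warning are omitted):

#include <stdbool.h>

#define QUIESCE_TIMEOUT_MS_SKETCH 300000u   /* mirrors PQI_QUIESCE_TIMEOUT */

static bool
wait_for_quiesce(unsigned int (*tags_free)(void), unsigned int max_io,
    void (*sleep_ms)(unsigned int))
{
	unsigned int waited_ms = 0;

	while (tags_free() != max_io) {         /* commands still outstanding */
		sleep_ms(1);
		if (++waited_ms >= QUIESCE_TIMEOUT_MS_SKETCH)
			return false;           /* gave up after five minutes */
	}
	return true;
}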
*/ if(ret != PQI_STATUS_SUCCESS) { pqisrc_complete_internal_cmds(softs); os_complete_outstanding_cmds_nodevice(softs); } if(softs->devlist_lockcreated==true){ os_uninit_spinlock(&softs->devlist_lock); softs->devlist_lockcreated = false; } - for (i = 0; i < softs->num_op_raid_ibq; i++) { - /* OP RAID IB Q */ - if(softs->op_raid_ib_q[i].lockcreated==true){ - OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock); - softs->op_raid_ib_q[i].lockcreated = false; - } - /* OP AIO IB Q */ - if(softs->op_aio_ib_q[i].lockcreated==true){ - OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock); - softs->op_aio_ib_q[i].lockcreated = false; - } - } - - /* Free Op queues */ - os_dma_mem_free(softs, &softs->op_ibq_dma_mem); - os_dma_mem_free(softs, &softs->op_obq_dma_mem); - os_dma_mem_free(softs, &softs->event_q_dma_mem); - - + /* Free all queues */ + pqisrc_destroy_op_ib_queues(softs); + pqisrc_destroy_op_ob_queues(softs); + pqisrc_destroy_event_queue(softs); /* Free rcb */ pqisrc_free_rcb(softs, softs->max_outstanding_io + 1); /* Free request id lists */ pqisrc_destroy_taglist(softs,&softs->taglist); - if(softs->admin_ib_queue.lockcreated==true) { - OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock); - softs->admin_ib_queue.lockcreated = false; - } - /* Free Admin Queue */ - os_dma_mem_free(softs, &softs->admin_queue_dma_mem); + pqisrc_destroy_admin_queue(softs); /* Switch back to SIS mode */ if (pqisrc_force_sis(softs)) { DBG_ERR("Failed to switch back the adapter to SIS mode!\n"); } DBG_FUNC("OUT\n"); } + +/* + * Function to do any sanity checks for OS macros + */ +void +sanity_check_os_behavior(pqisrc_softstate_t *softs) +{ +#ifdef OS_ATOMIC64_INC + OS_ATOMIC64_T atomic_test_var = 0; + OS_ATOMIC64_T atomic_ret = 0; + + atomic_ret = OS_ATOMIC64_INC(&atomic_test_var); + ASSERT(atomic_ret == 1); + + atomic_ret = OS_ATOMIC64_INC(&atomic_test_var); + ASSERT(atomic_ret == 2); + + atomic_ret = OS_ATOMIC64_DEC(&atomic_test_var); + ASSERT(atomic_ret == 1); +#else + DBG_INIT("OS needs to define/implement atomic macros\n"); +#endif +} + /* * Function to initialize the adapter settings. */ int pqisrc_init(pqisrc_softstate_t *softs) { int ret = 0; - int i = 0, j = 0; + uint32_t ctrl_type; DBG_FUNC("IN\n"); + sanity_check_os_behavior(softs); + check_struct_sizes(); - /* Init the Sync interface */ - ret = pqisrc_sis_init(softs); - if (ret) { - DBG_ERR("SIS Init failed with error %d\n", ret); + /*Get verbose flags, defined in OS code XX_debug.h or so*/ +#ifdef DISABLE_ERR_RESP_VERBOSE + softs->err_resp_verbose = false; +#else + softs->err_resp_verbose = true; +#endif + + /* prevent attachment of revA hardware. 
*/ + ctrl_type = PQI_GET_CTRL_TYPE(softs); + if (ctrl_type == PQI_CTRL_PRODUCT_ID_GEN2_REV_A) { + DBG_ERR("adapter at B.D.F=%u.%u.%u: unsupported RevA card.\n", + softs->bus_id, softs->device_id, softs->func_id); + ret = PQI_STATUS_FAILURE; goto err_out; } + /* Increment the global adapter ID and tie it to this BDF */ +#ifdef OS_ATOMIC64_INC + static OS_ATOMIC64_T g_adapter_cnt = 0; + softs->adapter_num = (uint8_t)OS_ATOMIC64_INC(&g_adapter_cnt); +#else + static uint64_t g_adapter_cnt = 0; + softs->adapter_num = (uint8_t)++g_adapter_cnt; +#endif + DBG_NOTE("Initializing adapter %u\n", (uint32_t)softs->adapter_num); + ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock); if(ret != PQI_STATUS_SUCCESS){ DBG_ERR(" Failed to initialize scan lock\n"); - goto err_scan_lock; + goto err_out; } + /* Init the Sync interface */ + ret = pqisrc_sis_init(softs); + if (ret) { + DBG_ERR("SIS Init failed with error %d\n", ret); + goto err_sis; + } + + /* Init the PQI interface */ ret = pqisrc_pqi_init(softs); if (ret) { DBG_ERR("PQI Init failed with error %d\n", ret); goto err_pqi; } /* Setup interrupt */ ret = os_setup_intr(softs); if (ret) { DBG_ERR("Interrupt setup failed with error %d\n", ret); goto err_intr; } /* Report event configuration */ - ret = pqisrc_report_event_config(softs); - if(ret){ - DBG_ERR(" Failed to configure Report events\n"); + ret = pqisrc_report_event_config(softs); + if(ret){ + DBG_ERR(" Failed to configure Report events\n"); goto err_event; } /* Set event configuration*/ - ret = pqisrc_set_event_config(softs); - if(ret){ - DBG_ERR(" Failed to configure Set events\n"); - goto err_event; - } + ret = pqisrc_set_event_config(softs); + if(ret){ + DBG_ERR(" Failed to configure Set events\n"); + goto err_event; + } /* Check for For PQI spanning */ ret = pqisrc_get_ctrl_fw_version(softs); - if(ret){ - DBG_ERR(" Failed to get ctrl fw version\n"); - goto err_fw_version; - } + if(ret){ + DBG_ERR(" Failed to get ctrl fw version\n"); + goto err_fw_version; + } /* update driver version in to FW */ ret = pqisrc_write_driver_version_to_host_wellness(softs); if (ret) { DBG_ERR(" Failed to update driver version in to FW"); goto err_host_wellness; } + /* Setup sense features */ + ret = pqisrc_QuerySenseFeatures(softs); + if (ret) { + DBG_ERR("Failed to get sense features\n"); + goto err_sense; + } os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE); ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name); if(ret){ DBG_ERR(" Failed to initialize devlist_lock\n"); softs->devlist_lockcreated=false; goto err_lock; } softs->devlist_lockcreated = true; /* Get the PQI configuration table to read heart-beat counter*/ ret = pqisrc_process_config_table(softs); if (ret) { DBG_ERR("Failed to process PQI configuration table %d\n", ret); goto err_config_tab; } softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL; - /* Init device list */ - for(i = 0; i < PQI_MAX_DEVICES; i++) - for(j = 0; j < PQI_MAX_MULTILUN; j++) - softs->device_list[i][j] = NULL; - - pqisrc_init_targetid_pool(softs); + memset(softs->dev_list, 0, sizeof(*softs->dev_list)); + pqisrc_init_bitmap(softs); DBG_FUNC("OUT\n"); return ret; err_config_tab: if(softs->devlist_lockcreated==true){ os_uninit_spinlock(&softs->devlist_lock); softs->devlist_lockcreated = false; } err_lock: err_fw_version: err_event: err_host_wellness: err_intr: +err_sense: pqisrc_pqi_uninit(softs); err_pqi: - os_destroy_semaphore(&softs->scan_lock); -err_scan_lock: pqisrc_sis_uninit(softs); 
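The error handling in pqisrc_init() above is the usual goto-unwind idiom: teardown runs in reverse acquisition order, which is why moving the scan_lock creation ahead of pqisrc_sis_init() in this hunk also moves the semaphore destruction down to the new err_sis label. A generic sketch of the idiom with hypothetical acquire/release helpers (not driver functions):

static int acquire_a(void) { return 0; }    /* hypothetical stand-ins for the */
static int acquire_b(void) { return 0; }    /* scan lock, SIS init, PQI init; */
static int acquire_c(void) { return -1; }   /* the last one fails here        */
static void release_b(void) { }
static void release_a(void) { }

static int
init_sketch(void)
{
	if (acquire_a() != 0)
		goto err_out;
	if (acquire_b() != 0)
		goto err_a;
	if (acquire_c() != 0)
		goto err_b;
	return 0;

err_b:
	release_b();        /* undo in reverse order of acquisition */
err_a:
	release_a();
err_out:
	return -1;
}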
+err_sis: + os_destroy_semaphore(&softs->scan_lock); err_out: DBG_FUNC("OUT failed\n"); return ret; } /* * Write all data in the adapter's battery-backed cache to * storage. */ int pqisrc_flush_cache( pqisrc_softstate_t *softs, enum pqisrc_flush_cache_event_type event_type) { int rval = PQI_STATUS_SUCCESS; pqisrc_raid_req_t request; pqisrc_bmic_flush_cache_t *flush_buff = NULL; DBG_FUNC("IN\n"); if (pqisrc_ctrl_offline(softs)) return PQI_STATUS_FAILURE; flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t)); if (!flush_buff) { DBG_ERR("Failed to allocate memory for flush cache params\n"); rval = PQI_STATUS_FAILURE; return rval; } flush_buff->halt_event = event_type; memset(&request, 0, sizeof(request)); - rval = pqisrc_build_send_raid_request(softs, &request, flush_buff, - sizeof(*flush_buff), SA_CACHE_FLUSH, 0, - (uint8_t *)RAID_CTLR_LUNID, NULL); + request.data_direction = SOP_DATA_DIR_FROM_DEVICE; + request.cmd.bmic_cdb.op_code = BMIC_WRITE; + request.cmd.bmic_cdb.cmd = BMIC_CACHE_FLUSH; + request.cmd.bmic_cdb.xfer_len = BE_16(sizeof(*flush_buff)); + + rval = pqisrc_prepare_send_ctrlr_request(softs, &request, flush_buff, sizeof(*flush_buff)); + if (rval) { DBG_ERR("error in build send raid req ret=%d\n", rval); } - if (flush_buff) - os_mem_free(softs, (void *)flush_buff, - sizeof(pqisrc_bmic_flush_cache_t)); + os_mem_free(softs, (void *)flush_buff, sizeof(pqisrc_bmic_flush_cache_t)); DBG_FUNC("OUT\n"); return rval; } /* * Uninitialize the adapter. */ void pqisrc_uninit(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); pqisrc_pqi_uninit(softs); pqisrc_sis_uninit(softs); os_destroy_semaphore(&softs->scan_lock); pqisrc_cleanup_devices(softs); DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_intr.c b/sys/dev/smartpqi/smartpqi_intr.c index 0126ef414f73..a62bdc9e8389 100644 --- a/sys/dev/smartpqi/smartpqi_intr.c +++ b/sys/dev/smartpqi/smartpqi_intr.c @@ -1,448 +1,453 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "smartpqi_includes.h" + /* * Function to get processor count */ int os_get_processor_config(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); softs->num_cpus_online = mp_ncpus; + bsd_set_hint_adapter_cpu_config(softs); DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } /* * Function to get interrupt count and type supported */ int os_get_intr_config(pqisrc_softstate_t *softs) { device_t dev = softs->os_specific.pqi_dev; int msi_count = pci_msix_count(dev); int error = BSD_SUCCESS; DBG_FUNC("IN\n"); if (msi_count > softs->num_cpus_online) msi_count = softs->num_cpus_online; if (msi_count > PQI_MAX_MSIX) msi_count = PQI_MAX_MSIX; if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) { device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; " "will try MSI\n", msi_count, error); pci_release_msi(dev); } else { softs->intr_count = msi_count; softs->intr_type = INTR_TYPE_MSIX; softs->os_specific.msi_enabled = TRUE; device_printf(dev, "using MSI-X interrupts (%d vectors)\n", msi_count); } if (!softs->intr_type) { msi_count = 1; if ((error = pci_alloc_msi(dev, &msi_count)) != 0) { device_printf(dev, "alloc msi failed - err=%d; " "will use INTx\n", error); pci_release_msi(dev); } else { softs->os_specific.msi_enabled = TRUE; softs->intr_count = msi_count; softs->intr_type = INTR_TYPE_MSI; device_printf(dev, "using MSI interrupts\n"); } } if (!softs->intr_type) { device_printf(dev, "using legacy interrupts\n"); softs->intr_type = INTR_TYPE_FIXED; softs->intr_count = 1; } - DBG_FUNC("OUT\n"); - error = bsd_status_to_pqi_status(BSD_SUCCESS); + DBG_FUNC("OUT\n"); + return error; } void os_eventtaskqueue_enqueue(pqisrc_softstate_t *sc) { taskqueue_enqueue(taskqueue_swi, &sc->os_specific.event_task); } void pqisrc_event_worker(void *arg1, int arg2) { pqisrc_ack_all_events(arg1); } /* * ithread routine to handle uniprocessor systems */ static void shared_ithread_routine(void *arg) { pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg; pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev); int oq_id = intr_ctx->oq_id; DBG_FUNC("IN\n"); - if (softs == NULL) + if (!softs) return; pqisrc_process_response_queue(softs, oq_id); pqisrc_process_event_intr_src(softs, oq_id - 1); DBG_FUNC("OUT\n"); } /* * ithread routine to process non event response */ static void common_ithread_routine(void *arg) { pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg; pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev); int oq_id = intr_ctx->oq_id; DBG_FUNC("IN\n"); - if (softs == NULL) + if (!softs) return; pqisrc_process_response_queue(softs, oq_id); DBG_FUNC("OUT\n"); } static void event_ithread_routine(void *arg) { pqi_intr_ctx_t *intr_ctx = (pqi_intr_ctx_t *)arg; pqisrc_softstate_t *softs = device_get_softc(intr_ctx->pqi_dev); int oq_id = intr_ctx->oq_id; DBG_FUNC("IN\n"); - if (softs == NULL) + if (!softs) return; pqisrc_process_event_intr_src(softs, oq_id); DBG_FUNC("OUT\n"); } /* * Registration of legacy interrupt in case MSI is unsupported */ int register_legacy_intr(pqisrc_softstate_t *softs) { int error = BSD_SUCCESS; - device_t dev = softs->os_specific.pqi_dev; + device_t dev; DBG_FUNC("IN\n"); + dev = softs->os_specific.pqi_dev; + softs->os_specific.pqi_irq_rid[0] = 0; softs->os_specific.pqi_irq[0] = bus_alloc_resource_any(dev, \ SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[0], RF_ACTIVE | RF_SHAREABLE); if (NULL == softs->os_specific.pqi_irq[0]) { DBG_ERR("Failed to allocate resource for interrupt\n"); return ENXIO; } if ((softs->os_specific.msi_ctx = 
os_mem_alloc(softs,sizeof(pqi_intr_ctx_t))) == NULL) { DBG_ERR("Failed to allocate memory for msi_ctx\n"); return ENXIO; } softs->os_specific.msi_ctx[0].pqi_dev = dev; /* For Legacy support oq_id should be one */ softs->os_specific.msi_ctx[0].oq_id = 1; error = bus_setup_intr(dev, softs->os_specific.pqi_irq[0], INTR_TYPE_CAM | INTR_MPSAFE, \ NULL, shared_ithread_routine, &softs->os_specific.msi_ctx[0], &softs->os_specific.intrcookie[0]); if (error) { DBG_ERR("Failed to setup legacy interrupt err = %d\n", error); return error; } softs->os_specific.intr_registered[0] = TRUE; DBG_FUNC("OUT error = %d\n", error); return error; } /* * Registration of MSIx */ int register_msix_intr(pqisrc_softstate_t *softs) { int error = BSD_SUCCESS; int i = 0; device_t dev = softs->os_specific.pqi_dev; int msix_count = softs->intr_count; + size_t msix_size = sizeof(pqi_intr_ctx_t) * msix_count; DBG_FUNC("IN\n"); - softs->os_specific.msi_ctx = os_mem_alloc(softs, sizeof(pqi_intr_ctx_t) * msix_count); + softs->os_specific.msi_ctx = os_mem_alloc(softs, msix_size); if (!softs->os_specific.msi_ctx) { - DBG_ERR("Memory allocation failed\n"); + DBG_ERR("Memory allocation failed, Requested memory:%lu bytes\n", (unsigned long)msix_size); return ENXIO; } /*Add shared handler */ if (softs->share_opq_and_eventq) { softs->os_specific.pqi_irq_rid[i] = i+1; softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \ SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[i], RF_SHAREABLE | RF_ACTIVE); if (NULL == softs->os_specific.pqi_irq[i]) { DBG_ERR("Failed to allocate \ event interrupt resource\n"); return ENXIO; } softs->os_specific.msi_ctx[i].pqi_dev = dev; softs->os_specific.msi_ctx[i].oq_id = i+1; error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i], INTR_TYPE_CAM | INTR_MPSAFE,\ NULL, shared_ithread_routine, &softs->os_specific.msi_ctx[i], &softs->os_specific.intrcookie[i]); if (error) { DBG_ERR("Failed to setup interrupt for events r=%d\n", error); return error; } softs->os_specific.intr_registered[i] = TRUE; } else { /* Add event handler */ softs->os_specific.pqi_irq_rid[i] = i+1; softs->os_specific.pqi_irq[i] = bus_alloc_resource_any(dev, \ SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[i], RF_SHAREABLE | RF_ACTIVE); if (NULL == softs->os_specific.pqi_irq[i]) { DBG_ERR("Failed to allocate event interrupt resource\n"); return ENXIO; } softs->os_specific.msi_ctx[i].pqi_dev = dev; softs->os_specific.msi_ctx[i].oq_id = i; error = bus_setup_intr(dev,softs->os_specific.pqi_irq[i], INTR_TYPE_CAM | INTR_MPSAFE,\ NULL, event_ithread_routine, &softs->os_specific.msi_ctx[i], &softs->os_specific.intrcookie[i]); if (error) { DBG_ERR("Failed to setup interrupt for events err=%d\n", error); return error; } softs->os_specific.intr_registered[i] = TRUE; - /* Add interrupt handlers*/ + /* Add interrupt handlers*/ for (i = 1; i < msix_count; ++i) { softs->os_specific.pqi_irq_rid[i] = i+1; softs->os_specific.pqi_irq[i] = \ bus_alloc_resource_any(dev, SYS_RES_IRQ, &softs->os_specific.pqi_irq_rid[i], RF_SHAREABLE | RF_ACTIVE); if (NULL == softs->os_specific.pqi_irq[i]) { DBG_ERR("Failed to allocate \ msi/x interrupt resource\n"); return ENXIO; } softs->os_specific.msi_ctx[i].pqi_dev = dev; softs->os_specific.msi_ctx[i].oq_id = i; error = bus_setup_intr(dev, softs->os_specific.pqi_irq[i], INTR_TYPE_CAM | INTR_MPSAFE,\ NULL, common_ithread_routine, &softs->os_specific.msi_ctx[i], &softs->os_specific.intrcookie[i]); if (error) { DBG_ERR("Failed to setup \ msi/x interrupt error = %d\n", error); return error; } 
softs->os_specific.intr_registered[i] = TRUE; } } DBG_FUNC("OUT error = %d\n", error); return error; } /* * Setup interrupt depending on the configuration */ int os_setup_intr(pqisrc_softstate_t *softs) { int bsd_status, pqi_status; DBG_FUNC("IN\n"); if (softs->intr_type == INTR_TYPE_FIXED) { bsd_status = register_legacy_intr(softs); } else { bsd_status = register_msix_intr(softs); } - if(bsd_status) + if (bsd_status) DBG_WARN("interrupt registration is failed, error = %d\n", bsd_status); pqi_status = bsd_status_to_pqi_status(bsd_status); DBG_FUNC("OUT\n"); return pqi_status; } /* * Deregistration of legacy interrupt */ void deregister_pqi_intx(pqisrc_softstate_t *softs) { device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); if (softs->os_specific.pqi_irq[0] != NULL) { if (softs->os_specific.intr_registered[0]) { bus_teardown_intr(dev, softs->os_specific.pqi_irq[0], softs->os_specific.intrcookie[0]); softs->os_specific.intr_registered[0] = FALSE; } bus_release_resource(dev, SYS_RES_IRQ, softs->os_specific.pqi_irq_rid[0], softs->os_specific.pqi_irq[0]); softs->os_specific.pqi_irq[0] = NULL; os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t)); } DBG_FUNC("OUT\n"); } /* * Deregistration of MSIx interrupt */ void deregister_pqi_msix(pqisrc_softstate_t *softs) { device_t dev = softs->os_specific.pqi_dev; int msix_count = softs->intr_count; int i = 0; DBG_FUNC("IN\n"); os_mem_free(softs, (char*)softs->os_specific.msi_ctx, sizeof(pqi_intr_ctx_t) * msix_count); softs->os_specific.msi_ctx = NULL; for (; i < msix_count; ++i) { if (softs->os_specific.pqi_irq[i] != NULL) { if (softs->os_specific.intr_registered[i]) { bus_teardown_intr(dev, softs->os_specific.pqi_irq[i], softs->os_specific.intrcookie[i]); softs->os_specific.intr_registered[i] = FALSE; } bus_release_resource(dev, SYS_RES_IRQ, softs->os_specific.pqi_irq_rid[i], softs->os_specific.pqi_irq[i]); softs->os_specific.pqi_irq[i] = NULL; } } DBG_FUNC("OUT\n"); } /* * Function to destroy interrupts registered */ int os_destroy_intr(pqisrc_softstate_t *softs) { device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); if (softs->intr_type == INTR_TYPE_FIXED) { deregister_pqi_intx(softs); } else if (softs->intr_type == INTR_TYPE_MSIX) { deregister_pqi_msix(softs); } if (softs->os_specific.msi_enabled) { pci_release_msi(dev); softs->os_specific.msi_enabled = FALSE; - } - + } + DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } /* * Free interrupt related resources for the adapter */ void os_free_intr_config(pqisrc_softstate_t *softs) { device_t dev = softs->os_specific.pqi_dev; DBG_FUNC("IN\n"); if (softs->os_specific.msi_enabled) { pci_release_msi(dev); softs->os_specific.msi_enabled = FALSE; } DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_ioctl.c b/sys/dev/smartpqi/smartpqi_ioctl.c index 4e259fcb08af..2bdc5c09e916 100644 --- a/sys/dev/smartpqi/smartpqi_ioctl.c +++ b/sys/dev/smartpqi/smartpqi_ioctl.c @@ -1,423 +1,420 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Management interface for smartpqi driver */ #include "smartpqi_includes.h" /* * Wrapper function to copy to user from kernel */ int os_copy_to_user(struct pqisrc_softstate *softs, void *dest_buf, void *src_buf, int size, int mode) { return(copyout(src_buf, dest_buf, size)); } /* * Wrapper function to copy from user to kernel */ int os_copy_from_user(struct pqisrc_softstate *softs, void *dest_buf, void *src_buf, int size, int mode) { return(copyin(src_buf, dest_buf, size)); } /* - * Device open function for ioctl entry + * Device open function for ioctl entry */ static int smartpqi_open(struct cdev *cdev, int flags, int devtype, struct thread *td) { return BSD_SUCCESS; } /* * Device close function for ioctl entry */ static int smartpqi_close(struct cdev *cdev, int flags, int devtype, struct thread *td) { return BSD_SUCCESS; } /* * ioctl for getting driver info */ static void smartpqi_get_driver_info_ioctl(caddr_t udata, struct cdev *cdev) { struct pqisrc_softstate *softs = cdev->si_drv1; pdriver_info driver_info = (pdriver_info)udata; DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev); - driver_info->major_version = PQISRC_OS_VERSION; - driver_info->minor_version = PQISRC_FEATURE_VERSION; - driver_info->release_version = PQISRC_PATCH_VERSION; - driver_info->build_revision = PQISRC_BUILD_VERSION; + driver_info->major_version = PQISRC_DRIVER_MAJOR; + driver_info->minor_version = PQISRC_DRIVER_MINOR; + driver_info->release_version = PQISRC_DRIVER_RELEASE; + driver_info->build_revision = PQISRC_DRIVER_REVISION; driver_info->max_targets = PQI_MAX_DEVICES - 1; driver_info->max_io = softs->max_io_for_scsi_ml; driver_info->max_transfer_length = softs->pqi_cap.max_transfer_size; DBG_FUNC("OUT\n"); } /* * ioctl for getting controller info */ static void smartpqi_get_pci_info_ioctl(caddr_t udata, struct cdev *cdev) { struct pqisrc_softstate *softs = cdev->si_drv1; device_t dev = softs->os_specific.pqi_dev; pqi_pci_info_t *pci_info = (pqi_pci_info_t *)udata; uint32_t sub_vendor = 0; uint32_t sub_device = 0; uint32_t vendor = 0; uint32_t device = 0; DBG_FUNC("IN udata = %p cdev = %p\n", udata, cdev); pci_info->bus = pci_get_bus(dev); pci_info->dev_fn = pci_get_function(dev); pci_info->domain = pci_get_domain(dev); sub_vendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); sub_device = pci_read_config(dev, PCIR_SUBDEV_0, 2); pci_info->board_id = ((sub_device << 16) & 0xffff0000) | sub_vendor; vendor = pci_get_vendor(dev); device = pci_get_device(dev); pci_info->chip_id = ((device << 16) & 0xffff0000) | vendor; + DBG_FUNC("OUT\n"); } static inline int 
pqi_status_to_bsd_ioctl_status(int pqi_status) { if (PQI_STATUS_SUCCESS == pqi_status) return BSD_SUCCESS; else return EIO; } /* * ioctl entry point for user */ static int smartpqi_ioctl(struct cdev *cdev, u_long cmd, caddr_t udata, int flags, struct thread *td) { int bsd_status, pqi_status; struct pqisrc_softstate *softs = cdev->si_drv1; DBG_FUNC("IN cmd = 0x%lx udata = %p cdev = %p\n", cmd, udata, cdev); if (!udata) { DBG_ERR("udata is null !!\n"); return EINVAL; } if (pqisrc_ctrl_offline(softs)){ return ENOTTY; } switch (cmd) { case CCISS_GETDRIVVER: smartpqi_get_driver_info_ioctl(udata, cdev); bsd_status = BSD_SUCCESS; break; case CCISS_GETPCIINFO: smartpqi_get_pci_info_ioctl(udata, cdev); bsd_status = BSD_SUCCESS; break; case SMARTPQI_PASS_THRU: case CCISS_PASSTHRU: pqi_status = pqisrc_passthru_ioctl(softs, udata, 0); bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status); break; case CCISS_REGNEWD: pqi_status = pqisrc_scan_devices(softs); bsd_status = pqi_status_to_bsd_ioctl_status(pqi_status); break; default: DBG_WARN( "!IOCTL cmd 0x%lx not supported\n", cmd); bsd_status = ENOTTY; break; } DBG_FUNC("OUT error = %d\n", bsd_status); + return bsd_status; } static struct cdevsw smartpqi_cdevsw = { .d_version = D_VERSION, .d_open = smartpqi_open, .d_close = smartpqi_close, .d_ioctl = smartpqi_ioctl, .d_name = "smartpqi", }; /* * Function to create device node for ioctl */ int create_char_dev(struct pqisrc_softstate *softs, int card_index) { int error = BSD_SUCCESS; DBG_FUNC("IN idx = %d\n", card_index); softs->os_specific.cdev = make_dev(&smartpqi_cdevsw, card_index, UID_ROOT, GID_OPERATOR, 0640, "smartpqi%u", card_index); if(softs->os_specific.cdev) { softs->os_specific.cdev->si_drv1 = softs; } else { error = ENXIO; } DBG_FUNC("OUT error = %d\n", error); return error; } /* * Function to destroy device node for ioctl */ void destroy_char_dev(struct pqisrc_softstate *softs) { DBG_FUNC("IN\n"); if (softs->os_specific.cdev) { destroy_dev(softs->os_specific.cdev); softs->os_specific.cdev = NULL; } DBG_FUNC("OUT\n"); } /* * Function used to send passthru commands to adapter * to support management tools. For eg. ssacli, sscon. 
*/ int pqisrc_passthru_ioctl(struct pqisrc_softstate *softs, void *arg, int mode) { - int ret = PQI_STATUS_SUCCESS; + int ret; char *drv_buf = NULL; uint32_t tag = 0; IOCTL_Command_struct *iocommand = (IOCTL_Command_struct *)arg; dma_mem_t ioctl_dma_buf; pqisrc_raid_req_t request; raid_path_error_info_elem_t error_info; ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; rcb_t *rcb = NULL; memset(&request, 0, sizeof(request)); memset(&error_info, 0, sizeof(error_info)); - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); if (pqisrc_ctrl_offline(softs)) return PQI_STATUS_FAILURE; if (!arg) - return (PQI_STATUS_FAILURE); + return PQI_STATUS_FAILURE; if (iocommand->buf_size < 1 && iocommand->Request.Type.Direction != PQIIOCTL_NONE) return PQI_STATUS_FAILURE; - if (iocommand->Request.CDBLen > sizeof(request.cdb)) + if (iocommand->Request.CDBLen > sizeof(request.cmd.cdb)) return PQI_STATUS_FAILURE; switch (iocommand->Request.Type.Direction) { case PQIIOCTL_NONE: case PQIIOCTL_WRITE: case PQIIOCTL_READ: case PQIIOCTL_BIDIRECTIONAL: break; default: return PQI_STATUS_FAILURE; } if (iocommand->buf_size > 0) { memset(&ioctl_dma_buf, 0, sizeof(struct dma_mem)); - ioctl_dma_buf.tag = "Ioctl_PassthruCmd_Buffer"; + os_strlcpy(ioctl_dma_buf.tag, "Ioctl_PassthruCmd_Buffer", sizeof(ioctl_dma_buf.tag)); ioctl_dma_buf.size = iocommand->buf_size; ioctl_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN; /* allocate memory */ ret = os_dma_mem_alloc(softs, &ioctl_dma_buf); if (ret) { DBG_ERR("Failed to Allocate dma mem for Ioctl PassthruCmd Buffer : %d\n", ret); - ret = PQI_STATUS_FAILURE; goto out; } - DBG_INFO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr); - DBG_INFO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr); + DBG_IO("ioctl_dma_buf.dma_addr = %p\n",(void*)ioctl_dma_buf.dma_addr); + DBG_IO("ioctl_dma_buf.virt_addr = %p\n",(void*)ioctl_dma_buf.virt_addr); drv_buf = (char *)ioctl_dma_buf.virt_addr; if (iocommand->Request.Type.Direction & PQIIOCTL_WRITE) { - if ((ret = os_copy_from_user(softs, (void *)drv_buf, - (void *)iocommand->buf, - iocommand->buf_size, mode)) != 0) { - ret = PQI_STATUS_FAILURE; + ret = os_copy_from_user(softs, (void *)drv_buf, (void *)iocommand->buf, iocommand->buf_size, mode); + if (ret != 0) { goto free_mem; } } } request.header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; request.header.iu_length = offsetof(pqisrc_raid_req_t, sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH; memcpy(request.lun_number, iocommand->LUN_info.LunAddrBytes, sizeof(request.lun_number)); - memcpy(request.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen); + memcpy(request.cmd.cdb, iocommand->Request.CDB, iocommand->Request.CDBLen); request.additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; switch (iocommand->Request.Type.Direction) { case PQIIOCTL_NONE: request.data_direction = SOP_DATA_DIR_NONE; break; case PQIIOCTL_WRITE: request.data_direction = SOP_DATA_DIR_FROM_DEVICE; break; case PQIIOCTL_READ: request.data_direction = SOP_DATA_DIR_TO_DEVICE; break; case PQIIOCTL_BIDIRECTIONAL: request.data_direction = SOP_DATA_DIR_BIDIRECTIONAL; break; } request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; if (iocommand->buf_size > 0) { request.buffer_length = iocommand->buf_size; request.sg_descriptors[0].addr = ioctl_dma_buf.dma_addr; request.sg_descriptors[0].len = iocommand->buf_size; request.sg_descriptors[0].flags = SG_FLAG_LAST; } tag = pqisrc_get_tag(&softs->taglist); if (INVALID_ELEM == tag) { DBG_ERR("Tag not 
available\n"); - ret = PQI_STATUS_FAILURE; goto free_mem; } request.request_id = tag; request.response_queue_id = ob_q->q_id; request.error_index = request.request_id; if (softs->timeout_in_passthrough) { request.timeout_in_sec = iocommand->Request.Timeout; } rcb = &softs->rcb[tag]; rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success; rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error; rcb->tag = tag; rcb->req_pending = true; /* Submit Command */ ret = pqisrc_submit_cmnd(softs, ib_q, &request); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command\n"); goto err_out; } - ret = pqisrc_wait_on_condition(softs, rcb, - PQISRC_PASSTHROUGH_CMD_TIMEOUT); + ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_PASSTHROUGH_CMD_TIMEOUT); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Passthru IOCTL cmd timed out !!\n"); goto err_out; } memset(&iocommand->error_info, 0, sizeof(iocommand->error_info)); if (rcb->status) { size_t sense_data_length; memcpy(&error_info, rcb->error_info, sizeof(error_info)); iocommand->error_info.ScsiStatus = error_info.status; sense_data_length = error_info.sense_data_len; if (!sense_data_length) sense_data_length = error_info.resp_data_len; if (sense_data_length && (sense_data_length > sizeof(error_info.data))) sense_data_length = sizeof(error_info.data); if (sense_data_length) { if (sense_data_length > sizeof(iocommand->error_info.SenseInfo)) sense_data_length = sizeof(iocommand->error_info.SenseInfo); memcpy (iocommand->error_info.SenseInfo, error_info.data, sense_data_length); iocommand->error_info.SenseLen = sense_data_length; } - if (error_info.data_out_result == - PQI_RAID_DATA_IN_OUT_UNDERFLOW){ - rcb->status = REQUEST_SUCCESS; + if (error_info.data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) { + rcb->status = PQI_STATUS_SUCCESS; } } - if (rcb->status == REQUEST_SUCCESS && iocommand->buf_size > 0 && + if (rcb->status == PQI_STATUS_SUCCESS && iocommand->buf_size > 0 && (iocommand->Request.Type.Direction & PQIIOCTL_READ)) { - if ((ret = os_copy_to_user(softs, (void*)iocommand->buf, - (void*)drv_buf, iocommand->buf_size, mode)) != 0) { - DBG_ERR("Failed to copy the response\n"); - goto err_out; + ret = os_copy_to_user(softs, (void*)iocommand->buf, (void*)drv_buf, iocommand->buf_size, mode); + if (ret != 0) { + DBG_ERR("Failed to copy the response\n"); + goto err_out; } } os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, request.request_id); if (iocommand->buf_size > 0) - os_dma_mem_free(softs,&ioctl_dma_buf); + os_dma_mem_free(softs,&ioctl_dma_buf); DBG_FUNC("OUT\n"); - return ret; + return PQI_STATUS_SUCCESS; + err_out: os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, request.request_id); free_mem: if (iocommand->buf_size > 0) os_dma_mem_free(softs, &ioctl_dma_buf); out: DBG_FUNC("Failed OUT\n"); return PQI_STATUS_FAILURE; } diff --git a/sys/dev/smartpqi/smartpqi_ioctl.h b/sys/dev/smartpqi/smartpqi_ioctl.h index cd67135cc821..633465aeb8cd 100644 --- a/sys/dev/smartpqi/smartpqi_ioctl.h +++ b/sys/dev/smartpqi/smartpqi_ioctl.h @@ -1,141 +1,141 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _PQI_IOCTL_H_ #define _PQI_IOCTL_H_ /* IOCTL passthrough macros and structures */ #define SENSEINFOBYTES 32 /* note that this value may vary - between host implementations */ + between host implementations */ /* transfer direction */ #define PQIIOCTL_NONE 0x00 #define PQIIOCTL_WRITE 0x01 #define PQIIOCTL_READ 0x02 #define PQIIOCTL_BIDIRECTIONAL (PQIIOCTL_READ | PQIIOCTL_WRITE) /* Type defs used in the following structs */ #define BYTE uint8_t #define WORD uint16_t #define HWORD uint16_t #define DWORD uint32_t /* Command List Structure */ typedef union _SCSI3Addr_struct { struct { BYTE Dev; BYTE Bus:6; BYTE Mode:2; /* b00 */ } PeripDev; struct { BYTE DevLSB; BYTE DevMSB:6; BYTE Mode:2; /* b01 */ } LogDev; struct { BYTE Dev:5; BYTE Bus:3; BYTE Targ:6; BYTE Mode:2; /* b10 */ } LogUnit; }OS_ATTRIBUTE_PACKED SCSI3Addr_struct; typedef struct _PhysDevAddr_struct { DWORD TargetId:24; DWORD Bus:6; DWORD Mode:2; SCSI3Addr_struct Target[2]; /* 2 level target device addr */ }OS_ATTRIBUTE_PACKED PhysDevAddr_struct; typedef struct _LogDevAddr_struct { DWORD VolId:30; DWORD Mode:2; BYTE reserved[4]; }OS_ATTRIBUTE_PACKED LogDevAddr_struct; typedef union _LUNAddr_struct { BYTE LunAddrBytes[8]; SCSI3Addr_struct SCSI3Lun[4]; PhysDevAddr_struct PhysDev; LogDevAddr_struct LogDev; }OS_ATTRIBUTE_PACKED LUNAddr_struct; typedef struct _RequestBlock_struct { BYTE CDBLen; struct { BYTE Type:3; BYTE Attribute:3; BYTE Direction:2; } Type; HWORD Timeout; BYTE CDB[16]; }OS_ATTRIBUTE_PACKED RequestBlock_struct; typedef union _MoreErrInfo_struct{ struct { BYTE Reserved[3]; BYTE Type; DWORD ErrorInfo; } Common_Info; struct{ BYTE Reserved[2]; BYTE offense_size; /* size of offending entry */ BYTE offense_num; /* byte # of offense 0-base */ DWORD offense_value; } Invalid_Cmd; }OS_ATTRIBUTE_PACKED MoreErrInfo_struct; typedef struct _ErrorInfo_struct { BYTE ScsiStatus; BYTE SenseLen; HWORD CommandStatus; DWORD ResidualCnt; MoreErrInfo_struct MoreErrInfo; BYTE SenseInfo[SENSEINFOBYTES]; }OS_ATTRIBUTE_PACKED ErrorInfo_struct; typedef struct pqi_ioctl_passthruCmd_struct { LUNAddr_struct LUN_info; RequestBlock_struct Request; ErrorInfo_struct error_info; WORD buf_size; /* size in bytes of the buf */ passthru_buf_type_t buf; }OS_ATTRIBUTE_PACKED IOCTL_Command_struct; #endif /* _PQI_IOCTL_H_ */ diff --git a/sys/dev/smartpqi/smartpqi_main.c b/sys/dev/smartpqi/smartpqi_main.c index fe75b2ae14ce..e79a6f0a173f 100644 --- a/sys/dev/smartpqi/smartpqi_main.c +++ b/sys/dev/smartpqi/smartpqi_main.c @@ -1,586 +1,833 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. 
and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for the Microsemi Smart storage controllers */ #include "smartpqi_includes.h" -#include "smartpqi_prototypes.h" CTASSERT(BSD_SUCCESS == PQI_STATUS_SUCCESS); /* * Supported devices */ struct pqi_ident { u_int16_t vendor; u_int16_t device; u_int16_t subvendor; u_int16_t subdevice; int hwif; char *desc; } pqi_identifiers[] = { /* (MSCC PM8205 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x600, PQI_HWIF_SRCV, "P408i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x601, PQI_HWIF_SRCV, "P408e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x602, PQI_HWIF_SRCV, "P408i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x603, PQI_HWIF_SRCV, "P408i-c SR Gen10"}, {0x9005, 0x028f, 0x1028, 0x1FE0, PQI_HWIF_SRCV, "SmartRAID 3162-8i/eDell"}, {0x9005, 0x028f, 0x9005, 0x608, PQI_HWIF_SRCV, "SmartRAID 3162-8i/e"}, {0x9005, 0x028f, 0x103c, 0x609, PQI_HWIF_SRCV, "P408i-sb SR G10"}, /* (MSCC PM8225 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x650, PQI_HWIF_SRCV, "E208i-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x651, PQI_HWIF_SRCV, "E208e-p SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x652, PQI_HWIF_SRCV, "E208i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x654, PQI_HWIF_SRCV, "E208i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x655, PQI_HWIF_SRCV, "P408e-m SR Gen10"}, + {0x9005, 0x028f, 0x9005, 0x659, PQI_HWIF_SRCV, "2100C8iOXS"}, /* (MSCC PM8221 8x12G based) */ {0x9005, 0x028f, 0x103c, 0x700, PQI_HWIF_SRCV, "P204i-c SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x701, PQI_HWIF_SRCV, "P204i-b SR Gen10"}, {0x9005, 0x028f, 0x193d, 0x1104, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-2GB"}, {0x9005, 0x028f, 0x193d, 0x1106, PQI_HWIF_SRCV, "UN RAID P2404-Mf-4i-1GB"}, {0x9005, 0x028f, 0x193d, 0x1108, PQI_HWIF_SRCV, "UN RAID P4408-Ma-8i-2GB"}, - + {0x9005, 0x028f, 0x193d, 0x1109, PQI_HWIF_SRCV, "UN RAID P4408-Mr-8i-2GB"}, /* (MSCC PM8204 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x800, PQI_HWIF_SRCV, "SmartRAID 3154-8i"}, {0x9005, 0x028f, 0x9005, 0x801, PQI_HWIF_SRCV, "SmartRAID 3152-8i"}, {0x9005, 0x028f, 0x9005, 0x802, PQI_HWIF_SRCV, "SmartRAID 3151-4i"}, {0x9005, 0x028f, 0x9005, 0x803, PQI_HWIF_SRCV, "SmartRAID 3101-4i"}, {0x9005, 0x028f, 0x9005, 0x804, PQI_HWIF_SRCV, "SmartRAID 3154-8e"}, {0x9005, 0x028f, 0x9005, 0x805, PQI_HWIF_SRCV, 
"SmartRAID 3102-8i"}, {0x9005, 0x028f, 0x9005, 0x806, PQI_HWIF_SRCV, "SmartRAID 3100"}, {0x9005, 0x028f, 0x9005, 0x807, PQI_HWIF_SRCV, "SmartRAID 3162-8i"}, {0x9005, 0x028f, 0x152d, 0x8a22, PQI_HWIF_SRCV, "QS-8204-8i"}, {0x9005, 0x028f, 0x193d, 0xf460, PQI_HWIF_SRCV, "UN RAID P460-M4"}, {0x9005, 0x028f, 0x193d, 0xf461, PQI_HWIF_SRCV, "UN RAID P460-B4"}, - {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "INSPUR PM8204-2GB"}, - {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "INSPUR PM8204-4GB"}, + {0x9005, 0x028f, 0x1bd4, 0x004b, PQI_HWIF_SRCV, "PM8204-2GB"}, + {0x9005, 0x028f, 0x1bd4, 0x004c, PQI_HWIF_SRCV, "PM8204-4GB"}, {0x9005, 0x028f, 0x193d, 0x1105, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-2GB"}, {0x9005, 0x028f, 0x193d, 0x1107, PQI_HWIF_SRCV, "UN RAID P4408-Mf-8i-4GB"}, {0x9005, 0x028f, 0x1d8d, 0x800, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8204-8i"}, {0x9005, 0x028f, 0x9005, 0x0808, PQI_HWIF_SRCV, "SmartRAID 3101E-4i"}, {0x9005, 0x028f, 0x9005, 0x0809, PQI_HWIF_SRCV, "SmartRAID 3102E-8i"}, {0x9005, 0x028f, 0x9005, 0x080a, PQI_HWIF_SRCV, "SmartRAID 3152-8i/N"}, + {0x9005, 0x028f, 0x1cc4, 0x0101, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8204"}, /* (MSCC PM8222 8x12G based) */ {0x9005, 0x028f, 0x9005, 0x900, PQI_HWIF_SRCV, "SmartHBA 2100-8i"}, {0x9005, 0x028f, 0x9005, 0x901, PQI_HWIF_SRCV, "SmartHBA 2100-4i"}, {0x9005, 0x028f, 0x9005, 0x902, PQI_HWIF_SRCV, "HBA 1100-8i"}, {0x9005, 0x028f, 0x9005, 0x903, PQI_HWIF_SRCV, "HBA 1100-4i"}, {0x9005, 0x028f, 0x9005, 0x904, PQI_HWIF_SRCV, "SmartHBA 2100-8e"}, {0x9005, 0x028f, 0x9005, 0x905, PQI_HWIF_SRCV, "HBA 1100-8e"}, {0x9005, 0x028f, 0x9005, 0x906, PQI_HWIF_SRCV, "SmartHBA 2100-4i4e"}, {0x9005, 0x028f, 0x9005, 0x907, PQI_HWIF_SRCV, "HBA 1100"}, {0x9005, 0x028f, 0x9005, 0x908, PQI_HWIF_SRCV, "SmartHBA 2100"}, {0x9005, 0x028f, 0x9005, 0x90a, PQI_HWIF_SRCV, "SmartHBA 2100A-8i"}, {0x9005, 0x028f, 0x193d, 0x8460, PQI_HWIF_SRCV, "UN HBA H460-M1"}, {0x9005, 0x028f, 0x193d, 0x8461, PQI_HWIF_SRCV, "UN HBA H460-B1"}, {0x9005, 0x028f, 0x193d, 0xc460, PQI_HWIF_SRCV, "UN RAID P460-M2"}, {0x9005, 0x028f, 0x193d, 0xc461, PQI_HWIF_SRCV, "UN RAID P460-B2"}, - {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "INSPUR PM8222-SHBA"}, + {0x9005, 0x028f, 0x1bd4, 0x004a, PQI_HWIF_SRCV, "PM8222-SHBA"}, {0x9005, 0x028f, 0x13fe, 0x8312, PQI_HWIF_SRCV, "MIC-8312BridgeB"}, - {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "INSPUR PM8222-HBA"}, + {0x9005, 0x028f, 0x1bd4, 0x004f, PQI_HWIF_SRCV, "PM8222-HBA"}, {0x9005, 0x028f, 0x1d8d, 0x908, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8222-8i"}, - {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "INSPUR RS0800M5E8i"}, - {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "INSPUR RS0800M5H8i"}, + {0x9005, 0x028f, 0x1bd4, 0x006C, PQI_HWIF_SRCV, "RS0800M5E8i"}, + {0x9005, 0x028f, 0x1bd4, 0x006D, PQI_HWIF_SRCV, "RS0800M5H8i"}, + {0x9005, 0x028f, 0x1cc4, 0x0201, PQI_HWIF_SRCV, "Ramaxel FBGF-RAD PM8222"}, /* (SRCx MSCC FVB 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1001, PQI_HWIF_SRCV, "MSCC FVB"}, /* (MSCC PM8241 24x12G based) */ /* (MSCC PM8242 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a37, PQI_HWIF_SRCV, "QS-8242-24i"}, {0x9005, 0x028f, 0x9005, 0x1300, PQI_HWIF_SRCV, "HBA 1100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1301, PQI_HWIF_SRCV, "HBA 1100-24i"}, {0x9005, 0x028f, 0x9005, 0x1302, PQI_HWIF_SRCV, "SmartHBA 2100-8i8e"}, {0x9005, 0x028f, 0x9005, 0x1303, PQI_HWIF_SRCV, "SmartHBA 2100-24i"}, {0x9005, 0x028f, 0x105b, 0x1321, PQI_HWIF_SRCV, "8242-24i"}, - {0x9005, 0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8242-24i"}, + {0x9005, 
0x028f, 0x1bd4, 0x0045, PQI_HWIF_SRCV, "SMART-HBA 8242-24i"}, + {0x9005, 0x028f, 0x1bd4, 0x006B, PQI_HWIF_SRCV, "RS0800M5H24i"}, + {0x9005, 0x028f, 0x1bd4, 0x0070, PQI_HWIF_SRCV, "RS0800M5E24i"}, /* (MSCC PM8236 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a24, PQI_HWIF_SRCV, "QS-8236-16i"}, {0x9005, 0x028f, 0x9005, 0x1380, PQI_HWIF_SRCV, "SmartRAID 3154-16i"}, - {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "INSPUR RAID 8236-16i"}, + {0x9005, 0x028f, 0x1bd4, 0x0046, PQI_HWIF_SRCV, "RAID 8236-16i"}, {0x9005, 0x028f, 0x1d8d, 0x806, PQI_HWIF_SRCV, "Fiberhome SmartRAID AIS-8236-16i"}, + {0x9005, 0x028f, 0x1cf2, 0x0B27, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B-18i 4G"}, + {0x9005, 0x028f, 0x1cf2, 0x0B45, PQI_HWIF_SRCV, "ZTE SmartROC3100 SDPSA/B_L-18i 2G"}, + {0x9005, 0x028f, 0x1cf2, 0x5445, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241-18i 2G"}, + {0x9005, 0x028f, 0x1cf2, 0x5446, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242-18i 4G"}, {0x9005, 0x028f, 0x1cf2, 0x5449, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS241-18i 2G"}, {0x9005, 0x028f, 0x1cf2, 0x544A, PQI_HWIF_SRCV, "ZTE SmartROC3100 RS242-18i 4G"}, {0x9005, 0x028f, 0x1cf2, 0x544D, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM241B-18i 2G"}, {0x9005, 0x028f, 0x1cf2, 0x544E, PQI_HWIF_SRCV, "ZTE SmartROC3100 RM242B-18i 4G"}, + {0x9005, 0x028f, 0x1bd4, 0x006F, PQI_HWIF_SRCV, "RS0804M5R16i"}, + + /* (MSCC PM8237 24x12G based) */ {0x9005, 0x028f, 0x103c, 0x1100, PQI_HWIF_SRCV, "P816i-a SR Gen10"}, {0x9005, 0x028f, 0x103c, 0x1101, PQI_HWIF_SRCV, "P416ie-m SR G10"}, /* (MSCC PM8238 16x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a23, PQI_HWIF_SRCV, "QS-8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1280, PQI_HWIF_SRCV, "HBA 1100-16i"}, {0x9005, 0x028f, 0x9005, 0x1281, PQI_HWIF_SRCV, "HBA 1100-16e"}, {0x9005, 0x028f, 0x105b, 0x1211, PQI_HWIF_SRCV, "8238-16i"}, - {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "INSPUR SMART-HBA 8238-16i"}, + {0x9005, 0x028f, 0x1bd4, 0x0048, PQI_HWIF_SRCV, "SMART-HBA 8238-16i"}, {0x9005, 0x028f, 0x9005, 0x1282, PQI_HWIF_SRCV, "SmartHBA 2100-16i"}, {0x9005, 0x028f, 0x1d8d, 0x916, PQI_HWIF_SRCV, "Fiberhome SmartHBA AIS-8238-16i"}, {0x9005, 0x028f, 0x1458, 0x1000, PQI_HWIF_SRCV, "GIGABYTE SmartHBA CLN1832"}, + {0x9005, 0x028f, 0x1cf2, 0x0B29, PQI_HWIF_SRCV, "ZTE SmartIOC2100 SDPSA/B_I-18i"}, + {0x9005, 0x028f, 0x1cf2, 0x5447, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243-18i"}, + {0x9005, 0x028f, 0x1cf2, 0x544B, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RS243-18i"}, {0x9005, 0x028f, 0x1cf2, 0x544F, PQI_HWIF_SRCV, "ZTE SmartIOC2100 RM243B-18i"}, + {0x9005, 0x028f, 0x1bd4, 0x0071, PQI_HWIF_SRCV, "RS0800M5H16i"}, + {0x9005, 0x028f, 0x1bd4, 0x0072, PQI_HWIF_SRCV, "RS0800M5E16i"}, /* (MSCC PM8240 24x12G based) */ {0x9005, 0x028f, 0x152d, 0x8a36, PQI_HWIF_SRCV, "QS-8240-24i"}, {0x9005, 0x028f, 0x9005, 0x1200, PQI_HWIF_SRCV, "SmartRAID 3154-24i"}, {0x9005, 0x028f, 0x9005, 0x1201, PQI_HWIF_SRCV, "SmartRAID 3154-8i16e"}, {0x9005, 0x028f, 0x9005, 0x1202, PQI_HWIF_SRCV, "SmartRAID 3154-8i8e"}, - {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "INSPUR RAID 8240-24i"}, + {0x9005, 0x028f, 0x1bd4, 0x0047, PQI_HWIF_SRCV, "RAID 8240-24i"}, + {0x9005, 0x028f, 0x1dfc, 0x3161, PQI_HWIF_SRCV, "NTCOM SAS3 RAID-24i"}, {0x9005, 0x028f, 0x1F0C, 0x3161, PQI_HWIF_SRCV, "NT RAID 3100-24i"}, /* Huawei ID's */ {0x9005, 0x028f, 0x19e5, 0xd227, PQI_HWIF_SRCV, "SR465C-M 4G"}, {0x9005, 0x028f, 0x19e5, 0xd22a, PQI_HWIF_SRCV, "SR765-M"}, {0x9005, 0x028f, 0x19e5, 0xd228, PQI_HWIF_SRCV, "SR455C-M 2G"}, {0x9005, 0x028f, 0x19e5, 0xd22c, PQI_HWIF_SRCV, "SR455C-M 4G"}, {0x9005, 0x028f, 0x19e5, 0xd229, PQI_HWIF_SRCV, 
"SR155-M"}, {0x9005, 0x028f, 0x19e5, 0xd22b, PQI_HWIF_SRCV, "SR455C-ME 4G"}, + + /* (MSCC PM8252 8x12G based) */ + {0x9005, 0x028f, 0x193d, 0x110b, PQI_HWIF_SRCV, "UN HBA H4508-Mf-8i"}, + {0x9005, 0x028f, 0x1bd4, 0x0052, PQI_HWIF_SRCV, "MT0801M6E"}, + {0x9005, 0x028f, 0x1bd4, 0x0054, PQI_HWIF_SRCV, "MT0800M6H"}, + {0x9005, 0x028f, 0x1bd4, 0x0086, PQI_HWIF_SRCV, "RT0800M7E"}, + {0x9005, 0x028f, 0x1bd4, 0x0087, PQI_HWIF_SRCV, "RT0800M7H"}, + {0x9005, 0x028f, 0x1f51, 0x1001, PQI_HWIF_SRCV, "SmartHBA P6600-8i"}, + {0x9005, 0x028f, 0x1f51, 0x1003, PQI_HWIF_SRCV, "SmartHBA P6600-8e"}, + {0x9005, 0x028f, 0x9005, 0x1460, PQI_HWIF_SRCV, "HBA 1200"}, + {0x9005, 0x028f, 0x9005, 0x1461, PQI_HWIF_SRCV, "SmartHBA 2200"}, + {0x9005, 0x028f, 0x9005, 0x1462, PQI_HWIF_SRCV, "HBA 1200-8i"}, + /* (MSCC PM8254 32x12G based) */ + {0x9005, 0x028f, 0x1bd4, 0x0051, PQI_HWIF_SRCV, "MT0804M6R"}, + {0x9005, 0x028f, 0x1bd4, 0x0053, PQI_HWIF_SRCV, "MT0808M6R"}, + {0x9005, 0x028f, 0x1bd4, 0x0088, PQI_HWIF_SRCV, "RT0804M7R"}, + {0x9005, 0x028f, 0x1bd4, 0x0089, PQI_HWIF_SRCV, "RT0808M7R"}, + {0x9005, 0x028f, 0x1f51, 0x1002, PQI_HWIF_SRCV, "SmartRAID P7604-8i"}, + {0x9005, 0x028f, 0x1f51, 0x1004, PQI_HWIF_SRCV, "SmartRAID P7604-8e"}, + {0x9005, 0x028f, 0x9005, 0x14a0, PQI_HWIF_SRCV, "SmartRAID 3254-8i"}, + {0x9005, 0x028f, 0x9005, 0x14a1, PQI_HWIF_SRCV, "SmartRAID 3204-8i"}, {0x9005, 0x028f, 0x9005, 0x14a2, PQI_HWIF_SRCV, "SmartRAID 3252-8i"}, {0x9005, 0x028f, 0x9005, 0x14a4, PQI_HWIF_SRCV, "SmartRAID 3254-8i /e"}, {0x9005, 0x028f, 0x9005, 0x14a5, PQI_HWIF_SRCV, "SmartRAID 3252-8i /e"}, {0x9005, 0x028f, 0x9005, 0x14a6, PQI_HWIF_SRCV, "SmartRAID 3204-8i /e"}, -/* (MSCC PM8265 16x12G based) */ + + /* (MSCC PM8262 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14c0, PQI_HWIF_SRCV, "SmartHBA 2200-16i"}, + {0x9005, 0x028f, 0x9005, 0x14c1, PQI_HWIF_SRCV, "HBA 1200-16i"}, + {0x9005, 0x028f, 0x9005, 0x14c3, PQI_HWIF_SRCV, "HBA 1200-16e"}, + {0x9005, 0x028f, 0x9005, 0x14c4, PQI_HWIF_SRCV, "HBA 1200-8e"}, + {0x9005, 0x028f, 0x1f51, 0x1005, PQI_HWIF_SRCV, "SmartHBA P6600-16i"}, + {0x9005, 0x028f, 0x1f51, 0x1007, PQI_HWIF_SRCV, "SmartHBA P6600-8i8e"}, + {0x9005, 0x028f, 0x1f51, 0x1009, PQI_HWIF_SRCV, "SmartHBA P6600-16e"}, + {0x9005, 0x028f, 0x1cf2, 0x54dc, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RM346-16i"}, + {0x9005, 0x028f, 0x1cf2, 0x0806, PQI_HWIF_SRCV, "ZTE SmartIOC2200 RS346-16i"}, + + /* (MSCC PM8264 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14b0, PQI_HWIF_SRCV, "SmartRAID 3254-16i"}, + {0x9005, 0x028f, 0x9005, 0x14b1, PQI_HWIF_SRCV, "SmartRAID 3258-16i"}, + {0x9005, 0x028f, 0x1f51, 0x1006, PQI_HWIF_SRCV, "SmartRAID P7608-16i"}, + {0x9005, 0x028f, 0x1f51, 0x1008, PQI_HWIF_SRCV, "SmartRAID P7608-8i8e"}, + {0x9005, 0x028f, 0x1f51, 0x100a, PQI_HWIF_SRCV, "SmartRAID P7608-16e"}, + {0x9005, 0x028f, 0x1cf2, 0x54da, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM344-16i 4G"}, + {0x9005, 0x028f, 0x1cf2, 0x54db, PQI_HWIF_SRCV, "ZTE SmartROC3200 RM345-16i 8G"}, + {0x9005, 0x028f, 0x1cf2, 0x0804, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS344-16i 4G"}, + {0x9005, 0x028f, 0x1cf2, 0x0805, PQI_HWIF_SRCV, "ZTE SmartROC3200 RS345-16i 8G"}, + + /* (MSCC PM8265 16x12G based) */ + {0x9005, 0x028f, 0x1590, 0x02dc, PQI_HWIF_SRCV, "SR416i-a Gen10+"}, + {0x9005, 0x028f, 0x9005, 0x1470, PQI_HWIF_SRCV, "SmartRAID 3200"}, + {0x9005, 0x028f, 0x9005, 0x1471, PQI_HWIF_SRCV, "SmartRAID 3254-16i /e"}, + {0x9005, 0x028f, 0x9005, 0x1472, PQI_HWIF_SRCV, "SmartRAID 3258-16i /e"}, + {0x9005, 0x028f, 0x9005, 0x1473, PQI_HWIF_SRCV, "SmartRAID 3284-16io /e/uC"}, {0x9005, 0x028f, 0x9005, 
0x1474, PQI_HWIF_SRCV, "SmartRAID 3254-16io /e"}, -/* (MSCC PM8270 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1475, PQI_HWIF_SRCV, "SmartRAID 3254-16e /e"}, + + /* (MSCC PM8266 16x12G based) */ + {0x9005, 0x028f, 0x1014, 0x0718, PQI_HWIF_SRCV, "IBM 4-Port 24G SAS"}, + {0x9005, 0x028f, 0x9005, 0x1490, PQI_HWIF_SRCV, "HBA 1200p Ultra"}, + {0x9005, 0x028f, 0x9005, 0x1491, PQI_HWIF_SRCV, "SmartHBA 2200p Ultra"}, + {0x9005, 0x028f, 0x9005, 0x1402, PQI_HWIF_SRCV, "HBA Ultra 1200P-16i"}, + {0x9005, 0x028f, 0x9005, 0x1441, PQI_HWIF_SRCV, "HBA Ultra 1200P-32i"}, + + /* (MSCC PM8268 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14d0, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i"}, + + /* (MSCC PM8269 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1400, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-16i /e"}, + + /* (MSCC PM8270 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1410, PQI_HWIF_SRCV, "HBA Ultra 1200P-16e"}, + {0x9005, 0x028f, 0x9005, 0x1411, PQI_HWIF_SRCV, "HBA 1200 Ultra"}, + {0x9005, 0x028f, 0x9005, 0x1412, PQI_HWIF_SRCV, "SmartHBA 2200 Ultra"}, {0x9005, 0x028f, 0x9005, 0x1463, PQI_HWIF_SRCV, "SmartHBA 2200-8io /e"}, {0x9005, 0x028f, 0x9005, 0x14c2, PQI_HWIF_SRCV, "SmartHBA 2200-16io /e"}, + + /* (MSCC PM8271 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14e0, PQI_HWIF_SRCV, "SmartIOC PM8271"}, + + /* (MSCC PM8272 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1420, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e"}, + + /* (MSCC PM8273 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1430, PQI_HWIF_SRCV, "SmartRAID Ultra 3254-16e /e"}, + + /* (MSCC PM8274 16x12G based) */ + {0x9005, 0x028f, 0x1e93, 0x1000, PQI_HWIF_SRCV, "ByteHBA JGH43024-8"}, + {0x9005, 0x028f, 0x1e93, 0x1001, PQI_HWIF_SRCV, "ByteHBA JGH43034-8"}, + {0x9005, 0x028f, 0x1e93, 0x1005, PQI_HWIF_SRCV, "ByteHBA JGH43014-8"}, + + /* (MSCC PM8275 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x14f0, PQI_HWIF_SRCV, "SmartIOC PM8275"}, + + /* (MSCC PM8276 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1480, PQI_HWIF_SRCV, "SmartRAID 3200 Ultra"}, + {0x9005, 0x028f, 0x1e93, 0x1002, PQI_HWIF_SRCV, "ByteHBA JGH44014-8"}, + + /* (MSCC PM8278 16x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1440, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i"}, + /* (MSCC PM8279 32x12G based) */ + {0x9005, 0x028f, 0x9005, 0x1450, PQI_HWIF_SRCV, "SmartRAID Ultra 3258P-32i /e"}, + {0x9005, 0x028f, 0x1590, 0x0294, PQI_HWIF_SRCV, "SR932i-p Gen10+"}, {0x9005, 0x028f, 0x1590, 0x0381, PQI_HWIF_SRCV, "SR932i-p Gen11"}, {0x9005, 0x028f, 0x1590, 0x0382, PQI_HWIF_SRCV, "SR308i-p Gen11"}, {0x9005, 0x028f, 0x1590, 0x0383, PQI_HWIF_SRCV, "SR308i-o Gen11"}, {0x9005, 0x028f, 0x1590, 0x02db, PQI_HWIF_SRCV, "SR416ie-m Gen11"}, {0x9005, 0x028f, 0x1590, 0x032e, PQI_HWIF_SRCV, "SR416i-o Gen11"}, + {0x9005, 0x028f, 0x9005, 0x1452, PQI_HWIF_SRCV, "SmartRAID 3200p Ultra"}, + + /* (MSCC HBA/SMARTHBA/CFF SmartRAID - Lenovo 8X12G 16X12G based) */ + {0x9005, 0x028f, 0x1d49, 0x0220, PQI_HWIF_SRCV, "4350-8i SAS/SATA HBA"}, + {0x9005, 0x028f, 0x1d49, 0x0221, PQI_HWIF_SRCV, "4350-16i SAS/SATA HBA"}, + {0x9005, 0x028f, 0x1d49, 0x0520, PQI_HWIF_SRCV, "5350-8i"}, + {0x9005, 0x028f, 0x1d49, 0x0522, PQI_HWIF_SRCV, "5350-8i INTR"}, + {0x9005, 0x028f, 0x1d49, 0x0620, PQI_HWIF_SRCV, "9350-8i 2GB Flash"}, + {0x9005, 0x028f, 0x1d49, 0x0621, PQI_HWIF_SRCV, "9350-8i 2GB Flash INTR"}, + {0x9005, 0x028f, 0x1d49, 0x0622, PQI_HWIF_SRCV, "9350-16i 4GB Flash"}, + {0x9005, 0x028f, 0x1d49, 0x0623, PQI_HWIF_SRCV, "9350-16i 4GB Flash INTR"}, {0, 0, 0, 0, 0, 0} }; struct pqi_ident pqi_family_identifiers[] = { {0x9005, 0x028f, 0, 0, 
PQI_HWIF_SRCV, "Smart Array Storage Controller"}, {0, 0, 0, 0, 0, 0} }; /* * Function to identify the installed adapter. */ -static struct -pqi_ident *pqi_find_ident(device_t dev) +static struct pqi_ident * +pqi_find_ident(device_t dev) { struct pqi_ident *m; u_int16_t vendid, devid, sub_vendid, sub_devid; + static long AllowWildcards = 0xffffffff; + int result; + +#ifdef DEVICE_HINT + if (AllowWildcards == 0xffffffff) + { + result = resource_long_value("smartpqi", 0, "allow_wildcards", &AllowWildcards); + + /* the default case if the hint is not found is to allow wildcards */ + if (result != DEVICE_HINT_SUCCESS) { + AllowWildcards = 1; + } + } + +#endif vendid = pci_get_vendor(dev); devid = pci_get_device(dev); sub_vendid = pci_get_subvendor(dev); sub_devid = pci_get_subdevice(dev); for (m = pqi_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid) && (m->subvendor == sub_vendid) && (m->subdevice == sub_devid)) { return (m); } } for (m = pqi_family_identifiers; m->vendor != 0; m++) { if ((m->vendor == vendid) && (m->device == devid)) { - return (m); + if (AllowWildcards != 0) + { + DBG_NOTE("Controller device ID matched using wildcards\n"); + return (m); + } + else + { + DBG_NOTE("Controller not probed because device ID wildcards are disabled\n") + return (NULL); + } } } return (NULL); } /* * Determine whether this is one of our supported adapters. */ static int smartpqi_probe(device_t dev) { struct pqi_ident *id; if ((id = pqi_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); return(BUS_PROBE_VENDOR); } return(ENXIO); } /* * Store Bus/Device/Function in softs */ void pqisrc_save_controller_info(struct pqisrc_softstate *softs) { device_t dev = softs->os_specific.pqi_dev; softs->bus_id = (uint32_t)pci_get_bus(dev); softs->device_id = (uint32_t)pci_get_device(dev); softs->func_id = (uint32_t)pci_get_function(dev); } +static void read_device_hint_resource(struct pqisrc_softstate *softs, + char *keyword, uint32_t *value) +{ + DBG_FUNC("IN\n"); + + device_t dev = softs->os_specific.pqi_dev; + + if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) { + if (*value) { + /* set resource to 1 for disabling the + * firmware feature in device hint file. */ + *value = 0; + + } + else { + /* set resource to 0 for enabling the + * firmware feature in device hint file. */ + *value = 1; + } + } + else { + /* Enabled by default */ + *value = 1; + } + + DBG_NOTE("SmartPQI Device Hint: %s, Is it enabled = %u\n", keyword, *value); + + DBG_FUNC("OUT\n"); +} + +static void read_device_hint_decimal_value(struct pqisrc_softstate *softs, + char *keyword, uint32_t *value) +{ + DBG_FUNC("IN\n"); + + device_t dev = softs->os_specific.pqi_dev; + + if (resource_long_value("smartpqi", device_get_unit(dev), keyword, (long *)value) == DEVICE_HINT_SUCCESS) { + /* Nothing to do here. 
 Value reads + * directly from Device.Hint file */ + } + else { + /* Set to max to determine the value */ + *value = 0XFFFF; + } + + DBG_FUNC("OUT\n"); +} + +static void smartpqi_read_all_device_hint_file_entries(struct pqisrc_softstate *softs) +{ + uint32_t value = 0; + + DBG_FUNC("IN\n"); + + /* hint.smartpqi.0.stream_disable = "0" */ + read_device_hint_resource(softs, STREAM_DETECTION, &value); + softs->hint.stream_status = value; + + /* hint.smartpqi.0.sata_unique_wwn_disable = "0" */ + read_device_hint_resource(softs, SATA_UNIQUE_WWN, &value); + softs->hint.sata_unique_wwn_status = value; + + /* hint.smartpqi.0.aio_raid1_write_disable = "0" */ + read_device_hint_resource(softs, AIO_RAID1_WRITE_BYPASS, &value); + softs->hint.aio_raid1_write_status = value; + + /* hint.smartpqi.0.aio_raid5_write_disable = "0" */ + read_device_hint_resource(softs, AIO_RAID5_WRITE_BYPASS, &value); + softs->hint.aio_raid5_write_status = value; + + /* hint.smartpqi.0.aio_raid6_write_disable = "0" */ + read_device_hint_resource(softs, AIO_RAID6_WRITE_BYPASS, &value); + softs->hint.aio_raid6_write_status = value; + + /* hint.smartpqi.0.queue_depth = "0" */ + read_device_hint_decimal_value(softs, ADAPTER_QUEUE_DEPTH, &value); + softs->hint.queue_depth = value; + + /* hint.smartpqi.0.sg_count = "0" */ + read_device_hint_decimal_value(softs, SCATTER_GATHER_COUNT, &value); + softs->hint.sg_segments = value; + + /* hint.smartpqi.0.queue_count = "0" */ + read_device_hint_decimal_value(softs, QUEUE_COUNT, &value); + softs->hint.cpu_count = value; + + DBG_FUNC("OUT\n"); +} + + /* * Allocate resources for our device, set up the bus interface. * Initialize the PQI related functionality, scan devices, register sim to * upper layer, create management interface device node etc. */ static int smartpqi_attach(device_t dev) { - struct pqisrc_softstate *softs = NULL; + struct pqisrc_softstate *softs; struct pqi_ident *id = NULL; int error = BSD_SUCCESS; u_int32_t command = 0, i = 0; int card_index = device_get_unit(dev); rcb_t *rcbp = NULL; /* * Initialise softc. */ softs = device_get_softc(dev); if (!softs) { printf("Could not get softc\n"); error = EINVAL; goto out; } memset(softs, 0, sizeof(*softs)); softs->os_specific.pqi_dev = dev; DBG_FUNC("IN\n"); /* assume failure is 'not configured' */ error = ENXIO; /* * Verify that the adapter is correctly set up in PCI space. */ pci_enable_busmaster(softs->os_specific.pqi_dev); command = pci_read_config(softs->os_specific.pqi_dev, PCIR_COMMAND, 2); if ((command & PCIM_CMD_MEMEN) == 0) { DBG_ERR("memory window not available command = %d\n", command); error = ENXIO; goto out; } /* * Detect the hardware interface version, set up the bus interface * indirection. */ id = pqi_find_ident(dev); if (!id) { DBG_ERR("NULL return value from pqi_find_ident\n"); goto out; } softs->os_specific.pqi_hwif = id->hwif; switch(softs->os_specific.pqi_hwif) { case PQI_HWIF_SRCV: DBG_INFO("set hardware up for PMC SRCv for %p\n", softs); break; default: softs->os_specific.pqi_hwif = PQI_HWIF_UNKNOWN; DBG_ERR("unknown hardware type\n"); error = ENXIO; goto out; } pqisrc_save_controller_info(softs); /* * Allocate the PCI register window. 
*/ softs->os_specific.pqi_regs_rid0 = PCIR_BAR(0); if ((softs->os_specific.pqi_regs_res0 = bus_alloc_resource_any(softs->os_specific.pqi_dev, SYS_RES_MEMORY, &softs->os_specific.pqi_regs_rid0, RF_ACTIVE)) == NULL) { DBG_ERR("couldn't allocate register window 0\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto out; } bus_get_resource_start(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0); softs->pci_mem_handle.pqi_btag = rman_get_bustag(softs->os_specific.pqi_regs_res0); softs->pci_mem_handle.pqi_bhandle = rman_get_bushandle(softs->os_specific.pqi_regs_res0); /* softs->pci_mem_base_vaddr = (uintptr_t)rman_get_virtual(softs->os_specific.pqi_regs_res0); */ softs->pci_mem_base_vaddr = (char *)rman_get_virtual(softs->os_specific.pqi_regs_res0); /* * Allocate the parent bus DMA tag appropriate for our PCI interface. * * Note that some of these controllers are 64-bit capable. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &softs->os_specific.pqi_parent_dmat)) { DBG_ERR("can't allocate parent DMA tag\n"); /* assume failure is 'out of memory' */ error = ENOMEM; goto dma_out; } softs->os_specific.sim_registered = FALSE; softs->os_name = "FreeBSD "; + smartpqi_read_all_device_hint_file_entries(softs); + /* Initialize the PQI library */ error = pqisrc_init(softs); if (error != PQI_STATUS_SUCCESS) { DBG_ERR("Failed to initialize pqi lib error = %d\n", error); error = ENXIO; goto out; } else { error = BSD_SUCCESS; } - mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF); - softs->os_specific.mtx_init = TRUE; - mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF); + mtx_init(&softs->os_specific.cam_lock, "cam_lock", NULL, MTX_DEF); + softs->os_specific.mtx_init = TRUE; + mtx_init(&softs->os_specific.map_lock, "map_lock", NULL, MTX_DEF); - callout_init(&softs->os_specific.wellness_periodic, 1); - callout_init(&softs->os_specific.heartbeat_timeout_id, 1); + callout_init(&softs->os_specific.wellness_periodic, 1); + callout_init(&softs->os_specific.heartbeat_timeout_id, 1); - /* - * Create DMA tag for mapping buffers into controller-addressable space. - */ - if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */ + /* + * Create DMA tag for mapping buffers into controller-addressable space. 
+ */ + if (bus_dma_tag_create(softs->os_specific.pqi_parent_dmat,/* parent */ PAGE_SIZE, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (bus_size_t)softs->pqi_cap.max_sg_elem*PAGE_SIZE,/* maxsize */ softs->pqi_cap.max_sg_elem, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &softs->os_specific.map_lock, /* lockfuncarg*/ &softs->os_specific.pqi_buffer_dmat)) { DBG_ERR("can't allocate buffer DMA tag for pqi_buffer_dmat\n"); return (ENOMEM); } rcbp = &softs->rcb[1]; for( i = 1; i <= softs->pqi_cap.max_outstanding_io; i++, rcbp++ ) { if ((error = bus_dmamap_create(softs->os_specific.pqi_buffer_dmat, 0, &rcbp->cm_datamap)) != 0) { DBG_ERR("Cant create datamap for buf @" - "rcbp = %p maxio = %d error = %d\n", + "rcbp = %p maxio = %u error = %d\n", rcbp, softs->pqi_cap.max_outstanding_io, error); goto dma_out; } } os_start_heartbeat_timer((void *)softs); /* Start the heart-beat timer */ callout_reset(&softs->os_specific.wellness_periodic, 120 * hz, os_wellness_periodic, softs); error = pqisrc_scan_devices(softs); if (error != PQI_STATUS_SUCCESS) { DBG_ERR("Failed to scan lib error = %d\n", error); error = ENXIO; goto out; } + else { + error = BSD_SUCCESS; + } error = register_sim(softs, card_index); if (error) { DBG_ERR("Failed to register sim index = %d error = %d\n", card_index, error); goto out; } smartpqi_target_rescan(softs); TASK_INIT(&softs->os_specific.event_task, 0, pqisrc_event_worker,softs); error = create_char_dev(softs, card_index); if (error) { DBG_ERR("Failed to register character device index=%d r=%d\n", card_index, error); goto out; } + goto out; dma_out: if (softs->os_specific.pqi_regs_res0 != NULL) bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY, softs->os_specific.pqi_regs_rid0, softs->os_specific.pqi_regs_res0); out: DBG_FUNC("OUT error = %d\n", error); + return(error); } /* * Deallocate resources for our device. */ static int smartpqi_detach(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); int rval = BSD_SUCCESS; DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; /* kill the periodic event */ callout_drain(&softs->os_specific.wellness_periodic); /* Kill the heart beat event */ callout_drain(&softs->os_specific.heartbeat_timeout_id); if (!pqisrc_ctrl_offline(softs)) { rval = pqisrc_flush_cache(softs, PQISRC_NONE_CACHE_FLUSH_ONLY); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to flush adapter cache! rval = %d\n", rval); rval = EIO; + } else { + rval = BSD_SUCCESS; } } destroy_char_dev(softs); pqisrc_uninit(softs); deregister_sim(softs); pci_release_msi(dev); DBG_FUNC("OUT\n"); return rval; } /* * Bring the controller to a quiescent state, ready for system suspend. */ static int smartpqi_suspend(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; DBG_INFO("Suspending the device %p\n", softs); softs->os_specific.pqi_state |= SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return BSD_SUCCESS; } /* * Bring the controller back to a state ready for operation. */ static int smartpqi_resume(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; softs->os_specific.pqi_state &= ~SMART_STATE_SUSPEND; DBG_FUNC("OUT\n"); return BSD_SUCCESS; } /* * Do whatever is needed during a system shutdown. 
*/ static int smartpqi_shutdown(device_t dev) { struct pqisrc_softstate *softs = device_get_softc(dev); int bsd_status = BSD_SUCCESS; int pqi_status; DBG_FUNC("IN\n"); if (softs == NULL) return ENXIO; if (pqisrc_ctrl_offline(softs)) return BSD_SUCCESS; pqi_status = pqisrc_flush_cache(softs, PQISRC_SHUTDOWN); if (pqi_status != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to flush adapter cache! rval = %d\n", pqi_status); bsd_status = EIO; } DBG_FUNC("OUT\n"); - + return bsd_status; } + /* * PCI bus interface. */ static device_method_t pqi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, smartpqi_probe), DEVMETHOD(device_attach, smartpqi_attach), DEVMETHOD(device_detach, smartpqi_detach), DEVMETHOD(device_suspend, smartpqi_suspend), DEVMETHOD(device_resume, smartpqi_resume), DEVMETHOD(device_shutdown, smartpqi_shutdown), { 0, 0 } }; static driver_t smartpqi_pci_driver = { "smartpqi", pqi_methods, sizeof(struct pqisrc_softstate) }; DRIVER_MODULE(smartpqi, pci, smartpqi_pci_driver, 0, 0); MODULE_DEPEND(smartpqi, pci, 1, 1, 1); diff --git a/sys/dev/smartpqi/smartpqi_mem.c b/sys/dev/smartpqi/smartpqi_mem.c index f3fc8dc01d9d..239d619968d2 100644 --- a/sys/dev/smartpqi/smartpqi_mem.c +++ b/sys/dev/smartpqi/smartpqi_mem.c @@ -1,210 +1,210 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "smartpqi_includes.h" MALLOC_DEFINE(M_SMARTPQI, "smartpqi", "Buffers for the smartpqi driver"); /* * DMA map load callback function */ static void os_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr = (bus_addr_t *)arg; *paddr = segs[0].ds_addr; } int os_dma_setup(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } int os_dma_destroy(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } void os_update_dma_attributes(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); DBG_FUNC("OUT\n"); } /* * DMA mem resource allocation wrapper function */ int os_dma_mem_alloc(pqisrc_softstate_t *softs, struct dma_mem *dma_mem) { int ret = BSD_SUCCESS; /* DBG_FUNC("IN\n"); */ /* Make sure the alignment is at least 4 bytes */ ASSERT(dma_mem->align >= 4); /* DMA memory needed - allocate it */ if ((ret = bus_dma_tag_create( softs->os_specific.pqi_parent_dmat, /* parent */ dma_mem->align, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem->size, /* maxsize */ 1, /* nsegments */ dma_mem->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &dma_mem->dma_tag)) != 0 ) { DBG_ERR("can't allocate DMA tag with error = 0x%x\n", ret); goto err_out; } if (!dma_mem->dma_tag) { DBG_ERR("dma tag is NULL\n"); ret = ENOMEM; goto err_out; } if ((ret = bus_dmamem_alloc(dma_mem->dma_tag, (void **)&dma_mem->virt_addr, BUS_DMA_NOWAIT, &dma_mem->dma_map)) != 0) { DBG_ERR("can't allocate DMA memory for required object \ with error = 0x%x\n", ret); goto err_mem; } if((ret = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map, dma_mem->virt_addr, dma_mem->size, os_dma_map, &dma_mem->dma_addr, 0)) != 0) { DBG_ERR("can't load DMA memory for required \ object with error = 0x%x\n", ret); goto err_load; } memset(dma_mem->virt_addr, 0, dma_mem->size); ret = bsd_status_to_pqi_status(ret); /* DBG_FUNC("OUT\n"); */ return ret; err_load: if(dma_mem->virt_addr) bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr, dma_mem->dma_map); err_mem: if(dma_mem->dma_tag) bus_dma_tag_destroy(dma_mem->dma_tag); err_out: DBG_FUNC("failed OUT\n"); ret = bsd_status_to_pqi_status(ret); return ret; } /* * DMA mem resource deallocation wrapper function */ void os_dma_mem_free(pqisrc_softstate_t *softs, struct dma_mem *dma_mem) { /* DBG_FUNC("IN\n"); */ if(dma_mem->dma_addr) { bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); dma_mem->dma_addr = 0; } if(dma_mem->virt_addr) { bus_dmamem_free(dma_mem->dma_tag, dma_mem->virt_addr, dma_mem->dma_map); dma_mem->virt_addr = NULL; } if(dma_mem->dma_tag) { bus_dma_tag_destroy(dma_mem->dma_tag); dma_mem->dma_tag = NULL; } /* DBG_FUNC("OUT\n"); */ } /* * Mem resource allocation wrapper function */ void *os_mem_alloc(pqisrc_softstate_t *softs, size_t size) { - void *addr = NULL; + void *addr; - /* DBG_FUNC("IN\n"); */ + /* DBG_FUNC("IN\n"); */ addr = malloc((unsigned long)size, M_SMARTPQI, M_NOWAIT | M_ZERO); /* DBG_FUNC("OUT\n"); */ return addr; } /* * Mem resource deallocation wrapper function */ void -os_mem_free(pqisrc_softstate_t *softs, char *addr, size_t size) +os_mem_free(pqisrc_softstate_t *softs, void *addr, size_t size) { /* DBG_FUNC("IN\n"); */ free((void*)addr, M_SMARTPQI); /* DBG_FUNC("OUT\n"); */ } /* * dma/bus resource deallocation wrapper function */ void os_resource_free(pqisrc_softstate_t *softs) { if(softs->os_specific.pqi_parent_dmat) 
bus_dma_tag_destroy(softs->os_specific.pqi_parent_dmat); if (softs->os_specific.pqi_regs_res0 != NULL) bus_release_resource(softs->os_specific.pqi_dev, SYS_RES_MEMORY, - softs->os_specific.pqi_regs_rid0, + softs->os_specific.pqi_regs_rid0, softs->os_specific.pqi_regs_res0); } diff --git a/sys/dev/smartpqi/smartpqi_misc.c b/sys/dev/smartpqi/smartpqi_misc.c index 5072982363ae..20ba4fc11eb7 100644 --- a/sys/dev/smartpqi/smartpqi_misc.c +++ b/sys/dev/smartpqi/smartpqi_misc.c @@ -1,182 +1,312 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" /* - * Populate hostwellness time variables in bcd format from FreeBSD format + * Populate hostwellness time variables in bcd format from FreeBSD format. */ void os_get_time(struct bmic_host_wellness_time *host_wellness_time) { struct timespec ts; - struct clocktime ct; + struct clocktime ct = {0}; getnanotime(&ts); clock_ts_to_ct(&ts, &ct); /* Fill the time In BCD Format */ host_wellness_time->hour= (uint8_t)bin2bcd(ct.hour); host_wellness_time->min = (uint8_t)bin2bcd(ct.min); host_wellness_time->sec= (uint8_t)bin2bcd(ct.sec); host_wellness_time->reserved = 0; host_wellness_time->month = (uint8_t)bin2bcd(ct.mon); host_wellness_time->day = (uint8_t)bin2bcd(ct.day); host_wellness_time->century = (uint8_t)bin2bcd(ct.year / 100); host_wellness_time->year = (uint8_t)bin2bcd(ct.year % 100); } /* * Update host time to f/w every 24 hours in a periodic timer. 
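As a quick sanity check on the BCD conversion in os_get_time(): bin2bcd() re-encodes each pair of decimal digits as one byte whose hex digits read like the decimal value. A purely illustrative sketch (the timestamp 14:35:09 on 2023-06-07 is an example value, casts omitted for brevity; field names are those of struct bmic_host_wellness_time shown above):

    host_wellness_time->hour    = bin2bcd(14);         /* 0x14 */
    host_wellness_time->min     = bin2bcd(35);         /* 0x35 */
    host_wellness_time->sec     = bin2bcd(9);          /* 0x09 */
    host_wellness_time->month   = bin2bcd(6);          /* 0x06 */
    host_wellness_time->day     = bin2bcd(7);          /* 0x07 */
    host_wellness_time->century = bin2bcd(2023 / 100); /* 0x20 */
    host_wellness_time->year    = bin2bcd(2023 % 100); /* 0x23 */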
*/ void os_wellness_periodic(void *data) { struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data; int ret = 0; /* update time to FW */ if (!pqisrc_ctrl_offline(softs)){ if( (ret = pqisrc_write_current_time_to_host_wellness(softs)) != 0 ) DBG_ERR("Failed to update time to FW in periodic ret = %d\n", ret); } /* reschedule ourselves */ callout_reset(&softs->os_specific.wellness_periodic, PQI_HOST_WELLNESS_TIMEOUT_SEC * hz, os_wellness_periodic, softs); } /* * Routine used to stop the heart-beat timer */ void os_stop_heartbeat_timer(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); /* Kill the heart beat event */ callout_stop(&softs->os_specific.heartbeat_timeout_id); DBG_FUNC("OUT\n"); } /* * Routine used to start the heart-beat timer */ void os_start_heartbeat_timer(void *data) { struct pqisrc_softstate *softs = (struct pqisrc_softstate *)data; DBG_FUNC("IN\n"); pqisrc_heartbeat_timer_handler(softs); if (!pqisrc_ctrl_offline(softs)) { callout_reset(&softs->os_specific.heartbeat_timeout_id, PQI_HEARTBEAT_TIMEOUT_SEC * hz, os_start_heartbeat_timer, softs); } DBG_FUNC("OUT\n"); } /* * Mutex initialization function */ int os_init_spinlock(struct pqisrc_softstate *softs, struct mtx *lock, char *lockname) { - mtx_init(lock, lockname, NULL, MTX_SPIN); - return 0; + mtx_init(lock, lockname, NULL, MTX_SPIN); + return 0; + } /* * Mutex uninitialization function */ void os_uninit_spinlock(struct mtx *lock) { mtx_destroy(lock); return; } /* * Semaphore initialization function */ int os_create_semaphore(const char *name, int value, struct sema *sema) { sema_init(sema, value, name); return PQI_STATUS_SUCCESS; } /* * Semaphore uninitialization function */ int os_destroy_semaphore(struct sema *sema) { sema_destroy(sema); return PQI_STATUS_SUCCESS; } /* * Semaphore grab function */ void inline os_sema_lock(struct sema *sema) { sema_post(sema); } /* * Semaphore release function */ void inline os_sema_unlock(struct sema *sema) { sema_wait(sema); } /* * string copy wrapper function */ int os_strlcpy(char *dst, char *src, int size) { return strlcpy(dst, src, size); } int bsd_status_to_pqi_status(int bsd_status) { if (bsd_status == BSD_SUCCESS) return PQI_STATUS_SUCCESS; else return PQI_STATUS_FAILURE; } + +/* Return true : If the feature is disabled from device hints. + * Return false : If the feature is enabled from device hints. + * Return default: The feature status is not deciding from hints. + * */ +boolean_t +check_device_hint_status(struct pqisrc_softstate *softs, unsigned int feature_bit) +{ + DBG_FUNC("IN\n"); + + switch(feature_bit) { + case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: + if (!softs->hint.aio_raid1_write_status) + return true; + break; + case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: + if (!softs->hint.aio_raid5_write_status) + return true; + break; + case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: + if (!softs->hint.aio_raid6_write_status) + return true; + break; + case PQI_FIRMWARE_FEATURE_UNIQUE_SATA_WWN: + if (!softs->hint.sata_unique_wwn_status) + return true; + break; + default: + return false; + } + + DBG_FUNC("OUT\n"); + + return false; +} + +static void +bsd_set_hint_adapter_queue_depth(struct pqisrc_softstate *softs) +{ + uint32_t queue_depth = softs->pqi_cap.max_outstanding_io; + + DBG_FUNC("IN\n"); + + if ((!softs->hint.queue_depth) || (softs->hint.queue_depth > + softs->pqi_cap.max_outstanding_io)) { + /* Nothing to do here. 
Supported queue depth + * is already set by controller/driver */ + } + else if (softs->hint.queue_depth < PQISRC_MIN_OUTSTANDING_REQ) { + /* Nothing to do here. Supported queue depth + * is already set by controller/driver */ + } + else { + /* Set Device.Hint queue depth here */ + softs->pqi_cap.max_outstanding_io = + softs->hint.queue_depth; + } + + DBG_NOTE("Adapter queue depth before hint set = %u, Queue depth after hint set = %u\n", + queue_depth, softs->pqi_cap.max_outstanding_io); + + DBG_FUNC("OUT\n"); +} + +static void +bsd_set_hint_scatter_gather_config(struct pqisrc_softstate *softs) +{ + uint32_t pqi_sg_segments = softs->pqi_cap.max_sg_elem; + + DBG_FUNC("IN\n"); + + /* At least > 16 sg's are required for the hint to work correctly. + * Otherwise keep the sg count set by the driver/controller. */ + + if ((!softs->hint.sg_segments) || (softs->hint.sg_segments > + softs->pqi_cap.max_sg_elem)) { + /* Nothing to do here. Supported sg count + * is already set by controller/driver. */ + } + else if (softs->hint.sg_segments < BSD_MIN_SG_SEGMENTS) + { + /* Nothing to do here. Supported sg count + * is already set by controller/driver. */ + } + else { + /* Set Device.Hint sg count here */ + softs->pqi_cap.max_sg_elem = softs->hint.sg_segments; + } + + DBG_NOTE("SG segments before hint set = %u, SG segments after hint set = %u\n", + pqi_sg_segments, softs->pqi_cap.max_sg_elem); + + DBG_FUNC("OUT\n"); +} + +void +bsd_set_hint_adapter_cap(struct pqisrc_softstate *softs) +{ + DBG_FUNC("IN\n"); + + bsd_set_hint_adapter_queue_depth(softs); + bsd_set_hint_scatter_gather_config(softs); + + DBG_FUNC("OUT\n"); +} + +void +bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *softs) +{ + DBG_FUNC("IN\n"); + + /* The online cpu count decides the number of queues the driver can create, + * and the msi interrupt count as well. + * If the cpu count is "zero" set by the hint file then the driver + * can have "one" queue and "one" legacy interrupt. (It shares the event queue for + * the operational IB queue). + * See the os_get_intr_config function for interrupt assignment. */ + + if (softs->hint.cpu_count > softs->num_cpus_online) { + /* Nothing to do here. Supported cpu count + * already fetched from hardware */ + } + else { + /* Set Device.Hint cpu count here */ + softs->num_cpus_online = softs->hint.cpu_count; + } + + DBG_FUNC("OUT\n"); +} diff --git a/sys/dev/smartpqi/smartpqi_prototypes.h b/sys/dev/smartpqi/smartpqi_prototypes.h index e66d6e337e0a..8eabe5c47cf4 100644 --- a/sys/dev/smartpqi/smartpqi_prototypes.h +++ b/sys/dev/smartpqi/smartpqi_prototypes.h @@ -1,296 +1,331 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
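For context on where the softs->hint values consumed by bsd_set_hint_adapter_cap() and bsd_set_hint_adapter_cpu_config() come from: FreeBSD drivers typically pull per-device tunables like these from /boot/device.hints via resource_int_value(). A minimal sketch under that assumption; example_read_hints is a hypothetical helper and the hint names (queue_depth, sg_segments, cpu_count) are assumptions, the driver's real names may differ:

    /* Illustrative only: populate softs->hint from entries such as
     * hint.smartpqi.0.queue_depth="128" (hint names here are assumptions). */
    static void
    example_read_hints(device_t dev, struct pqisrc_softstate *softs)
    {
            int unit = device_get_unit(dev);
            int val;

            if (resource_int_value("smartpqi", unit, "queue_depth", &val) == 0)
                    softs->hint.queue_depth = val;
            if (resource_int_value("smartpqi", unit, "sg_segments", &val) == 0)
                    softs->hint.sg_segments = val;
            if (resource_int_value("smartpqi", unit, "cpu_count", &val) == 0)
                    softs->hint.cpu_count = val;
    }

With such a mapping, a hint that requests a queue depth of 128 would still be clamped by bsd_set_hint_adapter_queue_depth() to the controller's supported range before it overrides pqi_cap.max_outstanding_io.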
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef _PQI_PROTOTYPES_H #define _PQI_PROTOTYPES_H /* Function prototypes */ -/*pqi_init.c */ +/*smartpqi_init.c */ int pqisrc_init(pqisrc_softstate_t *); void pqisrc_uninit(pqisrc_softstate_t *); void pqisrc_pqi_uninit(pqisrc_softstate_t *); int pqisrc_process_config_table(pqisrc_softstate_t *); int pqisrc_flush_cache(pqisrc_softstate_t *, enum pqisrc_flush_cache_event_type); int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *); +int pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *); +void pqisrc_complete_internal_cmds(pqisrc_softstate_t *); +void sanity_check_os_behavior(pqisrc_softstate_t *); -/* pqi_sis.c*/ + +/* smartpqi_sis.c*/ int pqisrc_sis_init(pqisrc_softstate_t *); void pqisrc_sis_uninit(pqisrc_softstate_t *); int pqisrc_reenable_sis(pqisrc_softstate_t *); void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *); void sis_disable_msix(pqisrc_softstate_t *); void sis_enable_intx(pqisrc_softstate_t *); void sis_disable_intx(pqisrc_softstate_t *softs); int pqisrc_force_sis(pqisrc_softstate_t *); int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *, uint32_t); void sis_disable_interrupt(pqisrc_softstate_t*); -/* pqi_queue.c */ +/* smartpqi_queue.c */ int pqisrc_submit_admin_req(pqisrc_softstate_t *, gen_adm_req_iu_t *, gen_adm_resp_iu_t *); int pqisrc_create_admin_queue(pqisrc_softstate_t *); int pqisrc_destroy_admin_queue(pqisrc_softstate_t *); int pqisrc_create_op_queues(pqisrc_softstate_t *); +int pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *, ib_queue_t *, + char *); +int pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *, ob_queue_t *, + char *); -/* pqi_cmd.c */ +/* smartpqi_cmd.c */ int pqisrc_submit_cmnd(pqisrc_softstate_t *,ib_queue_t *,void *); -/* pqi_tag.c */ +/* smartpqi_tag.c */ #ifndef LOCKFREE_STACK int pqisrc_init_taglist(pqisrc_softstate_t *,pqi_taglist_t *,uint32_t); void pqisrc_destroy_taglist(pqisrc_softstate_t *,pqi_taglist_t *); void pqisrc_put_tag(pqi_taglist_t *,uint32_t); uint32_t pqisrc_get_tag(pqi_taglist_t *); #else int pqisrc_init_taglist(pqisrc_softstate_t *, lockless_stack_t *, uint32_t); void pqisrc_destroy_taglist(pqisrc_softstate_t *, lockless_stack_t *); void pqisrc_put_tag(lockless_stack_t *,uint32_t); uint32_t pqisrc_get_tag(lockless_stack_t *); #endif /* LOCKFREE_STACK */ -/* pqi_discovery.c */ +/* smartpqi_discovery.c */ void pqisrc_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *); +boolean_t pqisrc_add_softs_entry(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device, + uint8_t *scsi3addr); int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *); int pqisrc_rescan_devices(pqisrc_softstate_t *); int pqisrc_scan_devices(pqisrc_softstate_t *); -void pqisrc_process_raid_path_io_response(pqisrc_softstate_t *, uint16_t, struct pqi_io_response *); -void pqisrc_process_io_error_response(pqisrc_softstate_t *, int, uint16_t, struct pqi_io_response *); void pqisrc_cleanup_devices(pqisrc_softstate_t *); void pqisrc_device_mem_free(pqisrc_softstate_t *, pqi_scsi_dev_t *); boolean_t 
pqisrc_is_external_raid_device(pqi_scsi_dev_t *device); void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device); -void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs); -int pqisrc_alloc_tid(pqisrc_softstate_t *softs); -void pqisrc_free_tid(pqisrc_softstate_t *softs, int); +void pqisrc_init_bitmap(pqisrc_softstate_t *softs); +void pqisrc_remove_target_bit(pqisrc_softstate_t *softs, int target); +int pqisrc_find_avail_target(pqisrc_softstate_t *softs); +int pqisrc_find_device_list_index(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device); +int pqisrc_find_btl_list_index(pqisrc_softstate_t *softs, + int bus, int target, int lun); +int pqisrc_delete_softs_entry(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device); int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd, - reportlun_data_ext_t **buff, size_t *data_length); + reportlun_data_ext_t **buff, size_t *data_length); int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs, uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len); +int pqisrc_simple_dma_alloc(pqisrc_softstate_t *, struct dma_mem *, size_t, + sgt_t *); +int pqisrc_prepare_send_raid(pqisrc_softstate_t *, pqisrc_raid_req_t *, + void *, size_t , uint8_t *, raid_path_error_info_elem_t *); + -/* pqi_helper.c */ +/* smartpqi_helper.c */ boolean_t pqisrc_ctrl_offline(pqisrc_softstate_t *); void pqisrc_heartbeat_timer_handler(pqisrc_softstate_t *); int pqisrc_wait_on_condition(pqisrc_softstate_t *softs, rcb_t *rcb, uint32_t timeout); boolean_t pqisrc_device_equal(pqi_scsi_dev_t *, pqi_scsi_dev_t *); boolean_t pqisrc_is_hba_lunid(uint8_t *); boolean_t pqisrc_is_logical_device(pqi_scsi_dev_t *); void pqisrc_sanitize_inquiry_string(unsigned char *, int ); void pqisrc_display_device_info(pqisrc_softstate_t *, char *, pqi_scsi_dev_t *); boolean_t pqisrc_scsi3addr_equal(uint8_t *, uint8_t *); void check_struct_sizes(void); char *pqisrc_raidlevel_to_string(uint8_t); void pqisrc_configure_legacy_intx(pqisrc_softstate_t*, boolean_t); void pqisrc_ctrl_diagnostic_options(pqisrc_softstate_t *); void pqisrc_wait_for_device_commands_to_complete(pqisrc_softstate_t *, pqi_scsi_dev_t *); +int pqisrc_QuerySenseFeatures(pqisrc_softstate_t *); void check_device_pending_commands_to_complete(pqisrc_softstate_t *, pqi_scsi_dev_t *); uint32_t pqisrc_count_num_scsi_active_requests_on_dev(pqisrc_softstate_t *, pqi_scsi_dev_t *); - -/* pqi_response.c */ -void pqisrc_signal_event(pqisrc_softstate_t *softs, rcb_t *rcb); +/* smartpqi_response.c */ void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *, rcb_t *); void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *, rcb_t *, uint16_t); void pqisrc_process_io_response_success(pqisrc_softstate_t *, rcb_t *); void pqisrc_show_sense_data_full(pqisrc_softstate_t *, rcb_t *, sense_data_u_t *sense_data); void pqisrc_process_aio_response_error(pqisrc_softstate_t *, rcb_t *, uint16_t); void pqisrc_process_raid_response_error(pqisrc_softstate_t *, rcb_t *, uint16_t); void pqisrc_process_response_queue(pqisrc_softstate_t *, int); - - - -/* pqi_request.c */ -int pqisrc_build_send_vendor_request(pqisrc_softstate_t*, - pqi_vendor_general_request_t *, - raid_path_error_info_elem_t *); +void pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, + aio_path_error_info_elem_t *aio_err); +void pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, + raid_path_error_info_elem_t *aio_err); +boolean_t suppress_innocuous_error_prints(pqisrc_softstate_t *softs, + rcb_t 
*rcb); +uint8_t pqisrc_get_cmd_from_rcb(rcb_t *); +boolean_t pqisrc_is_innocuous_error(pqisrc_softstate_t *, rcb_t *, void *); + + +/* smartpqi_request.c */ +int pqisrc_build_send_vendor_request(pqisrc_softstate_t *softs, + struct pqi_vendor_general_request *request); int pqisrc_build_send_io(pqisrc_softstate_t *,rcb_t *); - - -int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, - pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t*); - - +int pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device, rcb_t *rcb); int pqisrc_send_tmf(pqisrc_softstate_t *, pqi_scsi_dev_t *, rcb_t *, rcb_t *, int); int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs); int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs); +extern inline void pqisrc_aio_build_cdb(aio_req_locator_t *, uint32_t, + rcb_t *, uint8_t *); +extern inline boolean_t pqisrc_aio_req_too_big(pqisrc_softstate_t *, pqi_scsi_dev_t *, + rcb_t *, aio_req_locator_t *, uint32_t); +void pqisrc_build_aio_common(pqisrc_softstate_t *, pqi_aio_req_t *, + rcb_t *, uint32_t); +void pqisrc_build_aio_R1_write(pqisrc_softstate_t *, + pqi_aio_raid1_write_req_t *, rcb_t *, uint32_t); +void pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *, + pqi_aio_raid5or6_write_req_t *, rcb_t *, uint32_t); +void pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb); void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags); +void pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info); +void pqisrc_show_aio_io(pqisrc_softstate_t *, rcb_t *, + pqi_aio_req_t *, uint32_t); +void pqisrc_show_aio_common(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *); +void pqisrc_show_aio_R1_write(pqisrc_softstate_t *, rcb_t *, + pqi_aio_raid1_write_req_t *); +void pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *, rcb_t *, + pqi_aio_raid5or6_write_req_t *); +boolean_t pqisrc_cdb_is_write(uint8_t *); +void print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg); +void print_all_counters(pqisrc_softstate_t *softs, uint32_t flags); char *io_path_to_ascii(IO_PATH_T path); +void int_to_scsilun(uint64_t, uint8_t *); +boolean_t pqisrc_cdb_is_read(uint8_t *); +void pqisrc_build_aio_io(pqisrc_softstate_t *, rcb_t *, pqi_aio_req_t *, uint32_t); +uint8_t pqisrc_get_aio_data_direction(rcb_t *); +uint8_t pqisrc_get_raid_data_direction(rcb_t *); +void dump_tmf_details(pqisrc_softstate_t *, rcb_t *, char *); +io_type_t get_io_type_from_cdb(uint8_t *); +OS_ATOMIC64_T increment_this_counter(io_counters_t *, IO_PATH_T , io_type_t ); +boolean_t +is_buffer_zero(void *, uint32_t ); + + -/* pqi_event.c*/ + +/* smartpqi_event.c*/ int pqisrc_report_event_config(pqisrc_softstate_t *); int pqisrc_set_event_config(pqisrc_softstate_t *); int pqisrc_process_event_intr_src(pqisrc_softstate_t *,int); void pqisrc_ack_all_events(void *arg); void pqisrc_wait_for_rescan_complete(pqisrc_softstate_t *softs); -boolean_t pqisrc_update_scsi_sense(const uint8_t *, int, - struct sense_header_scsi *); -int pqisrc_build_send_raid_request(pqisrc_softstate_t *, pqisrc_raid_req_t *, - void *, size_t, uint8_t, uint16_t, uint8_t *, - raid_path_error_info_elem_t *); +int pqisrc_prepare_send_ctrlr_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request, + void *buff, size_t datasize); int pqisrc_submit_management_req(pqisrc_softstate_t *, pqi_event_config_request_t *); void pqisrc_take_devices_offline(pqisrc_softstate_t 
*); void pqisrc_take_ctrl_offline(pqisrc_softstate_t *); void pqisrc_free_rcb(pqisrc_softstate_t *, int); void pqisrc_decide_opq_config(pqisrc_softstate_t *); int pqisrc_configure_op_queues(pqisrc_softstate_t *); int pqisrc_pqi_init(pqisrc_softstate_t *); int pqi_reset(pqisrc_softstate_t *); int pqisrc_check_pqimode(pqisrc_softstate_t *); int pqisrc_check_fw_status(pqisrc_softstate_t *); int pqisrc_init_struct_base(pqisrc_softstate_t *); int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *); int pqisrc_get_preferred_settings(pqisrc_softstate_t *); int pqisrc_get_adapter_properties(pqisrc_softstate_t *, uint32_t *, uint32_t *); void pqisrc_get_admin_queue_config(pqisrc_softstate_t *); void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *); int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *); int pqisrc_create_delete_adminq(pqisrc_softstate_t *, uint32_t); void pqisrc_print_adminq_config(pqisrc_softstate_t *); -int pqisrc_delete_op_queue(pqisrc_softstate_t *, - uint32_t, boolean_t); +int pqisrc_delete_op_queue(pqisrc_softstate_t *, uint32_t, boolean_t); void pqisrc_destroy_event_queue(pqisrc_softstate_t *); - void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *); - void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *); - -int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, - ib_queue_t *, uint32_t); -int pqisrc_create_op_obq(pqisrc_softstate_t *, - ob_queue_t *); -int pqisrc_create_op_ibq(pqisrc_softstate_t *, - ib_queue_t *); +int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *, ib_queue_t *, + uint32_t); +int pqisrc_create_op_obq(pqisrc_softstate_t *, ob_queue_t *); +int pqisrc_create_op_ibq(pqisrc_softstate_t *, ib_queue_t *); int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *, ib_queue_t *); int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *, ib_queue_t *); int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *); int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *); +int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *); int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *); int pqisrc_process_task_management_response(pqisrc_softstate_t *, pqi_tmf_resp_t *); -/*Device outstanding Io count*/ -uint64_t pqisrc_increment_device_active_io(pqisrc_softstate_t *, - pqi_scsi_dev_t *); -uint64_t pqisrc_decrement_device_active_io(pqisrc_softstate_t *, - pqi_scsi_dev_t *); -void pqisrc_init_device_active_io(pqisrc_softstate_t *, - pqi_scsi_dev_t *); -uint64_t pqisrc_read_device_active_io(pqisrc_softstate_t *, - pqi_scsi_dev_t *); - -/* pqi_ioctl.c*/ - -int -pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int); - - +/* smartpqi_ioctl.c*/ +int pqisrc_passthru_ioctl(struct pqisrc_softstate *, void *, int); /* Functions Prototypes */ -/* FreeBSD_mem.c */ +/* smartpqi_mem.c */ int os_dma_mem_alloc(pqisrc_softstate_t *,struct dma_mem *); void os_dma_mem_free(pqisrc_softstate_t *,struct dma_mem *); void *os_mem_alloc(pqisrc_softstate_t *,size_t); -void os_mem_free(pqisrc_softstate_t *,char *,size_t); +void os_mem_free(pqisrc_softstate_t *,void *,size_t); void os_resource_free(pqisrc_softstate_t *); int os_dma_setup(pqisrc_softstate_t *); int os_dma_destroy(pqisrc_softstate_t *); void os_update_dma_attributes(pqisrc_softstate_t *); -/* FreeBSD intr.c */ +/* smartpqi_intr.c */ int os_get_intr_config(pqisrc_softstate_t *); int os_setup_intr(pqisrc_softstate_t *); int os_destroy_intr(pqisrc_softstate_t *); int os_get_processor_config(pqisrc_softstate_t *); void os_free_intr_config(pqisrc_softstate_t *); -/* FreeBSD_ioctl.c */ +/* smartpqi_ioctl.c */ int 
os_copy_to_user(struct pqisrc_softstate *, void *, void *, int, int); int os_copy_from_user(struct pqisrc_softstate *, void *, void *, int, int); int create_char_dev(struct pqisrc_softstate *, int); void destroy_char_dev(struct pqisrc_softstate *); - -/* FreeBSD_misc.c*/ + +/* smartpqi_misc.c*/ int os_init_spinlock(struct pqisrc_softstate *, struct mtx *, char *); void os_uninit_spinlock(struct mtx *); int os_create_semaphore(const char *, int,struct sema *); int os_destroy_semaphore(struct sema *); void os_sema_lock(struct sema *); void os_sema_unlock(struct sema *); +void bsd_set_hint_adapter_cap(struct pqisrc_softstate *); +void bsd_set_hint_adapter_cpu_config(struct pqisrc_softstate *); int os_strlcpy(char *dst, char *src, int len); void os_complete_outstanding_cmds_nodevice(pqisrc_softstate_t *); void os_stop_heartbeat_timer(pqisrc_softstate_t *); void os_start_heartbeat_timer(void *); -/* FreeBSD_cam.c */ +/* smartpqi_cam.c */ uint8_t os_get_task_attr(rcb_t *); void smartpqi_target_rescan(struct pqisrc_softstate *); void os_rescan_target(struct pqisrc_softstate *, pqi_scsi_dev_t *); -/* FreeBSD_intr.c FreeBSD_main.c */ +/* smartpqi_intr.c smartpqi_main.c */ void pqisrc_event_worker(void *, int); void os_add_device(pqisrc_softstate_t *, pqi_scsi_dev_t *); -void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *); +void os_remove_device(pqisrc_softstate_t *, pqi_scsi_dev_t *); void os_io_response_success(rcb_t *); void os_aio_response_error(rcb_t *, aio_path_error_info_elem_t *); +boolean_t check_device_hint_status(struct pqisrc_softstate *, unsigned int ); void smartpqi_adjust_queue_depth(struct cam_path *, uint32_t ); void os_raid_response_error(rcb_t *, raid_path_error_info_elem_t *); void os_wellness_periodic(void *); void os_reset_rcb( rcb_t *); int register_sim(struct pqisrc_softstate *, int); void deregister_sim(struct pqisrc_softstate *); -int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *, +int check_for_scsi_opcode(uint8_t *, boolean_t *, uint64_t *, uint32_t *); int register_legacy_intr(pqisrc_softstate_t *); int register_msix_intr(pqisrc_softstate_t *); void deregister_pqi_intx(pqisrc_softstate_t *); void deregister_pqi_msix(pqisrc_softstate_t *); void os_get_time(struct bmic_host_wellness_time *); void os_eventtaskqueue_enqueue(pqisrc_softstate_t *); void pqisrc_save_controller_info(struct pqisrc_softstate *); /* Domain status conversion */ int bsd_status_to_pqi_status(int ); - -#endif // _SMARTPQI_PROTOTYPES_H +#endif diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c index b5c962752d71..2e80b01b5436 100644 --- a/sys/dev/smartpqi/smartpqi_queue.c +++ b/sys/dev/smartpqi/smartpqi_queue.c @@ -1,1022 +1,1015 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" /* * Submit an admin IU to the adapter. + * TODO : Admin command implemented using polling, * Add interrupt support, if required */ int pqisrc_submit_admin_req(pqisrc_softstate_t *softs, gen_adm_req_iu_t *req, gen_adm_resp_iu_t *resp) { int ret = PQI_STATUS_SUCCESS; ob_queue_t *ob_q = &softs->admin_ob_queue; ib_queue_t *ib_q = &softs->admin_ib_queue; int tmo = PQISRC_ADMIN_CMD_RESP_TIMEOUT; DBG_FUNC("IN\n"); req->header.iu_type = PQI_IU_TYPE_GENERAL_ADMIN_REQUEST; req->header.comp_feature = 0x00; req->header.iu_length = PQI_STANDARD_IU_LENGTH; req->res1 = 0; req->work = 0; /* Get the tag */ req->req_id = pqisrc_get_tag(&softs->taglist); if (INVALID_ELEM == req->req_id) { DBG_ERR("Tag not available0x%x\n",(uint16_t)req->req_id); ret = PQI_STATUS_FAILURE; goto err_out; } softs->rcb[req->req_id].tag = req->req_id; /* Submit the command to the admin ib queue */ ret = pqisrc_submit_cmnd(softs, ib_q, req); if (ret != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command\n"); goto err_cmd; } /* Wait for completion */ COND_WAIT((*(ob_q->pi_virt_addr) != ob_q->ci_local), tmo); if (tmo <= 0) { DBG_ERR("Admin cmd timeout\n"); DBG_ERR("tmo : %d\n",tmo); \ /* TODO : PQI device status and error register and report */ ret = PQI_STATUS_TIMEOUT; goto err_cmd; } /* Copy the response */ memcpy(resp, ob_q->array_virt_addr + (ob_q->ci_local * ob_q->elem_size), sizeof(gen_adm_resp_iu_t)); /* Update CI */ ob_q->ci_local = (ob_q->ci_local + 1 ) % ob_q->num_elem; PCI_MEM_PUT32(softs, ob_q->ci_register_abs, ob_q->ci_register_offset, LE_32(ob_q->ci_local)); /* Validate the response data */ ASSERT(req->fn_code == resp->fn_code); ASSERT(resp->header.iu_type == PQI_IU_TYPE_GENERAL_ADMIN_RESPONSE); ret = resp->status; if (ret) goto err_cmd; os_reset_rcb(&softs->rcb[req->req_id]); pqisrc_put_tag(&softs->taglist,req->req_id); DBG_FUNC("OUT\n"); return ret; err_cmd: os_reset_rcb(&softs->rcb[req->req_id]); pqisrc_put_tag(&softs->taglist,req->req_id); err_out: DBG_FUNC("failed OUT : %d\n", ret); return ret; } /* * Get the administration queue config parameters. 
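The capability decode implemented just below packs four byte-wide fields into the low 32 bits of the admin queue capability register; a worked example with an arbitrary illustrative value:

    /* Suppose val == 0x04044040 (example value only):                          */
    /*   admin_ib_queue.num_elem  =  val         & 0xFF        = 0x40 -> 64 elements */
    /*   admin_ob_queue.num_elem  = (val >> 8)   & 0xFF        = 0x40 -> 64 elements */
    /*   admin_ib_queue.elem_size = ((val >> 16) & 0xFF) * 16  = 4 * 16 -> 64 bytes  */
    /*   admin_ob_queue.elem_size = ((val >> 24) & 0xFF) * 16  = 4 * 16 -> 64 bytes  */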
*/ void pqisrc_get_admin_queue_config(pqisrc_softstate_t *softs) { uint64_t val = 0; val = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP)); /* pqi_cap = (struct pqi_dev_adminq_cap *)&val;*/ softs->admin_ib_queue.num_elem = val & 0xFF; softs->admin_ob_queue.num_elem = (val & 0xFF00) >> 8; /* Note : size in unit of 16 byte s*/ softs->admin_ib_queue.elem_size = ((val & 0xFF0000) >> 16) * 16; softs->admin_ob_queue.elem_size = ((val & 0xFF000000) >> 24) * 16; - DBG_FUNC(" softs->admin_ib_queue.num_elem : %d\n", - softs->admin_ib_queue.num_elem); - DBG_FUNC(" softs->admin_ib_queue.elem_size : %d\n", - softs->admin_ib_queue.elem_size); + DBG_INIT(" admin ib: num_elem=%u elem_size=%u\n", + softs->admin_ib_queue.num_elem, softs->admin_ib_queue.elem_size); + DBG_INIT(" admin ob: num_elem=%u elem_size=%u\n", + softs->admin_ob_queue.num_elem, softs->admin_ob_queue.elem_size); } /* * Decide the no of elements in admin ib and ob queues. */ void pqisrc_decide_admin_queue_config(pqisrc_softstate_t *softs) { /* Determine num elements in Admin IBQ */ softs->admin_ib_queue.num_elem = MIN(softs->admin_ib_queue.num_elem, PQISRC_MAX_ADMIN_IB_QUEUE_ELEM_NUM); /* Determine num elements in Admin OBQ */ softs->admin_ob_queue.num_elem = MIN(softs->admin_ob_queue.num_elem, PQISRC_MAX_ADMIN_OB_QUEUE_ELEM_NUM); } /* - * Allocate DMA memory for admin queue and initialize. + * Allocate DMA memory for inbound queue and initialize. */ int -pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs) +pqisrc_allocate_and_init_inbound_q(pqisrc_softstate_t *softs, ib_queue_t *ib_q, char *tag) { + struct dma_mem *dma_mem = &ib_q->alloc_dma; uint32_t ib_array_size = 0; - uint32_t ob_array_size = 0; uint32_t alloc_size = 0; char *virt_addr = NULL; dma_addr_t dma_addr = 0; int ret = PQI_STATUS_SUCCESS; - ib_array_size = (softs->admin_ib_queue.num_elem * - softs->admin_ib_queue.elem_size); + ib_array_size = ib_q->num_elem * ib_q->elem_size; + ASSERT(ib_array_size > 0); - ob_array_size = (softs->admin_ob_queue.num_elem * - softs->admin_ob_queue.elem_size); + alloc_size = ib_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* for IB CI and OB PI */ - alloc_size = ib_array_size + ob_array_size + - 2 * sizeof(uint32_t) + PQI_ADDR_ALIGN_MASK_64 + 1; /* for IB CI and OB PI */ - /* Allocate memory for Admin Q */ - softs->admin_queue_dma_mem.tag = "admin_queue"; - softs->admin_queue_dma_mem.size = alloc_size; - softs->admin_queue_dma_mem.align = PQI_ADMINQ_ELEM_ARRAY_ALIGN; - ret = os_dma_mem_alloc(softs, &softs->admin_queue_dma_mem); + /* Allocate memory for the Q */ + memset(dma_mem, 0, sizeof(*dma_mem)); + os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag)); + dma_mem->size = alloc_size; + dma_mem->align = PQI_ADDR_ALIGN; + ret = os_dma_mem_alloc(softs, &ib_q->alloc_dma); if (ret) { - DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret); + DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret); goto err_out; } + DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n", + dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr); + /* Setup the address */ - virt_addr = softs->admin_queue_dma_mem.virt_addr; - dma_addr = softs->admin_queue_dma_mem.dma_addr; + virt_addr = dma_mem->virt_addr; + dma_addr = dma_mem->dma_addr; + ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK)); + ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK)); /* IB */ - softs->admin_ib_queue.q_id = 0; - softs->admin_ib_queue.array_virt_addr = virt_addr; - 
softs->admin_ib_queue.array_dma_addr = dma_addr; - softs->admin_ib_queue.pi_local = 0; - /* OB */ - softs->admin_ob_queue.q_id = 0; - softs->admin_ob_queue.array_virt_addr = virt_addr + ib_array_size; - softs->admin_ob_queue.array_dma_addr = dma_addr + ib_array_size; - softs->admin_ob_queue.ci_local = 0; + ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK)); + ib_q->array_virt_addr = virt_addr; + ib_q->array_dma_addr = dma_addr; + ib_q->pi_local = 0; + + /* update addr for the next user */ + virt_addr += ib_array_size; + dma_addr += ib_array_size; /* IB CI */ - softs->admin_ib_queue.ci_virt_addr = - (uint32_t*)((uint8_t*)softs->admin_ob_queue.array_virt_addr - + ob_array_size); - softs->admin_ib_queue.ci_dma_addr = - (dma_addr_t)((uint8_t*)softs->admin_ob_queue.array_dma_addr + - ob_array_size); + ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK)); + ib_q->ci_virt_addr = (uint32_t*)virt_addr; + ib_q->ci_dma_addr = dma_addr; + + /* update addr for the next user */ + virt_addr += PQI_CI_PI_ALIGN; + + DBG_INIT("ib_q: virt_addr=%p, ci_dma_addr=%p elem=%u size=%u\n", + ib_q->array_virt_addr, (void*)ib_q->ci_dma_addr, ib_q->num_elem, ib_array_size); + + /* Verify we aren't out of bounds from allocation */ + ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size)); + + DBG_FUNC("OUT\n"); + return ret; + +err_out: + DBG_FUNC("failed OUT\n"); + return PQI_STATUS_FAILURE; +} + + +/* + * Allocate DMA memory for outbound queue and initialize. + */ +int +pqisrc_allocate_and_init_outbound_q(pqisrc_softstate_t *softs, ob_queue_t *ob_q, + char *tag) +{ + struct dma_mem *dma_mem = &ob_q->alloc_dma; + uint32_t ob_array_size = 0; + uint32_t alloc_size = 0; + char *virt_addr = NULL; + dma_addr_t dma_addr = 0; + int ret = PQI_STATUS_SUCCESS; + + ob_array_size = ob_q->num_elem * ob_q->elem_size; + ASSERT(ob_array_size > 0); + + alloc_size = ob_array_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN; /* for OB PI */ + + /* Allocate memory for the Q */ + memset(dma_mem, 0, sizeof(*dma_mem)); + os_strlcpy(dma_mem->tag, tag, sizeof(dma_mem->tag)); + dma_mem->size = alloc_size; + dma_mem->align = PQI_ADDR_ALIGN; + ret = os_dma_mem_alloc(softs, &ob_q->alloc_dma); + if (ret) { + DBG_ERR("Failed to Allocate Q tag=%s ret=%d\n", dma_mem->tag, ret); + goto err_out; + } + + DBG_INIT("alloc tag=%s size=0x%x align=0x%x virt_addr=%p dma_addr=%p\n", + dma_mem->tag, dma_mem->size, dma_mem->align, dma_mem->virt_addr, (void*)dma_mem->dma_addr); + + /* Setup the address */ + virt_addr = dma_mem->virt_addr; + dma_addr = dma_mem->dma_addr; + ASSERT(!((uint64_t)virt_addr & PQI_ADDR_ALIGN_MASK)); + ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK)); + + ob_q->array_virt_addr = virt_addr; + ob_q->array_dma_addr = dma_addr; + ob_q->ci_local = 0; + + /* update addr for the next user */ + virt_addr += ob_array_size; + dma_addr += ob_array_size; /* OB PI */ - softs->admin_ob_queue.pi_virt_addr = - (uint32_t*)((uint8_t*)(softs->admin_ib_queue.ci_virt_addr) + - PQI_ADDR_ALIGN_MASK_64 + 1); - softs->admin_ob_queue.pi_dma_addr = - (dma_addr_t)((uint8_t*)(softs->admin_ib_queue.ci_dma_addr) + - PQI_ADDR_ALIGN_MASK_64 + 1); - - DBG_INIT("softs->admin_ib_queue.ci_dma_addr : %p,softs->admin_ob_queue.pi_dma_addr :%p\n", - (void*)softs->admin_ib_queue.ci_dma_addr, (void*)softs->admin_ob_queue.pi_dma_addr ); - - /* Verify alignment */ - ASSERT(!(softs->admin_ib_queue.array_dma_addr & - PQI_ADDR_ALIGN_MASK_64)); - ASSERT(!(softs->admin_ib_queue.ci_dma_addr & - PQI_ADDR_ALIGN_MASK_64)); - ASSERT(!(softs->admin_ob_queue.array_dma_addr & - PQI_ADDR_ALIGN_MASK_64)); - 
ASSERT(!(softs->admin_ob_queue.pi_dma_addr & - PQI_ADDR_ALIGN_MASK_64)); + ASSERT(!(dma_addr & PQI_CI_PI_ALIGN_MASK)); + ob_q->pi_virt_addr = (uint32_t*)virt_addr; + ob_q->pi_dma_addr = dma_addr; + + /* update addr to show the end next user */ + virt_addr += PQI_CI_PI_ALIGN; + + DBG_INIT("ob_q: virt_addr=%p, pi_dma_addr=%p elem=%u size=%u\n", + ob_q->array_virt_addr, (void*)ob_q->pi_dma_addr, ob_q->num_elem, ob_array_size); + + /* Verify we aren't out of bounds from allocation */ + ASSERT(virt_addr <= ((char*)dma_mem->virt_addr + alloc_size)); DBG_FUNC("OUT\n"); return ret; err_out: DBG_FUNC("failed OUT\n"); return PQI_STATUS_FAILURE; } +/* + * Allocate DMA memory for admin queue and initialize. + */ +int pqisrc_allocate_and_init_adminq(pqisrc_softstate_t *softs) +{ + int ret; + ib_queue_t *admin_ib_q = &softs->admin_ib_queue; + ob_queue_t *admin_ob_q = &softs->admin_ob_queue; + + ret = pqisrc_allocate_and_init_inbound_q(softs, admin_ib_q, "admin_queue"); + if (!ret) { + admin_ib_q->q_id = PQI_ADMIN_IB_QUEUE_ID; + ret = pqisrc_allocate_and_init_outbound_q(softs, admin_ob_q, "admin_queue"); + if(!ret) + admin_ob_q->q_id = PQI_ADMIN_OB_QUEUE_ID; + else { + if(softs->admin_ib_queue.lockcreated==true) { + OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock); + softs->admin_ib_queue.lockcreated = false; + } + if (softs->admin_ib_queue.alloc_dma.virt_addr) + os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma); + } + } + else + DBG_ERR("Failed to create Admin Queue pair\n"); + + return ret; +} + /* * Subroutine used to create (or) delete the admin queue requested. */ int pqisrc_create_delete_adminq(pqisrc_softstate_t *softs, uint32_t cmd) { int tmo = 0; int ret = PQI_STATUS_SUCCESS; /* Create Admin Q pair writing to Admin Q config function reg */ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG, LE_64(cmd)); if (cmd == PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR) tmo = PQISRC_ADMIN_QUEUE_CREATE_TIMEOUT; else tmo = PQISRC_ADMIN_QUEUE_DELETE_TIMEOUT; /* Wait for completion */ COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo); if (tmo <= 0) { DBG_ERR("Unable to create/delete admin queue pair\n"); /* TODO : PQI device status and error register and report */ ret = PQI_STATUS_TIMEOUT; } return ret; } /* * Debug admin queue configuration params. 
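To make the new per-queue allocation easier to follow: each ib_queue_t/ob_queue_t now owns its own alloc_dma buffer, laid out as the element array followed by the 32-bit CI/PI index, with alignment headroom. A sketch of the layout implied by the alloc_size arithmetic above (the numeric values of PQI_ADDR_ALIGN and PQI_CI_PI_ALIGN are not visible in this hunk and are assumed to be the PQI-required alignments):

    /*
     * alloc_dma.virt_addr (PQI_ADDR_ALIGN aligned)
     * +--------------------------+-------------------------+------------------+
     * | element array            | CI (IB) / PI (OB)       | headroom         |
     * | num_elem * elem_size     | one PQI_CI_PI_ALIGN     | PQI_ADDR_ALIGN   |
     * | bytes                    | slot, 32-bit index      | bytes            |
     * +--------------------------+-------------------------+------------------+
     * alloc_size = num_elem * elem_size + PQI_CI_PI_ALIGN + PQI_ADDR_ALIGN
     */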
*/ void pqisrc_print_adminq_config(pqisrc_softstate_t *softs) { DBG_INFO(" softs->admin_ib_queue.array_dma_addr : %p\n", (void*)softs->admin_ib_queue.array_dma_addr); DBG_INFO(" softs->admin_ib_queue.array_virt_addr : %p\n", (void*)softs->admin_ib_queue.array_virt_addr); - DBG_INFO(" softs->admin_ib_queue.num_elem : %d\n", + DBG_INFO(" softs->admin_ib_queue.num_elem : %u\n", softs->admin_ib_queue.num_elem); - DBG_INFO(" softs->admin_ib_queue.elem_size : %d\n", + DBG_INFO(" softs->admin_ib_queue.elem_size : %u\n", softs->admin_ib_queue.elem_size); DBG_INFO(" softs->admin_ob_queue.array_dma_addr : %p\n", (void*)softs->admin_ob_queue.array_dma_addr); DBG_INFO(" softs->admin_ob_queue.array_virt_addr : %p\n", (void*)softs->admin_ob_queue.array_virt_addr); - DBG_INFO(" softs->admin_ob_queue.num_elem : %d\n", + DBG_INFO(" softs->admin_ob_queue.num_elem : %u\n", softs->admin_ob_queue.num_elem); - DBG_INFO(" softs->admin_ob_queue.elem_size : %d\n", + DBG_INFO(" softs->admin_ob_queue.elem_size : %u\n", softs->admin_ob_queue.elem_size); DBG_INFO(" softs->admin_ib_queue.pi_register_abs : %p\n", (void*)softs->admin_ib_queue.pi_register_abs); DBG_INFO(" softs->admin_ob_queue.ci_register_abs : %p\n", (void*)softs->admin_ob_queue.ci_register_abs); } /* * Function used to create an admin queue. */ int pqisrc_create_admin_queue(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; +/* struct pqi_dev_adminq_cap *pqi_cap; */ uint32_t admin_q_param = 0; DBG_FUNC("IN\n"); /* Get admin queue details - pqi2-r00a - table 24 */ pqisrc_get_admin_queue_config(softs); /* Decide admin Q config */ pqisrc_decide_admin_queue_config(softs); /* Allocate and init Admin Q pair */ ret = pqisrc_allocate_and_init_adminq(softs); if (ret) { DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret); goto err_out; } /* Write IB Q element array address */ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_elem_array_addr, PQI_ADMIN_IBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ib_queue.array_dma_addr)); /* Write OB Q element array address */ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_elem_array_addr, PQI_ADMIN_OBQ_ELEM_ARRAY_ADDR, LE_64(softs->admin_ob_queue.array_dma_addr)); /* Write IB Q CI address */ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_ibq_ci_addr, PQI_ADMIN_IBQ_CI_ADDR, LE_64(softs->admin_ib_queue.ci_dma_addr)); /* Write OB Q PI address */ PCI_MEM_PUT64(softs, &softs->pqi_reg->admin_obq_pi_addr, PQI_ADMIN_OBQ_PI_ADDR, LE_64(softs->admin_ob_queue.pi_dma_addr)); /* Write Admin Q params pqi-r200a table 36 */ admin_q_param = softs->admin_ib_queue.num_elem | (softs->admin_ob_queue.num_elem << 8)| PQI_ADMIN_QUEUE_MSIX_DISABLE; PCI_MEM_PUT32(softs, &softs->pqi_reg->admin_q_param, PQI_ADMINQ_PARAM, LE_32(admin_q_param)); /* Submit cmd to create Admin Q pair */ ret = pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_CREATE_Q_PAIR); if (ret) { DBG_ERR("Failed to Allocate Admin Q ret : %d\n", ret); goto err_q_create; } /* Admin queue created, get ci,pi offset */ softs->admin_ib_queue.pi_register_offset =(PQISRC_PQI_REG_OFFSET + PCI_MEM_GET64(softs, &softs->pqi_reg->admin_ibq_pi_offset, PQI_ADMIN_IBQ_PI_OFFSET)); softs->admin_ib_queue.pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr + softs->admin_ib_queue.pi_register_offset); softs->admin_ob_queue.ci_register_offset = (PQISRC_PQI_REG_OFFSET + PCI_MEM_GET64(softs, &softs->pqi_reg->admin_obq_ci_offset, PQI_ADMIN_OBQ_CI_OFFSET)); softs->admin_ob_queue.ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr + softs->admin_ob_queue.ci_register_offset); 
os_strlcpy(softs->admin_ib_queue.lockname, "admin_ibqlock", LOCKNAME_SIZE); ret =OS_INIT_PQILOCK(softs, &softs->admin_ib_queue.lock, softs->admin_ib_queue.lockname); if(ret){ DBG_ERR("Admin spinlock initialization failed\n"); softs->admin_ib_queue.lockcreated = false; goto err_lock; } softs->admin_ib_queue.lockcreated = true; /* Print admin q config details */ pqisrc_print_adminq_config(softs); DBG_FUNC("OUT\n"); return ret; err_lock: +#if 0 + pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR); +#endif err_q_create: - os_dma_mem_free(softs, &softs->admin_queue_dma_mem); + pqisrc_destroy_admin_queue(softs); err_out: DBG_FUNC("failed OUT\n"); return ret; } /* * Subroutine used to delete an operational queue. */ int pqisrc_delete_op_queue(pqisrc_softstate_t *softs, uint32_t q_id, boolean_t ibq) { int ret = PQI_STATUS_SUCCESS; /* Firmware doesn't support this now */ #if 0 gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); DBG_FUNC("IN\n"); admin_req.req_type.create_op_iq.qid = q_id; if (ibq) admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_IQ; else admin_req.fn_code = PQI_FUNCTION_DELETE_OPERATIONAL_OQ; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); DBG_FUNC("OUT\n"); #endif return ret; } /* * Function used to destroy the event queue. */ void pqisrc_destroy_event_queue(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); if (softs->event_q.created == true) { int ret = PQI_STATUS_SUCCESS; ret = pqisrc_delete_op_queue(softs, softs->event_q.q_id, false); if (ret) { - DBG_ERR("Failed to Delete Event Q %d\n", softs->event_q.q_id); + DBG_ERR("Failed to Delete Event Q %u\n", softs->event_q.q_id); } softs->event_q.created = false; } /* Free the memory */ - os_dma_mem_free(softs, &softs->event_q_dma_mem); + if (softs->event_q.alloc_dma.virt_addr) + os_dma_mem_free(softs, &softs->event_q.alloc_dma); DBG_FUNC("OUT\n"); } /* * Function used to destroy operational ib queues. 
*/ void pqisrc_destroy_op_ib_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; ib_queue_t *op_ib_q = NULL; + uint32_t total_op_ibq = softs->num_op_raid_ibq; int i; DBG_FUNC("IN\n"); - for (i = 0; i < softs->num_op_raid_ibq; i++) { - /* OP RAID IB Q */ + for (i = 0; i < total_op_ibq; i++) { + int repeat = 0; + /* RAID first */ op_ib_q = &softs->op_raid_ib_q[i]; +release_queue: if (op_ib_q->created == true) { - ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true); + ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, + true); if (ret) { - DBG_ERR("Failed to Delete Raid IB Q %d\n",op_ib_q->q_id); + DBG_ERR("Failed to Delete IB Q %u\n", + op_ib_q->q_id); } op_ib_q->created = false; } - if(op_ib_q->lockcreated==true){ - OS_UNINIT_PQILOCK(&op_ib_q->lock); - op_ib_q->lockcreated = false; - } - - /* OP AIO IB Q */ - op_ib_q = &softs->op_aio_ib_q[i]; - if (op_ib_q->created == true) { - ret = pqisrc_delete_op_queue(softs, op_ib_q->q_id, true); - if (ret) { - DBG_ERR("Failed to Delete AIO IB Q %d\n",op_ib_q->q_id); - } - op_ib_q->created = false; + if (op_ib_q->lockcreated == true) { + OS_UNINIT_PQILOCK(&op_ib_q->lock); + op_ib_q->lockcreated = false; } - if(op_ib_q->lockcreated==true){ - OS_UNINIT_PQILOCK(&op_ib_q->lock); - op_ib_q->lockcreated = false; - } + /* Free the memory */ + if (op_ib_q->alloc_dma.virt_addr) + os_dma_mem_free(softs, &op_ib_q->alloc_dma); + + if (repeat < 1) { + repeat++; + op_ib_q = &softs->op_aio_ib_q[i]; + goto release_queue; + } } - /* Free the memory */ - os_dma_mem_free(softs, &softs->op_ibq_dma_mem); DBG_FUNC("OUT\n"); } /* * Function used to destroy operational ob queues. */ void pqisrc_destroy_op_ob_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; int i; + ob_queue_t *op_ob_q = NULL; DBG_FUNC("IN\n"); for (i = 0; i < softs->num_op_obq; i++) { - ob_queue_t *op_ob_q = NULL; op_ob_q = &softs->op_ob_q[i]; + if (op_ob_q->created == true) { ret = pqisrc_delete_op_queue(softs, op_ob_q->q_id, false); if (ret) { - DBG_ERR("Failed to Delete OB Q %d\n",op_ob_q->q_id); + DBG_ERR("Failed to Delete OB Q %u\n",op_ob_q->q_id); } op_ob_q->created = false; } + + /* Free the memory */ + if (op_ob_q->alloc_dma.virt_addr) + os_dma_mem_free(softs, &op_ob_q->alloc_dma); } /* Free the memory */ - os_dma_mem_free(softs, &softs->op_obq_dma_mem); DBG_FUNC("OUT\n"); } /* * Function used to destroy an admin queue. */ int pqisrc_destroy_admin_queue(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); + + if(softs->admin_ib_queue.lockcreated==true) { + OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock); + softs->admin_ib_queue.lockcreated = false; + } + #if 0 ret = pqisrc_create_delete_adminq(softs, PQI_ADMIN_QUEUE_CONF_FUNC_DEL_Q_PAIR); #endif - os_dma_mem_free(softs, &softs->admin_queue_dma_mem); + + if (softs->admin_ib_queue.alloc_dma.virt_addr) + os_dma_mem_free(softs, &softs->admin_ib_queue.alloc_dma); + + if (softs->admin_ob_queue.alloc_dma.virt_addr) + os_dma_mem_free(softs, &softs->admin_ob_queue.alloc_dma); DBG_FUNC("OUT\n"); return ret; } /* * Function used to change operational ib queue properties. 
*/ int pqisrc_change_op_ibq_queue_prop(pqisrc_softstate_t *softs, ib_queue_t *op_ib_q, uint32_t prop) { int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); DBG_FUNC("IN\n"); admin_req.fn_code = PQI_FUNCTION_CHANGE_OPERATIONAL_IQ_PROP; admin_req.req_type.change_op_iq_prop.qid = op_ib_q->q_id; admin_req.req_type.change_op_iq_prop.vend_specific = prop; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); DBG_FUNC("OUT\n"); return ret; } /* * Function used to create an operational ob queue. */ int pqisrc_create_op_obq(pqisrc_softstate_t *softs, ob_queue_t *op_ob_q) { int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; DBG_FUNC("IN\n"); memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_OQ; admin_req.req_type.create_op_oq.qid = op_ob_q->q_id; admin_req.req_type.create_op_oq.intr_msg_num = op_ob_q->intr_msg_num; admin_req.req_type.create_op_oq.elem_arr_addr = op_ob_q->array_dma_addr; admin_req.req_type.create_op_oq.ob_pi_addr = op_ob_q->pi_dma_addr; admin_req.req_type.create_op_oq.num_elem = op_ob_q->num_elem; admin_req.req_type.create_op_oq.elem_len = op_ob_q->elem_size / 16; DBG_INFO("admin_req.req_type.create_op_oq.qid : %x\n",admin_req.req_type.create_op_oq.qid); DBG_INFO("admin_req.req_type.create_op_oq.intr_msg_num : %x\n", admin_req.req_type.create_op_oq.intr_msg_num ); ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); if( PQI_STATUS_SUCCESS == ret) { op_ob_q->ci_register_offset = (PQISRC_PQI_REG_OFFSET + admin_resp.resp_type.create_op_oq.ci_offset); op_ob_q->ci_register_abs = (uint32_t *)(softs->pci_mem_base_vaddr + op_ob_q->ci_register_offset); } else { int i = 0; DBG_WARN("Error Status Descriptors\n"); for(i = 0; i < 4;i++) DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]); } DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * Function used to create an operational ib queue. */ int pqisrc_create_op_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_ib_q) { int ret = PQI_STATUS_SUCCESS; gen_adm_req_iu_t admin_req; gen_adm_resp_iu_t admin_resp; DBG_FUNC("IN\n"); memset(&admin_req, 0, sizeof(admin_req)); memset(&admin_resp, 0, sizeof(admin_resp)); admin_req.fn_code = PQI_FUNCTION_CREATE_OPERATIONAL_IQ; admin_req.req_type.create_op_iq.qid = op_ib_q->q_id; admin_req.req_type.create_op_iq.elem_arr_addr = op_ib_q->array_dma_addr; admin_req.req_type.create_op_iq.iq_ci_addr = op_ib_q->ci_dma_addr; admin_req.req_type.create_op_iq.num_elem = op_ib_q->num_elem; admin_req.req_type.create_op_iq.elem_len = op_ib_q->elem_size / 16; ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); if( PQI_STATUS_SUCCESS == ret) { op_ib_q->pi_register_offset =(PQISRC_PQI_REG_OFFSET + admin_resp.resp_type.create_op_iq.pi_offset); op_ib_q->pi_register_abs =(uint32_t *)(softs->pci_mem_base_vaddr + op_ib_q->pi_register_offset); } else { int i = 0; DBG_WARN("Error Status Decsriptors\n"); for(i = 0; i < 4;i++) DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]); } DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * subroutine used to create an operational ib queue for AIO. 
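One detail worth noting in pqisrc_create_op_ibq() and pqisrc_create_op_obq() above: element lengths are exchanged with the firmware in units of 16 bytes, so, for example, an elem_size of 64 bytes is reported as elem_len = 64 / 16 = 4, mirroring the * 16 scaling applied when the admin queue capabilities are decoded.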
*/ int pqisrc_create_op_aio_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_aio_ib_q) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); ret = pqisrc_create_op_ibq(softs,op_aio_ib_q); if ( PQI_STATUS_SUCCESS == ret) ret = pqisrc_change_op_ibq_queue_prop(softs, op_aio_ib_q, PQI_CHANGE_OP_IQ_PROP_ASSIGN_AIO); DBG_FUNC("OUT ret : %d\n", ret); return ret; } /* * subroutine used to create an operational ib queue for RAID. */ int pqisrc_create_op_raid_ibq(pqisrc_softstate_t *softs, ib_queue_t *op_raid_ib_q) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); ret = pqisrc_create_op_ibq(softs,op_raid_ib_q); DBG_FUNC("OUT\n"); return ret; } /* * Allocate and create an event queue to process supported events. */ int pqisrc_alloc_and_create_event_queue(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; - uint32_t alloc_size = 0; uint32_t num_elem; - char *virt_addr = NULL; - dma_addr_t dma_addr = 0; - uint64_t event_q_pi_dma_start_offset = 0; - uint32_t event_q_pi_virt_start_offset = 0; - char *event_q_pi_virt_start_addr = NULL; - ob_queue_t *event_q = NULL; - + ob_queue_t *event_q = &softs->event_q; DBG_FUNC("IN\n"); /* * Calculate memory requirements. * If event queue is shared for IO response, number of * elements in event queue depends on num elements in OP OB Q * also. Since event queue element size (32) is more than IO * response size , event queue element size need not be checked * for queue size calculation. */ #ifdef SHARE_EVENT_QUEUE_FOR_IO - num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_NUM_EVENT_Q_ELEM); + num_elem = MIN(softs->num_elem_per_op_obq, PQISRC_MAX_EVENT_QUEUE_ELEM_NUM); #else - num_elem = PQISRC_NUM_EVENT_Q_ELEM; + num_elem = PQISRC_MAX_EVENT_QUEUE_ELEM_NUM; #endif - alloc_size = num_elem * PQISRC_EVENT_Q_ELEM_SIZE; - event_q_pi_dma_start_offset = alloc_size; - event_q_pi_virt_start_offset = alloc_size; - alloc_size += sizeof(uint32_t); /*For IBQ CI*/ + event_q->num_elem = num_elem; + event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE_BYTES; + + ret = pqisrc_allocate_and_init_outbound_q(softs, event_q, "event_queue"); - /* Allocate memory for event queues */ - softs->event_q_dma_mem.tag = "event_queue"; - softs->event_q_dma_mem.size = alloc_size; - softs->event_q_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; - ret = os_dma_mem_alloc(softs, &softs->event_q_dma_mem); if (ret) { - DBG_ERR("Failed to Allocate Event Q ret : %d\n" - , ret); + DBG_ERR("Failed to Allocate EventQ\n"); goto err_out; } - - /* Set up the address */ - virt_addr = softs->event_q_dma_mem.virt_addr; - dma_addr = softs->event_q_dma_mem.dma_addr; - event_q_pi_dma_start_offset += dma_addr; - event_q_pi_virt_start_addr = virt_addr + event_q_pi_virt_start_offset; - - event_q = &softs->event_q; - ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); - FILL_QUEUE_ARRAY_ADDR(event_q,virt_addr,dma_addr); event_q->q_id = PQI_OP_EVENT_QUEUE_ID; - event_q->num_elem = num_elem; - event_q->elem_size = PQISRC_EVENT_Q_ELEM_SIZE; - event_q->pi_dma_addr = event_q_pi_dma_start_offset; - event_q->pi_virt_addr = (uint32_t *)event_q_pi_virt_start_addr; event_q->intr_msg_num = 0; /* vector zero for event */ - ASSERT(!(event_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4)); ret = pqisrc_create_op_obq(softs,event_q); if (ret) { - DBG_ERR("Failed to Create EventQ %d\n",event_q->q_id); + DBG_ERR("Failed to Create EventQ %u\n",event_q->q_id); goto err_out_create; } event_q->created = true; DBG_FUNC("OUT\n"); return ret; err_out_create: pqisrc_destroy_event_queue(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Allocate 
DMA memory and create operational ib queues. */ int pqisrc_alloc_and_create_ib_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; - uint32_t alloc_size = 0; - char *virt_addr = NULL; - dma_addr_t dma_addr = 0; - uint32_t ibq_size = 0; - uint64_t ib_ci_dma_start_offset = 0; - char *ib_ci_virt_start_addr = NULL; - uint32_t ib_ci_virt_start_offset = 0; - uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID; ib_queue_t *op_ib_q = NULL; - uint32_t num_op_ibq = softs->num_op_raid_ibq + - softs->num_op_aio_ibq; + uint32_t ibq_id = PQI_MIN_OP_IB_QUEUE_ID; + uint32_t total_op_ibq = softs->num_op_raid_ibq + softs->num_op_aio_ibq; int i = 0; + char *string = NULL; DBG_FUNC("IN\n"); - /* Calculate memory requirements */ - ibq_size = softs->num_elem_per_op_ibq * softs->ibq_elem_size; - alloc_size = num_op_ibq * ibq_size; - /* CI indexes starts after Queue element array */ - ib_ci_dma_start_offset = alloc_size; - ib_ci_virt_start_offset = alloc_size; - alloc_size += num_op_ibq * sizeof(uint32_t); /*For IBQ CI*/ - - /* Allocate memory for IB queues */ - softs->op_ibq_dma_mem.tag = "op_ib_queue"; - softs->op_ibq_dma_mem.size = alloc_size; - softs->op_ibq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; - ret = os_dma_mem_alloc(softs, &softs->op_ibq_dma_mem); - if (ret) { - DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n", - ret); - goto err_out; - } - - /* Set up the address */ - virt_addr = softs->op_ibq_dma_mem.virt_addr; - dma_addr = softs->op_ibq_dma_mem.dma_addr; - ib_ci_dma_start_offset += dma_addr; - ib_ci_virt_start_addr = virt_addr + ib_ci_virt_start_offset; - ASSERT(softs->num_op_raid_ibq == softs->num_op_aio_ibq); - for (i = 0; i < softs->num_op_raid_ibq; i++) { - /* OP RAID IB Q */ - op_ib_q = &softs->op_raid_ib_q[i]; - ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); - FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr); - op_ib_q->q_id = ibq_id++; + for (i = 0; i < total_op_ibq; i++) { - snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "raid_ibqlock%d", i); - ret = OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname); - if(ret){ - /* TODO: error handling */ - DBG_ERR("raid_ibqlock %d init failed\n", i); - op_ib_q->lockcreated = false; - goto err_lock; + /* OP RAID IB Q */ + if (i % 2 == 0) + { + op_ib_q = &softs->op_raid_ib_q[i/2]; + string = "raid"; + } + else + { + op_ib_q = &softs->op_aio_ib_q[i/2]; + string = "aio"; } - op_ib_q->lockcreated = true; + /* Allocate memory for IB queues */ op_ib_q->num_elem = softs->num_elem_per_op_ibq; - op_ib_q->elem_size = softs->ibq_elem_size; - op_ib_q->ci_dma_addr = ib_ci_dma_start_offset + - (2 * i * sizeof(uint32_t)); - op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr + - (2 * i * sizeof(uint32_t))); - ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4)); - - ret = pqisrc_create_op_raid_ibq(softs, op_ib_q); + op_ib_q->elem_size = softs->max_ibq_elem_size; + + ret = pqisrc_allocate_and_init_inbound_q(softs, op_ib_q, "op_ib_queue"); if (ret) { - DBG_ERR("[ %s ] Failed to Create OP Raid IBQ %d\n", - __func__, op_ib_q->q_id); - goto err_out_create; + DBG_ERR("Failed to Allocate Operational IBQ memory ret : %d\n", + ret); + goto err_out; } - op_ib_q->created = true; - - /* OP AIO IB Q */ - virt_addr += ibq_size; - dma_addr += ibq_size; - op_ib_q = &softs->op_aio_ib_q[i]; - ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); - FILL_QUEUE_ARRAY_ADDR(op_ib_q,virt_addr,dma_addr); op_ib_q->q_id = ibq_id++; - snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "aio_ibqlock%d", i); + + snprintf(op_ib_q->lockname, LOCKNAME_SIZE, "%s_ibqlock_%d", string, i); ret 
= OS_INIT_PQILOCK(softs, &op_ib_q->lock, op_ib_q->lockname); - if(ret){ - /* TODO: error handling */ - DBG_ERR("aio_ibqlock %d init failed\n", i); - op_ib_q->lockcreated = false; - goto err_lock; - } - op_ib_q->lockcreated = true; + if(ret){ + /* TODO: error handling */ + DBG_ERR("%s %d init failed\n", string, i); + op_ib_q->lockcreated = false; + goto err_lock; + } + op_ib_q->lockcreated = true; - op_ib_q->num_elem = softs->num_elem_per_op_ibq; - op_ib_q->elem_size = softs->ibq_elem_size; - op_ib_q->ci_dma_addr = ib_ci_dma_start_offset + - (((2 * i) + 1) * sizeof(uint32_t)); - op_ib_q->ci_virt_addr = (uint32_t*)(ib_ci_virt_start_addr + - (((2 * i) + 1) * sizeof(uint32_t))); - ASSERT(!(op_ib_q->ci_dma_addr & PQI_ADDR_ALIGN_MASK_4)); - - ret = pqisrc_create_op_aio_ibq(softs, op_ib_q); + if (i % 2 == 0) + ret = pqisrc_create_op_raid_ibq(softs, op_ib_q); + else + ret = pqisrc_create_op_aio_ibq(softs, op_ib_q); if (ret) { - DBG_ERR("Failed to Create OP AIO IBQ %d\n",op_ib_q->q_id); + DBG_ERR("Failed to Create OP IBQ type=%s id=%u\n", + string, op_ib_q->q_id); goto err_out_create; } op_ib_q->created = true; - - virt_addr += ibq_size; - dma_addr += ibq_size; } DBG_FUNC("OUT\n"); return ret; err_lock: err_out_create: - pqisrc_destroy_op_ib_queues(softs); err_out: + pqisrc_destroy_op_ib_queues(softs); DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Allocate DMA memory and create operational ob queues. */ int pqisrc_alloc_and_create_ob_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; - uint32_t alloc_size = 0; - char *virt_addr = NULL; - dma_addr_t dma_addr = 0; - uint32_t obq_size = 0; - uint64_t ob_pi_dma_start_offset = 0; - uint32_t ob_pi_virt_start_offset = 0; - char *ob_pi_virt_start_addr = NULL; uint32_t obq_id = PQI_MIN_OP_OB_QUEUE_ID; ob_queue_t *op_ob_q = NULL; - uint32_t num_op_obq = softs->num_op_obq; int i = 0; DBG_FUNC("IN\n"); /* * OB Q element array should be 64 byte aligned. * So the number of elements in OB Q should be multiple * of 4, so that OB Queue element size (16) * num elements * will be multiple of 64. 
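 * A quick arithmetic check of the claim above (illustrative numbers
 * only): any element count that is a multiple of 4 times the 16-byte
 * element size is a multiple of 64, e.g. 36 * 16 = 576 = 9 * 64, so the
 * element array stays 64-byte aligned, whereas a count such as 38 gives
 * 38 * 16 = 608, which is not.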
*/ ALIGN_BOUNDARY(softs->num_elem_per_op_obq, 4); - obq_size = softs->num_elem_per_op_obq * softs->obq_elem_size; - alloc_size += num_op_obq * obq_size; - /* PI indexes starts after Queue element array */ - ob_pi_dma_start_offset = alloc_size; - ob_pi_virt_start_offset = alloc_size; - alloc_size += num_op_obq * sizeof(uint32_t); /*For OBQ PI*/ - - /* Allocate memory for OB queues */ - softs->op_obq_dma_mem.tag = "op_ob_queue"; - softs->op_obq_dma_mem.size = alloc_size; - softs->op_obq_dma_mem.align = PQI_OPQ_ELEM_ARRAY_ALIGN; - ret = os_dma_mem_alloc(softs, &softs->op_obq_dma_mem); - if (ret) { - DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n", - ret); - goto err_out; - } - /* Set up the address */ - virt_addr = softs->op_obq_dma_mem.virt_addr; - dma_addr = softs->op_obq_dma_mem.dma_addr; - ob_pi_dma_start_offset += dma_addr; - ob_pi_virt_start_addr = virt_addr + ob_pi_virt_start_offset; - - DBG_INFO("softs->num_op_obq %d\n",softs->num_op_obq); + DBG_INIT("softs->num_op_obq %u max_obq_elem_size=%u\n",softs->num_op_obq, softs->max_obq_elem_size); for (i = 0; i < softs->num_op_obq; i++) { op_ob_q = &softs->op_ob_q[i]; - ASSERT(!(dma_addr & PQI_ADDR_ALIGN_MASK_64)); - FILL_QUEUE_ARRAY_ADDR(op_ob_q,virt_addr,dma_addr); + + /* Allocate memory for OB queues */ + op_ob_q->num_elem = softs->num_elem_per_op_obq; + op_ob_q->elem_size = PQISRC_OP_OBQ_ELEM_SIZE_BYTES; + ret = pqisrc_allocate_and_init_outbound_q(softs, op_ob_q, "op_ob_queue"); + if (ret) { + DBG_ERR("Failed to Allocate Operational OBQ memory ret : %d\n", + ret); + goto err_out; + } op_ob_q->q_id = obq_id++; if(softs->share_opq_and_eventq == true) op_ob_q->intr_msg_num = i; else op_ob_q->intr_msg_num = i + 1; /* msg num zero for event */ - op_ob_q->num_elem = softs->num_elem_per_op_obq; - op_ob_q->elem_size = softs->obq_elem_size; - op_ob_q->pi_dma_addr = ob_pi_dma_start_offset + - (i * sizeof(uint32_t)); - op_ob_q->pi_virt_addr = (uint32_t*)(ob_pi_virt_start_addr + - (i * sizeof(uint32_t))); - ASSERT(!(op_ob_q->pi_dma_addr & PQI_ADDR_ALIGN_MASK_4)); - - ret = pqisrc_create_op_obq(softs,op_ob_q); + + ret = pqisrc_create_op_obq(softs, op_ob_q); if (ret) { - DBG_ERR("Failed to Create OP OBQ %d\n",op_ob_q->q_id); + DBG_ERR("Failed to Create OP OBQ %u\n",op_ob_q->q_id); goto err_out_create; } op_ob_q->created = true; - virt_addr += obq_size; - dma_addr += obq_size; } DBG_FUNC("OUT\n"); return ret; err_out_create: - pqisrc_destroy_op_ob_queues(softs); err_out: + pqisrc_destroy_op_ob_queues(softs); DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * Function used to create operational queues for the adapter. */ int pqisrc_create_op_queues(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; DBG_FUNC("IN\n"); /* Create Operational IB queues */ ret = pqisrc_alloc_and_create_ib_queues(softs); if (ret) goto err_out; /* Create Operational OB queues */ ret = pqisrc_alloc_and_create_ob_queues(softs); if (ret) goto err_out_obq; /* Create Event queue */ ret = pqisrc_alloc_and_create_event_queue(softs); if (ret) goto err_out_eventq; DBG_FUNC("OUT\n"); return ret; err_out_eventq: pqisrc_destroy_op_ob_queues(softs); err_out_obq: pqisrc_destroy_op_ib_queues(softs); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c index 9b1ef29a2b37..246ab891126d 100644 --- a/sys/dev/smartpqi/smartpqi_request.c +++ b/sys/dev/smartpqi/smartpqi_request.c @@ -1,1054 +1,2324 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. 
and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" -/* - * Attempt to perform offload RAID mapping for a logical volume I/O. - */ - -#define HPSA_RAID_0 0 -#define HPSA_RAID_4 1 -#define HPSA_RAID_1 2 /* also used for RAID 10 */ -#define HPSA_RAID_5 3 /* also used for RAID 50 */ -#define HPSA_RAID_51 4 -#define HPSA_RAID_6 5 /* also used for RAID 60 */ -#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ -#define HPSA_RAID_MAX HPSA_RAID_ADM -#define HPSA_RAID_UNKNOWN 0xff +/* Change this if need to debug why AIO is not being used */ +#define DBG_AIO DBG_IO #define SG_FLAG_LAST 0x40000000 #define SG_FLAG_CHAIN 0x80000000 +/* Local Prototypes */ +static void pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb); +static int fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l); + + /* Subroutine to find out embedded sgl count in IU */ static inline uint32_t -pqisrc_embedded_sgl_count(uint32_t elem_alloted) +pqisrc_embedded_sgl_count(uint32_t elem_alloted, uint8_t iu_type) { - uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU; - DBG_FUNC(" IN "); + uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT; + + DBG_FUNC("IN\n"); + + if (iu_type == PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST || + iu_type == PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST) + embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO; + /** calculate embedded sgl count using num_elem_alloted for IO **/ if(elem_alloted - 1) embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU); - DBG_IO("embedded_sgl_count :%d\n",embedded_sgl_count); + /* DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count); */ - DBG_FUNC(" OUT "); + DBG_FUNC("OUT\n"); return embedded_sgl_count; } /* Subroutine to find out contiguous free elem in IU */ static inline uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q) { uint32_t contiguous_free_elem = 0; - DBG_FUNC(" IN "); + DBG_FUNC("IN\n"); if(pi >= ci) { contiguous_free_elem = (elem_in_q - pi); if(ci == 0) contiguous_free_elem -= 1; } else { contiguous_free_elem = (ci - pi - 1); } - DBG_FUNC(" OUT "); + DBG_FUNC("OUT\n"); return contiguous_free_elem; } /* Subroutine to find out num of 
elements need for the request */ static uint32_t -pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count) +pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count, + pqi_scsi_dev_t *devp, boolean_t is_write, IO_PATH_T io_path) { uint32_t num_sg; uint32_t num_elem_required = 1; - DBG_FUNC(" IN "); - DBG_IO("SGL_Count :%d",SG_Count); + uint32_t sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_DEFAULT; + + DBG_FUNC("IN\n"); + DBG_IO("SGL_Count :%u\n",SG_Count); + + if ((devp->raid_level == SA_RAID_5 || devp->raid_level == SA_RAID_6) + && is_write && (io_path == AIO_PATH)) + sg_in_first_iu = MAX_EMBEDDED_SG_IN_FIRST_IU_RAID56_AIO; /******** If SG_Count greater than max sg per IU i.e 4 or 68 (4 is with out spanning or 68 is with spanning) chaining is required. - OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU then, + OR, If SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU_* then, on these two cases one element is enough. ********/ - if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU) + if(SG_Count > softs->max_sg_per_spanning_cmd || + SG_Count <= sg_in_first_iu) return num_elem_required; /* SGL Count Other Than First IU */ - num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU; + num_sg = SG_Count - sg_in_first_iu; num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU); - DBG_FUNC(" OUT "); + DBG_FUNC("OUT\n"); return num_elem_required; } /* Subroutine to build SG list for the IU submission*/ static boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr, uint32_t num_elem_alloted) { uint32_t i; uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb); sgt_t *sgt = sg_array; sgt_t *sg_chain = NULL; boolean_t partial = false; - DBG_FUNC(" IN "); + DBG_FUNC("IN\n"); - DBG_IO("SGL_Count :%d",num_sg); + /* DBG_IO("SGL_Count :%d",num_sg); */ if (0 == num_sg) { goto out; } - if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) { + if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted, + iu_hdr->iu_type)) { + for (i = 0; i < num_sg; i++, sgt++) { - sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); - sgt->len= OS_GET_IO_SG_LEN(rcb,i); - sgt->flags= 0; - } + sgt->addr= OS_GET_IO_SG_ADDR(rcb,i); + sgt->len= OS_GET_IO_SG_LEN(rcb,i); + sgt->flags= 0; + } sg_array[num_sg - 1].flags = SG_FLAG_LAST; } else { /** SGL Chaining **/ sg_chain = rcb->sg_chain_virt; sgt->addr = rcb->sg_chain_dma; sgt->len = num_sg * sizeof(sgt_t); sgt->flags = SG_FLAG_CHAIN; sgt = sg_chain; for (i = 0; i < num_sg; i++, sgt++) { sgt->addr = OS_GET_IO_SG_ADDR(rcb,i); sgt->len = OS_GET_IO_SG_LEN(rcb,i); sgt->flags = 0; } sg_chain[num_sg - 1].flags = SG_FLAG_LAST; num_sg = 1; partial = true; } out: iu_hdr->iu_length = num_sg * sizeof(sgt_t); - DBG_FUNC(" OUT "); + DBG_FUNC("OUT\n"); return partial; } +#if 0 +static inline void +pqisrc_show_raid_req(pqisrc_softstate_t *softs, pqisrc_raid_req_t *raid_req) +{ + DBG_IO("%30s: 0x%x\n", "raid_req->header.iu_type", + raid_req->header.iu_type); + DBG_IO("%30s: 0x%d\n", "raid_req->response_queue_id", + raid_req->response_queue_id); + DBG_IO("%30s: 0x%x\n", "raid_req->request_id", + raid_req->request_id); + DBG_IO("%30s: 0x%x\n", "raid_req->buffer_length", + raid_req->buffer_length); + DBG_IO("%30s: 0x%x\n", "raid_req->task_attribute", + raid_req->task_attribute); + DBG_IO("%30s: 0x%llx\n", "raid_req->lun_number", + *((long long unsigned int*)raid_req->lun_number)); + DBG_IO("%30s: 0x%x\n", "raid_req->error_index", + raid_req->error_index); + DBG_IO("%30s: 0x%p\n", "raid_req->sg_descriptors[0].addr", + (void *)raid_req->sg_descriptors[0].addr); + 
DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].len", + raid_req->sg_descriptors[0].len); + DBG_IO("%30s: 0x%x\n", "raid_req->sg_descriptors[0].flags", + raid_req->sg_descriptors[0].flags); +} +#endif + /*Subroutine used to Build the RAID request */ static void pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb, pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted) { - DBG_FUNC(" IN "); + DBG_FUNC("IN\n"); raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST; raid_req->header.comp_feature = 0; raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); raid_req->work_area[0] = 0; raid_req->work_area[1] = 0; raid_req->request_id = rcb->tag; raid_req->nexus_id = 0; raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb); memcpy(raid_req->lun_number, rcb->dvp->scsi3addr, sizeof(raid_req->lun_number)); raid_req->protocol_spec = 0; raid_req->data_direction = rcb->data_dir; raid_req->reserved1 = 0; raid_req->fence = 0; raid_req->error_index = raid_req->request_id; raid_req->reserved2 = 0; - raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); - raid_req->command_priority = 0; + raid_req->task_attribute = OS_GET_TASK_ATTR(rcb); + raid_req->command_priority = 0; raid_req->reserved3 = 0; raid_req->reserved4 = 0; raid_req->reserved5 = 0; + raid_req->ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun; /* As cdb and additional_cdb_bytes are contiguous, update them in a single statement */ - memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen); + memcpy(raid_req->cmd.cdb, rcb->cdbp, rcb->cmdlen); #if 0 DBG_IO("CDB :"); for(i = 0; i < rcb->cmdlen ; i++) DBG_IO(" 0x%x \n ",raid_req->cdb[i]); #endif switch (rcb->cmdlen) { case 6: case 10: case 12: case 16: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0; break; case 20: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_4; break; case 24: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_8; break; case 28: raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_12; break; case 32: default: /* todo:review again */ raid_req->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_16; break; } /* Frame SGL Descriptor */ raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb, &raid_req->header, num_elem_alloted); raid_req->header.iu_length += offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t); #if 0 - DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type); - DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id); - DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id); - DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length); - DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute); - DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number); - DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index); - DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr); - DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len); - DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags); + pqisrc_show_raid_req(softs, raid_req); #endif rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_raid_response_error; rcb->resp_qid = raid_req->response_queue_id; - DBG_FUNC(" OUT "); + DBG_FUNC("OUT\n"); } -/*Subroutine used to Build the AIO request */ -static void -pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, - pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) +/* We 
will need to expand this to handle different types of + * aio request structures. + */ +#if 0 +static inline void +pqisrc_show_aio_req(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req) { - DBG_FUNC(" IN "); + DBG_IO("%30s: 0x%x\n", "aio_req->header.iu_type", + aio_req->header.iu_type); + DBG_IO("%30s: 0x%x\n", "aio_req->resp_qid", + aio_req->response_queue_id); + DBG_IO("%30s: 0x%x\n", "aio_req->req_id", + aio_req->req_id); + DBG_IO("%30s: 0x%x\n", "aio_req->nexus", + aio_req->nexus); + DBG_IO("%30s: 0x%x\n", "aio_req->buf_len", + aio_req->buf_len); + DBG_IO("%30s: 0x%x\n", "aio_req->cmd_flags.data_dir", + aio_req->cmd_flags.data_dir); + DBG_IO("%30s: 0x%x\n", "aio_req->attr_prio.task_attr", + aio_req->attr_prio.task_attr); + DBG_IO("%30s: 0x%x\n", "aio_req->err_idx", + aio_req->err_idx); + DBG_IO("%30s: 0x%x\n", "aio_req->num_sg", + aio_req->num_sg); + DBG_IO("%30s: 0x%p\n", "aio_req->sg_desc[0].addr", + (void *)aio_req->sg_desc[0].addr); + DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].len", + aio_req->sg_desc[0].len); + DBG_IO("%30s: 0x%x\n", "aio_req->sg_desc[0].flags", + aio_req->sg_desc[0].flags); +} +#endif + +void +int_to_scsilun(uint64_t lun, uint8_t *scsi_lun) +{ + int i; + + memset(scsi_lun, 0, sizeof(lun)); + for (i = 0; i < sizeof(lun); i += 2) { + scsi_lun[i] = (lun >> 8) & 0xFF; + scsi_lun[i+1] = lun & 0xFF; + lun = lun >> 16; + } +} + +/*Subroutine used to populate AIO IUs. */ +void +pqisrc_build_aio_common(pqisrc_softstate_t *softs, pqi_aio_req_t *aio_req, + rcb_t *rcb, uint32_t num_elem_alloted) +{ + DBG_FUNC("IN\n"); aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST; aio_req->header.comp_feature = 0; aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); aio_req->work_area[0] = 0; aio_req->work_area[1] = 0; aio_req->req_id = rcb->tag; aio_req->res1[0] = 0; aio_req->res1[1] = 0; aio_req->nexus = rcb->ioaccel_handle; aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); - aio_req->data_dir = rcb->data_dir; - aio_req->mem_type = 0; - aio_req->fence = 0; - aio_req->res2 = 0; - aio_req->task_attr = OS_GET_TASK_ATTR(rcb); - aio_req->cmd_prio = 0; - aio_req->res3 = 0; + aio_req->cmd_flags.data_dir = rcb->data_dir; + aio_req->cmd_flags.mem_type = 0; + aio_req->cmd_flags.fence = 0; + aio_req->cmd_flags.res2 = 0; + aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb); + aio_req->attr_prio.cmd_prio = 0; + aio_req->attr_prio.res3 = 0; aio_req->err_idx = aio_req->req_id; aio_req->cdb_len = rcb->cmdlen; - if(rcb->cmdlen > sizeof(aio_req->cdb)) + if (rcb->cmdlen > sizeof(aio_req->cdb)) rcb->cmdlen = sizeof(aio_req->cdb); memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); + memset(aio_req->res4, 0, sizeof(aio_req->res4)); + + uint64_t lun = rcb->cm_ccb->ccb_h.target_lun; + if (lun && (rcb->dvp->is_multi_lun)) { + int_to_scsilun(lun, aio_req->lun); + } + else { + memset(aio_req->lun, 0, sizeof(aio_req->lun)); + } + + /* handle encryption fields */ + if (rcb->encrypt_enable == true) { + aio_req->cmd_flags.encrypt_enable = true; + aio_req->encrypt_key_index = + LE_16(rcb->enc_info.data_enc_key_index); + aio_req->encrypt_twk_low = + LE_32(rcb->enc_info.encrypt_tweak_lower); + aio_req->encrypt_twk_high = + LE_32(rcb->enc_info.encrypt_tweak_upper); + } else { + aio_req->cmd_flags.encrypt_enable = 0; + aio_req->encrypt_key_index = 0; + aio_req->encrypt_twk_high = 0; + aio_req->encrypt_twk_low = 0; + } + /* Frame SGL Descriptor */ + aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, + &aio_req->header, num_elem_alloted); + + aio_req->num_sg = aio_req->header.iu_length / 
sizeof(sgt_t); + + /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */ + + aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - + sizeof(iu_header_t); + /* set completion and error handlers. */ + rcb->success_cmp_callback = pqisrc_process_io_response_success; + rcb->error_cmp_callback = pqisrc_process_aio_response_error; + rcb->resp_qid = aio_req->response_queue_id; + DBG_FUNC("OUT\n"); + +} +/*Subroutine used to show standard AIO IU fields */ +void +pqisrc_show_aio_common(pqisrc_softstate_t *softs, rcb_t *rcb, + pqi_aio_req_t *aio_req) +{ +#ifdef DEBUG_AIO + DBG_INFO("AIO IU Content, tag# 0x%08x", rcb->tag); + DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type); + DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature); + DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length); + DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id); + DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id); + DBG_INFO("%15s: 0x%x\n", "nexus", aio_req->nexus); + DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len); + DBG_INFO("%15s:\n", "cmd_flags"); + DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir); + DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial); + DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type); + DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence); + DBG_INFO("%15s: 0x%x\n", "encryption", + aio_req->cmd_flags.encrypt_enable); + DBG_INFO("%15s:\n", "attr_prio"); + DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr); + DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio); + DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index); + DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low); + DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high); + pqisrc_show_cdb(softs, "AIOC", rcb, aio_req->cdb); + DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx); + DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg); + DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len); #if 0 - DBG_IO("CDB : \n"); - for(int i = 0; i < rcb->cmdlen ; i++) - DBG_IO(" 0x%x \n",aio_req->cdb[i]); + DBG_INFO("%15s: 0x%x\n", "lun", aio_req->lun); + DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr", + (void *)aio_req->sg_desc[0].addr); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len", + aio_req->sg_desc[0].len); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags", + aio_req->sg_desc[0].flags); #endif - memset(aio_req->lun,0,sizeof(aio_req->lun)); - memset(aio_req->res4,0,sizeof(aio_req->res4)); - - if(rcb->encrypt_enable == true) { - aio_req->encrypt_enable = true; - aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index); - aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower); - aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper); +#endif /* DEBUG_AIO */ +} + +/*Subroutine used to populate AIO RAID 1 write bypass IU. 
*/ +void +pqisrc_build_aio_R1_write(pqisrc_softstate_t *softs, + pqi_aio_raid1_write_req_t *aio_req, rcb_t *rcb, + uint32_t num_elem_alloted) +{ + DBG_FUNC("IN\n"); + if (!rcb->dvp) { + DBG_WARN("%s: DEBUG: dev ptr is null", __func__); + return; + } + if (!rcb->dvp->raid_map) { + DBG_WARN("%s: DEBUG: raid_map is null", __func__); + return; + } + + aio_req->header.iu_type = PQI_IU_TYPE_RAID1_WRITE_BYPASS_REQUEST; + aio_req->header.comp_feature = 0; + aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); + aio_req->work_area[0] = 0; + aio_req->work_area[1] = 0; + aio_req->req_id = rcb->tag; + aio_req->volume_id = (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF); + aio_req->nexus_1 = rcb->it_nexus[0]; + aio_req->nexus_2 = rcb->it_nexus[1]; + aio_req->nexus_3 = rcb->it_nexus[2]; + aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); + aio_req->cmd_flags.data_dir = rcb->data_dir; + aio_req->cmd_flags.mem_type = 0; + aio_req->cmd_flags.fence = 0; + aio_req->cmd_flags.res2 = 0; + aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb); + aio_req->attr_prio.cmd_prio = 0; + aio_req->attr_prio.res3 = 0; + if(rcb->cmdlen > sizeof(aio_req->cdb)) + rcb->cmdlen = sizeof(aio_req->cdb); + memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); + aio_req->err_idx = aio_req->req_id; + aio_req->cdb_len = rcb->cmdlen; + aio_req->num_drives = LE_16(rcb->dvp->raid_map->layout_map_count); + + /* handle encryption fields */ + if (rcb->encrypt_enable == true) { + aio_req->cmd_flags.encrypt_enable = true; + aio_req->encrypt_key_index = + LE_16(rcb->enc_info.data_enc_key_index); + aio_req->encrypt_twk_low = + LE_32(rcb->enc_info.encrypt_tweak_lower); + aio_req->encrypt_twk_high = + LE_32(rcb->enc_info.encrypt_tweak_upper); } else { - aio_req->encrypt_enable = 0; + aio_req->cmd_flags.encrypt_enable = 0; aio_req->encrypt_key_index = 0; aio_req->encrypt_twk_high = 0; aio_req->encrypt_twk_low = 0; } - /* Frame SGL Descriptor */ - aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, - &aio_req->header, num_elem_alloted); + aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, + &aio_req->header, num_elem_alloted); aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); - DBG_INFO("aio_req->num_sg :%d",aio_req->num_sg); + /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */ - aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) - + aio_req->header.iu_length += offsetof(pqi_aio_raid1_write_req_t, sg_desc) - sizeof(iu_header_t); + + /* set completion and error handlers. 
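+ * (Aside on the IU populated above, with illustrative values: for a
+ * two-drive RAID 1 volume the raid map's layout_map_count is expected
+ * to be 2, so num_drives = 2 and nexus_1/nexus_2 carry the ioaccel
+ * handles of both mirrors collected in rcb->it_nexus[]; the idea being
+ * that a single bypass write updates every copy, with nexus_3 only
+ * coming into play for triple-mirror RAID ADM.)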
*/ + rcb->success_cmp_callback = pqisrc_process_io_response_success; + rcb->error_cmp_callback = pqisrc_process_aio_response_error; + rcb->resp_qid = aio_req->response_queue_id; + DBG_FUNC("OUT\n"); + +} + +/*Subroutine used to show AIO RAID1 Write bypass IU fields */ +void +pqisrc_show_aio_R1_write(pqisrc_softstate_t *softs, rcb_t *rcb, + pqi_aio_raid1_write_req_t *aio_req) +{ + +#ifdef DEBUG_AIO + DBG_INFO("AIO RAID1 Write IU Content, tag# 0x%08x", rcb->tag); + DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type); + DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature); + DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length); + DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id); + DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id); + DBG_INFO("%15s: 0x%x\n", "volume_id", aio_req->volume_id); + DBG_INFO("%15s: 0x%x\n", "nexus_1", aio_req->nexus_1); + DBG_INFO("%15s: 0x%x\n", "nexus_2", aio_req->nexus_2); + DBG_INFO("%15s: 0x%x\n", "nexus_3", aio_req->nexus_3); + DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len); + DBG_INFO("%15s:\n", "cmd_flags"); + DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir); + DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial); + DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type); + DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence); + DBG_INFO("%15s: 0x%x\n", "encryption", + aio_req->cmd_flags.encrypt_enable); + DBG_INFO("%15s:\n", "attr_prio"); + DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr); + DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio); + DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index); + pqisrc_show_cdb(softs, "AIOR1W", rcb, aio_req->cdb); + DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx); + DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg); + DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len); + DBG_INFO("%15s: 0x%x\n", "num_drives", aio_req->num_drives); + DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low); + DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high); #if 0 - DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type); - DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid); - DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id); - DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus); - DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len); - DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir); - DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr); - DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx); - DBG_IO("aio_req->num_sg :%d",aio_req->num_sg); - DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr); - DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len); - DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags); + DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr", + (void *)aio_req->sg_desc[0].addr); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len", + aio_req->sg_desc[0].len); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags", + aio_req->sg_desc[0].flags); #endif +#endif /* DEBUG_AIO */ +} +/*Subroutine used to populate AIO Raid5 or 6 write bypass IU */ +void +pqisrc_build_aio_R5or6_write(pqisrc_softstate_t *softs, + pqi_aio_raid5or6_write_req_t *aio_req, rcb_t *rcb, + uint32_t num_elem_alloted) +{ + DBG_FUNC("IN\n"); + uint32_t index; + unsigned num_data_disks; + unsigned num_metadata_disks; + unsigned total_disks; + num_data_disks = LE_16(rcb->dvp->raid_map->data_disks_per_row); + 
num_metadata_disks = LE_16(rcb->dvp->raid_map->metadata_disks_per_row); + total_disks = num_data_disks + num_metadata_disks; + + index = PQISRC_DIV_ROUND_UP(rcb->raid_map_index + 1, total_disks); + index *= total_disks; + index -= num_metadata_disks; + + switch (rcb->dvp->raid_level) { + case SA_RAID_5: + aio_req->header.iu_type = + PQI_IU_TYPE_RAID5_WRITE_BYPASS_REQUEST; + break; + case SA_RAID_6: + aio_req->header.iu_type = + PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST; + break; + default: + DBG_ERR("WRONG RAID TYPE FOR FUNCTION\n"); + } + aio_req->header.comp_feature = 0; + aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb); + aio_req->work_area[0] = 0; + aio_req->work_area[1] = 0; + aio_req->req_id = rcb->tag; + aio_req->volume_id = (LE_32(rcb->dvp->scsi3addr[0]) & 0x3FFF); + aio_req->data_it_nexus = rcb->dvp->raid_map->dev_data[rcb->raid_map_index].ioaccel_handle; + aio_req->p_parity_it_nexus = + rcb->dvp->raid_map->dev_data[index].ioaccel_handle; + if (aio_req->header.iu_type == + PQI_IU_TYPE_RAID6_WRITE_BYPASS_REQUEST) { + aio_req->q_parity_it_nexus = + rcb->dvp->raid_map->dev_data[index + 1].ioaccel_handle; + } + aio_req->xor_multiplier = + rcb->dvp->raid_map->dev_data[rcb->raid_map_index].xor_mult[1]; + aio_req->row = rcb->row_num; + /*aio_req->reserved = rcb->row_num * rcb->blocks_per_row + + rcb->dvp->raid_map->disk_starting_blk;*/ + aio_req->buf_len = GET_SCSI_BUFFLEN(rcb); + aio_req->cmd_flags.data_dir = rcb->data_dir; + aio_req->cmd_flags.mem_type = 0; + aio_req->cmd_flags.fence = 0; + aio_req->cmd_flags.res2 = 0; + aio_req->attr_prio.task_attr = OS_GET_TASK_ATTR(rcb); + aio_req->attr_prio.cmd_prio = 0; + aio_req->attr_prio.res3 = 0; + if (rcb->cmdlen > sizeof(aio_req->cdb)) + rcb->cmdlen = sizeof(aio_req->cdb); + memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen); + aio_req->err_idx = aio_req->req_id; + aio_req->cdb_len = rcb->cmdlen; +#if 0 + /* Stubbed out for later */ + aio_req->header.iu_type = iu_type; + aio_req->data_it_nexus = ; + aio_req->p_parity_it_nexus = ; + aio_req->q_parity_it_nexus = ; + aio_req->row = ; + aio_req->stripe_lba = ; +#endif + /* handle encryption fields */ + if (rcb->encrypt_enable == true) { + aio_req->cmd_flags.encrypt_enable = true; + aio_req->encrypt_key_index = + LE_16(rcb->enc_info.data_enc_key_index); + aio_req->encrypt_twk_low = + LE_32(rcb->enc_info.encrypt_tweak_lower); + aio_req->encrypt_twk_high = + LE_32(rcb->enc_info.encrypt_tweak_upper); + } else { + aio_req->cmd_flags.encrypt_enable = 0; + aio_req->encrypt_key_index = 0; + aio_req->encrypt_twk_high = 0; + aio_req->encrypt_twk_low = 0; + } + /* Frame SGL Descriptor */ + aio_req->cmd_flags.partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb, + &aio_req->header, num_elem_alloted); + + aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t); + + /* DBG_INFO("aio_req->num_sg :%d\n", aio_req->num_sg); */ + + aio_req->header.iu_length += offsetof(pqi_aio_raid5or6_write_req_t, sg_desc) - + sizeof(iu_header_t); + /* set completion and error handlers. 
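+ * (Sanity check on the parity-index arithmetic at the top of this
+ * function, using made-up geometry: with 3 data disks and 1 parity
+ * disk per row, total_disks = 4, a data entry at raid_map_index 5 sits
+ * in row 1 and index = DIV_ROUND_UP(5 + 1, 4) * 4 - 1 = 7, that row's
+ * P-parity slot; for RAID 6 with 2 metadata disks per row,
+ * total_disks = 5, raid_map_index 6 gives DIV_ROUND_UP(7, 5) * 5 - 2 = 8
+ * for P, and Q is taken from index + 1 = 9.)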
*/ rcb->success_cmp_callback = pqisrc_process_io_response_success; rcb->error_cmp_callback = pqisrc_process_aio_response_error; rcb->resp_qid = aio_req->response_queue_id; + DBG_FUNC("OUT\n"); + +} + +/*Subroutine used to show AIO RAID5/6 Write bypass IU fields */ +void +pqisrc_show_aio_R5or6_write(pqisrc_softstate_t *softs, rcb_t *rcb, + pqi_aio_raid5or6_write_req_t *aio_req) +{ +#ifdef DEBUG_AIO + DBG_INFO("AIO RAID5or6 Write IU Content, tag# 0x%08x\n", rcb->tag); + DBG_INFO("%15s: 0x%x\n", "iu_type", aio_req->header.iu_type); + DBG_INFO("%15s: 0x%x\n", "comp_feat", aio_req->header.comp_feature); + DBG_INFO("%15s: 0x%x\n", "length", aio_req->header.iu_length); + DBG_INFO("%15s: 0x%x\n", "resp_qid", aio_req->response_queue_id); + DBG_INFO("%15s: 0x%x\n", "req_id", aio_req->req_id); + DBG_INFO("%15s: 0x%x\n", "volume_id", aio_req->volume_id); + DBG_INFO("%15s: 0x%x\n", "data_it_nexus", + aio_req->data_it_nexus); + DBG_INFO("%15s: 0x%x\n", "p_parity_it_nexus", + aio_req->p_parity_it_nexus); + DBG_INFO("%15s: 0x%x\n", "q_parity_it_nexus", + aio_req->q_parity_it_nexus); + DBG_INFO("%15s: 0x%x\n", "buf_len", aio_req->buf_len); + DBG_INFO("%15s:\n", "cmd_flags"); + DBG_INFO("%15s: 0x%x\n", "data_dir", aio_req->cmd_flags.data_dir); + DBG_INFO("%15s: 0x%x\n", "partial", aio_req->cmd_flags.partial); + DBG_INFO("%15s: 0x%x\n", "mem_type", aio_req->cmd_flags.mem_type); + DBG_INFO("%15s: 0x%x\n", "fence", aio_req->cmd_flags.fence); + DBG_INFO("%15s: 0x%x\n", "encryption", + aio_req->cmd_flags.encrypt_enable); + DBG_INFO("%15s:\n", "attr_prio"); + DBG_INFO("%15s: 0x%x\n", "task_attr", aio_req->attr_prio.task_attr); + DBG_INFO("%15s: 0x%x\n", "cmd_prio", aio_req->attr_prio.cmd_prio); + DBG_INFO("%15s: 0x%x\n", "dek_index", aio_req->encrypt_key_index); + pqisrc_show_cdb(softs, "AIOR56W", rcb, aio_req->cdb); + DBG_INFO("%15s: 0x%x\n", "err_idx", aio_req->err_idx); + DBG_INFO("%15s: 0x%x\n", "num_sg", aio_req->num_sg); + DBG_INFO("%15s: 0x%x\n", "cdb_len", aio_req->cdb_len); + DBG_INFO("%15s: 0x%x\n", "tweak_lower", aio_req->encrypt_twk_low); + DBG_INFO("%15s: 0x%x\n", "tweak_upper", aio_req->encrypt_twk_high); + DBG_INFO("%15s: 0x%lx\n", "row", aio_req->row); +#if 0 + DBG_INFO("%15s: 0x%lx\n", "stripe_lba", aio_req->stripe_lba); + DBG_INFO("%15s: 0x%p\n", "sg_desc[0].addr", + (void *)aio_req->sg_desc[0].addr); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].len", + aio_req->sg_desc[0].len); + DBG_INFO("%15s: 0x%x\n", "sg_desc[0].flags", + aio_req->sg_desc[0].flags); +#endif +#endif /* DEBUG_AIO */ +} - DBG_FUNC(" OUT "); +/* Is the cdb a read command? */ +boolean_t +pqisrc_cdb_is_read(uint8_t *cdb) +{ + if (cdb[0] == SCMD_READ_6 || cdb[0] == SCMD_READ_10 || + cdb[0] == SCMD_READ_12 || cdb[0] == SCMD_READ_16) + return true; + return false; +} + +/* Is the cdb a write command? 
*/ +boolean_t +pqisrc_cdb_is_write(uint8_t *cdb) +{ + if (cdb == NULL) + return false; + if (cdb[0] == SCMD_WRITE_6 || cdb[0] == SCMD_WRITE_10 || + cdb[0] == SCMD_WRITE_12 || cdb[0] == SCMD_WRITE_16) + return true; + return false; } -/*Function used to build and send RAID/AIO */ +/*Subroutine used to show the AIO request */ +void +pqisrc_show_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, + pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) +{ + boolean_t is_write; + DBG_FUNC("IN\n"); + + is_write = pqisrc_cdb_is_write(rcb->cdbp); + + if (!is_write) { + pqisrc_show_aio_common(softs, rcb, aio_req); + goto out; + } + + switch (rcb->dvp->raid_level) { + case SA_RAID_0: + pqisrc_show_aio_common(softs, rcb, aio_req); + break; + case SA_RAID_1: + case SA_RAID_ADM: + pqisrc_show_aio_R1_write(softs, rcb, + (pqi_aio_raid1_write_req_t *)aio_req); + break; + case SA_RAID_5: + case SA_RAID_6: + pqisrc_show_aio_R5or6_write(softs, rcb, + (pqi_aio_raid5or6_write_req_t *)aio_req); + break; + } + +out: + DBG_FUNC("OUT\n"); + +} + + +void +pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb, + pqi_aio_req_t *aio_req, uint32_t num_elem_alloted) +{ + boolean_t is_write; + DBG_FUNC("IN\n"); + + is_write = pqisrc_cdb_is_write(rcb->cdbp); + + if (is_write) { + switch (rcb->dvp->raid_level) { + case SA_RAID_0: + pqisrc_build_aio_common(softs, aio_req, + rcb, num_elem_alloted); + break; + case SA_RAID_1: + case SA_RAID_ADM: + pqisrc_build_aio_R1_write(softs, + (pqi_aio_raid1_write_req_t *)aio_req, + rcb, num_elem_alloted); + + break; + case SA_RAID_5: + case SA_RAID_6: + pqisrc_build_aio_R5or6_write(softs, + (pqi_aio_raid5or6_write_req_t *)aio_req, + rcb, num_elem_alloted); + break; + } + } else { + pqisrc_build_aio_common(softs, aio_req, rcb, num_elem_alloted); + } + + pqisrc_show_aio_io(softs, rcb, aio_req, num_elem_alloted); + + DBG_FUNC("OUT\n"); +} + +/* + * Return true from this function to prevent AIO from handling this request. + * True is returned if the request is determined to be part of a stream, or + * if the controller does not handle AIO at the appropriate RAID level. + */ +static boolean_t +pqisrc_is_parity_write_stream(pqisrc_softstate_t *softs, rcb_t *rcb) +{ + os_ticks_t oldest_ticks; + uint8_t lru_index; + int i; + int rc; + pqi_scsi_dev_t *device; + struct pqi_stream_data *pqi_stream_data; + aio_req_locator_t loc; + + DBG_FUNC("IN\n"); + + rc = fill_lba_for_scsi_rw(softs, rcb->cdbp , &loc); + if (rc != PQI_STATUS_SUCCESS) { + return false; + } + + /* check writes only */ + if (!pqisrc_cdb_is_write(rcb->cdbp)) { + return false; + } + + if (!softs->enable_stream_detection) { + return false; + } + + device = rcb->dvp; + if (!device) { + return false; + } + + /* + * check for R5/R6 streams. + */ + if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) { + return false; + } + + /* + * If controller does not support AIO R{5,6} writes, need to send + * requests down non-aio path. + */ + if ((device->raid_level == SA_RAID_5 && !softs->aio_raid5_write_bypass) || + (device->raid_level == SA_RAID_6 && !softs->aio_raid6_write_bypass)) { + return true; + } + + lru_index = 0; + oldest_ticks = INT_MAX; + for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { + pqi_stream_data = &device->stream_data[i]; + /* + * check for adjacent request or request is within + * the previous request. 
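+ * Illustration with made-up numbers: if this slot last saw a write
+ * that ended at next_lba = 1000 and the new write has first = 1000
+ * and cnt = 16, it is treated as a continuation of the stream,
+ * next_lba advances to 1016 and the request is kept off the AIO
+ * bypass (the function returns true); a write that lands elsewhere
+ * simply claims the least-recently-used slot below and AIO remains
+ * allowed.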
+ */ + if ((pqi_stream_data->next_lba && + loc.block.first >= pqi_stream_data->next_lba) && + loc.block.first <= pqi_stream_data->next_lba + + loc.block.cnt) { + pqi_stream_data->next_lba = loc.block.first + + loc.block.cnt; + pqi_stream_data->last_accessed = TICKS; + return true; + } + + /* unused entry */ + if (pqi_stream_data->last_accessed == 0) { + lru_index = i; + break; + } + + /* Find entry with oldest last accessed time */ + if (pqi_stream_data->last_accessed <= oldest_ticks) { + oldest_ticks = pqi_stream_data->last_accessed; + lru_index = i; + } + } + + /* + * Set LRU entry + */ + pqi_stream_data = &device->stream_data[lru_index]; + pqi_stream_data->last_accessed = TICKS; + pqi_stream_data->next_lba = loc.block.first + loc.block.cnt; + + DBG_FUNC("OUT\n"); + + return false; +} + +/** + Determine if a request is eligible for AIO. Build/map + the request if using AIO path to a RAID volume. + + return the path that should be used for this request +*/ +static IO_PATH_T +determine_io_path_build_bypass(pqisrc_softstate_t *softs,rcb_t *rcb) +{ + IO_PATH_T io_path = AIO_PATH; + pqi_scsi_dev_t *devp = rcb->dvp; + int ret = PQI_STATUS_FAILURE; + + /* Default to using the host CDB directly (will be used if targeting RAID + path or HBA mode */ + rcb->cdbp = OS_GET_CDBP(rcb); + + if(!rcb->aio_retry) { + + /** IO for Physical Drive, Send in AIO PATH **/ + if(IS_AIO_PATH(devp)) { + rcb->ioaccel_handle = devp->ioaccel_handle; + return io_path; + } + + /** IO for RAID Volume, ByPass IO, Send in AIO PATH unless part of stream **/ + if (devp->offload_enabled && !pqisrc_is_parity_write_stream(softs, rcb)) { + ret = pqisrc_build_scsi_cmd_raidbypass(softs, devp, rcb); + } + + if (PQI_STATUS_FAILURE == ret) { + io_path = RAID_PATH; + } else { + ASSERT(rcb->cdbp == rcb->bypass_cdb); + } + } else { + /* Retrying failed AIO IO */ + io_path = RAID_PATH; + } + + return io_path; +} + +uint8_t +pqisrc_get_aio_data_direction(rcb_t *rcb) +{ + switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: return SOP_DATA_DIR_FROM_DEVICE; + case CAM_DIR_OUT: return SOP_DATA_DIR_TO_DEVICE; + case CAM_DIR_NONE: return SOP_DATA_DIR_NONE; + default: return SOP_DATA_DIR_UNKNOWN; + } +} + +uint8_t +pqisrc_get_raid_data_direction(rcb_t *rcb) +{ + switch (rcb->cm_ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: return SOP_DATA_DIR_TO_DEVICE; + case CAM_DIR_OUT: return SOP_DATA_DIR_FROM_DEVICE; + case CAM_DIR_NONE: return SOP_DATA_DIR_NONE; + default: return SOP_DATA_DIR_UNKNOWN; + } +} + +/* Function used to build and send RAID/AIO */ int pqisrc_build_send_io(pqisrc_softstate_t *softs,rcb_t *rcb) { ib_queue_t *ib_q_array = softs->op_aio_ib_q; ib_queue_t *ib_q = NULL; char *ib_iu = NULL; - IO_PATH_T io_path = AIO_PATH; + IO_PATH_T io_path; uint32_t TraverseCount = 0; int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb); int qindex = first_qindex; uint32_t num_op_ib_q = softs->num_op_aio_ibq; uint32_t num_elem_needed; uint32_t num_elem_alloted = 0; pqi_scsi_dev_t *devp = rcb->dvp; - uint8_t raidbypass_cdb[16]; + boolean_t is_write; - DBG_FUNC(" IN "); + DBG_FUNC("IN\n"); - if(!rcb->aio_retry) { - rcb->cdbp = OS_GET_CDBP(rcb); - if(IS_AIO_PATH(devp)) { - /** IO for Physical Drive **/ - /** Send in AIO PATH**/ - rcb->ioaccel_handle = devp->ioaccel_handle; - } else { - int ret = PQI_STATUS_FAILURE; - /** IO for RAID Volume **/ - if (devp->offload_enabled) { - /** ByPass IO ,Send in AIO PATH **/ - ret = pqisrc_send_scsi_cmd_raidbypass(softs, - devp, rcb, raidbypass_cdb); - } - if (PQI_STATUS_FAILURE == ret) { - /** 
Send in RAID PATH **/ - io_path = RAID_PATH; - num_op_ib_q = softs->num_op_raid_ibq; - ib_q_array = softs->op_raid_ib_q; - } else { - rcb->cdbp = raidbypass_cdb; - } - } - } else { - /* Retrying failed AIO IO */ - io_path = RAID_PATH; - rcb->cdbp = OS_GET_CDBP(rcb); + /* Note: this will determine if the request is eligible for AIO */ + io_path = determine_io_path_build_bypass(softs, rcb); + + if (io_path == RAID_PATH) + { + /* Update direction for RAID path */ + rcb->data_dir = pqisrc_get_raid_data_direction(rcb); num_op_ib_q = softs->num_op_raid_ibq; ib_q_array = softs->op_raid_ib_q; } + else { + rcb->data_dir = pqisrc_get_aio_data_direction(rcb); + if (rcb->data_dir == SOP_DATA_DIR_UNKNOWN) { + DBG_ERR("Unknown Direction\n"); + } + } - num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb)); - DBG_IO("num_elem_needed :%d",num_elem_needed); + is_write = pqisrc_cdb_is_write(rcb->cdbp); + /* coverity[unchecked_value] */ + num_elem_needed = pqisrc_num_elem_needed(softs, + OS_GET_IO_SG_COUNT(rcb), devp, is_write, io_path); + DBG_IO("num_elem_needed :%u",num_elem_needed); do { uint32_t num_elem_available; ib_q = (ib_q_array + qindex); PQI_LOCK(&ib_q->lock); num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local, *(ib_q->ci_virt_addr), ib_q->num_elem); - DBG_IO("num_elem_avialable :%d\n",num_elem_available); + DBG_IO("num_elem_available :%u\n",num_elem_available); if(num_elem_available >= num_elem_needed) { num_elem_alloted = num_elem_needed; break; } DBG_IO("Current queue is busy! Hop to next queue\n"); PQI_UNLOCK(&ib_q->lock); qindex = (qindex + 1) % num_op_ib_q; if(qindex == first_qindex) { if (num_elem_needed == 1) break; TraverseCount += 1; num_elem_needed = 1; } }while(TraverseCount < 2); - DBG_IO("num_elem_alloted :%d",num_elem_alloted); + DBG_IO("num_elem_alloted :%u",num_elem_alloted); if (num_elem_alloted == 0) { DBG_WARN("OUT: IB Queues were full\n"); return PQI_STATUS_QFULL; } pqisrc_increment_device_active_io(softs,devp); /* Get IB Queue Slot address to build IU */ ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size); if(io_path == AIO_PATH) { - /** Build AIO structure **/ - pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t*)ib_iu, - num_elem_alloted); + /* Fill in the AIO IU per request and raid type */ + pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu, + num_elem_alloted); } else { /** Build RAID structure **/ - pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t*)ib_iu, + pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu, num_elem_alloted); } rcb->req_pending = true; rcb->req_q = ib_q; rcb->path = io_path; + pqisrc_increment_io_counters(softs, rcb); + /* Update the local PI */ ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem; - DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local); - DBG_INFO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); + DBG_IO("ib_q->pi_local : %x\n", ib_q->pi_local); + DBG_IO("*ib_q->ci_virt_addr: %x\n",*(ib_q->ci_virt_addr)); /* Inform the fw about the new IU */ PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local); - PQI_UNLOCK(&ib_q->lock); - DBG_FUNC(" OUT "); - return PQI_STATUS_SUCCESS; + PQI_UNLOCK(&ib_q->lock); + DBG_FUNC("OUT\n"); + return PQI_STATUS_SUCCESS; +} + +/* Subroutine used to set encryption info as part of RAID bypass IO*/ +static inline void +pqisrc_set_enc_info(struct pqi_enc_info *enc_info, + struct raid_map *raid_map, uint64_t first_block) +{ + uint32_t volume_blk_size; + + /* + * Set the encryption tweak values based on logical
block address. + * If the block size is 512, the tweak value is equal to the LBA. + * For other block sizes, tweak value is (LBA * block size) / 512. + */ + volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); + if (volume_blk_size != 512) + first_block = (first_block * volume_blk_size) / 512; + + enc_info->data_enc_key_index = + GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); + enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); + enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); +} + + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. + */ + +#define HPSA_RAID_0 0 +#define HPSA_RAID_4 1 +#define HPSA_RAID_1 2 /* also used for RAID 10 */ +#define HPSA_RAID_5 3 /* also used for RAID 50 */ +#define HPSA_RAID_51 4 +#define HPSA_RAID_6 5 /* also used for RAID 60 */ +#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ +#define HPSA_RAID_MAX HPSA_RAID_ADM +#define HPSA_RAID_UNKNOWN 0xff + +/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ +static int +fill_lba_for_scsi_rw(pqisrc_softstate_t *softs, uint8_t *cdb, aio_req_locator_t *l) +{ + + if (!l) { + DBG_INFO("No locator ptr: AIO ineligible"); + return PQI_STATUS_FAILURE; + } + + if (cdb == NULL) + return PQI_STATUS_FAILURE; + + switch (cdb[0]) { + case SCMD_WRITE_6: + l->is_write = true; + /* coverity[fallthrough] */ + case SCMD_READ_6: + l->block.first = (uint64_t)(((cdb[1] & 0x1F) << 16) | + (cdb[2] << 8) | cdb[3]); + l->block.cnt = (uint32_t)cdb[4]; + if (l->block.cnt == 0) + l->block.cnt = 256; /*blkcnt 0 means 256 */ + break; + case SCMD_WRITE_10: + l->is_write = true; + /* coverity[fallthrough] */ + case SCMD_READ_10: + l->block.first = (uint64_t)GET_BE32(&cdb[2]); + l->block.cnt = (uint32_t)GET_BE16(&cdb[7]); + break; + case SCMD_WRITE_12: + l->is_write = true; + /* coverity[fallthrough] */ + case SCMD_READ_12: + l->block.first = (uint64_t)GET_BE32(&cdb[2]); + l->block.cnt = GET_BE32(&cdb[6]); + break; + case SCMD_WRITE_16: + l->is_write = true; + /* coverity[fallthrough] */ + case SCMD_READ_16: + l->block.first = GET_BE64(&cdb[2]); + l->block.cnt = GET_BE32(&cdb[10]); + break; + default: + /* Process via normal I/O path. */ + DBG_AIO("NOT read or write 6/10/12/16: AIO ineligible"); + return PQI_STATUS_FAILURE; + } + return PQI_STATUS_SUCCESS; +} + + +/* determine whether writes to certain types of RAID are supported. */ +inline boolean_t +pqisrc_is_supported_write(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device) +{ + + DBG_FUNC("IN\n"); + + /* Raid0 was always supported */ + if (device->raid_level == SA_RAID_0) + return true; + + /* module params for individual adv. aio write features may be on, + * which affects ALL controllers, but some controllers + * do not support adv. aio write. + */ + if (!softs->adv_aio_capable) + return false; + + /* if the raid write bypass feature is turned on, + * then the write is supported. + */ + switch (device->raid_level) { + case SA_RAID_1: + case SA_RAID_ADM: + if (softs->aio_raid1_write_bypass) + return true; + break; + case SA_RAID_5: + if (softs->aio_raid5_write_bypass) + return true; + break; + case SA_RAID_6: + if (softs->aio_raid6_write_bypass) + return true; + } + + /* otherwise, it must be an unsupported write. 
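+ * (Worked example for fill_lba_for_scsi_rw() above, with bytes
+ * invented for illustration: a READ(10) CDB of
+ * 28 00 00 00 10 00 00 00 08 00 decodes to block.first = 0x1000 from
+ * the big-endian bytes 2-5 and block.cnt = 8 from bytes 7-8; for the
+ * 6-byte opcodes a transfer length of 0 is read as 256 blocks.)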
*/ + DBG_IO("AIO ineligible: write not supported for raid type\n"); + DBG_FUNC("OUT\n"); + return false; + +} + +/* check for zero-byte transfers, invalid blocks, and wraparound */ +static inline boolean_t +pqisrc_is_invalid_block(pqisrc_softstate_t *softs, aio_req_locator_t *l) +{ + DBG_FUNC("IN\n"); + + if (l->block.cnt == 0) { + DBG_AIO("AIO ineligible: blk_cnt=0\n"); + DBG_FUNC("OUT\n"); + return true; + } + + if (l->block.last < l->block.first || + l->block.last >= + GET_LE64((uint8_t *)&l->raid_map->volume_blk_cnt)) { + DBG_AIO("AIO ineligible: last block < first\n"); + DBG_FUNC("OUT\n"); + return true; + } + + DBG_FUNC("OUT\n"); + return false; +} + +/* Compute various attributes of request's location */ +static inline boolean_t +pqisrc_calc_disk_params(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb) +{ + DBG_FUNC("IN\n"); + + /* grab #disks, strip size, and layout map count from raid map */ + l->row.data_disks = + GET_LE16((uint8_t *)&l->raid_map->data_disks_per_row); + l->strip_sz = + GET_LE16((uint8_t *)(&l->raid_map->strip_size)); + l->map.layout_map_count = + GET_LE16((uint8_t *)(&l->raid_map->layout_map_count)); + + /* Calculate stripe information for the request. */ + l->row.blks_per_row = l->row.data_disks * l->strip_sz; + if (!l->row.blks_per_row || !l->strip_sz) { + DBG_AIO("AIO ineligible\n"); + DBG_FUNC("OUT\n"); + return false; + } + /* use __udivdi3 ? */ + rcb->blocks_per_row = l->row.blks_per_row; + l->row.first = l->block.first / l->row.blks_per_row; + rcb->row_num = l->row.first; + l->row.last = l->block.last / l->row.blks_per_row; + l->row.offset_first = (uint32_t)(l->block.first - + (l->row.first * l->row.blks_per_row)); + l->row.offset_last = (uint32_t)(l->block.last - + (l->row.last * l->row.blks_per_row)); + l->col.first = l->row.offset_first / l->strip_sz; + l->col.last = l->row.offset_last / l->strip_sz; + + DBG_FUNC("OUT\n"); + return true; +} + +/* Not AIO-eligible if it isnt' a single row/column. */ +static inline boolean_t +pqisrc_is_single_row_column(pqisrc_softstate_t *softs, aio_req_locator_t *l) +{ + boolean_t ret = true; + DBG_FUNC("IN\n"); + + if (l->row.first != l->row.last || l->col.first != l->col.last) { + DBG_AIO("AIO ineligible\n"); + ret = false; + } + DBG_FUNC("OUT\n"); + return ret; +} + +/* figure out disks/row, row, and map index. */ +static inline boolean_t +pqisrc_set_map_row_and_idx(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t *rcb) +{ + if (!l->row.data_disks) { + DBG_INFO("AIO ineligible: no data disks?\n"); + return false; + } + + l->row.total_disks = l->row.data_disks + + LE_16(l->raid_map->metadata_disks_per_row); + + l->map.row = ((uint32_t)(l->row.first >> + l->raid_map->parity_rotation_shift)) % + GET_LE16((uint8_t *)(&l->raid_map->row_cnt)); + + l->map.idx = (l->map.row * l->row.total_disks) + l->col.first; + rcb->raid_map_index = l->map.idx; + rcb->raid_map_row = l->map.row; + + return true; +} + +/* set the mirror for a raid 1/10/ADM */ +static inline void +pqisrc_set_read_mirror(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device, aio_req_locator_t *l) +{ + /* Avoid direct use of device->offload_to_mirror within this + * function since multiple threads might simultaneously + * increment it beyond the range of device->layout_map_count -1. 
+ */ + + int mirror = device->offload_to_mirror[l->map.idx]; + int next_mirror = mirror + 1; + + if (next_mirror >= l->map.layout_map_count) + next_mirror = 0; + + device->offload_to_mirror[l->map.idx] = next_mirror; + l->map.idx += mirror * l->row.data_disks; +} + +/* collect ioaccel handles for mirrors of given location. */ +static inline boolean_t +pqisrc_set_write_mirrors( + pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device, + aio_req_locator_t *l, + rcb_t *rcb) +{ + uint32_t mirror = 0; + uint32_t index; + + if (l->map.layout_map_count > PQISRC_MAX_SUPPORTED_MIRRORS) + return false; + + do { + index = l->map.idx + (l->row.data_disks * mirror); + rcb->it_nexus[mirror] = + l->raid_map->dev_data[index].ioaccel_handle; + mirror++; + } while (mirror != l->map.layout_map_count); + + return true; +} + +/* Make sure first and last block are in the same R5/R6 RAID group. */ +static inline boolean_t +pqisrc_is_r5or6_single_group(pqisrc_softstate_t *softs, aio_req_locator_t *l) +{ + boolean_t ret = true; + + DBG_FUNC("IN\n"); + l->r5or6.row.blks_per_row = l->strip_sz * l->row.data_disks; + l->stripesz = l->r5or6.row.blks_per_row * l->map.layout_map_count; + l->group.first = (l->block.first % l->stripesz) / + l->r5or6.row.blks_per_row; + l->group.last = (l->block.last % l->stripesz) / + l->r5or6.row.blks_per_row; + + if (l->group.first != l->group.last) { + DBG_AIO("AIO ineligible"); + ret = false; + } + + DBG_FUNC("OUT\n"); + ASSERT(ret == true); + return ret; +} +/* Make sure R5 or R6 request doesn't span rows. */ +static inline boolean_t +pqisrc_is_r5or6_single_row(pqisrc_softstate_t *softs, aio_req_locator_t *l) +{ + boolean_t ret = true; + + DBG_FUNC("IN\n"); + + /* figure row nums containing first & last block */ + l->row.first = l->r5or6.row.first = + l->block.first / l->stripesz; + l->r5or6.row.last = l->block.last / l->stripesz; + + if (l->r5or6.row.first != l->r5or6.row.last) { + DBG_AIO("AIO ineligible"); + ret = false; + } + + DBG_FUNC("OUT\n"); + ASSERT(ret == true); + return ret; +} + +/* Make sure R5 or R6 request doesn't span columns. 
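+ * (Aside on the R5/R6 locality checks above, using made-up geometry:
+ * with strip_sz = 128, data_disks = 3 and layout_map_count = 2, say
+ * two parity groups, blks_per_row = 384 and stripesz = 768; a request
+ * at block.first = 390 with cnt = 16 stays entirely in group 1, row 0,
+ * and would also pass the single-column check below, so it remains
+ * AIO-eligible, while anything crossing those boundaries falls back to
+ * the normal RAID path.)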
*/ +static inline boolean_t +pqisrc_is_r5or6_single_column(pqisrc_softstate_t *softs, aio_req_locator_t *l) +{ + boolean_t ret = true; + + /* Find the columns of the first and last block */ + l->row.offset_first = l->r5or6.row.offset_first = + (uint32_t)((l->block.first % l->stripesz) % + l->r5or6.row.blks_per_row); + l->r5or6.row.offset_last = + (uint32_t)((l->block.last % l->stripesz) % + l->r5or6.row.blks_per_row); + + l->col.first = l->r5or6.row.offset_first / l->strip_sz; + l->r5or6.col.first = l->col.first; + l->r5or6.col.last = l->r5or6.row.offset_last / l->strip_sz; + + if (l->r5or6.col.first != l->r5or6.col.last) { + DBG_AIO("AIO ineligible"); + ret = false; + } + + ASSERT(ret == true); + return ret; +} + + +/* Set the map row and index for a R5 or R6 AIO request */ +static inline void +pqisrc_set_r5or6_row_and_index(aio_req_locator_t *l, + rcb_t *rcb) +{ + l->map.row = ((uint32_t) + (l->row.first >> l->raid_map->parity_rotation_shift)) % + GET_LE16((uint8_t *)(&l->raid_map->row_cnt)); + + l->map.idx = (l->group.first * + (GET_LE16((uint8_t *)(&l->raid_map->row_cnt)) + * l->row.total_disks)) + + (l->map.row * l->row.total_disks) + + l->col.first; + + rcb->raid_map_index = l->map.idx; + rcb->raid_map_row = l->map.row; +} + +/* calculate physical disk block for aio request */ +static inline boolean_t +pqisrc_calc_aio_block(aio_req_locator_t *l) +{ + boolean_t ret = true; + + l->block.disk_block = + GET_LE64((uint8_t *) (&l->raid_map->disk_starting_blk)) + + (l->row.first * l->strip_sz) + + ((uint64_t)(l->row.offset_first) - (uint64_t)(l->col.first) * l->strip_sz); + + /* any values we should be checking here? if not convert to void */ + return ret; +} + +/* Handle differing logical/physical block sizes. */ +static inline uint32_t +pqisrc_handle_blk_size_diffs(aio_req_locator_t *l) +{ + uint32_t disk_blk_cnt; + disk_blk_cnt = l->block.cnt; + + if (l->raid_map->phys_blk_shift) { + l->block.disk_block <<= l->raid_map->phys_blk_shift; + disk_blk_cnt <<= l->raid_map->phys_blk_shift; + } + return disk_blk_cnt; +} + +/* Make sure AIO request doesn't exceed the max that AIO device can + * handle based on dev type, Raid level, and encryption status. + * TODO: make limits dynamic when this becomes possible. 
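+ * (Aside on pqisrc_handle_blk_size_diffs() above, illustrative values:
+ * with phys_blk_shift = 3, i.e. the member drives addressed in units 8
+ * times smaller than the volume's logical blocks, a disk_block of 10
+ * and count of 2 become physical block 80 and count 16 before the
+ * bypass CDB is built.)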
+ */ +inline boolean_t +pqisrc_aio_req_too_big(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device, rcb_t *rcb, + aio_req_locator_t *l, uint32_t disk_blk_cnt) +{ + boolean_t ret = false; + uint32_t dev_max; + uint32_t size = disk_blk_cnt * device->raid_map->volume_blk_size; + dev_max = size; + + /* filter for nvme crypto */ + if (device->is_nvme && rcb->encrypt_enable) { + if (softs->max_aio_rw_xfer_crypto_nvme != 0) { + dev_max = MIN(dev_max,softs->max_aio_rw_xfer_crypto_nvme); + } + } + + /* filter for RAID 5/6/50/60 */ + if (!device->is_physical_device && + (device->raid_level == SA_RAID_5 || + device->raid_level == SA_RAID_51 || + device->raid_level == SA_RAID_6)) { + if (softs->max_aio_write_raid5_6 != 0) { + dev_max = MIN(dev_max,softs->max_aio_write_raid5_6); + } + } + + /* filter for RAID ADM */ + if (!device->is_physical_device && + (device->raid_level == SA_RAID_ADM) && + (softs->max_aio_write_raid1_10_3drv != 0)) { + dev_max = MIN(dev_max, + softs->max_aio_write_raid1_10_3drv); + } + + /* filter for RAID 1/10 */ + if (!device->is_physical_device && + (device->raid_level == SA_RAID_1) && + (softs->max_aio_write_raid1_10_2drv != 0)) { + dev_max = MIN(dev_max, + softs->max_aio_write_raid1_10_2drv); + } + + + if (size > dev_max) { + DBG_AIO("AIO ineligible: size=%u, max=%u", size, dev_max); + ret = true; + } + + return ret; } -/* Subroutine used to set encryption info as part of RAID bypass IO*/ + +#ifdef DEBUG_RAID_MAP static inline void -pqisrc_set_enc_info(struct pqi_enc_info *enc_info, - struct raid_map *raid_map, uint64_t first_block) +pqisrc_aio_show_raid_map(pqisrc_softstate_t *softs, struct raid_map *m) { - uint32_t volume_blk_size; + int i; - /* - * Set the encryption tweak values based on logical block address. - * If the block size is 512, the tweak value is equal to the LBA. - * For other block sizes, tweak value is (LBA * block size) / 512. 
- */ - volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size); - if (volume_blk_size != 512) - first_block = (first_block * volume_blk_size) / 512; + if (!m) { + DBG_WARN("No RAID MAP!\n"); + return; + } + DBG_INFO("======= Raid Map ================\n"); + DBG_INFO("%-25s: 0x%x\n", "StructureSize", m->structure_size); + DBG_INFO("%-25s: 0x%x\n", "LogicalBlockSize", m->volume_blk_size); + DBG_INFO("%-25s: 0x%lx\n", "LogicalBlockCount", m->volume_blk_cnt); + DBG_INFO("%-25s: 0x%x\n", "PhysicalBlockShift", m->phys_blk_shift); + DBG_INFO("%-25s: 0x%x\n", "ParityRotationShift", + m->parity_rotation_shift); + DBG_INFO("%-25s: 0x%x\n", "StripSize", m->strip_size); + DBG_INFO("%-25s: 0x%lx\n", "DiskStartingBlock", m->disk_starting_blk); + DBG_INFO("%-25s: 0x%lx\n", "DiskBlockCount", m->disk_blk_cnt); + DBG_INFO("%-25s: 0x%x\n", "DataDisksPerRow", m->data_disks_per_row); + DBG_INFO("%-25s: 0x%x\n", "MetdataDisksPerRow", + m->metadata_disks_per_row); + DBG_INFO("%-25s: 0x%x\n", "RowCount", m->row_cnt); + DBG_INFO("%-25s: 0x%x\n", "LayoutMapCnt", m->layout_map_count); + DBG_INFO("%-25s: 0x%x\n", "fEncryption", m->flags); + DBG_INFO("%-25s: 0x%x\n", "DEK", m->data_encryption_key_index); + for (i = 0; i < RAID_MAP_MAX_ENTRIES; i++) { + if (m->dev_data[i].ioaccel_handle == 0) + break; + DBG_INFO("%-25s: %d: 0x%04x\n", "ioaccel_handle, disk", + i, m->dev_data[i].ioaccel_handle); + } +} +#endif /* DEBUG_RAID_MAP */ - enc_info->data_enc_key_index = - GET_LE16((uint8_t *)&raid_map->data_encryption_key_index); - enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16)); - enc_info->encrypt_tweak_lower = ((uint32_t)(first_block)); +static inline void +pqisrc_aio_show_locator_info(pqisrc_softstate_t *softs, + aio_req_locator_t *l, uint32_t disk_blk_cnt, rcb_t *rcb) +{ +#ifdef DEBUG_AIO_LOCATOR + pqisrc_aio_show_raid_map(softs, l->raid_map); + + DBG_INFO("======= AIO Locator Content, tag#0x%08x =====\n", rcb->tag); + DBG_INFO("%-25s: 0x%lx\n", "block.first", l->block.first); + DBG_INFO("%-25s: 0x%lx\n", "block.last", l->block.last); + DBG_INFO("%-25s: 0x%x\n", "block.cnt", l->block.cnt); + DBG_INFO("%-25s: 0x%lx\n", "block.disk_block", l->block.disk_block); + DBG_INFO("%-25s: 0x%x\n", "row.blks_per_row", l->row.blks_per_row); + DBG_INFO("%-25s: 0x%lx\n", "row.first", l->row.first); + DBG_INFO("%-25s: 0x%lx\n", "row.last", l->row.last); + DBG_INFO("%-25s: 0x%x\n", "row.offset_first", l->row.offset_first); + DBG_INFO("%-25s: 0x%x\n", "row.offset_last", l->row.offset_last); + DBG_INFO("%-25s: 0x%x\n", "row.data_disks", l->row.data_disks); + DBG_INFO("%-25s: 0x%x\n", "row.total_disks", l->row.total_disks); + DBG_INFO("%-25s: 0x%x\n", "col.first", l->col.first); + DBG_INFO("%-25s: 0x%x\n", "col.last", l->col.last); + + if (l->raid_level == SA_RAID_5 || l->raid_level == SA_RAID_6) { + DBG_INFO("%-25s: 0x%x\n", "r5or6.row.blks_per_row", + l->r5or6.row.blks_per_row); + DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.first", l->r5or6.row.first); + DBG_INFO("%-25s: 0x%lx\n", "r5or6.row.last", l->r5or6.row.last); + DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_first", + l->r5or6.row.offset_first); + DBG_INFO("%-25s: 0x%x\n", "r5or6.row.offset_last", + l->r5or6.row.offset_last); + DBG_INFO("%-25s: 0x%x\n", "r5or6.row.data_disks", + l->r5or6.row.data_disks); + DBG_INFO("%-25s: 0x%x\n", "r5or6.row.total_disks", + l->r5or6.row.total_disks); + DBG_INFO("%-25s: 0x%x\n", "r5or6.col.first", l->r5or6.col.first); + DBG_INFO("%-25s: 0x%x\n", "r5or6.col.last", l->r5or6.col.last); + } + DBG_INFO("%-25s: 0x%x\n", 
"map.row", l->map.row); + DBG_INFO("%-25s: 0x%x\n", "map.idx", l->map.idx); + DBG_INFO("%-25s: 0x%x\n", "map.layout_map_count", + l->map.layout_map_count); + DBG_INFO("%-25s: 0x%x\n", "group.first", l->group.first); + DBG_INFO("%-25s: 0x%x\n", "group.last", l->group.last); + DBG_INFO("%-25s: 0x%x\n", "group.cur", l->group.cur); + DBG_INFO("%-25s: %d\n", "is_write", l->is_write); + DBG_INFO("%-25s: 0x%x\n", "stripesz", l->stripesz); + DBG_INFO("%-25s: 0x%x\n", "strip_sz", l->strip_sz); + DBG_INFO("%-25s: %d\n", "offload_to_mirror", l->offload_to_mirror); + DBG_INFO("%-25s: %d\n", "raid_level", l->raid_level); + +#endif /* DEBUG_AIO_LOCATOR */ } -/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass*/ -int -check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk, - uint32_t *blk_cnt) +/* build the aio cdb */ +inline void +pqisrc_aio_build_cdb(aio_req_locator_t *l, + uint32_t disk_blk_cnt, rcb_t *rcb, uint8_t *cdb) { + uint8_t cdb_length; - switch (cdb[0]) { - case SCMD_WRITE_6: - *is_write = true; - case SCMD_READ_6: - *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) | - (cdb[2] << 8) | cdb[3]); - *blk_cnt = (uint32_t)cdb[4]; - if (*blk_cnt == 0) - *blk_cnt = 256; - break; - case SCMD_WRITE_10: - *is_write = true; - case SCMD_READ_10: - *fst_blk = (uint64_t)GET_BE32(&cdb[2]); - *blk_cnt = (uint32_t)GET_BE16(&cdb[7]); - break; - case SCMD_WRITE_12: - *is_write = true; - case SCMD_READ_12: - *fst_blk = (uint64_t)GET_BE32(&cdb[2]); - *blk_cnt = GET_BE32(&cdb[6]); - break; - case SCMD_WRITE_16: - *is_write = true; - case SCMD_READ_16: - *fst_blk = GET_BE64(&cdb[2]); - *blk_cnt = GET_BE32(&cdb[10]); - break; - default: - /* Process via normal I/O path. */ - return PQI_STATUS_FAILURE; + if (l->block.disk_block > 0xffffffff) { + cdb[0] = l->is_write ? SCMD_WRITE_16 : SCMD_READ_16; + cdb[1] = 0; + PUT_BE64(l->block.disk_block, &cdb[2]); + PUT_BE32(disk_blk_cnt, &cdb[10]); + cdb[15] = 0; + cdb_length = 16; + } else { + cdb[0] = l->is_write ? 
SCMD_WRITE_10 : SCMD_READ_10; + cdb[1] = 0; + PUT_BE32(l->block.disk_block, &cdb[2]); + cdb[6] = 0; + PUT_BE16(disk_blk_cnt, &cdb[7]); + cdb[9] = 0; + cdb_length = 10; } - return PQI_STATUS_SUCCESS; + + rcb->cmdlen = cdb_length; + } /* print any arbitrary buffer of length total_len */ void pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf, uint32_t total_len, uint32_t flags) { #define LINE_BUF_LEN 60 #define INDEX_PER_LINE 16 uint32_t buf_consumed = 0; int ii; char line_buf[LINE_BUF_LEN]; int line_len; /* written length per line */ uint8_t this_char; if (user_buf == NULL) return; + memset(line_buf, 0, LINE_BUF_LEN); + /* Print index columns */ if (flags & PRINT_FLAG_HDR_COLUMN) { for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++) { line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii); if ((line_len + 4) >= LINE_BUF_LEN) - break; + break; } - DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf); + DBG_INFO("%15.15s:[ %s ]\n", "header", line_buf); } /* Print index columns */ while(buf_consumed < total_len) { memset(line_buf, 0, LINE_BUF_LEN); for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++) { this_char = *((char*)(user_buf) + buf_consumed); line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char); buf_consumed++; if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN) - break; + break; } - DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf); + DBG_INFO("%15.15s:[ %s ]\n", msg, line_buf); + } +} + +/* print CDB with column header */ +void +pqisrc_show_cdb(pqisrc_softstate_t *softs, char *msg, rcb_t *rcb, uint8_t *cdb) +{ + /* Print the CDB contents */ + pqisrc_print_buffer(softs, msg, cdb, rcb->cmdlen, PRINT_FLAG_HDR_COLUMN); +} + +void +pqisrc_show_rcb_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg, void *err_info) +{ + pqi_scsi_dev_t *devp; + + if (rcb == NULL || rcb->dvp == NULL) + { + DBG_ERR("Invalid rcb or dev ptr! 
rcb=%p\n", rcb); + return; + } + + devp = rcb->dvp; + + /* print the host and mapped CDB */ + DBG_INFO("\n"); + DBG_INFO("----- Start Dump: %s -----\n", msg); + pqisrc_print_buffer(softs, "host cdb", OS_GET_CDBP(rcb), rcb->cmdlen, PRINT_FLAG_HDR_COLUMN); + if (OS_GET_CDBP(rcb) != rcb->cdbp) + pqisrc_print_buffer(softs, "aio mapped cdb", rcb->cdbp, rcb->cmdlen, 0); + + DBG_INFO("tag=0x%x dir=%u host_timeout=%ums\n", rcb->tag, + rcb->data_dir, (uint32_t)rcb->host_timeout_ms); + + DBG_INFO("BTL: %d:%d:%d addr=0x%x\n", devp->bus, devp->target, + devp->lun, GET_LE32(devp->scsi3addr)); + + if (rcb->path == AIO_PATH) + { + DBG_INFO("handle=0x%x\n", rcb->ioaccel_handle); + DBG_INFO("row=%u blk/row=%u index=%u map_row=%u\n", + rcb->row_num, rcb->blocks_per_row, rcb->raid_map_index, rcb->raid_map_row); + + if (err_info) + pqisrc_show_aio_error_info(softs, rcb, err_info); + } + + else /* RAID path */ + { + if (err_info) + pqisrc_show_raid_error_info(softs, rcb, err_info); } + + + DBG_INFO("----- Done -----\n\n"); } /* * Function used to build and send RAID bypass request to the adapter */ int -pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, - pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb) +pqisrc_build_scsi_cmd_raidbypass(pqisrc_softstate_t *softs, + pqi_scsi_dev_t *device, rcb_t *rcb) { - struct raid_map *raid_map; - boolean_t is_write = false; - uint32_t map_idx; - uint64_t fst_blk, lst_blk; - uint32_t blk_cnt, blks_per_row; - uint64_t fst_row, lst_row; - uint32_t fst_row_offset, lst_row_offset; - uint32_t fst_col, lst_col; - uint32_t r5or6_blks_per_row; - uint64_t r5or6_fst_row, r5or6_lst_row; - uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset; - uint32_t r5or6_fst_col, r5or6_lst_col; - uint16_t data_disks_per_row, total_disks_per_row; - uint16_t layout_map_count; - uint32_t stripesz; - uint16_t strip_sz; - uint32_t fst_grp, lst_grp, cur_grp; - uint32_t map_row; - uint64_t disk_block; uint32_t disk_blk_cnt; - uint8_t cdb_length; - int offload_to_mirror; - int i; - DBG_FUNC(" IN \n"); - DBG_IO("!!!!!\n"); + struct aio_req_locator loc; + struct aio_req_locator *l = &loc; + int rc; + memset(l, 0, sizeof(*l)); - /* Check for eligible opcode, get LBA and block count. */ - memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen); + DBG_FUNC("IN\n"); - for(i = 0; i < rcb->cmdlen ; i++) - DBG_IO(" CDB [ %d ] : %x\n",i,cdb[i]); - if(check_for_scsi_opcode(cdb, &is_write, - &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE) - return PQI_STATUS_FAILURE; - /* Check for write to non-RAID-0. */ - if (is_write && device->raid_level != SA_RAID_0) + if (device == NULL) { + DBG_INFO("device is NULL\n"); return PQI_STATUS_FAILURE; - - if(blk_cnt == 0) + } + if (device->raid_map == NULL) { + DBG_INFO("tag=0x%x BTL: %d:%d:%d Raid map is NULL\n", + rcb->tag, device->bus, device->target, device->lun); return PQI_STATUS_FAILURE; + } - lst_blk = fst_blk + blk_cnt - 1; - raid_map = device->raid_map; - - /* Check for invalid block or wraparound. */ - if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) || - lst_blk < fst_blk) + /* Check for eligible op, get LBA and block count. */ + rc = fill_lba_for_scsi_rw(softs, OS_GET_CDBP(rcb), l); + if (rc == PQI_STATUS_FAILURE) return PQI_STATUS_FAILURE; - data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row); - strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size)); - layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count)); - - /* Calculate stripe information for the request. 
*/ - blks_per_row = data_disks_per_row * strip_sz; - if (!blks_per_row) - return PQI_STATUS_FAILURE; /*Send the IO in raid path itself, not AIO or raidbypass*/ - - /* use __udivdi3 ? */ - fst_row = fst_blk / blks_per_row; - lst_row = lst_blk / blks_per_row; - fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row)); - lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row)); - fst_col = fst_row_offset / strip_sz; - lst_col = lst_row_offset / strip_sz; - - /* If this isn't a single row/column then give to the controller. */ - if (fst_row != lst_row || fst_col != lst_col) + if (l->is_write && !pqisrc_is_supported_write(softs, device)) return PQI_STATUS_FAILURE; - /* Proceeding with driver mapping. */ - total_disks_per_row = data_disks_per_row + - GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row)); - map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % - GET_LE16((uint8_t *)(&raid_map->row_cnt)); - map_idx = (map_row * total_disks_per_row) + fst_col; - - /* RAID 1 */ - if (device->raid_level == SA_RAID_1) { - if (device->offload_to_mirror) - map_idx += data_disks_per_row; - device->offload_to_mirror = !device->offload_to_mirror; - } else if (device->raid_level == SA_RAID_ADM) { - /* RAID ADM */ - /* - * Handles N-way mirrors (R1-ADM) and R10 with # of drives - * divisible by 3. - */ - offload_to_mirror = device->offload_to_mirror; - if (offload_to_mirror == 0) { - /* use physical disk in the first mirrored group. */ - map_idx %= data_disks_per_row; - } else { - do { - /* - * Determine mirror group that map_idx - * indicates. - */ - cur_grp = map_idx / data_disks_per_row; - - if (offload_to_mirror != cur_grp) { - if (cur_grp < - layout_map_count - 1) { - /* - * Select raid index from - * next group. - */ - map_idx += data_disks_per_row; - cur_grp++; - } else { - /* - * Select raid index from first - * group. - */ - map_idx %= data_disks_per_row; - cur_grp = 0; - } - } - } while (offload_to_mirror != cur_grp); - } - - /* Set mirror group to use next time. */ - offload_to_mirror = - (offload_to_mirror >= layout_map_count - 1) ? - 0 : offload_to_mirror + 1; - if(offload_to_mirror >= layout_map_count) - return PQI_STATUS_FAILURE; + l->raid_map = device->raid_map; + l->block.last = l->block.first + l->block.cnt - 1; + l->raid_level = device->raid_level; - device->offload_to_mirror = offload_to_mirror; - /* - * Avoid direct use of device->offload_to_mirror within this - * function since multiple threads might simultaneously - * increment it beyond the range of device->layout_map_count -1. 
- */ - } else if ((device->raid_level == SA_RAID_5 || - device->raid_level == SA_RAID_6) && layout_map_count > 1) { - /* RAID 50/60 */ - /* Verify first and last block are in same RAID group */ - r5or6_blks_per_row = strip_sz * data_disks_per_row; - stripesz = r5or6_blks_per_row * layout_map_count; + if (pqisrc_is_invalid_block(softs, l)) + return PQI_STATUS_FAILURE; - fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row; - lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row; + if (!pqisrc_calc_disk_params(softs, l, rcb)) + return PQI_STATUS_FAILURE; - if (fst_grp != lst_grp) - return PQI_STATUS_FAILURE; + if (!pqisrc_is_single_row_column(softs, l)) + return PQI_STATUS_FAILURE; - /* Verify request is in a single row of RAID 5/6 */ - fst_row = r5or6_fst_row = - fst_blk / stripesz; - r5or6_lst_row = lst_blk / stripesz; + if (!pqisrc_set_map_row_and_idx(softs, l, rcb)) + return PQI_STATUS_FAILURE; - if (r5or6_fst_row != r5or6_lst_row) - return PQI_STATUS_FAILURE; + /* Proceeding with driver mapping. */ - /* Verify request is in a single column */ - fst_row_offset = r5or6_fst_row_offset = - (uint32_t)((fst_blk % stripesz) % - r5or6_blks_per_row); - r5or6_lst_row_offset = - (uint32_t)((lst_blk % stripesz) % - r5or6_blks_per_row); + switch (device->raid_level) { + case SA_RAID_1: + case SA_RAID_ADM: + if (l->is_write) { + if (!pqisrc_set_write_mirrors(softs, device, l, rcb)) + return PQI_STATUS_FAILURE; + } else + pqisrc_set_read_mirror(softs, device, l); + break; + case SA_RAID_5: + case SA_RAID_6: + if (l->map.layout_map_count > 1 || l->is_write) { - fst_col = r5or6_fst_row_offset / strip_sz; - r5or6_fst_col = fst_col; - r5or6_lst_col = r5or6_lst_row_offset / strip_sz; + if (!pqisrc_is_r5or6_single_group(softs, l)) + return PQI_STATUS_FAILURE; - if (r5or6_fst_col != r5or6_lst_col) - return PQI_STATUS_FAILURE; + if (!pqisrc_is_r5or6_single_row(softs, l)) + return PQI_STATUS_FAILURE; - /* Request is eligible */ - map_row = - ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) % - GET_LE16((uint8_t *)(&raid_map->row_cnt)); + if (!pqisrc_is_r5or6_single_column(softs, l)) + return PQI_STATUS_FAILURE; - map_idx = (fst_grp * - (GET_LE16((uint8_t *)(&raid_map->row_cnt)) * - total_disks_per_row)) + - (map_row * total_disks_per_row) + fst_col; + pqisrc_set_r5or6_row_and_index(l, rcb); + } + break; } - rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle; - disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) + - fst_row * strip_sz + - (fst_row_offset - fst_col * strip_sz); - disk_blk_cnt = blk_cnt; - - /* Handle differing logical/physical block sizes. */ - if (raid_map->phys_blk_shift) { - disk_block <<= raid_map->phys_blk_shift; - disk_blk_cnt <<= raid_map->phys_blk_shift; + if (l->map.idx >= RAID_MAP_MAX_ENTRIES) { + DBG_INFO("AIO ineligible: index exceeds max map entries"); + return PQI_STATUS_FAILURE; } - if (disk_blk_cnt > 0xffff) + rcb->ioaccel_handle = + l->raid_map->dev_data[l->map.idx].ioaccel_handle; + + if (!pqisrc_calc_aio_block(l)) return PQI_STATUS_FAILURE; - /* Build the new CDB for the physical disk I/O. */ - if (disk_block > 0xffffffff) { - cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16; - cdb[1] = 0; - PUT_BE64(disk_block, &cdb[2]); - PUT_BE32(disk_blk_cnt, &cdb[10]); - cdb[14] = 0; - cdb[15] = 0; - cdb_length = 16; - } else { - cdb[0] = is_write ? 
SCMD_WRITE_10 : SCMD_READ_10; - cdb[1] = 0; - PUT_BE32(disk_block, &cdb[2]); - cdb[6] = 0; - PUT_BE16(disk_blk_cnt, &cdb[7]); - cdb[9] = 0; - cdb_length = 10; - } + disk_blk_cnt = pqisrc_handle_blk_size_diffs(l); + - if (GET_LE16((uint8_t *)(&raid_map->flags)) & + /* Set encryption flag if needed. */ + rcb->encrypt_enable = false; + if (GET_LE16((uint8_t *)(&l->raid_map->flags)) & RAID_MAP_ENCRYPTION_ENABLED) { - pqisrc_set_enc_info(&rcb->enc_info, raid_map, - fst_blk); + pqisrc_set_enc_info(&rcb->enc_info, l->raid_map, + l->block.first); rcb->encrypt_enable = true; - } else { - rcb->encrypt_enable = false; } - rcb->cmdlen = cdb_length; + if (pqisrc_aio_req_too_big(softs, device, rcb, l, disk_blk_cnt)) + return PQI_STATUS_FAILURE; + + /* set the cdb ptr to the local bypass cdb */ + rcb->cdbp = &rcb->bypass_cdb[0]; + + /* Build the new CDB for the physical disk I/O. */ + pqisrc_aio_build_cdb(l, disk_blk_cnt, rcb, rcb->cdbp); + pqisrc_aio_show_locator_info(softs, l, disk_blk_cnt, rcb); - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); return PQI_STATUS_SUCCESS; } /* Function used to submit an AIO TMF to the adapter * DEVICE_RESET is not supported. */ + static int pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) { int rval = PQI_STATUS_SUCCESS; pqi_aio_tmf_req_t tmf_req; ib_queue_t *op_ib_q = NULL; + boolean_t is_write; memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t)); - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT; tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t); tmf_req.req_id = rcb->tag; tmf_req.error_idx = rcb->tag; tmf_req.nexus = devp->ioaccel_handle; - //memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); + /* memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); */ tmf_req.tmf = tmf_type; tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb); op_ib_q = &softs->op_aio_ib_q[0]; + is_write = pqisrc_cdb_is_write(rcb->cdbp); + + uint64_t lun = rcb->cm_ccb->ccb_h.target_lun; + if (lun && (rcb->dvp->is_multi_lun)) { + int_to_scsilun(lun, tmf_req.lun); + } + else { + memset(tmf_req.lun, 0, sizeof(tmf_req.lun)); + } if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { tmf_req.req_id_to_manage = rcb_to_manage->tag; tmf_req.nexus = rcb_to_manage->ioaccel_handle; } - DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n",tmf_req.header.iu_type,tmf_req.req_id_to_manage); - DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n",tmf_req.req_id,tmf_req.nexus,tmf_req.tmf,op_ib_q->q_id); + if (devp->raid_level == SA_RAID_1 || + devp->raid_level == SA_RAID_5 || + devp->raid_level == SA_RAID_6) { + if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK && is_write) + tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_BYPASS_TASK_MGMT; + } DBG_WARN("aio tmf: iu_type=0x%x req_id_to_manage=0x%x\n", tmf_req.header.iu_type, tmf_req.req_id_to_manage); - DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%d\n", + DBG_WARN("aio tmf: req_id=0x%x nexus=0x%x tmf=0x%x QID=%u\n", tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id); rcb->path = AIO_PATH; rcb->req_pending = true; /* Timedout tmf response goes here */ rcb->error_cmp_callback = pqisrc_process_aio_response_error; rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command rval=%d\n", rval); return rval; } rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT); if (rval != PQI_STATUS_SUCCESS){ DBG_ERR("Task 
Management tmf_type : %d timeout\n", tmf_type); rcb->status = rval; } - if (rcb->status != REQUEST_SUCCESS) { + if (rcb->status != PQI_STATUS_SUCCESS) { DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d " "stat:0x%x\n", tmf_type, rcb->status); rval = PQI_STATUS_FAILURE; } - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); return rval; } /* Function used to submit a Raid TMF to the adapter */ static int pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) { int rval = PQI_STATUS_SUCCESS; pqi_raid_tmf_req_t tmf_req; ib_queue_t *op_ib_q = NULL; memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t)); - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT; tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t); tmf_req.req_id = rcb->tag; memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun)); + tmf_req.ml_device_lun_number = (uint8_t)rcb->cm_ccb->ccb_h.target_lun; + tmf_req.tmf = tmf_type; tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb); /* Decide the queue where the tmf request should be submitted */ if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid; tmf_req.req_id_to_manage = rcb_to_manage->tag; } if (softs->timeout_in_tmf && tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) { - /* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */ + /* OS_TMF_TIMEOUT_SEC - 1 to accomodate driver processing */ tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1; /* if OS tmf timeout is 0, set minimum value for timeout */ if (!tmf_req.timeout_in_sec) tmf_req.timeout_in_sec = 1; } op_ib_q = &softs->op_raid_ib_q[0]; + + DBG_WARN("raid tmf: iu_type=0x%x req_id_to_manage=%d\n", + tmf_req.header.iu_type, tmf_req.req_id_to_manage); + rcb->path = RAID_PATH; rcb->req_pending = true; /* Timedout tmf response goes here */ rcb->error_cmp_callback = pqisrc_process_raid_response_error; rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Unable to submit command rval=%d\n", rval); return rval; } rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT); if (rval != PQI_STATUS_SUCCESS) { DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type); rcb->status = rval; } - if (rcb->status != REQUEST_SUCCESS) { + if (rcb->status != PQI_STATUS_SUCCESS) { DBG_NOTE("Task Management failed tmf_type:%d " "stat:0x%x\n", tmf_type, rcb->status); rval = PQI_STATUS_FAILURE; } - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); return rval; } +void +dump_tmf_details(pqisrc_softstate_t *softs, rcb_t *rcb, char *msg) +{ + uint32_t qid = rcb->req_q ? rcb->req_q->q_id : -1; + + DBG_INFO("%s: pending=%d path=%d tag=0x%x=%u qid=%u timeout=%ums\n", + msg, rcb->req_pending, rcb->path, rcb->tag, + rcb->tag, qid, (uint32_t)rcb->host_timeout_ms); +} + int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp, rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type) { int ret = PQI_STATUS_SUCCESS; - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); + + DBG_WARN("sending TMF. 
io outstanding=%u\n", + softs->max_outstanding_io - softs->taglist.num_elem); + + rcb->is_abort_cmd_from_host = true; rcb->softs = softs; + /* No target rcb for general purpose TMFs like LUN RESET */ + if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) + { + rcb_to_manage->host_wants_to_abort_this = true; + dump_tmf_details(softs, rcb_to_manage, "rcb_to_manage"); + } + + + dump_tmf_details(softs, rcb, "rcb"); + if(!devp->is_physical_device) { if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) { if(rcb_to_manage->path == AIO_PATH) { if(devp->offload_enabled) ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); } else { DBG_INFO("TASK ABORT not supported in raid\n"); ret = PQI_STATUS_FAILURE; } } else { ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); } } else { if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); else ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type); } - DBG_FUNC("IN"); + DBG_FUNC("OUT\n"); return ret; } -/* - * Function used to build and send the vendor general request - * Used for configuring PQI feature bits between firmware and driver - */ -int -pqisrc_build_send_vendor_request( - pqisrc_softstate_t *softs, - pqi_vendor_general_request_t *request, - raid_path_error_info_elem_t *error_info) +/* return index into the global (softs) counters based on raid level */ +static counter_types_t +get_counter_index(rcb_t *rcb) { - int ret = PQI_STATUS_SUCCESS; - ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE]; - ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE]; + if (IS_AIO_PATH(rcb->dvp)) + return HBA_COUNTER; + + switch (rcb->dvp->raid_level) { + case SA_RAID_0: return RAID0_COUNTER; + case SA_RAID_1: + case SA_RAID_ADM: return RAID1_COUNTER; + case SA_RAID_5: return RAID5_COUNTER; + case SA_RAID_6: return RAID6_COUNTER; + case SA_RAID_UNKNOWN: + default: + { + static boolean_t asserted = false; + if (!asserted) + { + asserted = true; + ASSERT(rcb->path == RAID_PATH); + ASSERT(0); + } + return UNKNOWN_COUNTER; + } + } +} - rcb_t *rcb = NULL; +/* return the counter type as ASCII-string */ +static char * +counter_type_to_raid_ascii(counter_types_t type) +{ + switch (type) + { + case UNKNOWN_COUNTER: return "Unknown"; + case HBA_COUNTER: return "HbaPath"; + case RAID0_COUNTER: return "Raid0"; + case RAID1_COUNTER: return "Raid1"; + case RAID5_COUNTER: return "Raid5"; + case RAID6_COUNTER: return "Raid6"; + default: return "Unsupported"; + } +} - uint16_t request_id = 0; +/* return the path as ASCII-string */ +char * +io_path_to_ascii(IO_PATH_T path) +{ + switch (path) + { + case AIO_PATH: return "Aio"; + case RAID_PATH: return "Raid"; + default: return "Unknown"; + } +} - /* Get the tag */ - request_id = pqisrc_get_tag(&softs->taglist); - if (INVALID_ELEM == request_id) { - DBG_ERR("Tag not available\n"); - ret = PQI_STATUS_FAILURE; - goto err_notag; +/* return the io type as ASCII-string */ +static char * +io_type_to_ascii(io_type_t io_type) +{ + switch (io_type) + { + case UNKNOWN_IO_TYPE: return "Unknown"; + case READ_IO_TYPE: return "Read"; + case WRITE_IO_TYPE: return "Write"; + case NON_RW_IO_TYPE: return "NonRW"; + default: return "Unsupported"; } +} + - ((pqi_vendor_general_request_t *)request)->request_id = request_id; - ((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id; +/* return the io type based on cdb */ +io_type_t +get_io_type_from_cdb(uint8_t *cdb) +{ + if (cdb == NULL) + 
return UNKNOWN_IO_TYPE; - rcb = &softs->rcb[request_id]; + else if (pqisrc_cdb_is_read(cdb)) + return READ_IO_TYPE; - rcb->req_pending = true; - rcb->tag = request_id; + else if (pqisrc_cdb_is_write(cdb)) + return WRITE_IO_TYPE; - ret = pqisrc_submit_cmnd(softs, op_ib_q, request); + return NON_RW_IO_TYPE; +} - if (ret != PQI_STATUS_SUCCESS) { - DBG_ERR("Unable to submit command\n"); - goto err_out; - } +/* increment this counter based on path and read/write */ +OS_ATOMIC64_T +increment_this_counter(io_counters_t *pcounter, IO_PATH_T path, io_type_t io_type) +{ + OS_ATOMIC64_T ret_val; - ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT); - if (ret != PQI_STATUS_SUCCESS) { - DBG_ERR("Management request timed out!\n"); - goto err_out; + if (path == AIO_PATH) + { + if (io_type == READ_IO_TYPE) + ret_val = OS_ATOMIC64_INC(&pcounter->aio_read_cnt); + else if (io_type == WRITE_IO_TYPE) + ret_val = OS_ATOMIC64_INC(&pcounter->aio_write_cnt); + else + ret_val = OS_ATOMIC64_INC(&pcounter->aio_non_read_write); + } + else + { + if (io_type == READ_IO_TYPE) + ret_val = OS_ATOMIC64_INC(&pcounter->raid_read_cnt); + else if (io_type == WRITE_IO_TYPE) + ret_val = OS_ATOMIC64_INC(&pcounter->raid_write_cnt); + else + ret_val = OS_ATOMIC64_INC(&pcounter->raid_non_read_write); } - ret = rcb->status; - if (ret) { - ret = PQI_STATUS_FAILURE; - if(error_info) { - // TODO: config table err handling. - } - } else { - if(error_info) { - ret = PQI_STATUS_SUCCESS; - memset(error_info, 0, sizeof(*error_info)); - } + return ret_val; +} + +/* increment appropriate counter(s) anytime we post a new request */ +static void +pqisrc_increment_io_counters(pqisrc_softstate_t *softs, rcb_t *rcb) +{ + io_type_t io_type = get_io_type_from_cdb(rcb->cdbp); + counter_types_t type_index = get_counter_index(rcb); + io_counters_t *pcounter = &softs->counters[type_index]; + OS_ATOMIC64_T ret_val; + + ret_val = increment_this_counter(pcounter, rcb->path, io_type); + +#if 1 /* leave this enabled while we gain confidence for each io path */ + if (ret_val == 1) + { + char *raid_type = counter_type_to_raid_ascii(type_index); + char *path = io_path_to_ascii(rcb->path); + char *io_ascii = io_type_to_ascii(io_type); + + DBG_INFO("Got first path/type hit. 
" + "Path=%s RaidType=%s IoType=%s\n", + path, raid_type, io_ascii); } +#endif - os_reset_rcb(rcb); - pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id); - DBG_FUNC("OUT\n"); - return ret; + /* @todo future: may want to make a per-dev counter */ +} -err_out: - DBG_ERR("Vender general request submission failed.\n"); - os_reset_rcb(rcb); - pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id); -err_notag: - DBG_FUNC("FAILED \n"); - return ret; +/* public routine to print a particular counter with header msg */ +void +print_this_counter(pqisrc_softstate_t *softs, io_counters_t *pcounter, char *msg) +{ + io_counters_t counter; + uint32_t percent_reads; + uint32_t percent_aio; + + if (!softs->log_io_counters) + return; + + /* Use a cached copy so percentages are based on the data that is printed */ + memcpy(&counter, pcounter, sizeof(counter)); + + DBG_NOTE("Counter: %s (ptr=%p)\n", msg, pcounter); + + percent_reads = CALC_PERCENT_VS(counter.aio_read_cnt + counter.raid_read_cnt, + counter.aio_write_cnt + counter.raid_write_cnt); + + percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt + counter.aio_write_cnt, + counter.raid_read_cnt + counter.raid_write_cnt); + + DBG_NOTE(" R/W Percentages: Reads=%3u%% AIO=%3u%%\n", percent_reads, percent_aio); + + /* Print the Read counts */ + percent_aio = CALC_PERCENT_VS(counter.aio_read_cnt, counter.raid_read_cnt); + DBG_NOTE(" Reads : AIO=%8u(%3u%%) RAID=%8u\n", + (uint32_t)counter.aio_read_cnt, percent_aio, (uint32_t)counter.raid_read_cnt); + + /* Print the Write counts */ + percent_aio = CALC_PERCENT_VS(counter.aio_write_cnt, counter.raid_write_cnt); + DBG_NOTE(" Writes: AIO=%8u(%3u%%) RAID=%8u\n", + (uint32_t)counter.aio_write_cnt, percent_aio, (uint32_t)counter.raid_write_cnt); + + /* Print the Non-Rw counts */ + percent_aio = CALC_PERCENT_VS(counter.aio_non_read_write, counter.raid_non_read_write); + DBG_NOTE(" Non-RW: AIO=%8u(%3u%%) RAID=%8u\n", + (uint32_t)counter.aio_non_read_write, percent_aio, (uint32_t)counter.raid_non_read_write); } -/* return the path as ASCII-string */ -char * -io_path_to_ascii(IO_PATH_T path) +/* return true if buffer is all zeroes */ +boolean_t +is_buffer_zero(void *buffer, uint32_t size) { - switch (path) + char *buf = buffer; + DWORD ii; + + if (buffer == NULL || size == 0) + return false; + + for (ii = 0; ii < size; ii++) { - case AIO_PATH: return "Aio"; - case RAID_PATH: return "Raid"; - default: return "Unknown"; + if (buf[ii] != 0x00) + return false; + } + return true; +} + +/* public routine to print a all global counter types */ +void +print_all_counters(pqisrc_softstate_t *softs, uint32_t flags) +{ + int ii; + io_counters_t *pcounter; + char *raid_type; + + for (ii = 0; ii < MAX_IO_COUNTER; ii++) + { + pcounter = &softs->counters[ii]; + raid_type = counter_type_to_raid_ascii(ii); + + if ((flags & COUNTER_FLAG_ONLY_NON_ZERO) && + is_buffer_zero(pcounter, sizeof(*pcounter))) + { + continue; + } + + print_this_counter(softs, pcounter, raid_type); + } + + if (flags & COUNTER_FLAG_CLEAR_COUNTS) + { + DBG_NOTE("Clearing all counters\n"); + memset(softs->counters, 0, sizeof(softs->counters)); } } diff --git a/sys/dev/smartpqi/smartpqi_response.c b/sys/dev/smartpqi/smartpqi_response.c index 3e53506889d8..1b4f0d86095d 100644 --- a/sys/dev/smartpqi/smartpqi_response.c +++ b/sys/dev/smartpqi/smartpqi_response.c @@ -1,350 +1,522 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. 
and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" /* * Process internal RAID response in the case of success. */ void pqisrc_process_internal_raid_response_success(pqisrc_softstate_t *softs,rcb_t *rcb) { - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; rcb->req_pending = false; - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); +} + +/* Safely determines if cdb is available and if so, will return SCSI opcode or + BMIC cmd if BMIC op code is detected */ +uint8_t +pqisrc_get_cmd_from_rcb(rcb_t *rcb) +{ + uint8_t opcode = 0xFF; + + if (rcb && rcb->cdbp) + { + opcode = rcb->cdbp[0]; + if (IS_BMIC_OPCODE(opcode)) + return rcb->cdbp[6]; + } + + return opcode; } /* * Process internal RAID response in the case of failure. 
*/ void pqisrc_process_internal_raid_response_error(pqisrc_softstate_t *softs, rcb_t *rcb, uint16_t err_idx) { raid_path_error_info_elem_t error_info; - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); rcb->error_info = (char *) (softs->err_buf_dma_mem.virt_addr) + (err_idx * PQI_ERROR_BUFFER_ELEMENT_LENGTH); memcpy(&error_info, rcb->error_info, sizeof(error_info)); - DBG_INFO("error_status 0x%x data_in_result 0x%x data_out_result 0x%x\n", - error_info.status, error_info.data_in_result, error_info.data_out_result); - - rcb->status = REQUEST_FAILED; + rcb->status = PQI_STATUS_TIMEOUT; switch (error_info.data_out_result) { case PQI_RAID_DATA_IN_OUT_GOOD: if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD) - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; break; case PQI_RAID_DATA_IN_OUT_UNDERFLOW: if (error_info.status == PQI_RAID_DATA_IN_OUT_GOOD || error_info.status == PQI_RAID_STATUS_CHECK_CONDITION) - rcb->status = REQUEST_SUCCESS; + rcb->status = PQI_STATUS_SUCCESS; break; + default: + DBG_WARN("error_status 0x%x data_in_result 0x%x data_out_result 0x%x cmd rcb tag 0x%x\n", + error_info.status, error_info.data_in_result, error_info.data_out_result, rcb->tag); + } + + if (rcb->status != PQI_STATUS_SUCCESS) + { + DBG_INFO("error_status=0x%x data_in=0x%x data_out=0x%x detail=0x%x\n", + error_info.status, error_info.data_in_result, error_info.data_out_result, + pqisrc_get_cmd_from_rcb(rcb)); } rcb->req_pending = false; - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); } /* * Process the AIO/RAID IO in the case of success. */ void pqisrc_process_io_response_success(pqisrc_softstate_t *softs, rcb_t *rcb) { - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); os_io_response_success(rcb); - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); } static void pqisrc_extract_sense_data(sense_data_u_t *sense_data, uint8_t *key, uint8_t *asc, uint8_t *ascq) { if (sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_70 || sense_data->fixed_format.response_code == SCSI_SENSE_RESPONSE_71) { sense_data_fixed_t *fixed = &sense_data->fixed_format; *key = fixed->sense_key; *asc = fixed->sense_code; *ascq = fixed->sense_qual; } else if (sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_72 || sense_data->descriptor_format.response_code == SCSI_SENSE_RESPONSE_73) { sense_data_descriptor_t *desc = &sense_data->descriptor_format; *key = desc->sense_key; *asc = desc->sense_code; *ascq = desc->sense_qual; } else { *key = 0xFF; *asc = 0xFF; *ascq = 0xFF; } } +/* Suppress common errors unless verbose debug flag is on */ +boolean_t +suppress_innocuous_error_prints(pqisrc_softstate_t *softs, rcb_t *rcb) +{ + uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF; + + if ((opcode == SCSI_INQUIRY || /* 0x12 */ + opcode == SCSI_MODE_SENSE || /* 0x1a */ + opcode == SCSI_REPORT_LUNS || /* 0xa0 */ + opcode == SCSI_LOG_SENSE || /* 0x4d */ + opcode == SCSI_ATA_PASSTHRU16) /* 0x85 */ + && (softs->err_resp_verbose == false)) + return true; + + return false; +} + static void pqisrc_show_sense_data_simple(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data) { uint8_t opcode = rcb->cdbp ? 
 rcb->cdbp[0] : 0xFF; char *path = io_path_to_ascii(rcb->path); uint8_t key, asc, ascq; pqisrc_extract_sense_data(sense_data, &key, &asc, &ascq); DBG_NOTE("[ERR INFO] BTL: %d:%d:%d op=0x%x path=%s K:C:Q: %x:%x:%x\n", rcb->dvp->bus, rcb->dvp->target, rcb->dvp->lun, opcode, path, key, asc, ascq); } void pqisrc_show_sense_data_full(pqisrc_softstate_t *softs, rcb_t *rcb, sense_data_u_t *sense_data) { + if (suppress_innocuous_error_prints(softs, rcb)) + return; + pqisrc_print_buffer(softs, "sense data", sense_data, 32, 0); pqisrc_show_sense_data_simple(softs, rcb, sense_data); /* add more detail here as needed */ } +/* dumps the aio error info and sense data then breaks down the output */ +void +pqisrc_show_aio_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, aio_path_error_info_elem_t *aio_err) +{ + DBG_NOTE("\n"); + DBG_NOTE("aio err: status=0x%x serv_resp=0x%x data_pres=0x%x data_len=0x%x\n", + aio_err->status, aio_err->service_resp, aio_err->data_pres, aio_err->data_len); + + pqisrc_print_buffer(softs, "aio err info", aio_err, + offsetof(aio_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN); + + pqisrc_show_sense_data_full(softs, rcb, &aio_err->sense_data); +} + + +/* dumps the raid error info and sense data then breaks down the output */ +void +pqisrc_show_raid_error_info(pqisrc_softstate_t *softs, rcb_t *rcb, raid_path_error_info_elem_t *raid_err) +{ + DBG_NOTE("\n"); + DBG_NOTE("raid err: data_in=0x%x out=0x%x status=0x%x sense_len=0x%x resp_len=0x%x\n", + raid_err->data_in_result, raid_err->data_out_result, + raid_err->status, raid_err->sense_data_len, raid_err->resp_data_len); + + pqisrc_print_buffer(softs, "raid err info", raid_err, + offsetof(raid_path_error_info_elem_t, data), PRINT_FLAG_HDR_COLUMN); + + pqisrc_show_sense_data_full(softs, rcb, &raid_err->sense_data); +} + +/* return true if this is an innocuous error */ +boolean_t +pqisrc_is_innocuous_error(pqisrc_softstate_t *softs, rcb_t *rcb, void *err_info) +{ + uint8_t opcode = rcb->cdbp ? rcb->cdbp[0] : 0xFF; + + /* These SCSI cmds frequently cause "underrun" and other minor "error" + conditions while determining log page length, support, etc. */ + if (opcode != SCSI_INQUIRY && /* 0x12 */ + opcode != SCSI_MODE_SENSE && /* 0x1a */ + opcode != SCSI_REPORT_LUNS && /* 0xa0 */ + opcode != SCSI_LOG_SENSE && /* 0x4d */ + opcode != SCSI_ATA_PASSTHRU16) /* 0x85 */ + { + return false; + } + + /* treat all cmds above as innocuous unless verbose flag is set. */ + if (softs->err_resp_verbose == false) + return true; + + if (rcb->path == AIO_PATH) + { + aio_path_error_info_elem_t *aio_err = err_info; + uint8_t key, asc, ascq; + + /* Byte[0]=Status=0x51, Byte[1]=service_resp=0x01 */ + if (aio_err->status == PQI_AIO_STATUS_UNDERRUN && + aio_err->service_resp == PQI_AIO_SERV_RESPONSE_FAILURE) + { + return true; + } + + /* get the key info so we can apply more filters... */ + pqisrc_extract_sense_data(&aio_err->sense_data, &key, &asc, &ascq); + + /* Seeing a lot of invalid field in CDB for REPORT LUNs on AIO path. + Example CDB = a0 00 11 00 00 00 00 00 20 08 00 00 + So filter out the full dump info for now. Also wonder if we should + just send REPORT LUNS to raid path? */ + if (opcode == SCSI_REPORT_LUNS && + key == 5 && asc == 0x24) + { + pqisrc_show_sense_data_simple(softs, rcb, &aio_err->sense_data); + return true; + } + + /* may want to return true here eventually?
*/ + } + else + { + raid_path_error_info_elem_t *raid_err = err_info; + + /* Byte[1]=data_out=0x01 */ + if (raid_err->data_out_result == PQI_RAID_DATA_IN_OUT_UNDERFLOW) + return true; + + /* We get these a alot: leave a tiny breadcrumb about the error, + but don't do full spew about it */ + if (raid_err->status == PQI_AIO_STATUS_CHECK_CONDITION) + { + pqisrc_show_sense_data_simple(softs, rcb, &raid_err->sense_data); + return true; + } + } + + return false; +} + /* * Process the error info for AIO in the case of failure. */ void pqisrc_process_aio_response_error(pqisrc_softstate_t *softs, rcb_t *rcb, uint16_t err_idx) { aio_path_error_info_elem_t *err_info = NULL; - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); + + ASSERT(rcb->path == AIO_PATH); err_info = (aio_path_error_info_elem_t*) softs->err_buf_dma_mem.virt_addr + err_idx; if(err_info == NULL) { - DBG_ERR("err_info structure is NULL err_idx :%x", err_idx); + DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx); return; } + /* filter out certain underrun/success "errors" from printing */ + if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) { + + if (softs->err_resp_verbose == true) + pqisrc_show_rcb_details(softs, rcb, + "aio error", err_info); + } + os_aio_response_error(rcb, err_info); - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); } /* * Process the error info for RAID IO in the case of failure. */ void pqisrc_process_raid_response_error(pqisrc_softstate_t *softs, rcb_t *rcb, uint16_t err_idx) { raid_path_error_info_elem_t *err_info = NULL; - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); + + ASSERT(rcb->path == RAID_PATH); err_info = (raid_path_error_info_elem_t*) softs->err_buf_dma_mem.virt_addr + err_idx; if(err_info == NULL) { - DBG_ERR("err_info structure is NULL err_idx :%x", err_idx); + DBG_ERR("err_info structure is NULL err_idx :%x\n", err_idx); return; } + /* filter out certain underrun/success "errors" from printing */ + if (!pqisrc_is_innocuous_error(softs, rcb, err_info)) { + + if( softs->err_resp_verbose == true ) + pqisrc_show_rcb_details(softs, rcb, + "raid error", err_info); + + } + os_raid_response_error(rcb, err_info); - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); } /* * Process the Task Management function response. */ int pqisrc_process_task_management_response(pqisrc_softstate_t *softs, pqi_tmf_resp_t *tmf_resp) { - int ret = REQUEST_SUCCESS; + int ret = PQI_STATUS_SUCCESS; uint32_t tag = (uint32_t)tmf_resp->req_id; rcb_t *rcb = &softs->rcb[tag]; ASSERT(rcb->tag == tag); DBG_FUNC("IN\n"); switch (tmf_resp->resp_code) { case SOP_TASK_MANAGEMENT_FUNCTION_COMPLETE: case SOP_TASK_MANAGEMENT_FUNCTION_SUCCEEDED: - ret = REQUEST_SUCCESS; + ret = PQI_STATUS_SUCCESS; break; default: - DBG_WARN("TMF Failed, Response code : 0x%x\n", tmf_resp->resp_code); - ret = REQUEST_FAILED; + DBG_ERR("Tag #0x%08x TMF Failed, Response code : 0x%x\n", + rcb->tag, tmf_resp->resp_code); + ret = PQI_STATUS_TIMEOUT; break; } rcb->status = ret; rcb->req_pending = false; - DBG_FUNC("OUT"); + DBG_FUNC("OUT\n"); return ret; } static int pqisrc_process_vendor_general_response(pqi_vendor_general_response_t *response) { - int ret = REQUEST_SUCCESS; + int ret = PQI_STATUS_SUCCESS; switch(response->status) { case PQI_VENDOR_RESPONSE_IU_SUCCESS: break; case PQI_VENDOR_RESPONSE_IU_UNSUCCESS: case PQI_VENDOR_RESPONSE_IU_INVALID_PARAM: case PQI_VENDOR_RESPONSE_IU_INSUFF_RESRC: - ret = REQUEST_FAILED; + ret = PQI_STATUS_TIMEOUT; break; } return ret; } /* * Function used to process the response from the adapter * which is invoked by IRQ handler. 
*/ void pqisrc_process_response_queue(pqisrc_softstate_t *softs, int oq_id) { ob_queue_t *ob_q; struct pqi_io_response *response; uint32_t oq_pi, oq_ci; - pqi_scsi_dev_t *dvp = NULL; + pqi_scsi_dev_t *dvp = NULL; + - DBG_FUNC("IN"); + DBG_FUNC("IN\n"); ob_q = &softs->op_ob_q[oq_id - 1]; /* zero for event Q */ oq_ci = ob_q->ci_local; oq_pi = *(ob_q->pi_virt_addr); - DBG_INFO("ci : %d pi : %d qid : %d\n", oq_ci, oq_pi, ob_q->q_id); + DBG_IO("ci : %u pi : %u qid : %u\n", oq_ci, oq_pi, ob_q->q_id); while (1) { + boolean_t os_scsi_cmd = false; rcb_t *rcb = NULL; uint32_t tag = 0; uint32_t offset; - boolean_t os_scsi_cmd = false; if (oq_pi == oq_ci) break; /* Get the response */ offset = oq_ci * ob_q->elem_size; response = (struct pqi_io_response *)(ob_q->array_virt_addr + offset); tag = response->request_id; rcb = &softs->rcb[tag]; /* Make sure we are processing a valid response. */ if ((rcb->tag != tag) || (rcb->req_pending == false)) { - DBG_ERR("No such request pending with tag : %x", tag); + DBG_ERR("No such request pending with tag : %x rcb->tag : %x", tag, rcb->tag); oq_ci = (oq_ci + 1) % ob_q->num_elem; break; } /* Timedout request has been completed. This should not hit, * if timeout is set as TIMEOUT_INFINITE while calling * pqisrc_wait_on_condition(softs,rcb,timeout). */ if (rcb->timedout) { - DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag %d\n", tag); + DBG_WARN("timed out request completing from firmware, driver already completed it with failure , free the tag 0x%x\n", tag); oq_ci = (oq_ci + 1) % ob_q->num_elem; os_reset_rcb(rcb); pqisrc_put_tag(&softs->taglist, tag); break; } + if (rcb->host_wants_to_abort_this) + { + DBG_INFO("cmd that was aborted came back. tag=%u\n", rcb->tag); + } + if (rcb->is_abort_cmd_from_host) + { + DBG_INFO("abort cmd came back. 
tag=%u\n", rcb->tag); + } if (IS_OS_SCSICMD(rcb)) { dvp = rcb->dvp; if (dvp) os_scsi_cmd = true; else DBG_WARN("Received IO completion for the Null device!!!\n"); } - - DBG_INFO("response.header.iu_type : %x \n", response->header.iu_type); + DBG_IO("response.header.iu_type : %x \n", response->header.iu_type); switch (response->header.iu_type) { case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: rcb->success_cmp_callback(softs, rcb); if (os_scsi_cmd) pqisrc_decrement_device_active_io(softs, dvp); - break; case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: rcb->error_cmp_callback(softs, rcb, LE_16(response->error_index)); if (os_scsi_cmd) pqisrc_decrement_device_active_io(softs, dvp); break; case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: rcb->req_pending = false; break; case PQI_RESPONSE_IU_VENDOR_GENERAL: rcb->req_pending = false; rcb->status = pqisrc_process_vendor_general_response( (pqi_vendor_general_response_t *)response); break; case PQI_RESPONSE_IU_TASK_MANAGEMENT: rcb->status = pqisrc_process_task_management_response(softs, (void *)response); break; default: DBG_ERR("Invalid Response IU 0x%x\n",response->header.iu_type); break; } oq_ci = (oq_ci + 1) % ob_q->num_elem; } ob_q->ci_local = oq_ci; PCI_MEM_PUT32(softs, ob_q->ci_register_abs, - ob_q->ci_register_offset, ob_q->ci_local ); - DBG_FUNC("OUT"); + ob_q->ci_register_offset, ob_q->ci_local ); + DBG_FUNC("OUT\n"); } diff --git a/sys/dev/smartpqi/smartpqi_sis.c b/sys/dev/smartpqi/smartpqi_sis.c index cbfc4658a3f3..74dcb90e7d3d 100644 --- a/sys/dev/smartpqi/smartpqi_sis.c +++ b/sys/dev/smartpqi/smartpqi_sis.c @@ -1,512 +1,536 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "smartpqi_includes.h" /* Function for disabling msix interrupots */ void sis_disable_msix(pqisrc_softstate_t *softs) { uint32_t db_reg; DBG_FUNC("IN\n"); db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR); db_reg &= ~SIS_ENABLE_MSIX; PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, db_reg); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ DBG_FUNC("OUT\n"); } void sis_enable_intx(pqisrc_softstate_t *softs) { uint32_t db_reg; DBG_FUNC("IN\n"); db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR); db_reg |= SIS_ENABLE_INTX; PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, db_reg); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ if (pqisrc_sis_wait_for_db_bit_to_clear(softs,SIS_ENABLE_INTX) != PQI_STATUS_SUCCESS) { DBG_ERR("Failed to wait for enable intx db bit to clear\n"); } DBG_FUNC("OUT\n"); } void sis_disable_intx(pqisrc_softstate_t *softs) { uint32_t db_reg; DBG_FUNC("IN\n"); db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR); db_reg &= ~SIS_ENABLE_INTX; PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, db_reg); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ DBG_FUNC("OUT\n"); } void sis_disable_interrupt(pqisrc_softstate_t *softs) { DBG_FUNC("IN"); switch(softs->intr_type) { case INTR_TYPE_FIXED: pqisrc_configure_legacy_intx(softs,false); sis_disable_intx(softs); break; case INTR_TYPE_MSI: case INTR_TYPE_MSIX: - sis_disable_msix(softs); + sis_disable_msix(softs); break; default: DBG_ERR("Inerrupt mode none!\n"); break; } DBG_FUNC("OUT"); } /* Trigger a NMI as part of taking controller offline procedure */ void pqisrc_trigger_nmi_sis(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, LE_32(TRIGGER_NMI_SIS)); DBG_FUNC("OUT\n"); } /* Switch the adapter back to SIS mode during uninitialization */ int pqisrc_reenable_sis(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t timeout = SIS_ENABLE_TIMEOUT; DBG_FUNC("IN\n"); PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR, LE_32(REENABLE_SIS)); + OS_SLEEP(1000); /* 1 ms delay for PCI W/R ordering issue */ COND_WAIT(((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) & REENABLE_SIS) == 0), timeout) if (!timeout) { DBG_WARN(" [ %s ] failed to re enable sis\n",__func__); ret = PQI_STATUS_TIMEOUT; } DBG_FUNC("OUT\n"); return ret; } /* Validate the FW status PQI_CTRL_KERNEL_UP_AND_RUNNING */ int pqisrc_check_fw_status(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t timeout = SIS_STATUS_OK_TIMEOUT; DBG_FUNC("IN\n"); OS_SLEEP(1000000); COND_WAIT((GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_UP_AND_RUNNING), timeout); if (!timeout) { DBG_ERR("FW check status timedout\n"); ret = PQI_STATUS_TIMEOUT; } DBG_FUNC("OUT\n"); return ret; } /* Function used to submit a SIS command to the adapter */ static int pqisrc_send_sis_cmd(pqisrc_softstate_t *softs, uint32_t *mb) { int ret = PQI_STATUS_SUCCESS; int i = 0; uint32_t timeout = SIS_CMD_COMPLETE_TIMEOUT; int val; DBG_FUNC("IN\n"); /* Copy Command to mailbox */ for (i = 0; i < 6; i++) PCI_MEM_PUT32(softs, &softs->ioa_reg->mb[i], LEGACY_SIS_SRCV_MAILBOX+i*4, LE_32(mb[i])); /* TODO : Switch to INTX Mode ?*/ PCI_MEM_PUT32(softs, &softs->ioa_reg->ioa_to_host_db_clr, LEGACY_SIS_ODBR_R, LE_32(0x1000)); /* Submit the command */ PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db, 
LEGACY_SIS_IDBR, LE_32(SIS_CMD_SUBMIT)); #ifdef SIS_POLL_WAIT /* Wait for 20 milli sec to poll */ OS_BUSYWAIT(SIS_POLL_START_WAIT_TIME); #endif val = PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R); DBG_FUNC("val : %x\n",val); /* Spin waiting for the command to complete */ COND_WAIT((PCI_MEM_GET32(softs, &softs->ioa_reg->ioa_to_host_db, LEGACY_SIS_ODBR_R) & SIS_CMD_COMPLETE), timeout); if (!timeout) { DBG_ERR("Sync command %x, timedout\n", mb[0]); ret = PQI_STATUS_TIMEOUT; goto err_out; } /* Check command status */ mb[0] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[0], LEGACY_SIS_SRCV_MAILBOX)); if (mb[0] != SIS_CMD_STATUS_SUCCESS) { DBG_ERR("SIS cmd failed with status = 0x%x\n", mb[0]); ret = PQI_STATUS_FAILURE; goto err_out; } /* Copy the mailbox back */ for (i = 1; i < 6; i++) mb[i] = LE_32(PCI_MEM_GET32(softs, &softs->ioa_reg->mb[i], LEGACY_SIS_SRCV_MAILBOX+i*4)); DBG_FUNC("OUT\n"); return ret; err_out: DBG_FUNC("OUT failed\n"); return ret; } /* First SIS command for the adapter to check PQI support */ int pqisrc_get_adapter_properties(pqisrc_softstate_t *softs, uint32_t *prop, uint32_t *ext_prop) { int ret = PQI_STATUS_SUCCESS; uint32_t mb[6] = {0}; DBG_FUNC("IN\n"); mb[0] = SIS_CMD_GET_ADAPTER_PROPERTIES; ret = pqisrc_send_sis_cmd(softs, mb); if (!ret) { DBG_INIT("GET_PROPERTIES prop = %x, ext_prop = %x\n", mb[1], mb[4]); *prop = mb[1]; *ext_prop = mb[4]; } DBG_FUNC("OUT\n"); return ret; } /* Second SIS command to the adapter GET_COMM_PREFERRED_SETTINGS */ int pqisrc_get_preferred_settings(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t mb[6] = {0}; DBG_FUNC("IN\n"); mb[0] = SIS_CMD_GET_COMM_PREFERRED_SETTINGS; ret = pqisrc_send_sis_cmd(softs, mb); if (!ret) { /* 31:16 maximum command size in KB */ softs->pref_settings.max_cmd_size = mb[1] >> 16; /* 15:00: Maximum FIB size in bytes */ softs->pref_settings.max_fib_size = mb[1] & 0x0000FFFF; DBG_INIT("cmd size = %x, fib size = %x\n", softs->pref_settings.max_cmd_size, softs->pref_settings.max_fib_size); } DBG_FUNC("OUT\n"); return ret; } /* Get supported PQI capabilities from the adapter */ int pqisrc_get_sis_pqi_cap(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t mb[6] = {0}; DBG_FUNC("IN\n"); mb[0] = SIS_CMD_GET_PQI_CAPABILITIES; ret = pqisrc_send_sis_cmd(softs, mb); if (!ret) { softs->pqi_cap.max_sg_elem = mb[1]; softs->pqi_cap.max_transfer_size = mb[2]; softs->pqi_cap.max_outstanding_io = mb[3]; + if (softs->pqi_cap.max_outstanding_io > + PQISRC_MAX_OUTSTANDING_REQ) { + DBG_WARN("Controller-supported max outstanding " + "commands %u reduced to %d to align with " + "driver-supported max.\n", + softs->pqi_cap.max_outstanding_io, + PQISRC_MAX_OUTSTANDING_REQ); + softs->pqi_cap.max_outstanding_io = + PQISRC_MAX_OUTSTANDING_REQ; + } + +#ifdef DEVICE_HINT + bsd_set_hint_adapter_cap(softs); +#endif + softs->pqi_cap.conf_tab_off = mb[4]; softs->pqi_cap.conf_tab_sz = mb[5]; os_update_dma_attributes(softs); DBG_INIT("max_sg_elem = %x\n", softs->pqi_cap.max_sg_elem); DBG_INIT("max_transfer_size = %x\n", softs->pqi_cap.max_transfer_size); DBG_INIT("max_outstanding_io = %x\n", softs->pqi_cap.max_outstanding_io); + /* DBG_INIT("config_table_offset = %x\n", + softs->pqi_cap.conf_tab_off); + DBG_INIT("config_table_size = %x\n", + softs->pqi_cap.conf_tab_sz); + */ } DBG_FUNC("OUT\n"); return ret; } /* Send INIT STRUCT BASE ADDR - one of the SIS command */ int pqisrc_init_struct_base(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t elem_size = 0; uint32_t 
num_elem = 0; struct dma_mem init_struct_mem = {0}; struct init_base_struct *init_struct = NULL; uint32_t mb[6] = {0}; DBG_FUNC("IN\n"); /* Allocate init struct */ memset(&init_struct_mem, 0, sizeof(struct dma_mem)); init_struct_mem.size = sizeof(struct init_base_struct); init_struct_mem.align = PQISRC_INIT_STRUCT_DMA_ALIGN; - init_struct_mem.tag = "init_struct"; + os_strlcpy(init_struct_mem.tag, "init_struct", sizeof(init_struct_mem.tag)); ret = os_dma_mem_alloc(softs, &init_struct_mem); if (ret) { DBG_ERR("Failed to Allocate error buffer ret : %d\n", ret); goto err_out; } /* Calculate error buffer size */ /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io * The rcb and error buffer will be accessed by using the tag as index * As 0 tag index is not used, we need to allocate one extra. */ num_elem = softs->pqi_cap.max_outstanding_io + 1; elem_size = PQISRC_ERR_BUF_ELEM_SIZE; softs->err_buf_dma_mem.size = num_elem * elem_size; /* Allocate error buffer */ softs->err_buf_dma_mem.align = PQISRC_ERR_BUF_DMA_ALIGN; - softs->err_buf_dma_mem.tag = "error_buffer"; + os_strlcpy(softs->err_buf_dma_mem.tag, "error_buffer", sizeof(softs->err_buf_dma_mem.tag)); ret = os_dma_mem_alloc(softs, &softs->err_buf_dma_mem); if (ret) { DBG_ERR("Failed to Allocate error buffer ret : %d\n", ret); goto err_error_buf_alloc; } /* Fill init struct */ init_struct = (struct init_base_struct *)DMA_TO_VIRT(&init_struct_mem); init_struct->revision = PQISRC_INIT_STRUCT_REVISION; init_struct->flags = 0; init_struct->err_buf_paddr_l = DMA_PHYS_LOW(&softs->err_buf_dma_mem); init_struct->err_buf_paddr_h = DMA_PHYS_HIGH(&softs->err_buf_dma_mem); init_struct->err_buf_elem_len = elem_size; init_struct->err_buf_num_elem = num_elem; mb[0] = SIS_CMD_INIT_BASE_STRUCT_ADDRESS; mb[1] = DMA_PHYS_LOW(&init_struct_mem); mb[2] = DMA_PHYS_HIGH(&init_struct_mem); mb[3] = init_struct_mem.size; ret = pqisrc_send_sis_cmd(softs, mb); if (ret) goto err_sis_cmd; DBG_FUNC("OUT\n"); os_dma_mem_free(softs, &init_struct_mem); return ret; err_sis_cmd: os_dma_mem_free(softs, &softs->err_buf_dma_mem); err_error_buf_alloc: os_dma_mem_free(softs, &init_struct_mem); err_out: DBG_FUNC("OUT failed %d\n", ret); return PQI_STATUS_FAILURE; } /* * SIS initialization of the adapter in a sequence of * - GET_ADAPTER_PROPERTIES * - GET_COMM_PREFERRED_SETTINGS * - GET_PQI_CAPABILITIES * - INIT_STRUCT_BASE ADDR */ int pqisrc_sis_init(pqisrc_softstate_t *softs) { int ret = PQI_STATUS_SUCCESS; uint32_t prop = 0; uint32_t ext_prop = 0; DBG_FUNC("IN\n"); ret = pqisrc_force_sis(softs); if (ret) { DBG_ERR("Failed to switch back the adapter to SIS mode!\n"); goto err_out; } /* Check FW status ready */ ret = pqisrc_check_fw_status(softs); if (ret) { DBG_ERR("PQI Controller is not ready !!!\n"); goto err_out; } /* Check For PQI support(19h) */ ret = pqisrc_get_adapter_properties(softs, &prop, &ext_prop); if (ret) { DBG_ERR("Failed to get adapter properties\n"); goto err_out; } if (!((prop & SIS_SUPPORT_EXT_OPT) && (ext_prop & SIS_SUPPORT_PQI))) { DBG_ERR("PQI Mode Not Supported\n"); ret = PQI_STATUS_FAILURE; goto err_out; } softs->pqi_reset_quiesce_allowed = false; if (ext_prop & SIS_SUPPORT_PQI_RESET_QUIESCE) softs->pqi_reset_quiesce_allowed = true; - /* Send GET_COMM_PREFERRED_SETTINGS (26h) */ + /* Send GET_COMM_PREFERRED_SETTINGS (26h), TODO : is it required */ ret = pqisrc_get_preferred_settings(softs); if (ret) { DBG_ERR("Failed to get adapter pref settings\n"); goto err_out; } /* Get PQI settings , 3000h*/ ret = pqisrc_get_sis_pqi_cap(softs); if (ret) { 
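		/*
		 * The GET_PQI_CAPABILITIES response (max scatter-gather
		 * elements, max transfer size, max outstanding I/O) is needed
		 * later, e.g. to size the error buffer in
		 * pqisrc_init_struct_base(), so a failure here is fatal to
		 * SIS initialization.
		 */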
DBG_ERR("Failed to get PQI Capabilities\n"); goto err_out; } /* We need to allocate DMA memory here , * Do any os specific DMA setup. */ ret = os_dma_setup(softs); if (ret) { DBG_ERR("Failed to Setup DMA\n"); goto err_out; } /* Init struct base addr */ ret = pqisrc_init_struct_base(softs); if (ret) { DBG_ERR("Failed to set init struct base addr\n"); goto err_dma; } DBG_FUNC("OUT\n"); return ret; err_dma: os_dma_destroy(softs); err_out: DBG_FUNC("OUT failed\n"); return ret; } /* Deallocate the resources used during SIS initialization */ void pqisrc_sis_uninit(pqisrc_softstate_t *softs) { DBG_FUNC("IN\n"); os_dma_mem_free(softs, &softs->err_buf_dma_mem); os_dma_destroy(softs); os_resource_free(softs); pqi_reset(softs); DBG_FUNC("OUT\n"); } int pqisrc_sis_wait_for_db_bit_to_clear(pqisrc_softstate_t *softs, uint32_t bit) { int rcode = PQI_STATUS_SUCCESS; uint32_t db_reg; uint32_t loop_cnt = 0; DBG_FUNC("IN\n"); while (1) { db_reg = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db, LEGACY_SIS_IDBR); if ((db_reg & bit) == 0) break; if (GET_FW_STATUS(softs) & PQI_CTRL_KERNEL_PANIC) { DBG_ERR("controller kernel panic\n"); rcode = PQI_STATUS_FAILURE; break; } if (loop_cnt++ == SIS_DB_BIT_CLEAR_TIMEOUT_CNT) { DBG_ERR("door-bell reg bit 0x%x not cleared\n", bit); rcode = PQI_STATUS_TIMEOUT; break; } OS_SLEEP(500); } DBG_FUNC("OUT\n"); return rcode; } diff --git a/sys/dev/smartpqi/smartpqi_structures.h b/sys/dev/smartpqi/smartpqi_structures.h index ea9df8eab885..4af824fa2592 100644 --- a/sys/dev/smartpqi/smartpqi_structures.h +++ b/sys/dev/smartpqi/smartpqi_structures.h @@ -1,1197 +1,1474 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef _PQI_STRUCTURES_H #define _PQI_STRUCTURES_H + +#include "smartpqi_defines.h" + struct bmic_host_wellness_driver_version { uint8_t start_tag[4]; uint8_t driver_version_tag[2]; uint16_t driver_version_length; char driver_version[32]; uint8_t end_tag[2]; }OS_ATTRIBUTE_PACKED; + struct bmic_host_wellness_time { uint8_t start_tag[4]; uint8_t time_tag[2]; uint16_t time_length; uint8_t hour; uint8_t min; uint8_t sec; uint8_t reserved; uint8_t month; uint8_t day; uint8_t century; uint8_t year; uint8_t dont_write_tag[2]; uint8_t end_tag[2]; }OS_ATTRIBUTE_PACKED; + /* As per PQI Spec pqi-2r00a , 6.2.2. */ /* device capability register , for admin q table 24 */ struct pqi_dev_adminq_cap { uint8_t max_admin_ibq_elem; uint8_t max_admin_obq_elem; uint8_t admin_ibq_elem_len; uint8_t admin_obq_elem_len; uint16_t max_pqi_dev_reset_tmo; uint8_t res[2]; }OS_ATTRIBUTE_PACKED; /* admin q parameter reg , table 36 */ struct admin_q_param { uint8_t num_iq_elements; uint8_t num_oq_elements; uint8_t intr_msg_num; uint8_t msix_disable; }OS_ATTRIBUTE_PACKED; struct pqi_registers { uint64_t signature; uint64_t admin_q_config; uint64_t pqi_dev_adminq_cap; uint32_t legacy_intr_status; uint32_t legacy_intr_mask_set; uint32_t legacy_intr_mask_clr; uint8_t res1[28]; uint32_t pqi_dev_status; uint8_t res2[4]; uint64_t admin_ibq_pi_offset; uint64_t admin_obq_ci_offset; uint64_t admin_ibq_elem_array_addr; uint64_t admin_obq_elem_array_addr; uint64_t admin_ibq_ci_addr; uint64_t admin_obq_pi_addr; uint32_t admin_q_param; uint8_t res3[4]; uint32_t pqi_dev_err; uint8_t res4[4]; uint64_t error_details; uint32_t dev_reset; uint32_t power_action; uint8_t res5[104]; }OS_ATTRIBUTE_PACKED; /* * IOA controller registers * Mapped in PCIe BAR 0. */ struct ioa_registers { uint8_t res1[0x18]; uint32_t host_to_ioa_db_mask_clr; /* 18h */ uint8_t res2[4]; uint32_t host_to_ioa_db; /* 20h */ uint8_t res3[4]; uint32_t host_to_ioa_db_clr; /* 28h */ uint8_t res4[8]; uint32_t ioa_to_host_glob_int_mask; /* 34h */ uint8_t res5[0x64]; uint32_t ioa_to_host_db; /* 9Ch */ uint32_t ioa_to_host_db_clr; /* A0h */ uint8_t res6[4]; uint32_t ioa_to_host_db_mask; /* A8h */ uint32_t ioa_to_host_db_mask_clr; /* ACh */ uint32_t scratchpad0; /* B0h */ uint32_t scratchpad1; /* B4h */ uint32_t scratchpad2; /* B8h */ uint32_t scratchpad3_fw_status; /* BCh */ uint8_t res7[8]; uint32_t scratchpad4; /* C8h */ uint8_t res8[0xf34]; /* 0xC8 + 4 + 0xf34 = 1000h */ uint32_t mb[8]; /* 1000h */ }OS_ATTRIBUTE_PACKED; /* PQI Preferred settings */ struct pqi_pref_settings { uint16_t max_cmd_size; uint16_t max_fib_size; }OS_ATTRIBUTE_PACKED; /* pqi capability by sis interface */ struct pqi_cap { uint32_t max_sg_elem; uint32_t max_transfer_size; uint32_t max_outstanding_io; uint32_t conf_tab_off; uint32_t conf_tab_sz; }OS_ATTRIBUTE_PACKED; struct pqi_conf_table { uint8_t sign[8]; /* "CFGTABLE" */ uint32_t first_section_off; }; struct pqi_conf_table_section_header { uint16_t section_id; uint16_t next_section_off; }; struct pqi_conf_table_general_info { struct pqi_conf_table_section_header header; uint32_t section_len; uint32_t max_outstanding_req; uint32_t max_sg_size; uint32_t max_sg_per_req; }; struct pqi_conf_table_debug { struct pqi_conf_table_section_header header; uint32_t scratchpad; }; struct pqi_conf_table_heartbeat { struct pqi_conf_table_section_header header; uint32_t heartbeat_counter; }; typedef union pqi_reset_reg { struct { uint32_t reset_type : 3; uint32_t reserved : 2; uint32_t reset_action : 3; uint32_t hold_in_pd1 : 1; uint32_t reserved2 : 23; } bits; 
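	/*
	 * all_bits overlays the bit-field view above, giving raw 32-bit
	 * access to the same reset register value.
	 */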
uint32_t all_bits; }pqi_reset_reg_t; /* Memory descriptor for DMA memory allocation */ typedef struct dma_mem { void *virt_addr; - dma_addr_t dma_addr; + dma_addr_t dma_addr; uint32_t size; uint32_t align; - char *tag; - bus_dma_tag_t dma_tag; - bus_dmamap_t dma_map; + char tag[32]; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; }dma_mem_t; -/* Lock should be 8 byte aligned */ +/* Lock should be 8 byte aligned + TODO : need to apply aligned for lock alone ? +*/ #ifndef LOCKFREE_STACK typedef struct pqi_taglist { uint32_t max_elem; uint32_t num_elem; uint32_t head; uint32_t tail; uint32_t *elem_array; boolean_t lockcreated; char lockname[LOCKNAME_SIZE]; OS_LOCK_T lock OS_ATTRIBUTE_ALIGNED(8); }pqi_taglist_t; #else /* LOCKFREE_STACK */ union head_list { struct { uint32_t seq_no; /* To avoid aba problem */ uint32_t index; /* Index at the top of the stack */ }top; uint64_t data; }; /* lock-free stack used to push and pop the tag used for IO request */ typedef struct lockless_stack { uint32_t *next_index_array; uint32_t max_elem;/*No.of total elements*/ uint32_t num_elem;/*No.of present elements*/ volatile union head_list head OS_ATTRIBUTE_ALIGNED(8); }lockless_stack_t; #endif /* LOCKFREE_STACK */ /* * PQI SGL descriptor layouts. */ /* * SGL (Scatter Gather List) descriptor Codes */ #define SGL_DESCRIPTOR_CODE_DATA_BLOCK 0x0 #define SGL_DESCRIPTOR_CODE_BIT_BUCKET 0x1 #define SGL_DESCRIPTOR_CODE_STANDARD_SEGMENT 0x2 #define SGL_DESCRIPTOR_CODE_LAST_STANDARD_SEGMENT 0x3 #define SGL_DESCRIPTOR_CODE_LAST_ALTERNATIVE_SGL_SEGMENT 0x4 #define SGL_DESCRIPTOR_CODE_VENDOR_SPECIFIC 0xF typedef struct sgl_descriptor { uint64_t addr; /* !< Bytes 0-7. The starting 64-bit memory byte address of the data block. */ uint32_t length; /* !< Bytes 8-11. The length in bytes of the data block. Set to 0x00000000 specifies that no data be transferred. */ uint8_t res[3]; /* !< Bytes 12-14. */ uint8_t zero : 4; /* !< Byte 15, Bits 0-3. */ uint8_t type : 4; /* !< Byte 15, Bits 4-7. sgl descriptor type */ } sg_desc_t; /* PQI IUs */ typedef struct iu_header { uint8_t iu_type; uint8_t comp_feature; uint16_t iu_length; }OS_ATTRIBUTE_PACKED iu_header_t; typedef struct general_admin_request /* REPORT_PQI_DEVICE_CAPABILITY, REPORT_MANUFACTURER_INFO, REPORT_OPERATIONAL_IQ, REPORT_OPERATIONAL_OQ all same layout. */ { iu_header_t header; /* !< Bytes 0-3. */ uint16_t res1; uint16_t work; uint16_t req_id; /* !< Bytes 8-9. request identifier */ uint8_t fn_code; /* !< Byte 10. which administrator function */ union { struct { uint8_t res2[33]; /* !< Bytes 11-43. function specific */ uint32_t buf_size; /* !< Bytes 44-47. size in bytes of the Data-In/Out Buffer */ sg_desc_t sg_desc; /* !< Bytes 48-63. 
SGL */ } OS_ATTRIBUTE_PACKED general_func; struct { uint8_t res1; uint16_t qid; uint8_t res2[2]; uint64_t elem_arr_addr; uint64_t iq_ci_addr; uint16_t num_elem; uint16_t elem_len; uint8_t queue_proto; uint8_t arb_prio; uint8_t res3[22]; uint32_t vend_specific; } OS_ATTRIBUTE_PACKED create_op_iq; struct { uint8_t res1; uint16_t qid; uint8_t res2[2]; uint64_t elem_arr_addr; uint64_t ob_pi_addr; uint16_t num_elem; uint16_t elem_len; uint8_t queue_proto; uint8_t res3[3]; uint16_t intr_msg_num; uint16_t coales_count; uint32_t min_coales_time; uint32_t max_coales_time; uint8_t res4[8]; uint32_t vend_specific; } OS_ATTRIBUTE_PACKED create_op_oq; struct { uint8_t res1; uint16_t qid; uint8_t res2[50]; } OS_ATTRIBUTE_PACKED delete_op_queue; struct { uint8_t res1; uint16_t qid; uint8_t res2[46]; uint32_t vend_specific; } OS_ATTRIBUTE_PACKED change_op_iq_prop; } OS_ATTRIBUTE_PACKED req_type; }OS_ATTRIBUTE_PACKED gen_adm_req_iu_t; typedef struct general_admin_response { iu_header_t header; uint16_t res1; uint16_t work; uint16_t req_id; uint8_t fn_code; uint8_t status; union { struct { uint8_t status_desc[4]; uint64_t pi_offset; uint8_t res[40]; } OS_ATTRIBUTE_PACKED create_op_iq; struct { uint8_t status_desc[4]; uint64_t ci_offset; uint8_t res[40]; } OS_ATTRIBUTE_PACKED create_op_oq; } OS_ATTRIBUTE_PACKED resp_type; } OS_ATTRIBUTE_PACKED gen_adm_resp_iu_t ; /*report and set Event config IU*/ typedef struct pqi_event_config_request { iu_header_t header; uint16_t response_queue_id; /* specifies the OQ where the response IU is to be delivered */ uint8_t work_area[2]; /* reserved for driver use */ uint16_t request_id; union { uint16_t reserved; /* Report event config iu */ uint16_t global_event_oq_id; /* Set event config iu */ }iu_specific; uint32_t buffer_length; sg_desc_t sg_desc; }pqi_event_config_request_t; #if 0 typedef struct pqi_set_event_config_request { iu_header_t header; uint16_t response_queue_id; /* specifies the OQ where the response IU is to be delivered */ uint8_t work_area[2]; /* reserved for driver use */ uint16_t request_id; uint16_t global_event_oq_id; uint32_t buffer_length; sg_desc_t sg_desc; }pqi_set_event_config_request_t; #endif /* Report/Set event config data-in/data-out buffer structure */ #define PQI_MAX_EVENT_DESCRIPTORS 255 struct pqi_event_descriptor { uint8_t event_type; uint8_t reserved; uint16_t oq_id; }; typedef struct pqi_event_config { uint8_t reserved[2]; uint8_t num_event_descriptors; uint8_t reserved1; struct pqi_event_descriptor descriptors[PQI_MAX_EVENT_DESCRIPTORS]; }pqi_event_config_t; /*management response IUs */ typedef struct pqi_management_response{ iu_header_t header; uint16_t reserved1; uint8_t work_area[2]; uint16_t req_id; uint8_t result; uint8_t reserved[5]; uint64_t result_data; }pqi_management_response_t; /*Event response IU*/ typedef struct pqi_event_response { iu_header_t header; uint16_t reserved1; uint8_t work_area[2]; uint8_t event_type; uint8_t reserved2 : 7; uint8_t request_acknowledge : 1; uint16_t event_id; uint32_t additional_event_id; uint8_t data[16]; }pqi_event_response_t; /*event acknowledge IU*/ typedef struct pqi_event_acknowledge_request { iu_header_t header; uint16_t reserved1; uint8_t work_area[2]; uint8_t event_type; uint8_t reserved2; uint16_t event_id; uint32_t additional_event_id; }pqi_event_acknowledge_request_t; struct pqi_event { boolean_t pending; uint8_t event_type; uint16_t event_id; uint32_t additional_event_id; }; -typedef struct pqi_vendor_general_request { - iu_header_t header; - uint16_t response_queue_id; - uint8_t 
work_area[2]; - uint16_t request_id; - uint16_t function_code; - union { - struct { - uint16_t first_section; - uint16_t last_section; - uint8_t reserved1[48]; - } OS_ATTRIBUTE_PACKED config_table_update; - - struct { - uint64_t buffer_address; - uint32_t buffer_length; - uint8_t reserved2[40]; - } OS_ATTRIBUTE_PACKED ofa_memory_allocation; - } data; -} OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t; - typedef struct pqi_vendor_general_response { iu_header_t header; uint16_t reserved1; uint8_t work_area[2]; uint16_t request_id; uint16_t function_code; uint16_t status; uint8_t reserved2[2]; } OS_ATTRIBUTE_PACKED pqi_vendor_general_response_t; typedef struct op_q_params { uint8_t fn_code; uint16_t qid; uint16_t num_elem; uint16_t elem_len; uint16_t int_msg_num; } OS_ATTRIBUTE_PACKED op_q_params; + /* "Fixed Format Sense Data" (0x70 or 0x71) (Table 45 in SPC5) */ typedef struct sense_data_fixed { - uint8_t response_code : 7; // Byte 0, 0x70 or 0x71 - uint8_t valid : 1; // Byte 0, bit 7 - uint8_t byte_1; // Byte 1 - uint8_t sense_key : 4; // Byte 2, bit 0-3 (Key) - uint8_t byte_2_other : 4; // Byte 2, bit 4-7 - uint32_t information; // Byte 3-6, big-endian like block # in CDB - uint8_t addtnl_length; // Byte 7 - uint8_t cmd_specific[4]; // Byte 8-11 - uint8_t sense_code; // Byte 12 (ASC) - uint8_t sense_qual; // Byte 13 (ASCQ) - uint8_t fru_code; // Byte 14 - uint8_t sense_key_specific[3]; // Byte 15-17 - uint8_t addtnl_sense[1]; // Byte 18+ + uint8_t response_code : 7; /* Byte 0, 0x70 or 0x71 */ + uint8_t valid : 1; /* Byte 0, bit 7 */ + uint8_t byte_1; /* Byte 1 */ + uint8_t sense_key : 4; /* Byte 2, bit 0-3 (Key) */ + uint8_t byte_2_other : 4; /* Byte 2, bit 4-7 */ + uint32_t information; /* Byte 3-6, big-endian like block # in CDB */ + uint8_t addtnl_length; /* Byte 7 */ + uint8_t cmd_specific[4]; /* Byte 8-11 */ + uint8_t sense_code; /* Byte 12 (ASC) */ + uint8_t sense_qual; /* Byte 13 (ASCQ) */ + uint8_t fru_code; /* Byte 14 */ + uint8_t sense_key_specific[3]; /* Byte 15-17 */ + uint8_t addtnl_sense[1]; /* Byte 18+ */ } OS_ATTRIBUTE_PACKED sense_data_fixed_t; /* Generic Sense Data Descriptor (Table 29 in SPC5) */ typedef struct descriptor_entry { - uint8_t desc_type; // Byte 9/0 - uint8_t desc_type_length; // Byte 10/1 + uint8_t desc_type; /* Byte 9/0 */ + uint8_t desc_type_length; /* Byte 10/1 */ union { /* Sense data descriptor specific */ uint8_t bytes[1]; /* Information (Type 0) (Table 31 is SPC5) */ struct { - uint8_t byte_2_rsvd : 7; // Byte 11/2 - uint8_t valid : 1; // Byte 11/2, bit 7 - uint8_t byte_3; // Byte 12/3 - uint8_t information[8]; // Byte 13-20/4-11 + uint8_t byte_2_rsvd : 7; /* Byte 11/2 */ + uint8_t valid : 1; /* Byte 11/2, bit 7 */ + uint8_t byte_3; /* Byte 12/3 */ + uint8_t information[8]; /* Byte 13-20/4-11 */ } OS_ATTRIBUTE_PACKED type_0; }u; } OS_ATTRIBUTE_PACKED descriptor_entry_t; /* "Descriptor Format Sense Data" (0x72 or 0x73) (Table 28 in SPC5) */ typedef struct sense_data_descriptor { - uint8_t response_code : 7; // Byte 0, 0x72 or 0x73 - uint8_t byte_0_rsvd: 1; // Byte 0, bit 7 - uint8_t sense_key : 4; // Byte 1, bit 0-3 (Key) - uint8_t byte_1_other : 4; // Byte 1, bit 4-7 - uint8_t sense_code; // Byte 2 (ASC) - uint8_t sense_qual; // Byte 3 (ASCQ) - uint8_t byte4_6[3]; // Byte 4-6 - uint8_t more_length; // Byte 7 - descriptor_entry_t descriptor_list; // Bytes 8+ + uint8_t response_code : 7; /* Byte 0, 0x72 or 0x73 */ + uint8_t byte_0_rsvd: 1; /* Byte 0, bit 7 */ + uint8_t sense_key : 4; /* Byte 1, bit 0-3 (Key) */ + uint8_t byte_1_other : 4; /* Byte 1, 
bit 4-7 */ + uint8_t sense_code; /* Byte 2 (ASC) */ + uint8_t sense_qual; /* Byte 3 (ASCQ) */ + uint8_t byte4_6[3]; /* Byte 4-6 */ + uint8_t more_length; /* Byte 7 */ + descriptor_entry_t descriptor_list; /* Bytes 8+ */ } OS_ATTRIBUTE_PACKED sense_data_descriptor_t; typedef union sense_data_u { sense_data_fixed_t fixed_format; sense_data_descriptor_t descriptor_format; uint8_t data[256]; } sense_data_u_t; - - /* Driver will use this structure to interpret the error info element returned from a failed requests */ typedef struct raid_path_error_info_elem { - uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */ - uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */ - uint8_t reserved[3]; /* !< Bytes 2-4. */ - uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40. Defined in Storport.h */ - uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */ - uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */ - uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */ - uint32_t data_in_transferred; /* !< Bytes 12-15. If "dada_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */ - uint32_t data_out_transferred; /* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */ + uint8_t data_in_result; /* !< Byte 0. See SOP spec Table 77. */ + uint8_t data_out_result; /* !< Byte 1. See SOP spec Table 78. */ + uint8_t reserved[3]; /* !< Bytes 2-4. */ + uint8_t status; /* !< Byte 5. See SAM-5 specification "Status" codes Table 40.*/ + uint16_t status_qual; /* !< Bytes 6-7. See SAM-5 specification Table 43. */ + uint16_t sense_data_len; /* !< Bytes 8-9. See SOP specification table 79. */ + uint16_t resp_data_len; /* !< Bytes 10-11. See SOP specification table 79. */ + uint32_t data_in_transferred; /* !< Bytes 12-15. If "dada_in_result = 0x01 (DATA_IN BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-In buffer else Ignored. */ + uint32_t data_out_transferred;/* !< Bytes 16-19. If "data_out_result = 0x01 (DATA_OUT BUFFER UNDERFLOW)", Indicates the number of contiguous bytes starting with offset zero in Data-Out buffer else Ignored. */ union { sense_data_u_t sense_data; uint8_t data[256]; /* !< Bytes 20-275. Response Data buffer or Sense Data buffer but not both. */ }; }OS_ATTRIBUTE_PACKED raid_path_error_info_elem_t; #define PQI_ERROR_BUFFER_ELEMENT_LENGTH sizeof(raid_path_error_info_elem_t) typedef enum error_data_present { - DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */ - DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */ - DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */ + DATA_PRESENT_NO_DATA = 0, /* !< No data present in Data buffer. */ + DATA_PRESENT_RESPONSE_DATA = 1, /* !< Response data is present in Data buffer. */ + DATA_PRESENT_SENSE_DATA = 2 /* !< Sense data is present in Data buffer. */ } error_data_present_t; typedef struct aio_path_error_info_elem { - uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40. Defined in Storport.h */ - uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */ - uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. 
*/ - uint8_t reserved1; /* !< Byte 3. Reserved. */ - uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */ - uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */ - uint16_t reserved2; /* !< Bytes 10. Reserved. */ - uint8_t data[256]; /* !< Bytes 11-267. Response data buffer or Sense data buffer but not both. */ - uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */ + uint8_t status; /* !< Byte 0. See SAM-5 specification "SCSI Status" codes Table 40.*/ + uint8_t service_resp; /* !< Byte 1. SCSI Service Response. */ + uint8_t data_pres; /* !< Byte 2. Bits [7:2] reserved. Bits [1:0] - 0=No data, 1=Response data, 2=Sense data. */ + uint8_t reserved1; /* !< Byte 3. Reserved. */ + uint32_t resd_count; /* !< Bytes 4-7. The residual data length in bytes. Need the original transfer size and if Status is OverRun or UnderRun. */ + uint16_t data_len; /* !< Bytes 8-9. The amount of Sense data or Response data returned in Response/Sense Data buffer. */ + uint16_t reserved2; /* !< Bytes 10-11. Reserved. */ + union + { + sense_data_u_t sense_data; /* */ + uint8_t data[256]; /* !< Bytes 12-267. Response data buffer or Sense data buffer but not both. */ + }; + uint8_t padding[8]; /* !< Bytes 268-275. Padding to make AIO_PATH_ERROR_INFO_ELEMENT = RAID_PATH_ERROR_INFO_ELEMENT */ }OS_ATTRIBUTE_PACKED aio_path_error_info_elem_t; struct init_base_struct { uint32_t revision; /* revision of init structure */ uint32_t flags; /* reserved */ uint32_t err_buf_paddr_l; /* lower 32 bits of physical address of error buffer */ uint32_t err_buf_paddr_h; /* upper 32 bits of physical address of error buffer */ uint32_t err_buf_elem_len; /* length of each element in error buffer (in bytes) */ uint32_t err_buf_num_elem; /* number of elements in error buffer */ }OS_ATTRIBUTE_PACKED; /* Queue details */ typedef struct ib_queue { uint32_t q_id; uint32_t num_elem; uint32_t elem_size; char *array_virt_addr; dma_addr_t array_dma_addr; uint32_t pi_local; uint32_t pi_register_offset; uint32_t *pi_register_abs; uint32_t *ci_virt_addr; dma_addr_t ci_dma_addr; boolean_t created; boolean_t lockcreated; char lockname[LOCKNAME_SIZE]; OS_PQILOCK_T lock OS_ATTRIBUTE_ALIGNED(8); + struct dma_mem alloc_dma; }ib_queue_t; typedef struct ob_queue { uint32_t q_id; uint32_t num_elem; uint32_t elem_size; uint32_t intr_msg_num; char *array_virt_addr; dma_addr_t array_dma_addr; uint32_t ci_local; uint32_t ci_register_offset; uint32_t *ci_register_abs; uint32_t *pi_virt_addr; dma_addr_t pi_dma_addr; boolean_t created; + struct dma_mem alloc_dma; }ob_queue_t; typedef struct pqisrc_sg_desc{ uint64_t addr; uint32_t len; uint32_t flags; }sgt_t; typedef struct pqi_iu_layer_desc { uint8_t ib_spanning_supported : 1; uint8_t res1 : 7; uint8_t res2[5]; uint16_t max_ib_iu_len; uint8_t ob_spanning_supported : 1; uint8_t res3 : 7; uint8_t res4[5]; uint16_t max_ob_iu_len; }OS_ATTRIBUTE_PACKED pqi_iu_layer_desc_t; /* Response IU data */ typedef struct pqi_device_capabilities { uint16_t length; uint8_t res1[6]; uint8_t ibq_arb_priority_support_bitmask; uint8_t max_aw_a; uint8_t max_aw_b; uint8_t max_aw_c; uint8_t max_arb_burst : 3; uint8_t res2 : 4; uint8_t iqa : 1; uint8_t res3[2]; uint8_t iq_freeze : 1; uint8_t res4 : 7; uint16_t max_iqs; uint16_t max_iq_elements; uint8_t res5[4]; uint16_t max_iq_elem_len; uint16_t min_iq_elem_len; uint8_t 
res6[2]; uint16_t max_oqs; uint16_t max_oq_elements; uint16_t intr_coales_time_granularity; uint16_t max_oq_elem_len; uint16_t min_oq_elem_len; uint8_t res7[24]; pqi_iu_layer_desc_t iu_layer_desc[32]; }OS_ATTRIBUTE_PACKED pqi_dev_cap_t; /* IO path */ -typedef struct pqi_aio_req { - iu_header_t header; - uint16_t response_queue_id; - uint8_t work_area[2]; - uint16_t req_id; - uint8_t res1[2]; - uint32_t nexus; - uint32_t buf_len; +typedef struct iu_cmd_flags +{ uint8_t data_dir : 2; uint8_t partial : 1; uint8_t mem_type : 1; uint8_t fence : 1; uint8_t encrypt_enable : 1; uint8_t res2 : 2; +}OS_ATTRIBUTE_PACKED iu_cmd_flags_t; + +typedef struct iu_attr_prio +{ uint8_t task_attr : 3; uint8_t cmd_prio : 4; uint8_t res3 : 1; +}OS_ATTRIBUTE_PACKED iu_attr_prio_t; + +typedef struct pqi_aio_req { + iu_header_t header; + uint16_t response_queue_id; + uint8_t work_area[2]; + uint16_t req_id; + uint8_t res1[2]; + uint32_t nexus; + uint32_t buf_len; + iu_cmd_flags_t cmd_flags; + iu_attr_prio_t attr_prio; uint16_t encrypt_key_index; uint32_t encrypt_twk_low; uint32_t encrypt_twk_high; uint8_t cdb[16]; uint16_t err_idx; uint8_t num_sg; uint8_t cdb_len; uint8_t lun[8]; uint8_t res4[4]; sgt_t sg_desc[4]; }OS_ATTRIBUTE_PACKED pqi_aio_req_t; +typedef struct pqi_aio_raid1_write_req { + iu_header_t header; + uint16_t response_queue_id; + uint8_t work_area[2]; + uint16_t req_id; + uint16_t volume_id; /* ID of raid volume */ + uint32_t nexus_1; /* 1st drive in RAID 1 */ + uint32_t nexus_2; /* 2nd drive in RAID 1 */ + uint32_t nexus_3; /* 3rd drive in RAID 1 */ + uint32_t buf_len; + iu_cmd_flags_t cmd_flags; + iu_attr_prio_t attr_prio; + uint16_t encrypt_key_index; + uint8_t cdb[16]; + uint16_t err_idx; + uint8_t num_sg; + uint8_t cdb_len; + uint8_t num_drives; /* drives in raid1 (2 or 3) */ + uint8_t reserved_bytes[3]; + uint32_t encrypt_twk_low; + uint32_t encrypt_twk_high; + sgt_t sg_desc[4]; +}OS_ATTRIBUTE_PACKED pqi_aio_raid1_write_req_t; + +typedef struct pqi_aio_raid5or6_write_req { + iu_header_t header; + uint16_t response_queue_id; + uint8_t work_area[2]; + uint16_t req_id; + uint16_t volume_id; /* ID of raid volume */ + uint32_t data_it_nexus; /* IT nexus of data drive */ + uint32_t p_parity_it_nexus;/* It nexus of p parity disk */ + uint32_t q_parity_it_nexus;/* It nexus of q parity disk (R6) */ + uint32_t buf_len; + iu_cmd_flags_t cmd_flags; + iu_attr_prio_t attr_prio; + uint16_t encrypt_key_index; + uint8_t cdb[16]; + uint16_t err_idx; + uint8_t num_sg; + uint8_t cdb_len; + uint8_t xor_multiplier; /* for generating RAID 6 Q parity */ + uint8_t reserved[3]; + uint32_t encrypt_twk_low; + uint32_t encrypt_twk_high; + uint64_t row; /* logical lba / blocks per row */ + uint8_t reserved2[8]; /* changed to reserved, used to stripe_lba */ + sgt_t sg_desc[3]; /* only 3 entries for R5/6 */ +}OS_ATTRIBUTE_PACKED pqi_aio_raid5or6_write_req_t; typedef struct pqisrc_raid_request { iu_header_t header; uint16_t response_queue_id; /* specifies the OQ where the response IU is to be delivered */ uint8_t work_area[2]; /* reserved for driver use */ uint16_t request_id; uint16_t nexus_id; uint32_t buffer_length; uint8_t lun_number[8]; uint16_t protocol_spec; uint8_t data_direction : 2; uint8_t partial : 1; uint8_t reserved1 : 4; uint8_t fence : 1; uint16_t error_index; uint8_t reserved2; uint8_t task_attribute : 3; uint8_t command_priority : 4; uint8_t reserved3 : 1; uint8_t reserved4 : 2; uint8_t additional_cdb_bytes_usage : 3; uint8_t reserved5 : 3; - uint8_t cdb[16]; - uint8_t reserved[12]; + union + { + uint8_t 
cdb[16]; + struct + { + uint8_t op_code; /* Byte 0. SCSI opcode (0x26 or 0x27) */ + uint8_t lun_lower; /* Byte 1 */ + uint32_t detail; /* Byte 2-5 */ + uint8_t cmd; /* Byte 6. Vendor specific op code. */ + uint16_t xfer_len; /* Byte 7-8 */ + uint8_t lun_upper; /* Byte 9 */ + uint8_t unused[6]; /* Bytes 10-15. */ + }OS_ATTRIBUTE_PACKED bmic_cdb; + }OS_ATTRIBUTE_PACKED cmd; + uint8_t reserved[11]; + uint8_t ml_device_lun_number; uint32_t timeout_in_sec; sgt_t sg_descriptors[4]; -} OS_ATTRIBUTE_PACKED pqisrc_raid_req_t; +}OS_ATTRIBUTE_PACKED pqisrc_raid_req_t; typedef struct pqi_raid_tmf_req { - iu_header_t header; - uint16_t resp_qid; - uint8_t work_area[2]; - uint16_t req_id; - uint16_t nexus; - uint8_t res1[2]; - uint16_t timeout_in_sec; - uint8_t lun[8]; - uint16_t protocol_spec; - uint16_t obq_id_to_manage; - uint16_t req_id_to_manage; - uint8_t tmf; - uint8_t res2 : 7; - uint8_t fence : 1; + iu_header_t header; + uint16_t resp_qid; + uint8_t work_area[2]; + uint16_t req_id; + uint16_t nexus; + uint8_t res1[1]; + uint8_t ml_device_lun_number; + uint16_t timeout_in_sec; + uint8_t lun[8]; + uint16_t protocol_spec; + uint16_t obq_id_to_manage; + uint16_t req_id_to_manage; + uint8_t tmf; + uint8_t res2 : 7; + uint8_t fence : 1; } OS_ATTRIBUTE_PACKED pqi_raid_tmf_req_t; typedef struct pqi_aio_tmf_req { iu_header_t header; uint16_t resp_qid; uint8_t work_area[2]; uint16_t req_id; uint16_t res1; uint32_t nexus; uint8_t lun[8]; uint32_t req_id_to_manage; uint8_t tmf; uint8_t res2 : 7; uint8_t fence : 1; uint16_t error_idx; }OS_ATTRIBUTE_PACKED pqi_aio_tmf_req_t; typedef struct pqi_tmf_resp { iu_header_t header; uint16_t resp_qid; uint8_t work_area[2]; uint16_t req_id; uint16_t nexus; uint8_t add_resp_info[3]; uint8_t resp_code; }pqi_tmf_resp_t; struct pqi_io_response { iu_header_t header; uint16_t queue_id; uint8_t work_area[2]; uint16_t request_id; uint16_t error_index; uint8_t reserved[4]; }OS_ATTRIBUTE_PACKED; struct pqi_enc_info { uint16_t data_enc_key_index; uint32_t encrypt_tweak_lower; uint32_t encrypt_tweak_upper; }; +typedef uint32_t os_ticks_t; + +struct pqi_stream_data { + uint64_t next_lba; + os_ticks_t last_accessed; +}; typedef struct pqi_scsi_device { device_type_t devtype; /* as reported by INQUIRY command */ uint8_t device_type; /* as reported by BMIC_IDENTIFY_PHYSICAL_DEVICE - only valid for devtype = TYPE_DISK */ int bus; int target; int lun; uint8_t flags; uint8_t scsi3addr[8]; uint64_t wwid; uint8_t is_physical_device : 1; uint8_t is_external_raid_device : 1; uint8_t target_lun_valid : 1; uint8_t expose_device : 1; uint8_t no_uld_attach : 1; uint8_t is_obdr_device : 1; uint8_t aio_enabled : 1; uint8_t device_gone : 1; uint8_t new_device : 1; uint8_t volume_offline : 1; - uint8_t scsi_rescan : 1; + uint8_t is_nvme : 1; + uint8_t scsi_rescan : 1; uint8_t vendor[8]; /* bytes 8-15 of inquiry data */ uint8_t model[16]; /* bytes 16-31 of inquiry data */ uint64_t sas_address; uint8_t raid_level; uint16_t queue_depth; /* max. queue_depth for this device */ - uint16_t advertised_queue_depth; uint32_t ioaccel_handle; uint8_t volume_status; uint8_t active_path_index; uint8_t path_map; uint8_t bay; uint8_t box[8]; uint16_t phys_connector[8]; int offload_config; /* I/O accel RAID offload configured */ int offload_enabled; /* I/O accel RAID offload enabled */ int offload_enabled_pending; - int offload_to_mirror; /* Send next I/O accelerator RAID + int *offload_to_mirror; /* Send next I/O accelerator RAID offload request to mirror drive. 
*/ struct raid_map *raid_map; /* I/O accelerator RAID map */ int reset_in_progress; int logical_unit_number; os_dev_info_t *dip; /*os specific scsi device information*/ boolean_t invalid; boolean_t path_destroyed; boolean_t firmware_queue_depth_set; OS_ATOMIC64_T active_requests; -}pqi_scsi_dev_t; - -typedef struct pqisrc_softstate pqisrc_softstate_t; -typedef struct pqi_firmware_feature pqi_firmware_feature_t; -typedef void (*feature_status_fn)(pqisrc_softstate_t *softs, - pqi_firmware_feature_t *firmware_feature); - -struct pqi_firmware_feature { - char *feature_name; - unsigned int feature_bit; - boolean_t supported; - boolean_t enabled; - feature_status_fn feature_status; -}; - -struct pqi_conf_table_firmware_features { - struct pqi_conf_table_section_header header; - uint16_t num_elements; - uint8_t features_supported[]; -}; + struct pqisrc_softstate *softs; + boolean_t schedule_rescan; + boolean_t in_remove; + struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; + boolean_t is_multi_lun; -struct pqi_conf_table_section_info { - struct pqisrc_softstate *softs; - void *section; - uint32_t section_offset; - void *section_addr; -}; +}pqi_scsi_dev_t; struct sense_header_scsi { /* See SPC-3 section 4.5 */ uint8_t response_code; /* permit: 0x0, 0x70, 0x71, 0x72, 0x73 */ uint8_t sense_key; uint8_t asc; uint8_t ascq; uint8_t byte4; uint8_t byte5; uint8_t byte6; uint8_t additional_length; /* always 0 for fixed sense format */ }OS_ATTRIBUTE_PACKED; - - typedef struct report_lun_header { uint32_t list_length; uint8_t extended_response; uint8_t reserved[3]; }OS_ATTRIBUTE_PACKED reportlun_header_t; typedef struct report_lun_ext_entry { uint8_t lunid[8]; uint64_t wwid; uint8_t device_type; uint8_t device_flags; uint8_t lun_count; /* number of LUNs in a multi-LUN device */ uint8_t redundant_paths; uint32_t ioaccel_handle; }OS_ATTRIBUTE_PACKED reportlun_ext_entry_t; typedef struct report_lun_data_ext { reportlun_header_t header; reportlun_ext_entry_t lun_entries[1]; }OS_ATTRIBUTE_PACKED reportlun_data_ext_t; typedef struct reportlun_queue_depth_entry { uint8_t logical_unit_num; uint8_t reserved_1:6; uint8_t address:2; uint8_t box_bus_num; uint8_t reserved_2:6; uint8_t mode:2; uint8_t bus_ident; /* Byte 5 */ uint8_t queue_depth:7; uint8_t multiplier:1; /* Byte 6 */ uint8_t drive_type_mix_flags; uint8_t level_2_bus:6; uint8_t level_2_mode:2; uint8_t unused_bytes[16]; }OS_ATTRIBUTE_PACKED reportlun_queue_depth_entry_t; typedef struct reportlun_queue_depth_data { reportlun_header_t header; - reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */ + reportlun_queue_depth_entry_t lun_entries[1]; /* lun list with Queue Depth values for each lun */ }OS_ATTRIBUTE_PACKED reportlun_queue_depth_data_t; typedef struct raidmap_data { uint32_t ioaccel_handle; uint8_t xor_mult[2]; uint8_t reserved[2]; }OS_ATTRIBUTE_PACKED raidmap_data_t; typedef struct raid_map { uint32_t structure_size; /* size of entire structure in bytes */ uint32_t volume_blk_size; /* bytes / block in the volume */ uint64_t volume_blk_cnt; /* logical blocks on the volume */ uint8_t phys_blk_shift; /* shift factor to convert between units of logical blocks and physical disk blocks */ uint8_t parity_rotation_shift; /* shift factor to convert between units of logical stripes and physical stripes */ uint16_t strip_size; /* blocks used on each disk / stripe */ uint64_t disk_starting_blk; /* first disk block used in volume */ uint64_t disk_blk_cnt; /* disk blocks used by volume / disk */ uint16_t 
data_disks_per_row; /* data disk entries / row in the map */ uint16_t metadata_disks_per_row; /* mirror/parity disk entries / row in the map */ uint16_t row_cnt; /* rows in each layout map */ uint16_t layout_map_count; /* layout maps (1 map per mirror/parity group) */ uint16_t flags; uint16_t data_encryption_key_index; uint8_t reserved[16]; raidmap_data_t dev_data[RAID_MAP_MAX_ENTRIES]; }OS_ATTRIBUTE_PACKED pqisrc_raid_map_t; +typedef struct aio_row { + uint32_t blks_per_row; /* blocks per row */ + uint64_t first; /* first row */ + uint64_t last; /* last row */ + uint32_t offset_first; /* offset in first row */ + uint32_t offset_last; /* offset in last row */ + uint16_t data_disks; /* number of data disks per row */ + uint16_t total_disks; /* data + parity disks per row. */ +}OS_ATTRIBUTE_PACKED pqisrc_aio_row_t; + +typedef struct aio_column { + uint32_t first; /* 1st column of req */ + uint32_t last; /* last column of req */ +}OS_ATTRIBUTE_PACKED pqisrc_aio_column_t; + +typedef struct aio_block { + uint64_t first; /* 1st block number of req */ + uint64_t last; /* last block number of req */ + uint32_t cnt; /* total blocks in req */ + uint64_t disk_block; /* block number of phys disk */ +}OS_ATTRIBUTE_PACKED pqisrc_aio_block_t; + +typedef struct aio_r5or6_loc { + struct aio_row row; /* row information */ + struct aio_column col; /* column information */ +}OS_ATTRIBUTE_PACKED pqisrc_aio_r5or6_loc_t; + +typedef struct aio_map { + uint32_t row; + uint32_t idx; /* index into array of handles */ + uint16_t layout_map_count; +}OS_ATTRIBUTE_PACKED pqisrc_aio_map_t; + +typedef struct aio_disk_group { + uint32_t first; /* first group */ + uint32_t last; /* last group */ + uint32_t cur; /* current group */ +}OS_ATTRIBUTE_PACKED pqisrc_aio_disk_group_t; + +typedef struct aio_req_locator { + uint8_t raid_level; + struct raid_map *raid_map; /* relevant raid map */ + struct aio_block block; /* block range and count */ + struct aio_row row; /* row range and offset info */ + struct aio_column col; /* first/last column info */ + struct aio_r5or6_loc r5or6; /* Raid 5/6-specific bits */ + struct aio_map map; /* map row, count, and index */ + struct aio_disk_group group; /* first, last, and curr group */ + boolean_t is_write; + uint32_t stripesz; + uint16_t strip_sz; + int offload_to_mirror; +}OS_ATTRIBUTE_PACKED aio_req_locator_t; typedef struct bmic_ident_ctrl { uint8_t conf_ld_count; uint32_t conf_sign; uint8_t fw_version[4]; uint8_t rom_fw_rev[4]; uint8_t hw_rev; uint8_t reserved[140]; uint16_t extended_lun_count; uint8_t reserved1[34]; uint16_t fw_build_number; uint8_t reserved2[100]; uint8_t ctrl_mode; uint8_t reserved3[32]; }OS_ATTRIBUTE_PACKED bmic_ident_ctrl_t; typedef struct bmic_identify_physical_device { uint8_t scsi_bus; /* SCSI Bus number on controller */ uint8_t scsi_id; /* SCSI ID on this bus */ uint16_t block_size; /* sector size in bytes */ uint32_t total_blocks; /* number for sectors on drive */ uint32_t reserved_blocks; /* controller reserved (RIS) */ uint8_t model[40]; /* Physical Drive Model */ uint8_t serial_number[40]; /* Drive Serial Number */ uint8_t firmware_revision[8]; /* drive firmware revision */ uint8_t scsi_inquiry_bits; /* inquiry byte 7 bits */ uint8_t compaq_drive_stamp; /* 0 means drive not stamped */ uint8_t last_failure_reason; uint8_t flags; uint8_t more_flags; uint8_t scsi_lun; /* SCSI LUN for phys drive */ uint8_t yet_more_flags; uint8_t even_more_flags; uint32_t spi_speed_rules; uint8_t phys_connector[2]; /* connector number on controller */ uint8_t phys_box_on_bus; 
/* phys enclosure this drive resides */ uint8_t phys_bay_in_box; /* phys drv bay this drive resides */ uint32_t rpm; /* drive rotational speed in RPM */ uint8_t device_type; /* type of drive */ uint8_t sata_version; /* only valid when device_type = BMIC_DEVICE_TYPE_SATA */ uint64_t big_total_block_count; uint64_t ris_starting_lba; uint32_t ris_size; uint8_t wwid[20]; uint8_t controller_phy_map[32]; uint16_t phy_count; uint8_t phy_connected_dev_type[256]; uint8_t phy_to_drive_bay_num[256]; uint16_t phy_to_attached_dev_index[256]; uint8_t box_index; uint8_t reserved; uint16_t extra_physical_drive_flags; uint8_t negotiated_link_rate[256]; uint8_t phy_to_phy_map[256]; uint8_t redundant_path_present_map; uint8_t redundant_path_failure_map; uint8_t active_path_number; uint16_t alternate_paths_phys_connector[8]; uint8_t alternate_paths_phys_box_on_port[8]; uint8_t multi_lun_device_lun_count; uint8_t minimum_good_fw_revision[8]; uint8_t unique_inquiry_bytes[20]; uint8_t current_temperature_degreesC; uint8_t temperature_threshold_degreesC; uint8_t max_temperature_degreesC; uint8_t logical_blocks_per_phys_block_exp; uint16_t current_queue_depth_limit; uint8_t switch_name[10]; uint16_t switch_port; uint8_t alternate_paths_switch_name[40]; uint8_t alternate_paths_switch_port[8]; uint16_t power_on_hours; uint16_t percent_endurance_used; uint8_t drive_authentication; uint8_t smart_carrier_authentication; uint8_t smart_carrier_app_fw_version; uint8_t smart_carrier_bootloader_fw_version; uint8_t encryption_key_name[64]; uint32_t misc_drive_flags; uint16_t dek_index; uint8_t padding[112]; }OS_ATTRIBUTE_PACKED bmic_ident_physdev_t; +typedef struct bmic_sense_feature { + uint8_t opcode; + uint8_t reserved1[1]; + uint8_t page; + uint8_t sub_page; + uint8_t reserved2[2]; + uint8_t cmd; + uint16_t transfer_length; + uint8_t reserved3[7]; +}OS_ATTRIBUTE_PACKED bmic_sense_feature_t; + +typedef struct bmic_sense_feature_buffer_header { + uint8_t page; + uint8_t sub_page; + uint16_t buffer_length; +} OS_ATTRIBUTE_PACKED bmic_sense_feature_buffer_header_t; + +typedef struct bmic_sense_feature_page_header { + uint8_t page; + uint8_t sub_page; + uint16_t total_length; /** Total length of the page. + * The length is the same wheteher the request buffer is too short or not. + * When printing out the page, only print the buffer length. 
*/ +} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_header_t; + +typedef struct bmic_sense_feature_page_io { + struct bmic_sense_feature_page_header header; + uint8_t flags1; +} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_t; + +typedef struct bmic_sense_feature_page_io_aio_subpage { + struct bmic_sense_feature_page_header header; + uint8_t fw_aio_read_support; + uint8_t driver_aio_read_support; + uint8_t fw_aio_write_support; + uint8_t driver_aio_write_support; + uint16_t max_aio_rw_xfer_crypto_sas_sata; /* in kb */ + uint16_t max_aio_rw_xfer_crypto_nvme; /* in kb */ + uint16_t max_aio_write_raid5_6; /* in kb */ + uint16_t max_aio_write_raid1_10_2drv; /* in kb */ + uint16_t max_aio_write_raid1_10_3drv; /* in kb */ +} OS_ATTRIBUTE_PACKED bmic_sense_feature_page_io_aio_subpage_t; + +typedef struct bmic_sense_feature_aio_buffer { + struct bmic_sense_feature_buffer_header header; + struct bmic_sense_feature_page_io_aio_subpage aio_subpage; +} OS_ATTRIBUTE_PACKED bmic_sense_feature_aio_buffer_t; + + typedef struct pqisrc_bmic_flush_cache { uint8_t disable_cache; uint8_t power_action; uint8_t ndu_flush_cache; uint8_t halt_event; uint8_t reserved[28]; } OS_ATTRIBUTE_PACKED pqisrc_bmic_flush_cache_t; /* for halt_event member of pqisrc_bmic_flush_cache_t */ enum pqisrc_flush_cache_event_type { PQISRC_NONE_CACHE_FLUSH_ONLY = 0, PQISRC_SHUTDOWN = 1, PQISRC_HIBERNATE = 2, PQISRC_SUSPEND = 3, PQISRC_RESTART = 4 }; struct request_container_block; typedef void (*success_callback)(struct pqisrc_softstate *, struct request_container_block *); typedef void (*error_callback)(struct pqisrc_softstate *, struct request_container_block *, uint16_t); /* Request container block */ typedef struct request_container_block { void *req; void *error_info; - REQUEST_STATUS_T status; + int status; uint32_t tag; sgt_t *sg_chain_virt; dma_addr_t sg_chain_dma; uint32_t data_dir; pqi_scsi_dev_t *dvp; struct pqisrc_softstate *softs; success_callback success_cmp_callback; error_callback error_cmp_callback; - uint8_t *cdbp; + uint8_t *cdbp; /* points to either the bypass_cdb below or original host cdb */ + uint8_t bypass_cdb[16]; /* bypass cmds will use this cdb memory */ int cmdlen; uint32_t bcount; /* buffer size in byte */ uint32_t ioaccel_handle; boolean_t encrypt_enable; struct pqi_enc_info enc_info; + uint32_t row_num; + uint32_t blocks_per_row; + uint32_t raid_map_index; + uint32_t raid_map_row; ib_queue_t *req_q; - int path; + IO_PATH_T path; int resp_qid; boolean_t req_pending; + uint32_t it_nexus[PQISRC_MAX_SUPPORTED_MIRRORS]; boolean_t timedout; int tm_req; int aio_retry; + boolean_t is_abort_cmd_from_host; /* true if this is a TMF abort */ + boolean_t host_wants_to_abort_this; /* set to true to ID the request targeted by TMF */ + uint64_t submit_time_user_secs; /* host submit time in user seconds */ + uint64_t host_timeout_ms; /* original host timeout value in msec */ int cm_flags; void *cm_data; /* pointer to data in kernel space */ bus_dmamap_t cm_datamap; uint32_t nseg; union ccb *cm_ccb; sgt_t *sgt; /* sg table */ - }rcb_t; -typedef struct tid_pool { - int tid[PQI_MAX_PHYSICALS]; - int index; -}tid_pool_t; +typedef struct bit_map { + boolean_t bit_vector[MAX_TARGET_BIT]; +}bit_map_t; -struct pqisrc_softstate { +typedef enum _io_type +{ + UNKNOWN_IO_TYPE, /* IO Type is TBD or cannot be determined */ + NON_RW_IO_TYPE, /* IO Type is non-Read/Write opcode (could separate BMIC, etc. 
if we wanted) */ + READ_IO_TYPE, /* IO Type is SCSI Read */ + WRITE_IO_TYPE, /* IO Type is SCSI Write */ +} io_type_t; + +typedef enum _counter_types +{ + UNKNOWN_COUNTER, + HBA_COUNTER, + RAID0_COUNTER, + RAID1_COUNTER, + RAID5_COUNTER, + RAID6_COUNTER, + MAX_IO_COUNTER, +} counter_types_t; + +typedef struct _io_counters +{ + OS_ATOMIC64_T raid_read_cnt; + OS_ATOMIC64_T raid_write_cnt; + OS_ATOMIC64_T aio_read_cnt; + OS_ATOMIC64_T aio_write_cnt; + OS_ATOMIC64_T raid_non_read_write; + OS_ATOMIC64_T aio_non_read_write; +} io_counters_t; + +typedef struct pqisrc_softstate { OS_SPECIFIC_T os_specific; struct ioa_registers *ioa_reg; struct pqi_registers *pqi_reg; uint8_t *pci_mem_base_vaddr; PCI_ACC_HANDLE_T pci_mem_handle; struct pqi_cap pqi_cap; struct pqi_pref_settings pref_settings; char fw_version[11]; uint16_t fw_build_number; uint32_t card; /* index to aac_cards */ uint16_t vendid; /* vendor id */ uint16_t subvendid; /* sub vendor id */ uint16_t devid; /* device id */ uint16_t subsysid; /* sub system id */ controller_state_t ctlr_state; struct dma_mem err_buf_dma_mem; - struct dma_mem admin_queue_dma_mem; - struct dma_mem op_ibq_dma_mem; - struct dma_mem op_obq_dma_mem; - struct dma_mem event_q_dma_mem; - struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ]; + struct dma_mem sg_dma_desc[PQISRC_MAX_OUTSTANDING_REQ + 1]; ib_queue_t admin_ib_queue; ob_queue_t admin_ob_queue; ob_queue_t event_q; ob_queue_t op_ob_q[PQISRC_MAX_SUPPORTED_OP_OB_Q - 1];/* 1 event queue */ ib_queue_t op_raid_ib_q[PQISRC_MAX_SUPPORTED_OP_RAID_IB_Q]; ib_queue_t op_aio_ib_q[PQISRC_MAX_SUPPORTED_OP_AIO_IB_Q]; uint32_t max_outstanding_io; uint32_t max_io_for_scsi_ml; uint32_t num_op_raid_ibq; uint32_t num_op_aio_ibq; uint32_t num_op_obq; uint32_t num_elem_per_op_ibq; uint32_t num_elem_per_op_obq; - uint32_t ibq_elem_size; - uint32_t obq_elem_size; + uint32_t max_ibq_elem_size; + uint32_t max_obq_elem_size; pqi_dev_cap_t pqi_dev_cap; uint16_t max_ib_iu_length_per_fw; - uint16_t max_ib_iu_length; - unsigned max_sg_per_iu; + uint16_t max_ib_iu_length; /* should be 1152 */ + uint16_t max_spanning_elems; /* should be 9 spanning elements */ + unsigned max_sg_per_single_iu_element; /* should be 8 */ + unsigned max_sg_per_spanning_cmd; /* should be 68, 67 with AIO writes */ uint8_t ib_spanning_supported : 1; uint8_t ob_spanning_supported : 1; pqi_event_config_t event_config; struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS]; int intr_type; int intr_count; int num_cpus_online; + int num_devs; boolean_t share_opq_and_eventq; rcb_t *rcb; #ifndef LOCKFREE_STACK pqi_taglist_t taglist; #else lockless_stack_t taglist; #endif /* LOCKFREE_STACK */ boolean_t devlist_lockcreated; OS_LOCK_T devlist_lock OS_ATTRIBUTE_ALIGNED(8); char devlist_lock_name[LOCKNAME_SIZE]; pqi_scsi_dev_t *device_list[PQI_MAX_DEVICES][PQI_MAX_MULTILUN]; + pqi_scsi_dev_t *dev_list[PQI_MAX_DEVICES]; OS_SEMA_LOCK_T scan_lock; uint8_t lun_count[PQI_MAX_DEVICES]; uint64_t target_sas_addr[PQI_MAX_EXT_TARGETS]; + uint64_t phys_list_pos; uint64_t prev_heartbeat_count; uint64_t *heartbeat_counter_abs_addr; uint64_t heartbeat_counter_off; - uint8_t *fw_features_section_abs_addr; - uint64_t fw_features_section_off; uint32_t bus_id; uint32_t device_id; uint32_t func_id; + uint8_t adapter_num; /* globally unique adapter number */ char *os_name; boolean_t ctrl_online; uint8_t pqi_reset_quiesce_allowed : 1; boolean_t ctrl_in_pqi_mode; - tid_pool_t tid_pool; + bit_map_t bit_map; uint32_t adapterQDepth; uint32_t dma_mem_consumed; + boolean_t adv_aio_capable; + 
boolean_t aio_raid1_write_bypass; + boolean_t aio_raid5_write_bypass; + boolean_t aio_raid6_write_bypass; + boolean_t enable_stream_detection; + uint16_t max_aio_write_raid5_6; /* bytes */ + uint16_t max_aio_write_raid1_10_2drv; /* bytes */ + uint16_t max_aio_write_raid1_10_3drv; /* bytes */ + uint16_t max_aio_rw_xfer_crypto_nvme; /* bytes */ + uint16_t max_aio_rw_xfer_crypto_sas_sata; /* bytes */ + io_counters_t counters[MAX_IO_COUNTER]; + boolean_t log_io_counters; + boolean_t ld_rescan; + +#ifdef PQI_NEED_RESCAN_TIMER_FOR_RBOD_HOTPLUG + reportlun_data_ext_t *log_dev_list; + size_t log_dev_data_length; + uint32_t num_ptraid_targets; +#endif boolean_t timeout_in_passthrough; boolean_t timeout_in_tmf; -}; + boolean_t sata_unique_wwn; + boolean_t page83id_in_rpl; + boolean_t err_resp_verbose; + +#ifdef DEVICE_HINT + device_hint hint; +#endif + +}pqisrc_softstate_t; + +struct pqi_config_table { + uint8_t signature[8]; /* "CFGTABLE" */ + uint32_t first_section_offset; /* offset in bytes from the base */ + /* address of this table to the */ + /* first section */ +}OS_ATTRIBUTE_PACKED; + +struct pqi_config_table_section_header { + uint16_t section_id; /* as defined by the */ + /* PQI_CONFIG_TABLE_SECTION_* */ + /* manifest constants above */ + uint16_t next_section_offset; /* offset in bytes from base */ + /* address of the table of the */ + /* next section or 0 if last entry */ +}OS_ATTRIBUTE_PACKED; + +struct pqi_config_table_general_info { + struct pqi_config_table_section_header header; + uint32_t section_length; /* size of this section in bytes */ + /* including the section header */ + uint32_t max_outstanding_requests; /* max. outstanding */ + /* commands supported by */ + /* the controller */ + uint32_t max_sg_size; /* max. transfer size of a single */ + /* command */ + uint32_t max_sg_per_request; /* max. number of scatter-gather */ + /* entries supported in a single */ + /* command */ +}OS_ATTRIBUTE_PACKED; + +struct pqi_config_table_firmware_features { + struct pqi_config_table_section_header header; + uint16_t num_elements; + uint8_t features_supported[]; +/* u8 features_requested_by_host[]; */ +/* u8 features_enabled[]; */ +/* The 2 fields below are only valid if the MAX_KNOWN_FEATURE bit is set. */ +/* uint16_t firmware_max_known_feature; */ +/* uint16_t host_max_known_feature; */ +}OS_ATTRIBUTE_PACKED; + +typedef struct pqi_vendor_general_request { + iu_header_t header; /* bytes 0-3 */ + uint16_t response_id; /* bytes 4-5 */ + uint16_t work; /* bytes 6-7 */ + uint16_t request_id; + uint16_t function_code; + union { + struct { + uint16_t first_section; + uint16_t last_section; + uint8_t reserved[48]; + } OS_ATTRIBUTE_PACKED config_table_update; + + struct { + uint64_t buffer_address; + uint32_t buffer_length; + uint8_t reserved[40]; + } OS_ATTRIBUTE_PACKED ofa_memory_allocation; + } data; +}OS_ATTRIBUTE_PACKED pqi_vendor_general_request_t; typedef struct vpd_logical_volume_status { uint8_t peripheral_info; uint8_t page_code; uint8_t reserved; uint8_t page_length; uint8_t volume_status; uint8_t reserved2[3]; uint32_t flags; }vpd_volume_status; #endif diff --git a/sys/dev/smartpqi/smartpqi_tag.c b/sys/dev/smartpqi/smartpqi_tag.c index 624eaf576da1..214b1bd3ce7d 100644 --- a/sys/dev/smartpqi/smartpqi_tag.c +++ b/sys/dev/smartpqi/smartpqi_tag.c @@ -1,271 +1,271 @@ /*- - * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries. + * Copyright 2016-2023 Microchip Technology, Inc. and/or its subsidiaries. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "smartpqi_includes.h" #ifndef LOCKFREE_STACK /* * Function used to release the tag from taglist. */ void pqisrc_put_tag(pqi_taglist_t *taglist, uint32_t elem) { OS_ACQUIRE_SPINLOCK(&(taglist->lock)); - DBG_FUNC("IN\n"); +/* DBG_FUNC("IN\n");*/ ASSERT(taglist->num_elem < taglist->max_elem); if (taglist->num_elem < taglist->max_elem) { taglist->elem_array[taglist->tail] = elem; taglist->num_elem++; taglist->tail = (taglist->tail + 1) % taglist->max_elem; } OS_RELEASE_SPINLOCK(&taglist->lock); - DBG_FUNC("OUT\n"); +/* DBG_FUNC("OUT\n");*/ } /* * Function used to get an unoccupied tag from the tag list. */ uint32_t pqisrc_get_tag(pqi_taglist_t *taglist) { uint32_t elem = INVALID_ELEM; /* DBG_FUNC("IN\n");*/ OS_ACQUIRE_SPINLOCK(&taglist->lock); ASSERT(taglist->num_elem > 0); if (taglist->num_elem > 0) { elem = taglist->elem_array[taglist->head]; taglist->num_elem--; taglist->head = (taglist->head + 1) % taglist->max_elem; } OS_RELEASE_SPINLOCK(&taglist->lock); /* DBG_FUNC("OUT got %d\n", elem);*/ return elem; } /* * Initialize circular queue implementation of tag list. 
*/ int pqisrc_init_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist, uint32_t max_elem) { int ret = PQI_STATUS_SUCCESS; int i = 0; DBG_FUNC("IN\n"); taglist->max_elem = max_elem; taglist->num_elem = 0; taglist->head = 0; taglist->tail = 0; taglist->elem_array = os_mem_alloc(softs, (max_elem * sizeof(uint32_t))); if (!(taglist->elem_array)) { DBG_FUNC("Unable to allocate memory for taglist\n"); ret = PQI_STATUS_FAILURE; goto err_out; } - os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE); - ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname); - if(ret){ - DBG_ERR("tag lock initialization failed\n"); - taglist->lockcreated=false; - goto err_lock; + os_strlcpy(taglist->lockname, "tag_lock", LOCKNAME_SIZE); + ret = os_init_spinlock(softs, &taglist->lock, taglist->lockname); + if(ret){ + DBG_ERR("tag lock initialization failed\n"); + taglist->lockcreated=false; + goto err_lock; } - taglist->lockcreated = true; + taglist->lockcreated = true; /* indices 1 to max_elem are considered as valid tags */ for (i=1; i <= max_elem; i++) { softs->rcb[i].tag = INVALID_ELEM; pqisrc_put_tag(taglist, i); } DBG_FUNC("OUT\n"); return ret; err_lock: - os_mem_free(softs, (char *)taglist->elem_array, - (taglist->max_elem * sizeof(uint32_t))); + os_mem_free(softs, (char *)taglist->elem_array, + (taglist->max_elem * sizeof(uint32_t))); taglist->elem_array = NULL; err_out: DBG_FUNC("OUT failed\n"); return ret; } /* * Destroy circular queue implementation of tag list. */ void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, pqi_taglist_t *taglist) { DBG_FUNC("IN\n"); os_mem_free(softs, (char *)taglist->elem_array, (taglist->max_elem * sizeof(uint32_t))); taglist->elem_array = NULL; - if(taglist->lockcreated==true){ - os_uninit_spinlock(&taglist->lock); - taglist->lockcreated = false; - } + if(taglist->lockcreated==true){ + os_uninit_spinlock(&taglist->lock); + taglist->lockcreated = false; + } DBG_FUNC("OUT\n"); } #else /* LOCKFREE_STACK */ /* * Initialize circular queue implementation of tag list. */ int pqisrc_init_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack, uint32_t max_elem) { int ret = PQI_STATUS_SUCCESS; int index = 0; DBG_FUNC("IN\n"); /* indices 1 to max_elem are considered as valid tags */ stack->max_elem = max_elem + 1; stack->head.data = 0; DBG_INFO("Stack head address :%p\n",&stack->head); /*Allocate memory for stack*/ stack->next_index_array = (uint32_t*)os_mem_alloc(softs, (stack->max_elem * sizeof(uint32_t))); if (!(stack->next_index_array)) { DBG_ERR("Unable to allocate memory for stack\n"); ret = PQI_STATUS_FAILURE; goto err_out; } /* push all the entries to the stack */ for (index = 1; index < stack->max_elem ; index++) { softs->rcb[index].tag = INVALID_ELEM; pqisrc_put_tag(stack, index); } DBG_FUNC("OUT\n"); return ret; err_out: DBG_FUNC("Failed OUT\n"); return ret; } /* * Destroy circular queue implementation of tag list. */ void pqisrc_destroy_taglist(pqisrc_softstate_t *softs, lockless_stack_t *stack) { DBG_FUNC("IN\n"); /* de-allocate stack memory */ if (stack->next_index_array) { os_mem_free(softs,(char*)stack->next_index_array, (stack->max_elem * sizeof(uint32_t))); stack->next_index_array = NULL; } DBG_FUNC("OUT\n"); } /* * Function used to release the tag from taglist. 
*/ void pqisrc_put_tag(lockless_stack_t *stack, uint32_t index) { union head_list cur_head, new_head; DBG_FUNC("IN\n"); - DBG_INFO("push tag :%d\n",index); + DBG_INFO("push tag :%u\n",index); if (index >= stack->max_elem) { ASSERT(false); DBG_INFO("Pushed Invalid index\n"); /* stack full */ return; } if (stack->next_index_array[index] != 0) { ASSERT(false); DBG_INFO("Index already present as tag in the stack\n"); return; } do { cur_head = stack->head; /* increment seq_no */ new_head.top.seq_no = cur_head.top.seq_no + 1; /* update the index at the top of the stack with the new index */ new_head.top.index = index; /* Create a link to the previous index */ stack->next_index_array[index] = cur_head.top.index; }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data)); stack->num_elem++; DBG_FUNC("OUT\n"); return; } /* * Function used to get an unoccupied tag from the tag list. */ uint32_t pqisrc_get_tag(lockless_stack_t *stack) { union head_list cur_head, new_head; DBG_FUNC("IN\n"); do { cur_head = stack->head; if (cur_head.top.index == 0) /* stack empty */ return INVALID_ELEM; /* increment seq_no field */ new_head.top.seq_no = cur_head.top.seq_no + 1; /* update the index at the top of the stack with the next index */ new_head.top.index = stack->next_index_array[cur_head.top.index]; }while(!os_atomic64_cas(&stack->head.data,cur_head.data,new_head.data)); stack->next_index_array[cur_head.top.index] = 0; stack->num_elem--; - DBG_INFO("pop tag: %d\n",cur_head.top.index); + DBG_INFO("pop tag: %u\n",cur_head.top.index); DBG_FUNC("OUT\n"); return cur_head.top.index; /*tag*/ } #endif /* LOCKFREE_STACK */ diff --git a/sys/modules/smartpqi/Makefile b/sys/modules/smartpqi/Makefile index 3666698eef73..080ca34d07e2 100644 --- a/sys/modules/smartpqi/Makefile +++ b/sys/modules/smartpqi/Makefile @@ -1,11 +1,11 @@ # 5/10/2017 KMOD = smartpqi .PATH: ${SRCTOP}/sys/dev/${KMOD} -SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c +SRCS=smartpqi_mem.c smartpqi_intr.c smartpqi_main.c smartpqi_cam.c smartpqi_ioctl.c smartpqi_misc.c smartpqi_sis.c smartpqi_init.c smartpqi_queue.c smartpqi_tag.c smartpqi_cmd.c smartpqi_request.c smartpqi_response.c smartpqi_event.c smartpqi_helper.c smartpqi_discovery.c smartpqi_features.c SRCS+= device_if.h bus_if.h pci_if.h opt_scsi.h opt_cam.h .include
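The LOCKFREE_STACK tag allocator above packs a sequence number next to the
top-of-stack index so that a single 64-bit compare-and-swap both swings the
top of the stack and detects the ABA case. Below is a minimal, self-contained
user-space sketch of that technique for reference only; the names (tag_stack,
tag_push, tag_pop) and the use of C11 <stdatomic.h> are illustrative
assumptions, not the driver's OS_ATOMIC64_T/os_atomic64_cas wrappers.

/*
 * Illustrative sketch only (not driver code): ABA-safe lock-free index
 * stack modelled on the LOCKFREE_STACK path in smartpqi_tag.c.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define TAG_INVALID	0u		/* index 0 is reserved as the empty sentinel */

union tag_head {
	struct {
		uint32_t seq_no;	/* bumped on every push/pop to defeat ABA */
		uint32_t index;		/* tag currently on top of the stack */
	} top;
	uint64_t data;
};

struct tag_stack {
	uint32_t *next;			/* next[i] links tag i to the tag below it */
	uint32_t max_elem;		/* slots; valid tags are 1..max_elem-1 */
	_Atomic uint64_t head;
};

static int
tag_stack_init(struct tag_stack *s, uint32_t max_tags)
{
	s->max_elem = max_tags + 1;	/* slot 0 unused, as in pqisrc_init_taglist() */
	s->next = calloc(s->max_elem, sizeof(*s->next));
	if (s->next == NULL)
		return (-1);
	atomic_store(&s->head, 0);
	return (0);
}

static void
tag_push(struct tag_stack *s, uint32_t index)
{
	union tag_head cur, new;

	do {
		cur.data = atomic_load(&s->head);
		new.top.seq_no = cur.top.seq_no + 1;
		new.top.index = index;
		s->next[index] = cur.top.index;	/* link new top to old top */
	} while (!atomic_compare_exchange_weak(&s->head, &cur.data, new.data));
}

static uint32_t
tag_pop(struct tag_stack *s)
{
	union tag_head cur, new;

	do {
		cur.data = atomic_load(&s->head);
		if (cur.top.index == TAG_INVALID)
			return (TAG_INVALID);	/* stack empty */
		new.top.seq_no = cur.top.seq_no + 1;
		new.top.index = s->next[cur.top.index];	/* next tag becomes top */
	} while (!atomic_compare_exchange_weak(&s->head, &cur.data, new.data));

	return (cur.top.index);
}

int
main(void)
{
	struct tag_stack s;
	uint32_t tag;

	if (tag_stack_init(&s, 4) != 0)
		return (1);
	for (tag = 1; tag <= 4; tag++)	/* seed tags 1..4, as the driver does at init */
		tag_push(&s, tag);
	tag = tag_pop(&s);		/* hand a tag to an I/O request */
	tag_push(&s, tag);		/* return it on completion */
	free(s.next);
	return (0);
}

Seeding the pool with tags 1..N at start-up and treating index 0 as the empty
sentinel mirrors how pqisrc_init_taglist(), pqisrc_get_tag() and
pqisrc_put_tag() use the stack above.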