Index: head/sys/dev/bge/if_bgereg.h =================================================================== --- head/sys/dev/bge/if_bgereg.h (revision 346385) +++ head/sys/dev/bge/if_bgereg.h (revision 346386) @@ -1,3069 +1,3077 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * BCM570x memory map. The internal memory layout varies somewhat * depending on whether or not we have external SSRAM attached. * The BCM5700 can have up to 16MB of external memory. The BCM5701 * is apparently not designed to use external SSRAM. The mappings * up to the first 4 send rings are the same for both internal and * external memory configurations. Note that mini RX ring space is * only available with external SSRAM configurations, which means * the mini RX ring is not supported on the BCM5701. * * The NIC's memory can be accessed by the host in one of 3 ways: * * 1) Indirect register access. The MEMWIN_BASEADDR and MEMWIN_DATA * registers in PCI config space can be used to read any 32-bit * address within the NIC's memory. * * 2) Memory window access. The MEMWIN_BASEADDR register in PCI config * space can be used in conjunction with the memory window in the * device register space at offset 0x8000 to read any 32K chunk * of NIC memory. * * 3) Flat mode. If the 'flat mode' bit in the PCI state register is * set, the device I/O mapping consumes 32MB of host address space, * allowing all of the registers and internal NIC memory to be * accessed directly. NIC memory addresses are offset by 0x01000000. * Flat mode consumes so much host address space that it is not * recommended. 
*/ #define BGE_PAGE_ZERO 0x00000000 #define BGE_PAGE_ZERO_END 0x000000FF #define BGE_SEND_RING_RCB 0x00000100 #define BGE_SEND_RING_RCB_END 0x000001FF #define BGE_RX_RETURN_RING_RCB 0x00000200 #define BGE_RX_RETURN_RING_RCB_END 0x000002FF #define BGE_STATS_BLOCK 0x00000300 #define BGE_STATS_BLOCK_END 0x00000AFF #define BGE_STATUS_BLOCK 0x00000B00 #define BGE_STATUS_BLOCK_END 0x00000B4F #define BGE_SRAM_FW_MB 0x00000B50 #define BGE_SRAM_DATA_SIG 0x00000B54 #define BGE_SRAM_DATA_CFG 0x00000B58 #define BGE_SRAM_FW_CMD_MB 0x00000B78 #define BGE_SRAM_FW_CMD_LEN_MB 0x00000B7C #define BGE_SRAM_FW_CMD_DATA_MB 0x00000B80 #define BGE_SRAM_FW_DRV_STATE_MB 0x00000C04 #define BGE_SRAM_MAC_ADDR_HIGH_MB 0x00000C14 #define BGE_SRAM_MAC_ADDR_LOW_MB 0x00000C18 #define BGE_SOFTWARE_GENCOMM_END 0x00000FFF #define BGE_UNMAPPED 0x00001000 #define BGE_UNMAPPED_END 0x00001FFF #define BGE_DMA_DESCRIPTORS 0x00002000 #define BGE_DMA_DESCRIPTORS_END 0x00003FFF #define BGE_SEND_RING_5717 0x00004000 #define BGE_SEND_RING_1_TO_4 0x00004000 #define BGE_SEND_RING_1_TO_4_END 0x00005FFF /* Firmware interface */ #define BGE_SRAM_DATA_SIG_MAGIC 0x4B657654 /* 'KevT' */ #define BGE_FW_CMD_DRV_ALIVE 0x00000001 #define BGE_FW_CMD_PAUSE 0x00000002 #define BGE_FW_CMD_IPV4_ADDR_CHANGE 0x00000003 #define BGE_FW_CMD_IPV6_ADDR_CHANGE 0x00000004 #define BGE_FW_CMD_LINK_UPDATE 0x0000000C #define BGE_FW_CMD_DRV_ALIVE2 0x0000000D #define BGE_FW_CMD_DRV_ALIVE3 0x0000000E #define BGE_FW_HB_TIMEOUT_SEC 3 #define BGE_FW_DRV_STATE_START 0x00000001 #define BGE_FW_DRV_STATE_START_DONE 0x80000001 #define BGE_FW_DRV_STATE_UNLOAD 0x00000002 #define BGE_FW_DRV_STATE_UNLOAD_DONE 0x80000002 #define BGE_FW_DRV_STATE_WOL 0x00000003 #define BGE_FW_DRV_STATE_SUSPEND 0x00000004 /* Mappings for internal memory configuration */ #define BGE_STD_RX_RINGS 0x00006000 #define BGE_STD_RX_RINGS_END 0x00006FFF #define BGE_JUMBO_RX_RINGS 0x00007000 #define BGE_JUMBO_RX_RINGS_END 0x00007FFF #define BGE_BUFFPOOL_1 0x00008000 #define BGE_BUFFPOOL_1_END 0x0000FFFF #define BGE_BUFFPOOL_2 0x00010000 /* or expansion ROM */ #define BGE_BUFFPOOL_2_END 0x00017FFF #define BGE_BUFFPOOL_3 0x00018000 /* or expansion ROM */ #define BGE_BUFFPOOL_3_END 0x0001FFFF #define BGE_STD_RX_RINGS_5717 0x00040000 #define BGE_JUMBO_RX_RINGS_5717 0x00044400 /* Mappings for external SSRAM configurations */ #define BGE_SEND_RING_5_TO_6 0x00006000 #define BGE_SEND_RING_5_TO_6_END 0x00006FFF #define BGE_SEND_RING_7_TO_8 0x00007000 #define BGE_SEND_RING_7_TO_8_END 0x00007FFF #define BGE_SEND_RING_9_TO_16 0x00008000 #define BGE_SEND_RING_9_TO_16_END 0x0000BFFF #define BGE_EXT_STD_RX_RINGS 0x0000C000 #define BGE_EXT_STD_RX_RINGS_END 0x0000CFFF #define BGE_EXT_JUMBO_RX_RINGS 0x0000D000 #define BGE_EXT_JUMBO_RX_RINGS_END 0x0000DFFF #define BGE_MINI_RX_RINGS 0x0000E000 #define BGE_MINI_RX_RINGS_END 0x0000FFFF #define BGE_AVAIL_REGION1 0x00010000 /* or expansion ROM */ #define BGE_AVAIL_REGION1_END 0x00017FFF #define BGE_AVAIL_REGION2 0x00018000 /* or expansion ROM */ #define BGE_AVAIL_REGION2_END 0x0001FFFF #define BGE_EXT_SSRAM 0x00020000 #define BGE_EXT_SSRAM_END 0x000FFFFF /* * BCM570x register offsets. These are memory mapped registers * which can be accessed with the CSR_READ_4()/CSR_WRITE_4() macros. * Each register must be accessed using 32 bit operations. * * All registers are accessed through a 32K shared memory block. * The first group of registers are actually copies of the PCI * configuration space registers. */ /* * PCI registers defined in the PCI 2.2 spec. 
*/ #define BGE_PCI_VID 0x00 #define BGE_PCI_DID 0x02 #define BGE_PCI_CMD 0x04 #define BGE_PCI_STS 0x06 #define BGE_PCI_REV 0x08 #define BGE_PCI_CLASS 0x09 #define BGE_PCI_CACHESZ 0x0C #define BGE_PCI_LATTIMER 0x0D #define BGE_PCI_HDRTYPE 0x0E #define BGE_PCI_BIST 0x0F #define BGE_PCI_BAR0 0x10 #define BGE_PCI_BAR1 0x14 #define BGE_PCI_SUBSYS 0x2C #define BGE_PCI_SUBVID 0x2E #define BGE_PCI_ROMBASE 0x30 #define BGE_PCI_CAPPTR 0x34 #define BGE_PCI_INTLINE 0x3C #define BGE_PCI_INTPIN 0x3D #define BGE_PCI_MINGNT 0x3E #define BGE_PCI_MAXLAT 0x3F #define BGE_PCI_PCIXCAP 0x40 #define BGE_PCI_NEXTPTR_PM 0x41 #define BGE_PCI_PCIX_CMD 0x42 #define BGE_PCI_PCIX_STS 0x44 #define BGE_PCI_PWRMGMT_CAPID 0x48 #define BGE_PCI_NEXTPTR_VPD 0x49 #define BGE_PCI_PWRMGMT_CAPS 0x4A #define BGE_PCI_PWRMGMT_CMD 0x4C #define BGE_PCI_PWRMGMT_STS 0x4D #define BGE_PCI_PWRMGMT_DATA 0x4F #define BGE_PCI_VPD_CAPID 0x50 #define BGE_PCI_NEXTPTR_MSI 0x51 #define BGE_PCI_VPD_ADDR 0x52 #define BGE_PCI_VPD_DATA 0x54 #define BGE_PCI_MSI_CAPID 0x58 #define BGE_PCI_NEXTPTR_NONE 0x59 #define BGE_PCI_MSI_CTL 0x5A #define BGE_PCI_MSI_ADDR_HI 0x5C #define BGE_PCI_MSI_ADDR_LO 0x60 #define BGE_PCI_MSI_DATA 0x64 /* * PCI Express definitions * According to * PCI Express base specification, REV. 1.0a */ /* PCI Express device control, 16bits */ #define BGE_PCIE_DEVCTL 0x08 #define BGE_PCIE_DEVCTL_MAX_READRQ_MASK 0x7000 #define BGE_PCIE_DEVCTL_MAX_READRQ_128 0x0000 #define BGE_PCIE_DEVCTL_MAX_READRQ_256 0x1000 #define BGE_PCIE_DEVCTL_MAX_READRQ_512 0x2000 #define BGE_PCIE_DEVCTL_MAX_READRQ_1024 0x3000 #define BGE_PCIE_DEVCTL_MAX_READRQ_2048 0x4000 #define BGE_PCIE_DEVCTL_MAX_READRQ_4096 0x5000 /* PCI MSI. ??? */ #define BGE_PCIE_CAPID_REG 0xD0 #define BGE_PCIE_CAPID 0x10 /* * PCI registers specific to the BCM570x family. */ #define BGE_PCI_MISC_CTL 0x68 #define BGE_PCI_DMA_RW_CTL 0x6C #define BGE_PCI_PCISTATE 0x70 #define BGE_PCI_CLKCTL 0x74 #define BGE_PCI_REG_BASEADDR 0x78 #define BGE_PCI_MEMWIN_BASEADDR 0x7C #define BGE_PCI_REG_DATA 0x80 #define BGE_PCI_MEMWIN_DATA 0x84 #define BGE_PCI_MODECTL 0x88 #define BGE_PCI_MISC_CFG 0x8C #define BGE_PCI_MISC_LOCALCTL 0x90 #define BGE_PCI_UNDI_RX_STD_PRODIDX_HI 0x98 #define BGE_PCI_UNDI_RX_STD_PRODIDX_LO 0x9C #define BGE_PCI_UNDI_RX_RTN_CONSIDX_HI 0xA0 #define BGE_PCI_UNDI_RX_RTN_CONSIDX_LO 0xA4 #define BGE_PCI_UNDI_TX_BD_PRODIDX_HI 0xA8 #define BGE_PCI_UNDI_TX_BD_PRODIDX_LO 0xAC #define BGE_PCI_ISR_MBX_HI 0xB0 #define BGE_PCI_ISR_MBX_LO 0xB4 #define BGE_PCI_PRODID_ASICREV 0xBC #define BGE_PCI_GEN2_PRODID_ASICREV 0xF4 #define BGE_PCI_GEN15_PRODID_ASICREV 0xFC /* PCI Misc. 
Host control register */ #define BGE_PCIMISCCTL_CLEAR_INTA 0x00000001 #define BGE_PCIMISCCTL_MASK_PCI_INTR 0x00000002 #define BGE_PCIMISCCTL_ENDIAN_BYTESWAP 0x00000004 #define BGE_PCIMISCCTL_ENDIAN_WORDSWAP 0x00000008 #define BGE_PCIMISCCTL_PCISTATE_RW 0x00000010 #define BGE_PCIMISCCTL_CLOCKCTL_RW 0x00000020 #define BGE_PCIMISCCTL_REG_WORDSWAP 0x00000040 #define BGE_PCIMISCCTL_INDIRECT_ACCESS 0x00000080 #define BGE_PCIMISCCTL_TAGGED_STATUS 0x00000200 #define BGE_PCIMISCCTL_ASICREV 0xFFFF0000 #define BGE_PCIMISCCTL_ASICREV_SHIFT 16 #define BGE_HIF_SWAP_OPTIONS (BGE_PCIMISCCTL_ENDIAN_WORDSWAP) #define BGE_INIT \ (BGE_HIF_SWAP_OPTIONS|BGE_PCIMISCCTL_CLEAR_INTA| \ BGE_PCIMISCCTL_MASK_PCI_INTR|BGE_PCIMISCCTL_INDIRECT_ACCESS) #define BGE_CHIPID_TIGON_I 0x4000 #define BGE_CHIPID_TIGON_II 0x6000 #define BGE_CHIPID_BCM5700_A0 0x7000 #define BGE_CHIPID_BCM5700_A1 0x7001 #define BGE_CHIPID_BCM5700_B0 0x7100 #define BGE_CHIPID_BCM5700_B1 0x7101 #define BGE_CHIPID_BCM5700_B2 0x7102 #define BGE_CHIPID_BCM5700_B3 0x7103 #define BGE_CHIPID_BCM5700_ALTIMA 0x7104 #define BGE_CHIPID_BCM5700_C0 0x7200 #define BGE_CHIPID_BCM5701_A0 0x0000 /* grrrr */ #define BGE_CHIPID_BCM5701_B0 0x0100 #define BGE_CHIPID_BCM5701_B2 0x0102 #define BGE_CHIPID_BCM5701_B5 0x0105 #define BGE_CHIPID_BCM5703_A0 0x1000 #define BGE_CHIPID_BCM5703_A1 0x1001 #define BGE_CHIPID_BCM5703_A2 0x1002 #define BGE_CHIPID_BCM5703_A3 0x1003 #define BGE_CHIPID_BCM5703_B0 0x1100 #define BGE_CHIPID_BCM5704_A0 0x2000 #define BGE_CHIPID_BCM5704_A1 0x2001 #define BGE_CHIPID_BCM5704_A2 0x2002 #define BGE_CHIPID_BCM5704_A3 0x2003 #define BGE_CHIPID_BCM5704_B0 0x2100 #define BGE_CHIPID_BCM5705_A0 0x3000 #define BGE_CHIPID_BCM5705_A1 0x3001 #define BGE_CHIPID_BCM5705_A2 0x3002 #define BGE_CHIPID_BCM5705_A3 0x3003 #define BGE_CHIPID_BCM5750_A0 0x4000 #define BGE_CHIPID_BCM5750_A1 0x4001 #define BGE_CHIPID_BCM5750_A3 0x4000 #define BGE_CHIPID_BCM5750_B0 0x4100 #define BGE_CHIPID_BCM5750_B1 0x4101 #define BGE_CHIPID_BCM5750_C0 0x4200 #define BGE_CHIPID_BCM5750_C1 0x4201 #define BGE_CHIPID_BCM5750_C2 0x4202 #define BGE_CHIPID_BCM5714_A0 0x5000 #define BGE_CHIPID_BCM5752_A0 0x6000 #define BGE_CHIPID_BCM5752_A1 0x6001 #define BGE_CHIPID_BCM5752_A2 0x6002 #define BGE_CHIPID_BCM5714_B0 0x8000 #define BGE_CHIPID_BCM5714_B3 0x8003 #define BGE_CHIPID_BCM5715_A0 0x9000 #define BGE_CHIPID_BCM5715_A1 0x9001 #define BGE_CHIPID_BCM5715_A3 0x9003 #define BGE_CHIPID_BCM5755_A0 0xa000 #define BGE_CHIPID_BCM5755_A1 0xa001 #define BGE_CHIPID_BCM5755_A2 0xa002 #define BGE_CHIPID_BCM5722_A0 0xa200 #define BGE_CHIPID_BCM5754_A0 0xb000 #define BGE_CHIPID_BCM5754_A1 0xb001 #define BGE_CHIPID_BCM5754_A2 0xb002 #define BGE_CHIPID_BCM5761_A0 0x5761000 #define BGE_CHIPID_BCM5761_A1 0x5761100 #define BGE_CHIPID_BCM5784_A0 0x5784000 #define BGE_CHIPID_BCM5784_A1 0x5784100 #define BGE_CHIPID_BCM5787_A0 0xb000 #define BGE_CHIPID_BCM5787_A1 0xb001 #define BGE_CHIPID_BCM5787_A2 0xb002 #define BGE_CHIPID_BCM5906_A0 0xc000 #define BGE_CHIPID_BCM5906_A1 0xc001 #define BGE_CHIPID_BCM5906_A2 0xc002 #define BGE_CHIPID_BCM57780_A0 0x57780000 #define BGE_CHIPID_BCM57780_A1 0x57780001 #define BGE_CHIPID_BCM5717_A0 0x05717000 #define BGE_CHIPID_BCM5717_B0 0x05717100 #define BGE_CHIPID_BCM5717_C0 0x05717200 #define BGE_CHIPID_BCM5719_A0 0x05719000 #define BGE_CHIPID_BCM5720_A0 0x05720000 #define BGE_CHIPID_BCM5762_A0 0x05762000 #define BGE_CHIPID_BCM57765_A0 0x57785000 #define BGE_CHIPID_BCM57765_B0 0x57785100 /* shorthand one */ #define BGE_ASICREV(x) ((x) >> 12) #define BGE_ASICREV_BCM5701 0x00 
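/*
 * Illustrative sketch only (not part of this header): how a driver might
 * recover the chip ID and ASIC revision from PCI config space using the
 * macros above and below.  "dev" is a hypothetical device_t handle; newer
 * families report the ID through the GEN2/GEN15 product ID registers
 * instead of BGE_PCI_PRODID_ASICREV.
 *
 *	uint32_t chipid;
 *
 *	chipid = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >>
 *	    BGE_PCIMISCCTL_ASICREV_SHIFT;
 *	if (BGE_ASICREV(chipid) == BGE_ASICREV_USE_PRODID_REG)
 *		chipid = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4);
 *	asicrev = BGE_ASICREV(chipid);
 *	chiprev = BGE_CHIPREV(chipid);
 */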
#define BGE_ASICREV_BCM5703 0x01 #define BGE_ASICREV_BCM5704 0x02 #define BGE_ASICREV_BCM5705 0x03 #define BGE_ASICREV_BCM5750 0x04 #define BGE_ASICREV_BCM5714_A0 0x05 #define BGE_ASICREV_BCM5752 0x06 #define BGE_ASICREV_BCM5700 0x07 #define BGE_ASICREV_BCM5780 0x08 #define BGE_ASICREV_BCM5714 0x09 #define BGE_ASICREV_BCM5755 0x0a #define BGE_ASICREV_BCM5754 0x0b #define BGE_ASICREV_BCM5787 0x0b #define BGE_ASICREV_BCM5906 0x0c /* Should consult BGE_PCI_PRODID_ASICREV for ChipID */ #define BGE_ASICREV_USE_PRODID_REG 0x0f /* BGE_PCI_PRODID_ASICREV ASIC rev. identifiers. */ #define BGE_ASICREV_BCM5717 0x5717 #define BGE_ASICREV_BCM5719 0x5719 #define BGE_ASICREV_BCM5720 0x5720 #define BGE_ASICREV_BCM5761 0x5761 #define BGE_ASICREV_BCM5762 0x5762 #define BGE_ASICREV_BCM5784 0x5784 #define BGE_ASICREV_BCM5785 0x5785 #define BGE_ASICREV_BCM57765 0x57785 #define BGE_ASICREV_BCM57766 0x57766 #define BGE_ASICREV_BCM57780 0x57780 /* chip revisions */ #define BGE_CHIPREV(x) ((x) >> 8) #define BGE_CHIPREV_5700_AX 0x70 #define BGE_CHIPREV_5700_BX 0x71 #define BGE_CHIPREV_5700_CX 0x72 #define BGE_CHIPREV_5701_AX 0x00 #define BGE_CHIPREV_5703_AX 0x10 #define BGE_CHIPREV_5704_AX 0x20 #define BGE_CHIPREV_5704_BX 0x21 #define BGE_CHIPREV_5750_AX 0x40 #define BGE_CHIPREV_5750_BX 0x41 /* BGE_PCI_PRODID_ASICREV chip rev. identifiers. */ #define BGE_CHIPREV_5717_AX 0x57170 #define BGE_CHIPREV_5717_BX 0x57171 #define BGE_CHIPREV_5761_AX 0x57611 #define BGE_CHIPREV_57765_AX 0x577850 #define BGE_CHIPREV_5784_AX 0x57841 /* PCI DMA Read/Write Control register */ #define BGE_PCIDMARWCTL_MINDMA 0x000000FF #define BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT 0x00000001 #define BGE_PCIDMARWCTL_RDADRR_BNDRY 0x00000700 #define BGE_PCIDMARWCTL_WRADDR_BNDRY 0x00003800 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE 0x0000C000 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL 0x00004000 #define BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL 0x00008000 #define BGE_PCIDMARWCTL_RD_WAT 0x00070000 #define BGE_PCIDMARWCTL_WR_WAT 0x00380000 #define BGE_PCIDMARWCTL_USE_MRM 0x00400000 #define BGE_PCIDMARWCTL_ASRT_ALL_BE 0x00800000 #define BGE_PCIDMARWCTL_DFLT_PCI_RD_CMD 0x0F000000 #define BGE_PCIDMARWCTL_DFLT_PCI_WR_CMD 0xF0000000 #define BGE_PCIDMARWCTL_RD_WAT_SHIFT(x) ((x) << 16) #define BGE_PCIDMARWCTL_WR_WAT_SHIFT(x) ((x) << 19) #define BGE_PCIDMARWCTL_RD_CMD_SHIFT(x) ((x) << 24) #define BGE_PCIDMARWCTL_WR_CMD_SHIFT(x) ((x) << 28) #define BGE_PCIDMARWCTL_TAGGED_STATUS_WA 0x00000080 #define BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK 0x00000380 #define BGE_PCI_READ_BNDRY_DISABLE 0x00000000 #define BGE_PCI_READ_BNDRY_16BYTES 0x00000100 #define BGE_PCI_READ_BNDRY_32BYTES 0x00000200 #define BGE_PCI_READ_BNDRY_64BYTES 0x00000300 #define BGE_PCI_READ_BNDRY_128BYTES 0x00000400 #define BGE_PCI_READ_BNDRY_256BYTES 0x00000500 #define BGE_PCI_READ_BNDRY_512BYTES 0x00000600 #define BGE_PCI_READ_BNDRY_1024BYTES 0x00000700 #define BGE_PCI_WRITE_BNDRY_DISABLE 0x00000000 #define BGE_PCI_WRITE_BNDRY_16BYTES 0x00000800 #define BGE_PCI_WRITE_BNDRY_32BYTES 0x00001000 #define BGE_PCI_WRITE_BNDRY_64BYTES 0x00001800 #define BGE_PCI_WRITE_BNDRY_128BYTES 0x00002000 #define BGE_PCI_WRITE_BNDRY_256BYTES 0x00002800 #define BGE_PCI_WRITE_BNDRY_512BYTES 0x00003000 #define BGE_PCI_WRITE_BNDRY_1024BYTES 0x00003800 /* * PCI state register -- note, this register is read only * unless the PCISTATE_WR bit of the PCI Misc. Host Control * register is set. 
*/ #define BGE_PCISTATE_FORCE_RESET 0x00000001 #define BGE_PCISTATE_INTR_STATE 0x00000002 #define BGE_PCISTATE_PCI_BUSMODE 0x00000004 /* 1 = PCI, 0 = PCI-X */ #define BGE_PCISTATE_PCI_BUSSPEED 0x00000008 /* 1 = 66/133, 0 = 33/66 */ #define BGE_PCISTATE_32BIT_BUS 0x00000010 /* 1 = 32bit, 0 = 64bit */ #define BGE_PCISTATE_ROM_ENABLE 0x00000020 #define BGE_PCISTATE_ROM_RETRY_ENABLE 0x00000040 #define BGE_PCISTATE_FLATVIEW_MODE 0x00000100 #define BGE_PCISTATE_PCI_TGT_RETRY_MAX 0x00000E00 #define BGE_PCISTATE_RETRY_SAME_DMA 0x00002000 #define BGE_PCISTATE_ALLOW_APE_CTLSPC_WR 0x00010000 #define BGE_PCISTATE_ALLOW_APE_SHMEM_WR 0x00020000 #define BGE_PCISTATE_ALLOW_APE_PSPACE_WR 0x00040000 /* * PCI Clock Control register -- note, this register is read only * unless the CLOCKCTL_RW bit of the PCI Misc. Host Control * register is set. */ #define BGE_PCICLOCKCTL_DETECTED_SPEED 0x0000000F #define BGE_PCICLOCKCTL_M66EN 0x00000080 #define BGE_PCICLOCKCTL_LOWPWR_CLKMODE 0x00000200 #define BGE_PCICLOCKCTL_RXCPU_CLK_DIS 0x00000400 #define BGE_PCICLOCKCTL_TXCPU_CLK_DIS 0x00000800 #define BGE_PCICLOCKCTL_ALTCLK 0x00001000 #define BGE_PCICLOCKCTL_ALTCLK_SRC 0x00002000 #define BGE_PCICLOCKCTL_PCIPLL_DISABLE 0x00004000 #define BGE_PCICLOCKCTL_SYSPLL_DISABLE 0x00008000 #define BGE_PCICLOCKCTL_BIST_ENABLE 0x00010000 #ifndef PCIM_CMD_MWIEN #define PCIM_CMD_MWIEN 0x0010 #endif #ifndef PCIM_CMD_INTxDIS #define PCIM_CMD_INTxDIS 0x0400 #endif /* BAR0 (MAC) Register Definitions */ /* * High priority mailbox registers * Each mailbox is 64-bits wide, though we only use the * lower 32 bits. To write a 64-bit value, write the upper 32 bits * first. The NIC will load the mailbox after the lower 32 bit word * has been updated. */ #define BGE_MBX_IRQ0_HI 0x0200 #define BGE_MBX_IRQ0_LO 0x0204 #define BGE_MBX_IRQ1_HI 0x0208 #define BGE_MBX_IRQ1_LO 0x020C #define BGE_MBX_IRQ2_HI 0x0210 #define BGE_MBX_IRQ2_LO 0x0214 #define BGE_MBX_IRQ3_HI 0x0218 #define BGE_MBX_IRQ3_LO 0x021C #define BGE_MBX_GEN0_HI 0x0220 #define BGE_MBX_GEN0_LO 0x0224 #define BGE_MBX_GEN1_HI 0x0228 #define BGE_MBX_GEN1_LO 0x022C #define BGE_MBX_GEN2_HI 0x0230 #define BGE_MBX_GEN2_LO 0x0234 #define BGE_MBX_GEN3_HI 0x0228 #define BGE_MBX_GEN3_LO 0x022C #define BGE_MBX_GEN4_HI 0x0240 #define BGE_MBX_GEN4_LO 0x0244 #define BGE_MBX_GEN5_HI 0x0248 #define BGE_MBX_GEN5_LO 0x024C #define BGE_MBX_GEN6_HI 0x0250 #define BGE_MBX_GEN6_LO 0x0254 #define BGE_MBX_GEN7_HI 0x0258 #define BGE_MBX_GEN7_LO 0x025C #define BGE_MBX_RELOAD_STATS_HI 0x0260 #define BGE_MBX_RELOAD_STATS_LO 0x0264 #define BGE_MBX_RX_STD_PROD_HI 0x0268 #define BGE_MBX_RX_STD_PROD_LO 0x026C #define BGE_MBX_RX_JUMBO_PROD_HI 0x0270 #define BGE_MBX_RX_JUMBO_PROD_LO 0x0274 #define BGE_MBX_RX_MINI_PROD_HI 0x0278 #define BGE_MBX_RX_MINI_PROD_LO 0x027C #define BGE_MBX_RX_CONS0_HI 0x0280 #define BGE_MBX_RX_CONS0_LO 0x0284 #define BGE_MBX_RX_CONS1_HI 0x0288 #define BGE_MBX_RX_CONS1_LO 0x028C #define BGE_MBX_RX_CONS2_HI 0x0290 #define BGE_MBX_RX_CONS2_LO 0x0294 #define BGE_MBX_RX_CONS3_HI 0x0298 #define BGE_MBX_RX_CONS3_LO 0x029C #define BGE_MBX_RX_CONS4_HI 0x02A0 #define BGE_MBX_RX_CONS4_LO 0x02A4 #define BGE_MBX_RX_CONS5_HI 0x02A8 #define BGE_MBX_RX_CONS5_LO 0x02AC #define BGE_MBX_RX_CONS6_HI 0x02B0 #define BGE_MBX_RX_CONS6_LO 0x02B4 #define BGE_MBX_RX_CONS7_HI 0x02B8 #define BGE_MBX_RX_CONS7_LO 0x02BC #define BGE_MBX_RX_CONS8_HI 0x02C0 #define BGE_MBX_RX_CONS8_LO 0x02C4 #define BGE_MBX_RX_CONS9_HI 0x02C8 #define BGE_MBX_RX_CONS9_LO 0x02CC #define BGE_MBX_RX_CONS10_HI 0x02D0 #define BGE_MBX_RX_CONS10_LO 0x02D4 
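/*
 * Illustrative sketch only (not part of this header): kicking the standard
 * RX ring producer index mailbox.  Since only the lower 32 bits of each
 * mailbox are used, a single write to the _LO register suffices; for a true
 * 64-bit update the _HI word must be written first, as noted above.
 * "sc" and "prodidx" are hypothetical driver state.
 *
 *	CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, prodidx);
 */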
#define BGE_MBX_RX_CONS11_HI 0x02D8 #define BGE_MBX_RX_CONS11_LO 0x02DC #define BGE_MBX_RX_CONS12_HI 0x02E0 #define BGE_MBX_RX_CONS12_LO 0x02E4 #define BGE_MBX_RX_CONS13_HI 0x02E8 #define BGE_MBX_RX_CONS13_LO 0x02EC #define BGE_MBX_RX_CONS14_HI 0x02F0 #define BGE_MBX_RX_CONS14_LO 0x02F4 #define BGE_MBX_RX_CONS15_HI 0x02F8 #define BGE_MBX_RX_CONS15_LO 0x02FC #define BGE_MBX_TX_HOST_PROD0_HI 0x0300 #define BGE_MBX_TX_HOST_PROD0_LO 0x0304 #define BGE_MBX_TX_HOST_PROD1_HI 0x0308 #define BGE_MBX_TX_HOST_PROD1_LO 0x030C #define BGE_MBX_TX_HOST_PROD2_HI 0x0310 #define BGE_MBX_TX_HOST_PROD2_LO 0x0314 #define BGE_MBX_TX_HOST_PROD3_HI 0x0318 #define BGE_MBX_TX_HOST_PROD3_LO 0x031C #define BGE_MBX_TX_HOST_PROD4_HI 0x0320 #define BGE_MBX_TX_HOST_PROD4_LO 0x0324 #define BGE_MBX_TX_HOST_PROD5_HI 0x0328 #define BGE_MBX_TX_HOST_PROD5_LO 0x032C #define BGE_MBX_TX_HOST_PROD6_HI 0x0330 #define BGE_MBX_TX_HOST_PROD6_LO 0x0334 #define BGE_MBX_TX_HOST_PROD7_HI 0x0338 #define BGE_MBX_TX_HOST_PROD7_LO 0x033C #define BGE_MBX_TX_HOST_PROD8_HI 0x0340 #define BGE_MBX_TX_HOST_PROD8_LO 0x0344 #define BGE_MBX_TX_HOST_PROD9_HI 0x0348 #define BGE_MBX_TX_HOST_PROD9_LO 0x034C #define BGE_MBX_TX_HOST_PROD10_HI 0x0350 #define BGE_MBX_TX_HOST_PROD10_LO 0x0354 #define BGE_MBX_TX_HOST_PROD11_HI 0x0358 #define BGE_MBX_TX_HOST_PROD11_LO 0x035C #define BGE_MBX_TX_HOST_PROD12_HI 0x0360 #define BGE_MBX_TX_HOST_PROD12_LO 0x0364 #define BGE_MBX_TX_HOST_PROD13_HI 0x0368 #define BGE_MBX_TX_HOST_PROD13_LO 0x036C #define BGE_MBX_TX_HOST_PROD14_HI 0x0370 #define BGE_MBX_TX_HOST_PROD14_LO 0x0374 #define BGE_MBX_TX_HOST_PROD15_HI 0x0378 #define BGE_MBX_TX_HOST_PROD15_LO 0x037C #define BGE_MBX_TX_NIC_PROD0_HI 0x0380 #define BGE_MBX_TX_NIC_PROD0_LO 0x0384 #define BGE_MBX_TX_NIC_PROD1_HI 0x0388 #define BGE_MBX_TX_NIC_PROD1_LO 0x038C #define BGE_MBX_TX_NIC_PROD2_HI 0x0390 #define BGE_MBX_TX_NIC_PROD2_LO 0x0394 #define BGE_MBX_TX_NIC_PROD3_HI 0x0398 #define BGE_MBX_TX_NIC_PROD3_LO 0x039C #define BGE_MBX_TX_NIC_PROD4_HI 0x03A0 #define BGE_MBX_TX_NIC_PROD4_LO 0x03A4 #define BGE_MBX_TX_NIC_PROD5_HI 0x03A8 #define BGE_MBX_TX_NIC_PROD5_LO 0x03AC #define BGE_MBX_TX_NIC_PROD6_HI 0x03B0 #define BGE_MBX_TX_NIC_PROD6_LO 0x03B4 #define BGE_MBX_TX_NIC_PROD7_HI 0x03B8 #define BGE_MBX_TX_NIC_PROD7_LO 0x03BC #define BGE_MBX_TX_NIC_PROD8_HI 0x03C0 #define BGE_MBX_TX_NIC_PROD8_LO 0x03C4 #define BGE_MBX_TX_NIC_PROD9_HI 0x03C8 #define BGE_MBX_TX_NIC_PROD9_LO 0x03CC #define BGE_MBX_TX_NIC_PROD10_HI 0x03D0 #define BGE_MBX_TX_NIC_PROD10_LO 0x03D4 #define BGE_MBX_TX_NIC_PROD11_HI 0x03D8 #define BGE_MBX_TX_NIC_PROD11_LO 0x03DC #define BGE_MBX_TX_NIC_PROD12_HI 0x03E0 #define BGE_MBX_TX_NIC_PROD12_LO 0x03E4 #define BGE_MBX_TX_NIC_PROD13_HI 0x03E8 #define BGE_MBX_TX_NIC_PROD13_LO 0x03EC #define BGE_MBX_TX_NIC_PROD14_HI 0x03F0 #define BGE_MBX_TX_NIC_PROD14_LO 0x03F4 #define BGE_MBX_TX_NIC_PROD15_HI 0x03F8 #define BGE_MBX_TX_NIC_PROD15_LO 0x03FC #define BGE_TX_RINGS_MAX 4 #define BGE_TX_RINGS_EXTSSRAM_MAX 16 #define BGE_RX_RINGS_MAX 16 #define BGE_RX_RINGS_MAX_5717 17 /* Ethernet MAC control registers */ #define BGE_MAC_MODE 0x0400 #define BGE_MAC_STS 0x0404 #define BGE_MAC_EVT_ENB 0x0408 #define BGE_MAC_LED_CTL 0x040C #define BGE_MAC_ADDR1_LO 0x0410 #define BGE_MAC_ADDR1_HI 0x0414 #define BGE_MAC_ADDR2_LO 0x0418 #define BGE_MAC_ADDR2_HI 0x041C #define BGE_MAC_ADDR3_LO 0x0420 #define BGE_MAC_ADDR3_HI 0x0424 #define BGE_MAC_ADDR4_LO 0x0428 #define BGE_MAC_ADDR4_HI 0x042C #define BGE_WOL_PATPTR 0x0430 #define BGE_WOL_PATCFG 0x0434 #define BGE_TX_RANDOM_BACKOFF 0x0438 #define 
BGE_RX_MTU 0x043C #define BGE_GBIT_PCS_TEST 0x0440 #define BGE_TX_TBI_AUTONEG 0x0444 #define BGE_RX_TBI_AUTONEG 0x0448 #define BGE_MI_COMM 0x044C #define BGE_MI_STS 0x0450 #define BGE_MI_MODE 0x0454 #define BGE_AUTOPOLL_STS 0x0458 #define BGE_TX_MODE 0x045C #define BGE_TX_STS 0x0460 #define BGE_TX_LENGTHS 0x0464 #define BGE_RX_MODE 0x0468 #define BGE_RX_STS 0x046C #define BGE_MAR0 0x0470 #define BGE_MAR1 0x0474 #define BGE_MAR2 0x0478 #define BGE_MAR3 0x047C #define BGE_RX_BD_RULES_CTL0 0x0480 #define BGE_RX_BD_RULES_MASKVAL0 0x0484 #define BGE_RX_BD_RULES_CTL1 0x0488 #define BGE_RX_BD_RULES_MASKVAL1 0x048C #define BGE_RX_BD_RULES_CTL2 0x0490 #define BGE_RX_BD_RULES_MASKVAL2 0x0494 #define BGE_RX_BD_RULES_CTL3 0x0498 #define BGE_RX_BD_RULES_MASKVAL3 0x049C #define BGE_RX_BD_RULES_CTL4 0x04A0 #define BGE_RX_BD_RULES_MASKVAL4 0x04A4 #define BGE_RX_BD_RULES_CTL5 0x04A8 #define BGE_RX_BD_RULES_MASKVAL5 0x04AC #define BGE_RX_BD_RULES_CTL6 0x04B0 #define BGE_RX_BD_RULES_MASKVAL6 0x04B4 #define BGE_RX_BD_RULES_CTL7 0x04B8 #define BGE_RX_BD_RULES_MASKVAL7 0x04BC #define BGE_RX_BD_RULES_CTL8 0x04C0 #define BGE_RX_BD_RULES_MASKVAL8 0x04C4 #define BGE_RX_BD_RULES_CTL9 0x04C8 #define BGE_RX_BD_RULES_MASKVAL9 0x04CC #define BGE_RX_BD_RULES_CTL10 0x04D0 #define BGE_RX_BD_RULES_MASKVAL10 0x04D4 #define BGE_RX_BD_RULES_CTL11 0x04D8 #define BGE_RX_BD_RULES_MASKVAL11 0x04DC #define BGE_RX_BD_RULES_CTL12 0x04E0 #define BGE_RX_BD_RULES_MASKVAL12 0x04E4 #define BGE_RX_BD_RULES_CTL13 0x04E8 #define BGE_RX_BD_RULES_MASKVAL13 0x04EC #define BGE_RX_BD_RULES_CTL14 0x04F0 #define BGE_RX_BD_RULES_MASKVAL14 0x04F4 #define BGE_RX_BD_RULES_CTL15 0x04F8 #define BGE_RX_BD_RULES_MASKVAL15 0x04FC #define BGE_RX_RULES_CFG 0x0500 #define BGE_MAX_RX_FRAME_LOWAT 0x0504 #define BGE_SERDES_CFG 0x0590 #define BGE_SERDES_STS 0x0594 #define BGE_SGDIG_CFG 0x05B0 #define BGE_SGDIG_STS 0x05B4 #define BGE_TX_MAC_STATS_OCTETS 0x0800 #define BGE_TX_MAC_STATS_RESERVE_0 0x0804 #define BGE_TX_MAC_STATS_COLLS 0x0808 #define BGE_TX_MAC_STATS_XON_SENT 0x080C #define BGE_TX_MAC_STATS_XOFF_SENT 0x0810 #define BGE_TX_MAC_STATS_RESERVE_1 0x0814 #define BGE_TX_MAC_STATS_ERRORS 0x0818 #define BGE_TX_MAC_STATS_SINGLE_COLL 0x081C #define BGE_TX_MAC_STATS_MULTI_COLL 0x0820 #define BGE_TX_MAC_STATS_DEFERRED 0x0824 #define BGE_TX_MAC_STATS_RESERVE_2 0x0828 #define BGE_TX_MAC_STATS_EXCESS_COLL 0x082C #define BGE_TX_MAC_STATS_LATE_COLL 0x0830 #define BGE_TX_MAC_STATS_RESERVE_3 0x0834 #define BGE_TX_MAC_STATS_RESERVE_4 0x0838 #define BGE_TX_MAC_STATS_RESERVE_5 0x083C #define BGE_TX_MAC_STATS_RESERVE_6 0x0840 #define BGE_TX_MAC_STATS_RESERVE_7 0x0844 #define BGE_TX_MAC_STATS_RESERVE_8 0x0848 #define BGE_TX_MAC_STATS_RESERVE_9 0x084C #define BGE_TX_MAC_STATS_RESERVE_10 0x0850 #define BGE_TX_MAC_STATS_RESERVE_11 0x0854 #define BGE_TX_MAC_STATS_RESERVE_12 0x0858 #define BGE_TX_MAC_STATS_RESERVE_13 0x085C #define BGE_TX_MAC_STATS_RESERVE_14 0x0860 #define BGE_TX_MAC_STATS_RESERVE_15 0x0864 #define BGE_TX_MAC_STATS_RESERVE_16 0x0868 #define BGE_TX_MAC_STATS_UCAST 0x086C #define BGE_TX_MAC_STATS_MCAST 0x0870 #define BGE_TX_MAC_STATS_BCAST 0x0874 #define BGE_TX_MAC_STATS_RESERVE_17 0x0878 #define BGE_TX_MAC_STATS_RESERVE_18 0x087C #define BGE_RX_MAC_STATS_OCTESTS 0x0880 #define BGE_RX_MAC_STATS_RESERVE_0 0x0884 #define BGE_RX_MAC_STATS_FRAGMENTS 0x0888 #define BGE_RX_MAC_STATS_UCAST 0x088C #define BGE_RX_MAC_STATS_MCAST 0x0890 #define BGE_RX_MAC_STATS_BCAST 0x0894 #define BGE_RX_MAC_STATS_FCS_ERRORS 0x0898 #define BGE_RX_MAC_STATS_ALGIN_ERRORS 0x089C #define 
BGE_RX_MAC_STATS_XON_RCVD 0x08A0 #define BGE_RX_MAC_STATS_XOFF_RCVD 0x08A4 #define BGE_RX_MAC_STATS_CTRL_RCVD 0x08A8 #define BGE_RX_MAC_STATS_XOFF_ENTERED 0x08AC #define BGE_RX_MAC_STATS_FRAME_TOO_LONG 0x08B0 #define BGE_RX_MAC_STATS_JABBERS 0x08B4 #define BGE_RX_MAC_STATS_UNDERSIZE 0x08B8 /* Ethernet MAC Mode register */ #define BGE_MACMODE_RESET 0x00000001 #define BGE_MACMODE_HALF_DUPLEX 0x00000002 #define BGE_MACMODE_PORTMODE 0x0000000C #define BGE_MACMODE_LOOPBACK 0x00000010 #define BGE_MACMODE_RX_TAGGEDPKT 0x00000080 #define BGE_MACMODE_TX_BURST_ENB 0x00000100 #define BGE_MACMODE_MAX_DEFER 0x00000200 #define BGE_MACMODE_LINK_POLARITY 0x00000400 #define BGE_MACMODE_RX_STATS_ENB 0x00000800 #define BGE_MACMODE_RX_STATS_CLEAR 0x00001000 #define BGE_MACMODE_RX_STATS_FLUSH 0x00002000 #define BGE_MACMODE_TX_STATS_ENB 0x00004000 #define BGE_MACMODE_TX_STATS_CLEAR 0x00008000 #define BGE_MACMODE_TX_STATS_FLUSH 0x00010000 #define BGE_MACMODE_TBI_SEND_CFGS 0x00020000 #define BGE_MACMODE_MAGIC_PKT_ENB 0x00040000 #define BGE_MACMODE_ACPI_PWRON_ENB 0x00080000 #define BGE_MACMODE_MIP_ENB 0x00100000 #define BGE_MACMODE_TXDMA_ENB 0x00200000 #define BGE_MACMODE_RXDMA_ENB 0x00400000 #define BGE_MACMODE_FRMHDR_DMA_ENB 0x00800000 #define BGE_MACMODE_APE_RX_EN 0x08000000 #define BGE_MACMODE_APE_TX_EN 0x10000000 #define BGE_PORTMODE_NONE 0x00000000 #define BGE_PORTMODE_MII 0x00000004 #define BGE_PORTMODE_GMII 0x00000008 #define BGE_PORTMODE_TBI 0x0000000C /* MAC Status register */ #define BGE_MACSTAT_TBI_PCS_SYNCHED 0x00000001 #define BGE_MACSTAT_TBI_SIGNAL_DETECT 0x00000002 #define BGE_MACSTAT_RX_CFG 0x00000004 #define BGE_MACSTAT_CFG_CHANGED 0x00000008 #define BGE_MACSTAT_SYNC_CHANGED 0x00000010 #define BGE_MACSTAT_PORT_DECODE_ERROR 0x00000400 #define BGE_MACSTAT_LINK_CHANGED 0x00001000 #define BGE_MACSTAT_MI_COMPLETE 0x00400000 #define BGE_MACSTAT_MI_INTERRUPT 0x00800000 #define BGE_MACSTAT_AUTOPOLL_ERROR 0x01000000 #define BGE_MACSTAT_ODI_ERROR 0x02000000 #define BGE_MACSTAT_RXSTAT_OFLOW 0x04000000 #define BGE_MACSTAT_TXSTAT_OFLOW 0x08000000 /* MAC Event Enable Register */ #define BGE_EVTENB_PORT_DECODE_ERROR 0x00000400 #define BGE_EVTENB_LINK_CHANGED 0x00001000 #define BGE_EVTENB_MI_COMPLETE 0x00400000 #define BGE_EVTENB_MI_INTERRUPT 0x00800000 #define BGE_EVTENB_AUTOPOLL_ERROR 0x01000000 #define BGE_EVTENB_ODI_ERROR 0x02000000 #define BGE_EVTENB_RXSTAT_OFLOW 0x04000000 #define BGE_EVTENB_TXSTAT_OFLOW 0x08000000 /* LED Control Register */ #define BGE_LEDCTL_LINKLED_OVERRIDE 0x00000001 #define BGE_LEDCTL_1000MBPS_LED 0x00000002 #define BGE_LEDCTL_100MBPS_LED 0x00000004 #define BGE_LEDCTL_10MBPS_LED 0x00000008 #define BGE_LEDCTL_TRAFLED_OVERRIDE 0x00000010 #define BGE_LEDCTL_TRAFLED_BLINK 0x00000020 #define BGE_LEDCTL_TRAFLED_BLINK_2 0x00000040 #define BGE_LEDCTL_1000MBPS_STS 0x00000080 #define BGE_LEDCTL_100MBPS_STS 0x00000100 #define BGE_LEDCTL_10MBPS_STS 0x00000200 #define BGE_LEDCTL_TRAFLED_STS 0x00000400 #define BGE_LEDCTL_BLINKPERIOD 0x7FF80000 #define BGE_LEDCTL_BLINKPERIOD_OVERRIDE 0x80000000 /* TX backoff seed register */ #define BGE_TX_BACKOFF_SEED_MASK 0x3FF /* Autopoll status register */ #define BGE_AUTOPOLLSTS_ERROR 0x00000001 /* Transmit MAC mode register */ #define BGE_TXMODE_RESET 0x00000001 #define BGE_TXMODE_ENABLE 0x00000002 #define BGE_TXMODE_FLOWCTL_ENABLE 0x00000010 #define BGE_TXMODE_BIGBACKOFF_ENABLE 0x00000020 #define BGE_TXMODE_LONGPAUSE_ENABLE 0x00000040 #define BGE_TXMODE_MBUF_LOCKUP_FIX 0x00000100 #define BGE_TXMODE_JMB_FRM_LEN 0x00400000 #define BGE_TXMODE_CNT_DN_MODE 
0x00800000 /* Transmit MAC status register */ #define BGE_TXSTAT_RX_XOFFED 0x00000001 #define BGE_TXSTAT_SENT_XOFF 0x00000002 #define BGE_TXSTAT_SENT_XON 0x00000004 #define BGE_TXSTAT_LINK_UP 0x00000008 #define BGE_TXSTAT_ODI_UFLOW 0x00000010 #define BGE_TXSTAT_ODI_OFLOW 0x00000020 /* Transmit MAC lengths register */ #define BGE_TXLEN_SLOTTIME 0x000000FF #define BGE_TXLEN_IPG 0x00000F00 #define BGE_TXLEN_CRS 0x00003000 #define BGE_TXLEN_JMB_FRM_LEN_MSK 0x00FF0000 #define BGE_TXLEN_CNT_DN_VAL_MSK 0xFF000000 /* Receive MAC mode register */ #define BGE_RXMODE_RESET 0x00000001 #define BGE_RXMODE_ENABLE 0x00000002 #define BGE_RXMODE_FLOWCTL_ENABLE 0x00000004 #define BGE_RXMODE_RX_GIANTS 0x00000020 #define BGE_RXMODE_RX_RUNTS 0x00000040 #define BGE_RXMODE_8022_LENCHECK 0x00000080 #define BGE_RXMODE_RX_PROMISC 0x00000100 #define BGE_RXMODE_RX_NO_CRC_CHECK 0x00000200 #define BGE_RXMODE_RX_KEEP_VLAN_DIAG 0x00000400 #define BGE_RXMODE_IPV6_ENABLE 0x01000000 #define BGE_RXMODE_IPV4_FRAG_FIX 0x02000000 /* Receive MAC status register */ #define BGE_RXSTAT_REMOTE_XOFFED 0x00000001 #define BGE_RXSTAT_RCVD_XOFF 0x00000002 #define BGE_RXSTAT_RCVD_XON 0x00000004 /* Receive Rules Control register */ #define BGE_RXRULECTL_OFFSET 0x000000FF #define BGE_RXRULECTL_CLASS 0x00001F00 #define BGE_RXRULECTL_HDRTYPE 0x0000E000 #define BGE_RXRULECTL_COMPARE_OP 0x00030000 #define BGE_RXRULECTL_MAP 0x01000000 #define BGE_RXRULECTL_DISCARD 0x02000000 #define BGE_RXRULECTL_MASK 0x04000000 #define BGE_RXRULECTL_ACTIVATE_PROC3 0x08000000 #define BGE_RXRULECTL_ACTIVATE_PROC2 0x10000000 #define BGE_RXRULECTL_ACTIVATE_PROC1 0x20000000 #define BGE_RXRULECTL_ANDWITHNEXT 0x40000000 /* Receive Rules Mask register */ #define BGE_RXRULEMASK_VALUE 0x0000FFFF #define BGE_RXRULEMASK_MASKVAL 0xFFFF0000 /* SERDES configuration register */ #define BGE_SERDESCFG_RXR 0x00000007 /* phase interpolator */ #define BGE_SERDESCFG_RXG 0x00000018 /* rx gain setting */ #define BGE_SERDESCFG_RXEDGESEL 0x00000040 /* rising/falling egde */ #define BGE_SERDESCFG_TX_BIAS 0x00000380 /* TXDAC bias setting */ #define BGE_SERDESCFG_IBMAX 0x00000400 /* bias current +25% */ #define BGE_SERDESCFG_IBMIN 0x00000800 /* bias current -25% */ #define BGE_SERDESCFG_TXMODE 0x00001000 #define BGE_SERDESCFG_TXEDGESEL 0x00002000 /* rising/falling edge */ #define BGE_SERDESCFG_MODE 0x00004000 /* TXCP/TXCN disabled */ #define BGE_SERDESCFG_PLLTEST 0x00008000 /* PLL test mode */ #define BGE_SERDESCFG_CDET 0x00010000 /* comma detect enable */ #define BGE_SERDESCFG_TBILOOP 0x00020000 /* local loopback */ #define BGE_SERDESCFG_REMLOOP 0x00040000 /* remote loopback */ #define BGE_SERDESCFG_INVPHASE 0x00080000 /* Reverse 125Mhz clock */ #define BGE_SERDESCFG_12REGCTL 0x00300000 /* 1.2v regulator ctl */ #define BGE_SERDESCFG_REGCTL 0x00C00000 /* regulator ctl (2.5v) */ /* SERDES status register */ #define BGE_SERDESSTS_RXSTAT 0x0000000F /* receive status bits */ #define BGE_SERDESSTS_CDET 0x00000010 /* comma code detected */ /* SGDIG config (not documented) */ #define BGE_SGDIGCFG_PAUSE_CAP 0x00000800 #define BGE_SGDIGCFG_ASYM_PAUSE 0x00001000 #define BGE_SGDIGCFG_SEND 0x40000000 #define BGE_SGDIGCFG_AUTO 0x80000000 /* SGDIG status (not documented) */ #define BGE_SGDIGSTS_DONE 0x00000002 #define BGE_SGDIGSTS_IS_SERDES 0x00000100 #define BGE_SGDIGSTS_PAUSE_CAP 0x00080000 #define BGE_SGDIGSTS_ASYM_PAUSE 0x00100000 /* MI communication register */ #define BGE_MICOMM_DATA 0x0000FFFF #define BGE_MICOMM_REG 0x001F0000 #define BGE_MICOMM_PHY 0x03E00000 #define BGE_MICOMM_CMD 0x0C000000 
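/*
 * Illustrative sketch only (not part of this header): reading a PHY
 * register through the MI communication register, using the field and
 * command macros defined below.  "sc", "phy" and "reg" are hypothetical
 * driver arguments; DELAY() is the usual kernel busy-wait, and a bounded
 * retry count is advisable in real code.
 *
 *	uint32_t val;
 *
 *	CSR_WRITE_4(sc, BGE_MI_COMM,
 *	    BGE_MICMD_READ | BGE_MIPHY(phy) | BGE_MIREG(reg));
 *	do {
 *		DELAY(10);
 *		val = CSR_READ_4(sc, BGE_MI_COMM);
 *	} while (val & BGE_MICOMM_BUSY);
 *	if (val & BGE_MICOMM_READFAIL)
 *		return (0);
 *	return (val & BGE_MICOMM_DATA);
 */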
#define BGE_MICOMM_READFAIL 0x10000000 #define BGE_MICOMM_BUSY 0x20000000 #define BGE_MIREG(x) ((x & 0x1F) << 16) #define BGE_MIPHY(x) ((x & 0x1F) << 21) #define BGE_MICMD_WRITE 0x04000000 #define BGE_MICMD_READ 0x08000000 /* MI status register */ #define BGE_MISTS_LINK 0x00000001 #define BGE_MISTS_10MBPS 0x00000002 #define BGE_MIMODE_CLK_10MHZ 0x00000001 #define BGE_MIMODE_SHORTPREAMBLE 0x00000002 #define BGE_MIMODE_AUTOPOLL 0x00000010 #define BGE_MIMODE_CLKCNT 0x001F0000 #define BGE_MIMODE_500KHZ_CONST 0x00008000 #define BGE_MIMODE_BASE 0x000C0000 /* * Send data initiator control registers. */ #define BGE_SDI_MODE 0x0C00 #define BGE_SDI_STATUS 0x0C04 #define BGE_SDI_STATS_CTL 0x0C08 #define BGE_SDI_STATS_ENABLE_MASK 0x0C0C #define BGE_SDI_STATS_INCREMENT_MASK 0x0C10 #define BGE_ISO_PKT_TX 0x0C20 #define BGE_LOCSTATS_COS0 0x0C80 #define BGE_LOCSTATS_COS1 0x0C84 #define BGE_LOCSTATS_COS2 0x0C88 #define BGE_LOCSTATS_COS3 0x0C8C #define BGE_LOCSTATS_COS4 0x0C90 #define BGE_LOCSTATS_COS5 0x0C84 #define BGE_LOCSTATS_COS6 0x0C98 #define BGE_LOCSTATS_COS7 0x0C9C #define BGE_LOCSTATS_COS8 0x0CA0 #define BGE_LOCSTATS_COS9 0x0CA4 #define BGE_LOCSTATS_COS10 0x0CA8 #define BGE_LOCSTATS_COS11 0x0CAC #define BGE_LOCSTATS_COS12 0x0CB0 #define BGE_LOCSTATS_COS13 0x0CB4 #define BGE_LOCSTATS_COS14 0x0CB8 #define BGE_LOCSTATS_COS15 0x0CBC #define BGE_LOCSTATS_DMA_RQ_FULL 0x0CC0 #define BGE_LOCSTATS_DMA_HIPRIO_RQ_FULL 0x0CC4 #define BGE_LOCSTATS_SDC_QUEUE_FULL 0x0CC8 #define BGE_LOCSTATS_NIC_SENDPROD_SET 0x0CCC #define BGE_LOCSTATS_STATS_UPDATED 0x0CD0 #define BGE_LOCSTATS_IRQS 0x0CD4 #define BGE_LOCSTATS_AVOIDED_IRQS 0x0CD8 #define BGE_LOCSTATS_TX_THRESH_HIT 0x0CDC /* Send Data Initiator mode register */ #define BGE_SDIMODE_RESET 0x00000001 #define BGE_SDIMODE_ENABLE 0x00000002 #define BGE_SDIMODE_STATS_OFLOW_ATTN 0x00000004 #define BGE_SDIMODE_HW_LSO_PRE_DMA 0x00000008 /* Send Data Initiator stats register */ #define BGE_SDISTAT_STATS_OFLOW_ATTN 0x00000004 /* Send Data Initiator stats control register */ #define BGE_SDISTATSCTL_ENABLE 0x00000001 #define BGE_SDISTATSCTL_FASTER 0x00000002 #define BGE_SDISTATSCTL_CLEAR 0x00000004 #define BGE_SDISTATSCTL_FORCEFLUSH 0x00000008 #define BGE_SDISTATSCTL_FORCEZERO 0x00000010 /* * Send Data Completion Control registers */ #define BGE_SDC_MODE 0x1000 #define BGE_SDC_STATUS 0x1004 /* Send Data completion mode register */ #define BGE_SDCMODE_RESET 0x00000001 #define BGE_SDCMODE_ENABLE 0x00000002 #define BGE_SDCMODE_ATTN 0x00000004 #define BGE_SDCMODE_CDELAY 0x00000010 /* Send Data completion status register */ #define BGE_SDCSTAT_ATTN 0x00000004 /* * Send BD Ring Selector Control registers */ #define BGE_SRS_MODE 0x1400 #define BGE_SRS_STATUS 0x1404 #define BGE_SRS_HWDIAG 0x1408 #define BGE_SRS_LOC_NIC_CONS0 0x1440 #define BGE_SRS_LOC_NIC_CONS1 0x1444 #define BGE_SRS_LOC_NIC_CONS2 0x1448 #define BGE_SRS_LOC_NIC_CONS3 0x144C #define BGE_SRS_LOC_NIC_CONS4 0x1450 #define BGE_SRS_LOC_NIC_CONS5 0x1454 #define BGE_SRS_LOC_NIC_CONS6 0x1458 #define BGE_SRS_LOC_NIC_CONS7 0x145C #define BGE_SRS_LOC_NIC_CONS8 0x1460 #define BGE_SRS_LOC_NIC_CONS9 0x1464 #define BGE_SRS_LOC_NIC_CONS10 0x1468 #define BGE_SRS_LOC_NIC_CONS11 0x146C #define BGE_SRS_LOC_NIC_CONS12 0x1470 #define BGE_SRS_LOC_NIC_CONS13 0x1474 #define BGE_SRS_LOC_NIC_CONS14 0x1478 #define BGE_SRS_LOC_NIC_CONS15 0x147C /* Send BD Ring Selector Mode register */ #define BGE_SRSMODE_RESET 0x00000001 #define BGE_SRSMODE_ENABLE 0x00000002 #define BGE_SRSMODE_ATTN 0x00000004 /* Send BD Ring Selector Status register */ #define 
BGE_SRSSTAT_ERROR 0x00000004 /* Send BD Ring Selector HW Diagnostics register */ #define BGE_SRSHWDIAG_STATE 0x0000000F #define BGE_SRSHWDIAG_CURRINGNUM 0x000000F0 #define BGE_SRSHWDIAG_STAGEDRINGNUM 0x00000F00 #define BGE_SRSHWDIAG_RINGNUM_IN_MBX 0x0000F000 /* * Send BD Initiator Selector Control registers */ #define BGE_SBDI_MODE 0x1800 #define BGE_SBDI_STATUS 0x1804 #define BGE_SBDI_LOC_NIC_PROD0 0x1808 #define BGE_SBDI_LOC_NIC_PROD1 0x180C #define BGE_SBDI_LOC_NIC_PROD2 0x1810 #define BGE_SBDI_LOC_NIC_PROD3 0x1814 #define BGE_SBDI_LOC_NIC_PROD4 0x1818 #define BGE_SBDI_LOC_NIC_PROD5 0x181C #define BGE_SBDI_LOC_NIC_PROD6 0x1820 #define BGE_SBDI_LOC_NIC_PROD7 0x1824 #define BGE_SBDI_LOC_NIC_PROD8 0x1828 #define BGE_SBDI_LOC_NIC_PROD9 0x182C #define BGE_SBDI_LOC_NIC_PROD10 0x1830 #define BGE_SBDI_LOC_NIC_PROD11 0x1834 #define BGE_SBDI_LOC_NIC_PROD12 0x1838 #define BGE_SBDI_LOC_NIC_PROD13 0x183C #define BGE_SBDI_LOC_NIC_PROD14 0x1840 #define BGE_SBDI_LOC_NIC_PROD15 0x1844 /* Send BD Initiator Mode register */ #define BGE_SBDIMODE_RESET 0x00000001 #define BGE_SBDIMODE_ENABLE 0x00000002 #define BGE_SBDIMODE_ATTN 0x00000004 /* Send BD Initiator Status register */ #define BGE_SBDISTAT_ERROR 0x00000004 /* * Send BD Completion Control registers */ #define BGE_SBDC_MODE 0x1C00 #define BGE_SBDC_STATUS 0x1C04 /* Send BD Completion Control Mode register */ #define BGE_SBDCMODE_RESET 0x00000001 #define BGE_SBDCMODE_ENABLE 0x00000002 #define BGE_SBDCMODE_ATTN 0x00000004 /* Send BD Completion Control Status register */ #define BGE_SBDCSTAT_ATTN 0x00000004 /* * Receive List Placement Control registers */ #define BGE_RXLP_MODE 0x2000 #define BGE_RXLP_STATUS 0x2004 #define BGE_RXLP_SEL_LIST_LOCK 0x2008 #define BGE_RXLP_SEL_NON_EMPTY_BITS 0x200C #define BGE_RXLP_CFG 0x2010 #define BGE_RXLP_STATS_CTL 0x2014 #define BGE_RXLP_STATS_ENABLE_MASK 0x2018 #define BGE_RXLP_STATS_INCREMENT_MASK 0x201C #define BGE_RXLP_HEAD0 0x2100 #define BGE_RXLP_TAIL0 0x2104 #define BGE_RXLP_COUNT0 0x2108 #define BGE_RXLP_HEAD1 0x2110 #define BGE_RXLP_TAIL1 0x2114 #define BGE_RXLP_COUNT1 0x2118 #define BGE_RXLP_HEAD2 0x2120 #define BGE_RXLP_TAIL2 0x2124 #define BGE_RXLP_COUNT2 0x2128 #define BGE_RXLP_HEAD3 0x2130 #define BGE_RXLP_TAIL3 0x2134 #define BGE_RXLP_COUNT3 0x2138 #define BGE_RXLP_HEAD4 0x2140 #define BGE_RXLP_TAIL4 0x2144 #define BGE_RXLP_COUNT4 0x2148 #define BGE_RXLP_HEAD5 0x2150 #define BGE_RXLP_TAIL5 0x2154 #define BGE_RXLP_COUNT5 0x2158 #define BGE_RXLP_HEAD6 0x2160 #define BGE_RXLP_TAIL6 0x2164 #define BGE_RXLP_COUNT6 0x2168 #define BGE_RXLP_HEAD7 0x2170 #define BGE_RXLP_TAIL7 0x2174 #define BGE_RXLP_COUNT7 0x2178 #define BGE_RXLP_HEAD8 0x2180 #define BGE_RXLP_TAIL8 0x2184 #define BGE_RXLP_COUNT8 0x2188 #define BGE_RXLP_HEAD9 0x2190 #define BGE_RXLP_TAIL9 0x2194 #define BGE_RXLP_COUNT9 0x2198 #define BGE_RXLP_HEAD10 0x21A0 #define BGE_RXLP_TAIL10 0x21A4 #define BGE_RXLP_COUNT10 0x21A8 #define BGE_RXLP_HEAD11 0x21B0 #define BGE_RXLP_TAIL11 0x21B4 #define BGE_RXLP_COUNT11 0x21B8 #define BGE_RXLP_HEAD12 0x21C0 #define BGE_RXLP_TAIL12 0x21C4 #define BGE_RXLP_COUNT12 0x21C8 #define BGE_RXLP_HEAD13 0x21D0 #define BGE_RXLP_TAIL13 0x21D4 #define BGE_RXLP_COUNT13 0x21D8 #define BGE_RXLP_HEAD14 0x21E0 #define BGE_RXLP_TAIL14 0x21E4 #define BGE_RXLP_COUNT14 0x21E8 #define BGE_RXLP_HEAD15 0x21F0 #define BGE_RXLP_TAIL15 0x21F4 #define BGE_RXLP_COUNT15 0x21F8 #define BGE_RXLP_LOCSTAT_COS0 0x2200 #define BGE_RXLP_LOCSTAT_COS1 0x2204 #define BGE_RXLP_LOCSTAT_COS2 0x2208 #define BGE_RXLP_LOCSTAT_COS3 0x220C #define 
BGE_RXLP_LOCSTAT_COS4 0x2210 #define BGE_RXLP_LOCSTAT_COS5 0x2214 #define BGE_RXLP_LOCSTAT_COS6 0x2218 #define BGE_RXLP_LOCSTAT_COS7 0x221C #define BGE_RXLP_LOCSTAT_COS8 0x2220 #define BGE_RXLP_LOCSTAT_COS9 0x2224 #define BGE_RXLP_LOCSTAT_COS10 0x2228 #define BGE_RXLP_LOCSTAT_COS11 0x222C #define BGE_RXLP_LOCSTAT_COS12 0x2230 #define BGE_RXLP_LOCSTAT_COS13 0x2234 #define BGE_RXLP_LOCSTAT_COS14 0x2238 #define BGE_RXLP_LOCSTAT_COS15 0x223C #define BGE_RXLP_LOCSTAT_FILTDROP 0x2240 #define BGE_RXLP_LOCSTAT_DMA_WRQ_FULL 0x2244 #define BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL 0x2248 #define BGE_RXLP_LOCSTAT_OUT_OF_BDS 0x224C #define BGE_RXLP_LOCSTAT_IFIN_DROPS 0x2250 #define BGE_RXLP_LOCSTAT_IFIN_ERRORS 0x2254 #define BGE_RXLP_LOCSTAT_RXTHRESH_HIT 0x2258 /* Receive List Placement mode register */ #define BGE_RXLPMODE_RESET 0x00000001 #define BGE_RXLPMODE_ENABLE 0x00000002 #define BGE_RXLPMODE_CLASS0_ATTN 0x00000004 #define BGE_RXLPMODE_MAPOUTRANGE_ATTN 0x00000008 #define BGE_RXLPMODE_STATSOFLOW_ATTN 0x00000010 /* Receive List Placement Status register */ #define BGE_RXLPSTAT_CLASS0_ATTN 0x00000004 #define BGE_RXLPSTAT_MAPOUTRANGE_ATTN 0x00000008 #define BGE_RXLPSTAT_STATSOFLOW_ATTN 0x00000010 /* * Receive Data and Receive BD Initiator Control Registers */ #define BGE_RDBDI_MODE 0x2400 #define BGE_RDBDI_STATUS 0x2404 #define BGE_RX_JUMBO_RCB_HADDR_HI 0x2440 #define BGE_RX_JUMBO_RCB_HADDR_LO 0x2444 #define BGE_RX_JUMBO_RCB_MAXLEN_FLAGS 0x2448 #define BGE_RX_JUMBO_RCB_NICADDR 0x244C #define BGE_RX_STD_RCB_HADDR_HI 0x2450 #define BGE_RX_STD_RCB_HADDR_LO 0x2454 #define BGE_RX_STD_RCB_MAXLEN_FLAGS 0x2458 #define BGE_RX_STD_RCB_NICADDR 0x245C #define BGE_RX_MINI_RCB_HADDR_HI 0x2460 #define BGE_RX_MINI_RCB_HADDR_LO 0x2464 #define BGE_RX_MINI_RCB_MAXLEN_FLAGS 0x2468 #define BGE_RX_MINI_RCB_NICADDR 0x246C #define BGE_RDBDI_JUMBO_RX_CONS 0x2470 #define BGE_RDBDI_STD_RX_CONS 0x2474 #define BGE_RDBDI_MINI_RX_CONS 0x2478 #define BGE_RDBDI_RETURN_PROD0 0x2480 #define BGE_RDBDI_RETURN_PROD1 0x2484 #define BGE_RDBDI_RETURN_PROD2 0x2488 #define BGE_RDBDI_RETURN_PROD3 0x248C #define BGE_RDBDI_RETURN_PROD4 0x2490 #define BGE_RDBDI_RETURN_PROD5 0x2494 #define BGE_RDBDI_RETURN_PROD6 0x2498 #define BGE_RDBDI_RETURN_PROD7 0x249C #define BGE_RDBDI_RETURN_PROD8 0x24A0 #define BGE_RDBDI_RETURN_PROD9 0x24A4 #define BGE_RDBDI_RETURN_PROD10 0x24A8 #define BGE_RDBDI_RETURN_PROD11 0x24AC #define BGE_RDBDI_RETURN_PROD12 0x24B0 #define BGE_RDBDI_RETURN_PROD13 0x24B4 #define BGE_RDBDI_RETURN_PROD14 0x24B8 #define BGE_RDBDI_RETURN_PROD15 0x24BC #define BGE_RDBDI_HWDIAG 0x24C0 /* Receive Data and Receive BD Initiator Mode register */ #define BGE_RDBDIMODE_RESET 0x00000001 #define BGE_RDBDIMODE_ENABLE 0x00000002 #define BGE_RDBDIMODE_JUMBO_ATTN 0x00000004 #define BGE_RDBDIMODE_GIANT_ATTN 0x00000008 #define BGE_RDBDIMODE_BADRINGSZ_ATTN 0x00000010 /* Receive Data and Receive BD Initiator Status register */ #define BGE_RDBDISTAT_JUMBO_ATTN 0x00000004 #define BGE_RDBDISTAT_GIANT_ATTN 0x00000008 #define BGE_RDBDISTAT_BADRINGSZ_ATTN 0x00000010 /* * Receive Data Completion Control registers */ #define BGE_RDC_MODE 0x2800 /* Receive Data Completion Mode register */ #define BGE_RDCMODE_RESET 0x00000001 #define BGE_RDCMODE_ENABLE 0x00000002 #define BGE_RDCMODE_ATTN 0x00000004 /* * Receive BD Initiator Control registers */ #define BGE_RBDI_MODE 0x2C00 #define BGE_RBDI_STATUS 0x2C04 #define BGE_RBDI_NIC_JUMBO_BD_PROD 0x2C08 #define BGE_RBDI_NIC_STD_BD_PROD 0x2C0C #define BGE_RBDI_NIC_MINI_BD_PROD 0x2C10 #define BGE_RBDI_MINI_REPL_THRESH 0x2C14 #define 
BGE_RBDI_STD_REPL_THRESH 0x2C18 #define BGE_RBDI_JUMBO_REPL_THRESH 0x2C1C #define BGE_STD_REPLENISH_LWM 0x2D00 #define BGE_JMB_REPLENISH_LWM 0x2D04 /* Receive BD Initiator Mode register */ #define BGE_RBDIMODE_RESET 0x00000001 #define BGE_RBDIMODE_ENABLE 0x00000002 #define BGE_RBDIMODE_ATTN 0x00000004 /* Receive BD Initiator Status register */ #define BGE_RBDISTAT_ATTN 0x00000004 /* * Receive BD Completion Control registers */ #define BGE_RBDC_MODE 0x3000 #define BGE_RBDC_STATUS 0x3004 #define BGE_RBDC_JUMBO_BD_PROD 0x3008 #define BGE_RBDC_STD_BD_PROD 0x300C #define BGE_RBDC_MINI_BD_PROD 0x3010 /* Receive BD completion mode register */ #define BGE_RBDCMODE_RESET 0x00000001 #define BGE_RBDCMODE_ENABLE 0x00000002 #define BGE_RBDCMODE_ATTN 0x00000004 /* Receive BD completion status register */ #define BGE_RBDCSTAT_ERROR 0x00000004 /* * Receive List Selector Control registers */ #define BGE_RXLS_MODE 0x3400 #define BGE_RXLS_STATUS 0x3404 /* Receive List Selector Mode register */ #define BGE_RXLSMODE_RESET 0x00000001 #define BGE_RXLSMODE_ENABLE 0x00000002 #define BGE_RXLSMODE_ATTN 0x00000004 /* Receive List Selector Status register */ #define BGE_RXLSSTAT_ERROR 0x00000004 #define BGE_CPMU_CTRL 0x3600 #define BGE_CPMU_LSPD_10MB_CLK 0x3604 #define BGE_CPMU_LSPD_1000MB_CLK 0x360C #define BGE_CPMU_LNK_AWARE_PWRMD 0x3610 #define BGE_CPMU_HST_ACC 0x361C #define BGE_CPMU_CLCK_ORIDE 0x3624 #define BGE_CPMU_CLCK_STAT 0x3630 #define BGE_CPMU_MUTEX_REQ 0x365C #define BGE_CPMU_MUTEX_GNT 0x3660 #define BGE_CPMU_PHY_STRAP 0x3664 #define BGE_CPMU_PADRNG_CTL 0x3668 /* Central Power Management Unit (CPMU) register */ #define BGE_CPMU_CTRL_LINK_IDLE_MODE 0x00000200 #define BGE_CPMU_CTRL_LINK_AWARE_MODE 0x00000400 #define BGE_CPMU_CTRL_LINK_SPEED_MODE 0x00004000 #define BGE_CPMU_CTRL_GPHY_10MB_RXONLY 0x00010000 /* Link Speed 10MB/No Link Power Mode Clock Policy register */ #define BGE_CPMU_LSPD_10MB_MACCLK_MASK 0x001F0000 #define BGE_CPMU_LSPD_10MB_MACCLK_6_25 0x00130000 /* Link Speed 1000MB Power Mode Clock Policy register */ #define BGE_CPMU_LSPD_1000MB_MACCLK_62_5 0x00000000 #define BGE_CPMU_LSPD_1000MB_MACCLK_12_5 0x00110000 #define BGE_CPMU_LSPD_1000MB_MACCLK_MASK 0x001F0000 /* Link Aware Power Mode Clock Policy register */ #define BGE_CPMU_LNK_AWARE_MACCLK_MASK 0x001F0000 #define BGE_CPMU_LNK_AWARE_MACCLK_6_25 0x00130000 #define BGE_CPMU_HST_ACC_MACCLK_MASK 0x001F0000 #define BGE_CPMU_HST_ACC_MACCLK_6_25 0x00130000 /* Clock Speed Override Policy register */ #define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000 /* CPMU Clock Status register */ #define BGE_CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001F0000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_12_5 0x00110000 #define BGE_CPMU_CLCK_STAT_MAC_CLCK_6_25 0x00130000 /* CPMU Mutex Request register */ #define BGE_CPMU_MUTEX_REQ_DRIVER 0x00001000 #define BGE_CPMU_MUTEX_GNT_DRIVER 0x00001000 /* CPMU GPHY Strap register */ #define BGE_CPMU_PHY_STRAP_IS_SERDES 0x00000020 /* CPMU Padring Control register */ #define BGE_CPMU_PADRNG_CTL_RDIV2 0x00040000 /* * Mbuf Cluster Free registers (has nothing to do with BSD mbufs) */ #define BGE_MBCF_MODE 0x3800 #define BGE_MBCF_STATUS 0x3804 /* Mbuf Cluster Free mode register */ #define BGE_MBCFMODE_RESET 0x00000001 #define BGE_MBCFMODE_ENABLE 0x00000002 #define BGE_MBCFMODE_ATTN 0x00000004 /* Mbuf Cluster Free status register */ #define BGE_MBCFSTAT_ERROR 0x00000004 /* * Host Coalescing Control registers */ #define BGE_HCC_MODE 0x3C00 #define BGE_HCC_STATUS 0x3C04 #define BGE_HCC_RX_COAL_TICKS 0x3C08 
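/*
 * Illustrative sketch only (not part of this header): a minimal host
 * coalescing setup using the BGE_HCC_* registers defined here and below.
 * The tick and BD counts are arbitrary example values; "sc" is
 * hypothetical driver state.
 *
 *	CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, 150);
 *	CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, 150);
 *	CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, 10);
 *	CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, 10);
 *	CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE);
 */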
#define BGE_HCC_TX_COAL_TICKS 0x3C0C #define BGE_HCC_RX_MAX_COAL_BDS 0x3C10 #define BGE_HCC_TX_MAX_COAL_BDS 0x3C14 #define BGE_HCC_RX_COAL_TICKS_INT 0x3C18 /* ticks during interrupt */ #define BGE_HCC_TX_COAL_TICKS_INT 0x3C1C /* ticks during interrupt */ #define BGE_HCC_RX_MAX_COAL_BDS_INT 0x3C20 /* BDs during interrupt */ #define BGE_HCC_TX_MAX_COAL_BDS_INT 0x3C24 /* BDs during interrupt */ #define BGE_HCC_STATS_TICKS 0x3C28 #define BGE_HCC_STATS_ADDR_HI 0x3C30 #define BGE_HCC_STATS_ADDR_LO 0x3C34 #define BGE_HCC_STATUSBLK_ADDR_HI 0x3C38 #define BGE_HCC_STATUSBLK_ADDR_LO 0x3C3C #define BGE_HCC_STATS_BASEADDR 0x3C40 /* address in NIC memory */ #define BGE_HCC_STATUSBLK_BASEADDR 0x3C44 /* address in NIC memory */ #define BGE_FLOW_ATTN 0x3C48 #define BGE_HCC_JUMBO_BD_CONS 0x3C50 #define BGE_HCC_STD_BD_CONS 0x3C54 #define BGE_HCC_MINI_BD_CONS 0x3C58 #define BGE_HCC_RX_RETURN_PROD0 0x3C80 #define BGE_HCC_RX_RETURN_PROD1 0x3C84 #define BGE_HCC_RX_RETURN_PROD2 0x3C88 #define BGE_HCC_RX_RETURN_PROD3 0x3C8C #define BGE_HCC_RX_RETURN_PROD4 0x3C90 #define BGE_HCC_RX_RETURN_PROD5 0x3C94 #define BGE_HCC_RX_RETURN_PROD6 0x3C98 #define BGE_HCC_RX_RETURN_PROD7 0x3C9C #define BGE_HCC_RX_RETURN_PROD8 0x3CA0 #define BGE_HCC_RX_RETURN_PROD9 0x3CA4 #define BGE_HCC_RX_RETURN_PROD10 0x3CA8 #define BGE_HCC_RX_RETURN_PROD11 0x3CAC #define BGE_HCC_RX_RETURN_PROD12 0x3CB0 #define BGE_HCC_RX_RETURN_PROD13 0x3CB4 #define BGE_HCC_RX_RETURN_PROD14 0x3CB8 #define BGE_HCC_RX_RETURN_PROD15 0x3CBC #define BGE_HCC_TX_BD_CONS0 0x3CC0 #define BGE_HCC_TX_BD_CONS1 0x3CC4 #define BGE_HCC_TX_BD_CONS2 0x3CC8 #define BGE_HCC_TX_BD_CONS3 0x3CCC #define BGE_HCC_TX_BD_CONS4 0x3CD0 #define BGE_HCC_TX_BD_CONS5 0x3CD4 #define BGE_HCC_TX_BD_CONS6 0x3CD8 #define BGE_HCC_TX_BD_CONS7 0x3CDC #define BGE_HCC_TX_BD_CONS8 0x3CE0 #define BGE_HCC_TX_BD_CONS9 0x3CE4 #define BGE_HCC_TX_BD_CONS10 0x3CE8 #define BGE_HCC_TX_BD_CONS11 0x3CEC #define BGE_HCC_TX_BD_CONS12 0x3CF0 #define BGE_HCC_TX_BD_CONS13 0x3CF4 #define BGE_HCC_TX_BD_CONS14 0x3CF8 #define BGE_HCC_TX_BD_CONS15 0x3CFC /* Host coalescing mode register */ #define BGE_HCCMODE_RESET 0x00000001 #define BGE_HCCMODE_ENABLE 0x00000002 #define BGE_HCCMODE_ATTN 0x00000004 #define BGE_HCCMODE_COAL_NOW 0x00000008 #define BGE_HCCMODE_MSI_BITS 0x00000070 #define BGE_HCCMODE_STATBLK_SIZE 0x00000180 #define BGE_STATBLKSZ_FULL 0x00000000 #define BGE_STATBLKSZ_64BYTE 0x00000080 #define BGE_STATBLKSZ_32BYTE 0x00000100 /* Host coalescing status register */ #define BGE_HCCSTAT_ERROR 0x00000004 /* Flow attention register */ #define BGE_FLOWATTN_MB_LOWAT 0x00000040 #define BGE_FLOWATTN_MEMARB 0x00000080 #define BGE_FLOWATTN_HOSTCOAL 0x00008000 #define BGE_FLOWATTN_DMADONE_DISCARD 0x00010000 #define BGE_FLOWATTN_RCB_INVAL 0x00020000 #define BGE_FLOWATTN_RXDATA_CORRUPT 0x00040000 #define BGE_FLOWATTN_RDBDI 0x00080000 #define BGE_FLOWATTN_RXLS 0x00100000 #define BGE_FLOWATTN_RXLP 0x00200000 #define BGE_FLOWATTN_RBDC 0x00400000 #define BGE_FLOWATTN_RBDI 0x00800000 #define BGE_FLOWATTN_SDC 0x08000000 #define BGE_FLOWATTN_SDI 0x10000000 #define BGE_FLOWATTN_SRS 0x20000000 #define BGE_FLOWATTN_SBDC 0x40000000 #define BGE_FLOWATTN_SBDI 0x80000000 /* * Memory arbiter registers */ #define BGE_MARB_MODE 0x4000 #define BGE_MARB_STATUS 0x4004 #define BGE_MARB_TRAPADDR_HI 0x4008 #define BGE_MARB_TRAPADDR_LO 0x400C /* Memory arbiter mode register */ #define BGE_MARBMODE_RESET 0x00000001 #define BGE_MARBMODE_ENABLE 0x00000002 #define BGE_MARBMODE_TX_ADDR_TRAP 0x00000004 #define BGE_MARBMODE_RX_ADDR_TRAP 0x00000008 #define 
BGE_MARBMODE_DMAW1_TRAP 0x00000010 #define BGE_MARBMODE_DMAR1_TRAP 0x00000020 #define BGE_MARBMODE_RXRISC_TRAP 0x00000040 #define BGE_MARBMODE_TXRISC_TRAP 0x00000080 #define BGE_MARBMODE_PCI_TRAP 0x00000100 #define BGE_MARBMODE_DMAR2_TRAP 0x00000200 #define BGE_MARBMODE_RXQ_TRAP 0x00000400 #define BGE_MARBMODE_RXDI1_TRAP 0x00000800 #define BGE_MARBMODE_RXDI2_TRAP 0x00001000 #define BGE_MARBMODE_DC_GRPMEM_TRAP 0x00002000 #define BGE_MARBMODE_HCOAL_TRAP 0x00004000 #define BGE_MARBMODE_MBUF_TRAP 0x00008000 #define BGE_MARBMODE_TXDI_TRAP 0x00010000 #define BGE_MARBMODE_SDC_DMAC_TRAP 0x00020000 #define BGE_MARBMODE_TXBD_TRAP 0x00040000 #define BGE_MARBMODE_BUFFMAN_TRAP 0x00080000 #define BGE_MARBMODE_DMAW2_TRAP 0x00100000 #define BGE_MARBMODE_XTSSRAM_ROFLO_TRAP 0x00200000 #define BGE_MARBMODE_XTSSRAM_RUFLO_TRAP 0x00400000 #define BGE_MARBMODE_XTSSRAM_WOFLO_TRAP 0x00800000 #define BGE_MARBMODE_XTSSRAM_WUFLO_TRAP 0x01000000 #define BGE_MARBMODE_XTSSRAM_PERR_TRAP 0x02000000 /* Memory arbiter status register */ #define BGE_MARBSTAT_TX_ADDR_TRAP 0x00000004 #define BGE_MARBSTAT_RX_ADDR_TRAP 0x00000008 #define BGE_MARBSTAT_DMAW1_TRAP 0x00000010 #define BGE_MARBSTAT_DMAR1_TRAP 0x00000020 #define BGE_MARBSTAT_RXRISC_TRAP 0x00000040 #define BGE_MARBSTAT_TXRISC_TRAP 0x00000080 #define BGE_MARBSTAT_PCI_TRAP 0x00000100 #define BGE_MARBSTAT_DMAR2_TRAP 0x00000200 #define BGE_MARBSTAT_RXQ_TRAP 0x00000400 #define BGE_MARBSTAT_RXDI1_TRAP 0x00000800 #define BGE_MARBSTAT_RXDI2_TRAP 0x00001000 #define BGE_MARBSTAT_DC_GRPMEM_TRAP 0x00002000 #define BGE_MARBSTAT_HCOAL_TRAP 0x00004000 #define BGE_MARBSTAT_MBUF_TRAP 0x00008000 #define BGE_MARBSTAT_TXDI_TRAP 0x00010000 #define BGE_MARBSTAT_SDC_DMAC_TRAP 0x00020000 #define BGE_MARBSTAT_TXBD_TRAP 0x00040000 #define BGE_MARBSTAT_BUFFMAN_TRAP 0x00080000 #define BGE_MARBSTAT_DMAW2_TRAP 0x00100000 #define BGE_MARBSTAT_XTSSRAM_ROFLO_TRAP 0x00200000 #define BGE_MARBSTAT_XTSSRAM_RUFLO_TRAP 0x00400000 #define BGE_MARBSTAT_XTSSRAM_WOFLO_TRAP 0x00800000 #define BGE_MARBSTAT_XTSSRAM_WUFLO_TRAP 0x01000000 #define BGE_MARBSTAT_XTSSRAM_PERR_TRAP 0x02000000 /* * Buffer manager control registers */ #define BGE_BMAN_MODE 0x4400 #define BGE_BMAN_STATUS 0x4404 #define BGE_BMAN_MBUFPOOL_BASEADDR 0x4408 #define BGE_BMAN_MBUFPOOL_LEN 0x440C #define BGE_BMAN_MBUFPOOL_READDMA_LOWAT 0x4410 #define BGE_BMAN_MBUFPOOL_MACRX_LOWAT 0x4414 #define BGE_BMAN_MBUFPOOL_HIWAT 0x4418 #define BGE_BMAN_RXCPU_MBALLOC_REQ 0x441C #define BGE_BMAN_RXCPU_MBALLOC_RESP 0x4420 #define BGE_BMAN_TXCPU_MBALLOC_REQ 0x4424 #define BGE_BMAN_TXCPU_MBALLOC_RESP 0x4428 #define BGE_BMAN_DMA_DESCPOOL_BASEADDR 0x442C #define BGE_BMAN_DMA_DESCPOOL_LEN 0x4430 #define BGE_BMAN_DMA_DESCPOOL_LOWAT 0x4434 #define BGE_BMAN_DMA_DESCPOOL_HIWAT 0x4438 #define BGE_BMAN_RXCPU_DMAALLOC_REQ 0x443C #define BGE_BMAN_RXCPU_DMAALLOC_RESP 0x4440 #define BGE_BMAN_TXCPU_DMAALLOC_REQ 0x4444 #define BGE_BMAN_TXCPU_DMALLLOC_RESP 0x4448 #define BGE_BMAN_HWDIAG_1 0x444C #define BGE_BMAN_HWDIAG_2 0x4450 #define BGE_BMAN_HWDIAG_3 0x4454 /* Buffer manager mode register */ #define BGE_BMANMODE_RESET 0x00000001 #define BGE_BMANMODE_ENABLE 0x00000002 #define BGE_BMANMODE_ATTN 0x00000004 #define BGE_BMANMODE_TESTMODE 0x00000008 #define BGE_BMANMODE_LOMBUF_ATTN 0x00000010 #define BGE_BMANMODE_NO_TX_UNDERRUN 0x80000000 /* Buffer manager status register */ #define BGE_BMANSTAT_ERRO 0x00000004 #define BGE_BMANSTAT_LOWMBUF_ERROR 0x00000010 /* * Read DMA Control registers */ #define BGE_RDMA_MODE 0x4800 #define BGE_RDMA_STATUS 0x4804 #define BGE_RDMA_RSRVCTRL_REG2 
0x4890 #define BGE_RDMA_LSO_CRPTEN_CTRL_REG2 0x48A0 #define BGE_RDMA_RSRVCTRL 0x4900 #define BGE_RDMA_LSO_CRPTEN_CTRL 0x4910 /* Read DMA mode register */ #define BGE_RDMAMODE_RESET 0x00000001 #define BGE_RDMAMODE_ENABLE 0x00000002 #define BGE_RDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_RDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_RDMAMODE_PCI_PERR_ATTN 0x00000010 #define BGE_RDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_RDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_RDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_RDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_RDMAMODE_LOCWRITE_TOOBIG 0x00000200 #define BGE_RDMAMODE_ALL_ATTNS 0x000003FC #define BGE_RDMAMODE_BD_SBD_CRPT_ATTN 0x00000800 #define BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN 0x00001000 #define BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN 0x00002000 #define BGE_RDMAMODE_FIFO_SIZE_128 0x00020000 #define BGE_RDMAMODE_FIFO_LONG_BURST 0x00030000 #define BGE_RDMAMODE_MULT_DMA_RD_DIS 0x01000000 #define BGE_RDMAMODE_TSO4_ENABLE 0x08000000 #define BGE_RDMAMODE_TSO6_ENABLE 0x10000000 #define BGE_RDMAMODE_H2BNC_VLAN_DET 0x20000000 /* Read DMA status register */ #define BGE_RDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_RDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_RDMASTAT_PCI_PERR_ATTN 0x00000010 #define BGE_RDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_RDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_RDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_RDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_RDMASTAT_LOCWRITE_TOOBIG 0x00000200 /* Read DMA Reserved Control register */ #define BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 #define BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000C00 #define BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000C0000 #define BGE_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 #define BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000FF0 #define BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000FF000 #define BGE_RDMA_RSRVCTRL_TXMRGN_MASK 0xFFE00000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 0x00020000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K 0x00030000 #define BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K 0x000C0000 #define BGE_RDMA_TX_LENGTH_WA_5719 0x02000000 #define BGE_RDMA_TX_LENGTH_WA_5720 0x00200000 /* BD Read DMA Mode register */ #define BGE_RDMA_BD_MODE 0x4A00 /* BD Read DMA Mode status register */ #define BGE_RDMA_BD_STATUS 0x4A04 #define BGE_RDMA_BD_MODE_RESET 0x00000001 #define BGE_RDMA_BD_MODE_ENABLE 0x00000002 /* Non-LSO Read DMA Mode register */ #define BGE_RDMA_NON_LSO_MODE 0x4B00 /* Non-LSO Read DMA Mode status register */ #define BGE_RDMA_NON_LSO_STATUS 0x4B04 #define BGE_RDMA_NON_LSO_MODE_RESET 0x00000001 #define BGE_RDMA_NON_LSO_MODE_ENABLE 0x00000002 #define BGE_RDMA_LENGTH 0x4BE0 #define BGE_NUM_RDMA_CHANNELS 4 /* * Write DMA control registers */ #define BGE_WDMA_MODE 0x4C00 #define BGE_WDMA_STATUS 0x4C04 /* Write DMA mode register */ #define BGE_WDMAMODE_RESET 0x00000001 #define BGE_WDMAMODE_ENABLE 0x00000002 #define BGE_WDMAMODE_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_WDMAMODE_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_WDMAMODE_PCI_PERR_ATTN 0x00000010 #define BGE_WDMAMODE_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_WDMAMODE_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_WDMAMODE_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_WDMAMODE_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_WDMAMODE_LOCREAD_TOOBIG 0x00000200 #define BGE_WDMAMODE_ALL_ATTNS 0x000003FC #define BGE_WDMAMODE_STATUS_TAG_FIX 0x20000000 #define BGE_WDMAMODE_BURST_ALL_DATA 0xC0000000 /* Write DMA status register */ #define BGE_WDMASTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define 
BGE_WDMASTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_WDMASTAT_PCI_PERR_ATTN 0x00000010 #define BGE_WDMASTAT_PCI_ADDROFLOW_ATTN 0x00000020 #define BGE_WDMASTAT_PCI_FIFOOFLOW_ATTN 0x00000040 #define BGE_WDMASTAT_PCI_FIFOUFLOW_ATTN 0x00000080 #define BGE_WDMASTAT_PCI_FIFOOREAD_ATTN 0x00000100 #define BGE_WDMASTAT_LOCREAD_TOOBIG 0x00000200 /* * RX CPU registers */ #define BGE_RXCPU_MODE 0x5000 #define BGE_RXCPU_STATUS 0x5004 #define BGE_RXCPU_PC 0x501C /* RX CPU mode register */ #define BGE_RXCPUMODE_RESET 0x00000001 #define BGE_RXCPUMODE_SINGLESTEP 0x00000002 #define BGE_RXCPUMODE_P0_DATAHLT_ENB 0x00000004 #define BGE_RXCPUMODE_P0_INSTRHLT_ENB 0x00000008 #define BGE_RXCPUMODE_WR_POSTBUF_ENB 0x00000010 #define BGE_RXCPUMODE_DATACACHE_ENB 0x00000020 #define BGE_RXCPUMODE_ROMFAIL 0x00000040 #define BGE_RXCPUMODE_WATCHDOG_ENB 0x00000080 #define BGE_RXCPUMODE_INSTRCACHE_PRF 0x00000100 #define BGE_RXCPUMODE_INSTRCACHE_FLUSH 0x00000200 #define BGE_RXCPUMODE_HALTCPU 0x00000400 #define BGE_RXCPUMODE_INVDATAHLT_ENB 0x00000800 #define BGE_RXCPUMODE_MADDRTRAPHLT_ENB 0x00001000 #define BGE_RXCPUMODE_RADDRTRAPHLT_ENB 0x00002000 /* RX CPU status register */ #define BGE_RXCPUSTAT_HW_BREAKPOINT 0x00000001 #define BGE_RXCPUSTAT_HLTINSTR_EXECUTED 0x00000002 #define BGE_RXCPUSTAT_INVALID_INSTR 0x00000004 #define BGE_RXCPUSTAT_P0_DATAREF 0x00000008 #define BGE_RXCPUSTAT_P0_INSTRREF 0x00000010 #define BGE_RXCPUSTAT_INVALID_DATAACC 0x00000020 #define BGE_RXCPUSTAT_INVALID_INSTRFTCH 0x00000040 #define BGE_RXCPUSTAT_BAD_MEMALIGN 0x00000080 #define BGE_RXCPUSTAT_MADDR_TRAP 0x00000100 #define BGE_RXCPUSTAT_REGADDR_TRAP 0x00000200 #define BGE_RXCPUSTAT_DATAACC_STALL 0x00001000 #define BGE_RXCPUSTAT_INSTRFETCH_STALL 0x00002000 #define BGE_RXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000 #define BGE_RXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000 #define BGE_RXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000 #define BGE_RXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000 #define BGE_RXCPUSTAT_BLOCKING_READ 0x80000000 /* * V? 
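 * (evidently the VCPU, judging by the register names below; used on BCM5906-class parts)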
CPU registers */ #define BGE_VCPU_STATUS 0x5100 #define BGE_VCPU_EXT_CTRL 0x6890 #define BGE_VCPU_STATUS_INIT_DONE 0x04000000 #define BGE_VCPU_STATUS_DRV_RESET 0x08000000 #define BGE_VCPU_EXT_CTRL_HALT_CPU 0x00400000 #define BGE_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 /* * TX CPU registers */ #define BGE_TXCPU_MODE 0x5400 #define BGE_TXCPU_STATUS 0x5404 #define BGE_TXCPU_PC 0x541C /* TX CPU mode register */ #define BGE_TXCPUMODE_RESET 0x00000001 #define BGE_TXCPUMODE_SINGLESTEP 0x00000002 #define BGE_TXCPUMODE_P0_DATAHLT_ENB 0x00000004 #define BGE_TXCPUMODE_P0_INSTRHLT_ENB 0x00000008 #define BGE_TXCPUMODE_WR_POSTBUF_ENB 0x00000010 #define BGE_TXCPUMODE_DATACACHE_ENB 0x00000020 #define BGE_TXCPUMODE_ROMFAIL 0x00000040 #define BGE_TXCPUMODE_WATCHDOG_ENB 0x00000080 #define BGE_TXCPUMODE_INSTRCACHE_PRF 0x00000100 #define BGE_TXCPUMODE_INSTRCACHE_FLUSH 0x00000200 #define BGE_TXCPUMODE_HALTCPU 0x00000400 #define BGE_TXCPUMODE_INVDATAHLT_ENB 0x00000800 #define BGE_TXCPUMODE_MADDRTRAPHLT_ENB 0x00001000 /* TX CPU status register */ #define BGE_TXCPUSTAT_HW_BREAKPOINT 0x00000001 #define BGE_TXCPUSTAT_HLTINSTR_EXECUTED 0x00000002 #define BGE_TXCPUSTAT_INVALID_INSTR 0x00000004 #define BGE_TXCPUSTAT_P0_DATAREF 0x00000008 #define BGE_TXCPUSTAT_P0_INSTRREF 0x00000010 #define BGE_TXCPUSTAT_INVALID_DATAACC 0x00000020 #define BGE_TXCPUSTAT_INVALID_INSTRFTCH 0x00000040 #define BGE_TXCPUSTAT_BAD_MEMALIGN 0x00000080 #define BGE_TXCPUSTAT_MADDR_TRAP 0x00000100 #define BGE_TXCPUSTAT_REGADDR_TRAP 0x00000200 #define BGE_TXCPUSTAT_DATAACC_STALL 0x00001000 #define BGE_TXCPUSTAT_INSTRFETCH_STALL 0x00002000 #define BGE_TXCPUSTAT_MA_WR_FIFOOFLOW 0x08000000 #define BGE_TXCPUSTAT_MA_RD_FIFOOFLOW 0x10000000 #define BGE_TXCPUSTAT_MA_DATAMASK_OFLOW 0x20000000 #define BGE_TXCPUSTAT_MA_REQ_FIFOOFLOW 0x40000000 #define BGE_TXCPUSTAT_BLOCKING_READ 0x80000000 /* * Low priority mailbox registers */ #define BGE_LPMBX_IRQ0_HI 0x5800 #define BGE_LPMBX_IRQ0_LO 0x5804 #define BGE_LPMBX_IRQ1_HI 0x5808 #define BGE_LPMBX_IRQ1_LO 0x580C #define BGE_LPMBX_IRQ2_HI 0x5810 #define BGE_LPMBX_IRQ2_LO 0x5814 #define BGE_LPMBX_IRQ3_HI 0x5818 #define BGE_LPMBX_IRQ3_LO 0x581C #define BGE_LPMBX_GEN0_HI 0x5820 #define BGE_LPMBX_GEN0_LO 0x5824 #define BGE_LPMBX_GEN1_HI 0x5828 #define BGE_LPMBX_GEN1_LO 0x582C #define BGE_LPMBX_GEN2_HI 0x5830 #define BGE_LPMBX_GEN2_LO 0x5834 #define BGE_LPMBX_GEN3_HI 0x5828 #define BGE_LPMBX_GEN3_LO 0x582C #define BGE_LPMBX_GEN4_HI 0x5840 #define BGE_LPMBX_GEN4_LO 0x5844 #define BGE_LPMBX_GEN5_HI 0x5848 #define BGE_LPMBX_GEN5_LO 0x584C #define BGE_LPMBX_GEN6_HI 0x5850 #define BGE_LPMBX_GEN6_LO 0x5854 #define BGE_LPMBX_GEN7_HI 0x5858 #define BGE_LPMBX_GEN7_LO 0x585C #define BGE_LPMBX_RELOAD_STATS_HI 0x5860 #define BGE_LPMBX_RELOAD_STATS_LO 0x5864 #define BGE_LPMBX_RX_STD_PROD_HI 0x5868 #define BGE_LPMBX_RX_STD_PROD_LO 0x586C #define BGE_LPMBX_RX_JUMBO_PROD_HI 0x5870 #define BGE_LPMBX_RX_JUMBO_PROD_LO 0x5874 #define BGE_LPMBX_RX_MINI_PROD_HI 0x5878 #define BGE_LPMBX_RX_MINI_PROD_LO 0x587C #define BGE_LPMBX_RX_CONS0_HI 0x5880 #define BGE_LPMBX_RX_CONS0_LO 0x5884 #define BGE_LPMBX_RX_CONS1_HI 0x5888 #define BGE_LPMBX_RX_CONS1_LO 0x588C #define BGE_LPMBX_RX_CONS2_HI 0x5890 #define BGE_LPMBX_RX_CONS2_LO 0x5894 #define BGE_LPMBX_RX_CONS3_HI 0x5898 #define BGE_LPMBX_RX_CONS3_LO 0x589C #define BGE_LPMBX_RX_CONS4_HI 0x58A0 #define BGE_LPMBX_RX_CONS4_LO 0x58A4 #define BGE_LPMBX_RX_CONS5_HI 0x58A8 #define BGE_LPMBX_RX_CONS5_LO 0x58AC #define BGE_LPMBX_RX_CONS6_HI 0x58B0 #define BGE_LPMBX_RX_CONS6_LO 0x58B4 #define 
BGE_LPMBX_RX_CONS7_HI 0x58B8 #define BGE_LPMBX_RX_CONS7_LO 0x58BC #define BGE_LPMBX_RX_CONS8_HI 0x58C0 #define BGE_LPMBX_RX_CONS8_LO 0x58C4 #define BGE_LPMBX_RX_CONS9_HI 0x58C8 #define BGE_LPMBX_RX_CONS9_LO 0x58CC #define BGE_LPMBX_RX_CONS10_HI 0x58D0 #define BGE_LPMBX_RX_CONS10_LO 0x58D4 #define BGE_LPMBX_RX_CONS11_HI 0x58D8 #define BGE_LPMBX_RX_CONS11_LO 0x58DC #define BGE_LPMBX_RX_CONS12_HI 0x58E0 #define BGE_LPMBX_RX_CONS12_LO 0x58E4 #define BGE_LPMBX_RX_CONS13_HI 0x58E8 #define BGE_LPMBX_RX_CONS13_LO 0x58EC #define BGE_LPMBX_RX_CONS14_HI 0x58F0 #define BGE_LPMBX_RX_CONS14_LO 0x58F4 #define BGE_LPMBX_RX_CONS15_HI 0x58F8 #define BGE_LPMBX_RX_CONS15_LO 0x58FC #define BGE_LPMBX_TX_HOST_PROD0_HI 0x5900 #define BGE_LPMBX_TX_HOST_PROD0_LO 0x5904 #define BGE_LPMBX_TX_HOST_PROD1_HI 0x5908 #define BGE_LPMBX_TX_HOST_PROD1_LO 0x590C #define BGE_LPMBX_TX_HOST_PROD2_HI 0x5910 #define BGE_LPMBX_TX_HOST_PROD2_LO 0x5914 #define BGE_LPMBX_TX_HOST_PROD3_HI 0x5918 #define BGE_LPMBX_TX_HOST_PROD3_LO 0x591C #define BGE_LPMBX_TX_HOST_PROD4_HI 0x5920 #define BGE_LPMBX_TX_HOST_PROD4_LO 0x5924 #define BGE_LPMBX_TX_HOST_PROD5_HI 0x5928 #define BGE_LPMBX_TX_HOST_PROD5_LO 0x592C #define BGE_LPMBX_TX_HOST_PROD6_HI 0x5930 #define BGE_LPMBX_TX_HOST_PROD6_LO 0x5934 #define BGE_LPMBX_TX_HOST_PROD7_HI 0x5938 #define BGE_LPMBX_TX_HOST_PROD7_LO 0x593C #define BGE_LPMBX_TX_HOST_PROD8_HI 0x5940 #define BGE_LPMBX_TX_HOST_PROD8_LO 0x5944 #define BGE_LPMBX_TX_HOST_PROD9_HI 0x5948 #define BGE_LPMBX_TX_HOST_PROD9_LO 0x594C #define BGE_LPMBX_TX_HOST_PROD10_HI 0x5950 #define BGE_LPMBX_TX_HOST_PROD10_LO 0x5954 #define BGE_LPMBX_TX_HOST_PROD11_HI 0x5958 #define BGE_LPMBX_TX_HOST_PROD11_LO 0x595C #define BGE_LPMBX_TX_HOST_PROD12_HI 0x5960 #define BGE_LPMBX_TX_HOST_PROD12_LO 0x5964 #define BGE_LPMBX_TX_HOST_PROD13_HI 0x5968 #define BGE_LPMBX_TX_HOST_PROD13_LO 0x596C #define BGE_LPMBX_TX_HOST_PROD14_HI 0x5970 #define BGE_LPMBX_TX_HOST_PROD14_LO 0x5974 #define BGE_LPMBX_TX_HOST_PROD15_HI 0x5978 #define BGE_LPMBX_TX_HOST_PROD15_LO 0x597C #define BGE_LPMBX_TX_NIC_PROD0_HI 0x5980 #define BGE_LPMBX_TX_NIC_PROD0_LO 0x5984 #define BGE_LPMBX_TX_NIC_PROD1_HI 0x5988 #define BGE_LPMBX_TX_NIC_PROD1_LO 0x598C #define BGE_LPMBX_TX_NIC_PROD2_HI 0x5990 #define BGE_LPMBX_TX_NIC_PROD2_LO 0x5994 #define BGE_LPMBX_TX_NIC_PROD3_HI 0x5998 #define BGE_LPMBX_TX_NIC_PROD3_LO 0x599C #define BGE_LPMBX_TX_NIC_PROD4_HI 0x59A0 #define BGE_LPMBX_TX_NIC_PROD4_LO 0x59A4 #define BGE_LPMBX_TX_NIC_PROD5_HI 0x59A8 #define BGE_LPMBX_TX_NIC_PROD5_LO 0x59AC #define BGE_LPMBX_TX_NIC_PROD6_HI 0x59B0 #define BGE_LPMBX_TX_NIC_PROD6_LO 0x59B4 #define BGE_LPMBX_TX_NIC_PROD7_HI 0x59B8 #define BGE_LPMBX_TX_NIC_PROD7_LO 0x59BC #define BGE_LPMBX_TX_NIC_PROD8_HI 0x59C0 #define BGE_LPMBX_TX_NIC_PROD8_LO 0x59C4 #define BGE_LPMBX_TX_NIC_PROD9_HI 0x59C8 #define BGE_LPMBX_TX_NIC_PROD9_LO 0x59CC #define BGE_LPMBX_TX_NIC_PROD10_HI 0x59D0 #define BGE_LPMBX_TX_NIC_PROD10_LO 0x59D4 #define BGE_LPMBX_TX_NIC_PROD11_HI 0x59D8 #define BGE_LPMBX_TX_NIC_PROD11_LO 0x59DC #define BGE_LPMBX_TX_NIC_PROD12_HI 0x59E0 #define BGE_LPMBX_TX_NIC_PROD12_LO 0x59E4 #define BGE_LPMBX_TX_NIC_PROD13_HI 0x59E8 #define BGE_LPMBX_TX_NIC_PROD13_LO 0x59EC #define BGE_LPMBX_TX_NIC_PROD14_HI 0x59F0 #define BGE_LPMBX_TX_NIC_PROD14_LO 0x59F4 #define BGE_LPMBX_TX_NIC_PROD15_HI 0x59F8 #define BGE_LPMBX_TX_NIC_PROD15_LO 0x59FC /* * Flow throw Queue reset register */ #define BGE_FTQ_RESET 0x5C00 #define BGE_FTQRESET_DMAREAD 0x00000002 #define BGE_FTQRESET_DMAHIPRIO_RD 0x00000004 #define BGE_FTQRESET_DMADONE 0x00000010 #define 
BGE_FTQRESET_SBDC 0x00000020 #define BGE_FTQRESET_SDI 0x00000040 #define BGE_FTQRESET_WDMA 0x00000080 #define BGE_FTQRESET_DMAHIPRIO_WR 0x00000100 #define BGE_FTQRESET_TYPE1_SOFTWARE 0x00000200 #define BGE_FTQRESET_SDC 0x00000400 #define BGE_FTQRESET_HCC 0x00000800 #define BGE_FTQRESET_TXFIFO 0x00001000 #define BGE_FTQRESET_MBC 0x00002000 #define BGE_FTQRESET_RBDC 0x00004000 #define BGE_FTQRESET_RXLP 0x00008000 #define BGE_FTQRESET_RDBDI 0x00010000 #define BGE_FTQRESET_RDC 0x00020000 #define BGE_FTQRESET_TYPE2_SOFTWARE 0x00040000 /* * Message Signaled Interrupt registers */ #define BGE_MSI_MODE 0x6000 #define BGE_MSI_STATUS 0x6004 #define BGE_MSI_FIFOACCESS 0x6008 /* MSI mode register */ #define BGE_MSIMODE_RESET 0x00000001 #define BGE_MSIMODE_ENABLE 0x00000002 #define BGE_MSIMODE_ONE_SHOT_DISABLE 0x00000020 #define BGE_MSIMODE_MULTIVEC_ENABLE 0x00000080 /* MSI status register */ #define BGE_MSISTAT_PCI_TGT_ABRT_ATTN 0x00000004 #define BGE_MSISTAT_PCI_MSTR_ABRT_ATTN 0x00000008 #define BGE_MSISTAT_PCI_PERR_ATTN 0x00000010 #define BGE_MSISTAT_MSI_FIFOUFLOW_ATTN 0x00000020 #define BGE_MSISTAT_MSI_FIFOOFLOW_ATTN 0x00000040 /* * DMA Completion registers */ #define BGE_DMAC_MODE 0x6400 /* DMA Completion mode register */ #define BGE_DMACMODE_RESET 0x00000001 #define BGE_DMACMODE_ENABLE 0x00000002 /* * General control registers. */ #define BGE_MODE_CTL 0x6800 #define BGE_MISC_CFG 0x6804 #define BGE_MISC_LOCAL_CTL 0x6808 #define BGE_RX_CPU_EVENT 0x6810 #define BGE_TX_CPU_EVENT 0x6820 #define BGE_EE_ADDR 0x6838 #define BGE_EE_DATA 0x683C #define BGE_EE_CTL 0x6840 #define BGE_MDI_CTL 0x6844 #define BGE_EE_DELAY 0x6848 #define BGE_FASTBOOT_PC 0x6894 #define BGE_RX_CPU_DRV_EVENT 0x00004000 /* * NVRAM Control registers */ #define BGE_NVRAM_CMD 0x7000 #define BGE_NVRAM_STAT 0x7004 #define BGE_NVRAM_WRDATA 0x7008 #define BGE_NVRAM_ADDR 0x700c #define BGE_NVRAM_RDDATA 0x7010 #define BGE_NVRAM_CFG1 0x7014 #define BGE_NVRAM_CFG2 0x7018 #define BGE_NVRAM_CFG3 0x701c #define BGE_NVRAM_SWARB 0x7020 #define BGE_NVRAM_ACCESS 0x7024 #define BGE_NVRAM_WRITE1 0x7028 #define BGE_NVRAMCMD_RESET 0x00000001 #define BGE_NVRAMCMD_DONE 0x00000008 #define BGE_NVRAMCMD_START 0x00000010 #define BGE_NVRAMCMD_WR 0x00000020 /* 1 = wr, 0 = rd */ #define BGE_NVRAMCMD_ERASE 0x00000040 #define BGE_NVRAMCMD_FIRST 0x00000080 #define BGE_NVRAMCMD_LAST 0x00000100 #define BGE_NVRAM_READCMD \ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE) #define BGE_NVRAM_WRITECMD \ (BGE_NVRAMCMD_FIRST|BGE_NVRAMCMD_LAST| \ BGE_NVRAMCMD_START|BGE_NVRAMCMD_DONE|BGE_NVRAMCMD_WR) #define BGE_NVRAMSWARB_SET0 0x00000001 #define BGE_NVRAMSWARB_SET1 0x00000002 #define BGE_NVRAMSWARB_SET2 0x00000003 #define BGE_NVRAMSWARB_SET3 0x00000004 #define BGE_NVRAMSWARB_CLR0 0x00000010 #define BGE_NVRAMSWARB_CLR1 0x00000020 #define BGE_NVRAMSWARB_CLR2 0x00000040 #define BGE_NVRAMSWARB_CLR3 0x00000080 #define BGE_NVRAMSWARB_GNT0 0x00000100 #define BGE_NVRAMSWARB_GNT1 0x00000200 #define BGE_NVRAMSWARB_GNT2 0x00000400 #define BGE_NVRAMSWARB_GNT3 0x00000800 #define BGE_NVRAMSWARB_REQ0 0x00001000 #define BGE_NVRAMSWARB_REQ1 0x00002000 #define BGE_NVRAMSWARB_REQ2 0x00004000 #define BGE_NVRAMSWARB_REQ3 0x00008000 #define BGE_NVRAMACC_ENABLE 0x00000001 #define BGE_NVRAMACC_WRENABLE 0x00000002 /* Mode control register */ #define BGE_MODECTL_INT_SNDCOAL_ONLY 0x00000001 #define BGE_MODECTL_BYTESWAP_NONFRAME 0x00000002 #define BGE_MODECTL_WORDSWAP_NONFRAME 0x00000004 #define BGE_MODECTL_BYTESWAP_DATA 0x00000010 #define BGE_MODECTL_WORDSWAP_DATA 
0x00000020 #define BGE_MODECTL_BYTESWAP_B2HRX_DATA 0x00000040 #define BGE_MODECTL_WORDSWAP_B2HRX_DATA 0x00000080 #define BGE_MODECTL_NO_FRAME_CRACKING 0x00000200 #define BGE_MODECTL_NO_RX_CRC 0x00000400 #define BGE_MODECTL_RX_BADFRAMES 0x00000800 #define BGE_MODECTL_NO_TX_INTR 0x00002000 #define BGE_MODECTL_NO_RX_INTR 0x00004000 #define BGE_MODECTL_FORCE_PCI32 0x00008000 #define BGE_MODECTL_B2HRX_ENABLE 0x00008000 #define BGE_MODECTL_STACKUP 0x00010000 #define BGE_MODECTL_HOST_SEND_BDS 0x00020000 #define BGE_MODECTL_HTX2B_ENABLE 0x00040000 #define BGE_MODECTL_TX_NO_PHDR_CSUM 0x00100000 #define BGE_MODECTL_RX_NO_PHDR_CSUM 0x00800000 #define BGE_MODECTL_TX_ATTN_INTR 0x01000000 #define BGE_MODECTL_RX_ATTN_INTR 0x02000000 #define BGE_MODECTL_MAC_ATTN_INTR 0x04000000 #define BGE_MODECTL_DMA_ATTN_INTR 0x08000000 #define BGE_MODECTL_FLOWCTL_ATTN_INTR 0x10000000 #define BGE_MODECTL_4X_SENDRING_SZ 0x20000000 #define BGE_MODECTL_FW_PROCESS_MCASTS 0x40000000 /* Misc. config register */ #define BGE_MISCCFG_RESET_CORE_CLOCKS 0x00000001 #define BGE_MISCCFG_TIMER_PRESCALER 0x000000FE #define BGE_MISCCFG_BOARD_ID_MASK 0x0001E000 #define BGE_MISCCFG_BOARD_ID_5704 0x00000000 #define BGE_MISCCFG_BOARD_ID_5704CIOBE 0x00004000 #define BGE_MISCCFG_BOARD_ID_5788 0x00010000 #define BGE_MISCCFG_BOARD_ID_5788M 0x00018000 #define BGE_MISCCFG_EPHY_IDDQ 0x00200000 #define BGE_MISCCFG_GPHY_PD_OVERRIDE 0x04000000 #define BGE_32BITTIME_66MHZ (0x41 << 1) /* Misc. Local Control */ #define BGE_MLC_INTR_STATE 0x00000001 #define BGE_MLC_INTR_CLR 0x00000002 #define BGE_MLC_INTR_SET 0x00000004 #define BGE_MLC_INTR_ONATTN 0x00000008 #define BGE_MLC_MISCIO_IN0 0x00000100 #define BGE_MLC_MISCIO_IN1 0x00000200 #define BGE_MLC_MISCIO_IN2 0x00000400 #define BGE_MLC_MISCIO_OUTEN0 0x00000800 #define BGE_MLC_MISCIO_OUTEN1 0x00001000 #define BGE_MLC_MISCIO_OUTEN2 0x00002000 #define BGE_MLC_MISCIO_OUT0 0x00004000 #define BGE_MLC_MISCIO_OUT1 0x00008000 #define BGE_MLC_MISCIO_OUT2 0x00010000 #define BGE_MLC_EXTRAM_ENB 0x00020000 #define BGE_MLC_SRAM_SIZE 0x001C0000 #define BGE_MLC_BANK_SEL 0x00200000 /* 0 = 2 banks, 1 == 1 */ #define BGE_MLC_SSRAM_TYPE 0x00400000 /* 1 = ZBT, 0 = standard */ #define BGE_MLC_SSRAM_CYC_DESEL 0x00800000 #define BGE_MLC_AUTO_EEPROM 0x01000000 #define BGE_SSRAMSIZE_256KB 0x00000000 #define BGE_SSRAMSIZE_512KB 0x00040000 #define BGE_SSRAMSIZE_1MB 0x00080000 #define BGE_SSRAMSIZE_2MB 0x000C0000 #define BGE_SSRAMSIZE_4MB 0x00100000 #define BGE_SSRAMSIZE_8MB 0x00140000 #define BGE_SSRAMSIZE_16M 0x00180000 /* EEPROM address register */ #define BGE_EEADDR_ADDRESS 0x0000FFFC #define BGE_EEADDR_HALFCLK 0x01FF0000 #define BGE_EEADDR_START 0x02000000 #define BGE_EEADDR_DEVID 0x1C000000 #define BGE_EEADDR_RESET 0x20000000 #define BGE_EEADDR_DONE 0x40000000 #define BGE_EEADDR_RW 0x80000000 /* 1 = rd, 0 = wr */ #define BGE_EEDEVID(x) ((x & 7) << 26) #define BGE_EEHALFCLK(x) ((x & 0x1FF) << 16) #define BGE_HALFCLK_384SCL 0x60 #define BGE_EE_READCMD \ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \ BGE_EEADDR_START|BGE_EEADDR_RW|BGE_EEADDR_DONE) #define BGE_EE_WRCMD \ (BGE_EEHALFCLK(BGE_HALFCLK_384SCL)|BGE_EEDEVID(0)| \ BGE_EEADDR_START|BGE_EEADDR_DONE) /* EEPROM Control register */ #define BGE_EECTL_CLKOUT_TRISTATE 0x00000001 #define BGE_EECTL_CLKOUT 0x00000002 #define BGE_EECTL_CLKIN 0x00000004 #define BGE_EECTL_DATAOUT_TRISTATE 0x00000008 #define BGE_EECTL_DATAOUT 0x00000010 #define BGE_EECTL_DATAIN 0x00000020 /* MDI (MII/GMII) access register */ #define BGE_MDI_DATA 0x00000001 #define BGE_MDI_DIR 0x00000002 #define 
BGE_MDI_SEL 0x00000004 #define BGE_MDI_CLK 0x00000008 #define BGE_MEMWIN_START 0x00008000 #define BGE_MEMWIN_END 0x0000FFFF /* BAR1 (APE) Register Definitions */ #define BGE_APE_GPIO_MSG 0x0008 #define BGE_APE_EVENT 0x000C #define BGE_APE_LOCK_REQ 0x002C #define BGE_APE_LOCK_GRANT 0x004C #define BGE_APE_GPIO_MSG_SHIFT 4 #define BGE_APE_EVENT_1 0x00000001 #define BGE_APE_LOCK_REQ_DRIVER0 0x00001000 #define BGE_APE_LOCK_GRANT_DRIVER0 0x00001000 /* APE Shared Memory block (writable by APE only) */ #define BGE_APE_SEG_SIG 0x4000 #define BGE_APE_FW_STATUS 0x400C #define BGE_APE_FW_FEATURES 0x4010 #define BGE_APE_FW_BEHAVIOR 0x4014 #define BGE_APE_FW_VERSION 0x4018 #define BGE_APE_FW_HEARTBEAT_INTERVAL 0x4024 #define BGE_APE_FW_HEARTBEAT 0x4028 #define BGE_APE_FW_ERROR_FLAGS 0x4074 #define BGE_APE_SEG_SIG_MAGIC 0x41504521 #define BGE_APE_FW_STATUS_READY 0x00000100 #define BGE_APE_FW_FEATURE_DASH 0x00000001 #define BGE_APE_FW_FEATURE_NCSI 0x00000002 #define BGE_APE_FW_VERSION_MAJMSK 0xFF000000 #define BGE_APE_FW_VERSION_MAJSFT 24 #define BGE_APE_FW_VERSION_MINMSK 0x00FF0000 #define BGE_APE_FW_VERSION_MINSFT 16 #define BGE_APE_FW_VERSION_REVMSK 0x0000FF00 #define BGE_APE_FW_VERSION_REVSFT 8 #define BGE_APE_FW_VERSION_BLDMSK 0x000000FF /* Host Shared Memory block (writable by host only) */ #define BGE_APE_HOST_SEG_SIG 0x4200 #define BGE_APE_HOST_SEG_LEN 0x4204 #define BGE_APE_HOST_INIT_COUNT 0x4208 #define BGE_APE_HOST_DRIVER_ID 0x420C #define BGE_APE_HOST_BEHAVIOR 0x4210 #define BGE_APE_HOST_HEARTBEAT_INT_MS 0x4214 #define BGE_APE_HOST_HEARTBEAT_COUNT 0x4218 #define BGE_APE_HOST_DRVR_STATE 0x421C #define BGE_APE_HOST_WOL_SPEED 0x4224 #define BGE_APE_HOST_SEG_SIG_MAGIC 0x484F5354 #define BGE_APE_HOST_SEG_LEN_MAGIC 0x00000020 #define BGE_APE_HOST_DRIVER_ID_FBSD 0xF6000000 #define BGE_APE_HOST_DRIVER_ID_MAGIC(maj, min) \ (BGE_APE_HOST_DRIVER_ID_FBSD | \ ((maj) & 0xff) << 16 | ((min) & 0xff) << 8) #define BGE_APE_HOST_BEHAV_NO_PHYLOCK 0x00000001 #define BGE_APE_HOST_HEARTBEAT_INT_DISABLE 0 #define BGE_APE_HOST_HEARTBEAT_INT_5SEC 5000 #define BGE_APE_HOST_DRVR_STATE_START 0x00000001 #define BGE_APE_HOST_DRVR_STATE_UNLOAD 0x00000002 #define BGE_APE_HOST_DRVR_STATE_WOL 0x00000003 #define BGE_APE_HOST_DRVR_STATE_SUSPEND 0x00000004 #define BGE_APE_HOST_WOL_SPEED_AUTO 0x00008000 #define BGE_APE_EVENT_STATUS 0x4300 #define BGE_APE_EVENT_STATUS_DRIVER_EVNT 0x00000010 #define BGE_APE_EVENT_STATUS_STATE_CHNGE 0x00000500 #define BGE_APE_EVENT_STATUS_STATE_START 0x00010000 #define BGE_APE_EVENT_STATUS_STATE_UNLOAD 0x00020000 #define BGE_APE_EVENT_STATUS_STATE_WOL 0x00030000 #define BGE_APE_EVENT_STATUS_STATE_SUSPEND 0x00040000 #define BGE_APE_EVENT_STATUS_EVENT_PENDING 0x80000000 #define BGE_APE_DEBUG_LOG 0x4E00 #define BGE_APE_DEBUG_LOG_LEN 0x0100 #define BGE_APE_PER_LOCK_REQ 0x8400 #define BGE_APE_PER_LOCK_GRANT 0x8420 #define BGE_APE_LOCK_PER_REQ_DRIVER0 0x00001000 #define BGE_APE_LOCK_PER_REQ_DRIVER1 0x00000002 #define BGE_APE_LOCK_PER_REQ_DRIVER2 0x00000004 #define BGE_APE_LOCK_PER_REQ_DRIVER3 0x00000008 #define BGE_APE_PER_LOCK_GRANT_DRIVER0 0x00001000 #define BGE_APE_PER_LOCK_GRANT_DRIVER1 0x00000002 #define BGE_APE_PER_LOCK_GRANT_DRIVER2 0x00000004 #define BGE_APE_PER_LOCK_GRANT_DRIVER3 0x00000008 /* APE Mutex Resources */ #define BGE_APE_LOCK_PHY0 0 #define BGE_APE_LOCK_GRC 1 #define BGE_APE_LOCK_PHY1 2 #define BGE_APE_LOCK_PHY2 3 #define BGE_APE_LOCK_MEM 4 #define BGE_APE_LOCK_PHY3 5 #define BGE_APE_LOCK_GPIO 7 #define BGE_MEMWIN_READ(sc, x, val) \ do { \ pci_write_config(sc->bge_dev,
BGE_PCI_MEMWIN_BASEADDR, \ (0xFFFF0000 & x), 4); \ val = CSR_READ_4(sc, BGE_MEMWIN_START + (x & 0xFFFF)); \ } while(0) #define BGE_MEMWIN_WRITE(sc, x, val) \ do { \ pci_write_config(sc->bge_dev, BGE_PCI_MEMWIN_BASEADDR, \ (0xFFFF0000 & x), 4); \ CSR_WRITE_4(sc, BGE_MEMWIN_START + (x & 0xFFFF), val); \ } while(0) /* * This magic number is written to the firmware mailbox at 0xb50 * before a software reset is issued. After the internal firmware * has completed its initialization it will write the opposite of * this value, ~BGE_SRAM_FW_MB_MAGIC, to the same location, * allowing the driver to synchronize with the firmware. */ #define BGE_SRAM_FW_MB_MAGIC 0x4B657654 typedef struct { uint32_t bge_addr_hi; uint32_t bge_addr_lo; } bge_hostaddr; #define BGE_HOSTADDR(x, y) \ do { \ (x).bge_addr_lo = ((uint64_t) (y) & 0xffffffff); \ (x).bge_addr_hi = ((uint64_t) (y) >> 32); \ } while(0) #define BGE_ADDR_LO(y) \ ((uint64_t) (y) & 0xFFFFFFFF) #define BGE_ADDR_HI(y) \ ((uint64_t) (y) >> 32) /* Ring control block structure */ struct bge_rcb { bge_hostaddr bge_hostaddr; uint32_t bge_maxlen_flags; uint32_t bge_nicaddr; }; #define RCB_WRITE_4(sc, rcb, offset, val) \ bus_write_4(sc->bge_res, rcb + offsetof(struct bge_rcb, offset), val) #define BGE_RCB_MAXLEN_FLAGS(maxlen, flags) ((maxlen) << 16 | (flags)) #define BGE_RCB_FLAG_USE_EXT_RX_BD 0x0001 #define BGE_RCB_FLAG_RING_DISABLED 0x0002 struct bge_tx_bd { bge_hostaddr bge_addr; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_flags; uint16_t bge_len; uint16_t bge_vlan_tag; uint16_t bge_mss; #else uint16_t bge_len; uint16_t bge_flags; uint16_t bge_mss; uint16_t bge_vlan_tag; #endif }; #define BGE_TXBDFLAG_TCP_UDP_CSUM 0x0001 #define BGE_TXBDFLAG_IP_CSUM 0x0002 #define BGE_TXBDFLAG_END 0x0004 #define BGE_TXBDFLAG_IP_FRAG 0x0008 #define BGE_TXBDFLAG_JUMBO_FRAME 0x0008 /* 5717 */ #define BGE_TXBDFLAG_IP_FRAG_END 0x0010 #define BGE_TXBDFLAG_HDRLEN_BIT2 0x0010 /* 5717 */ #define BGE_TXBDFLAG_SNAP 0x0020 /* 5717 */ #define BGE_TXBDFLAG_VLAN_TAG 0x0040 #define BGE_TXBDFLAG_COAL_NOW 0x0080 #define BGE_TXBDFLAG_CPU_PRE_DMA 0x0100 #define BGE_TXBDFLAG_CPU_POST_DMA 0x0200 #define BGE_TXBDFLAG_HDRLEN_BIT3 0x0400 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT4 0x0800 /* 5717 */ #define BGE_TXBDFLAG_INSERT_SRC_ADDR 0x1000 #define BGE_TXBDFLAG_HDRLEN_BIT5 0x1000 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT6 0x2000 /* 5717 */ #define BGE_TXBDFLAG_HDRLEN_BIT7 0x4000 /* 5717 */ #define BGE_TXBDFLAG_CHOOSE_SRC_ADDR 0x6000 #define BGE_TXBDFLAG_NO_CRC 0x8000 #define BGE_TXBDFLAG_MSS_SIZE_MASK 0x3FFF /* 5717 */ /* Bits [1:0] of the MSS header length. 
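 * A hedged sketch (not part of this header; "flags", "mss", "hlen" and
 * "tcp_segsz" are assumed local variables) of how a TSO3-capable driver
 * could spread an LSO header length, counted in 32-bit words, across these
 * bits while keeping the MSS itself in the low bits of bge_mss:
 *
 *	mss   |= (hlen & 0x3) << 14;	bits [1:0] -> BGE_TXBDFLAG_MSS_HDRLEN_MASK
 *	flags |= (hlen & 0x4) << 2;	bit 2      -> BGE_TXBDFLAG_HDRLEN_BIT2
 *	flags |= (hlen & 0xF8) << 7;	bits [7:3] -> HDRLEN_BIT3..HDRLEN_BIT7
 *	mss   |= tcp_segsz & BGE_TXBDFLAG_MSS_SIZE_MASK;
 *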
*/ #define BGE_TXBDFLAG_MSS_HDRLEN_MASK 0xC000 /* 5717 */ #define BGE_NIC_TXRING_ADDR(ringno, size) \ BGE_SEND_RING_1_TO_4 + \ ((ringno * sizeof(struct bge_tx_bd) * size) / 4) struct bge_rx_bd { bge_hostaddr bge_addr; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len; uint16_t bge_idx; uint16_t bge_flags; uint16_t bge_type; uint16_t bge_tcp_udp_csum; uint16_t bge_ip_csum; uint16_t bge_vlan_tag; uint16_t bge_error_flag; #else uint16_t bge_idx; uint16_t bge_len; uint16_t bge_type; uint16_t bge_flags; uint16_t bge_ip_csum; uint16_t bge_tcp_udp_csum; uint16_t bge_error_flag; uint16_t bge_vlan_tag; #endif uint32_t bge_rsvd; uint32_t bge_opaque; }; struct bge_extrx_bd { bge_hostaddr bge_addr1; bge_hostaddr bge_addr2; bge_hostaddr bge_addr3; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len2; uint16_t bge_len1; uint16_t bge_rsvd1; uint16_t bge_len3; #else uint16_t bge_len1; uint16_t bge_len2; uint16_t bge_len3; uint16_t bge_rsvd1; #endif bge_hostaddr bge_addr0; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_len0; uint16_t bge_idx; uint16_t bge_flags; uint16_t bge_type; uint16_t bge_tcp_udp_csum; uint16_t bge_ip_csum; uint16_t bge_vlan_tag; uint16_t bge_error_flag; #else uint16_t bge_idx; uint16_t bge_len0; uint16_t bge_type; uint16_t bge_flags; uint16_t bge_ip_csum; uint16_t bge_tcp_udp_csum; uint16_t bge_error_flag; uint16_t bge_vlan_tag; #endif uint32_t bge_rsvd0; uint32_t bge_opaque; }; #define BGE_RXBDFLAG_END 0x0004 #define BGE_RXBDFLAG_JUMBO_RING 0x0020 #define BGE_RXBDFLAG_VLAN_TAG 0x0040 #define BGE_RXBDFLAG_ERROR 0x0400 #define BGE_RXBDFLAG_MINI_RING 0x0800 #define BGE_RXBDFLAG_IP_CSUM 0x1000 #define BGE_RXBDFLAG_TCP_UDP_CSUM 0x2000 #define BGE_RXBDFLAG_TCP_UDP_IS_TCP 0x4000 #define BGE_RXBDFLAG_IPV6 0x8000 #define BGE_RXERRFLAG_BAD_CRC 0x0001 #define BGE_RXERRFLAG_COLL_DETECT 0x0002 #define BGE_RXERRFLAG_LINK_LOST 0x0004 #define BGE_RXERRFLAG_PHY_DECODE_ERR 0x0008 #define BGE_RXERRFLAG_MAC_ABORT 0x0010 #define BGE_RXERRFLAG_RUNT 0x0020 #define BGE_RXERRFLAG_TRUNC_NO_RSRCS 0x0040 #define BGE_RXERRFLAG_GIANT 0x0080 #define BGE_RXERRFLAG_IP_CSUM_NOK 0x1000 /* 5717 */ struct bge_sts_idx { #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_rx_prod_idx; uint16_t bge_tx_cons_idx; #else uint16_t bge_tx_cons_idx; uint16_t bge_rx_prod_idx; #endif }; struct bge_status_block { uint32_t bge_status; uint32_t bge_status_tag; #if BYTE_ORDER == LITTLE_ENDIAN uint16_t bge_rx_jumbo_cons_idx; uint16_t bge_rx_std_cons_idx; uint16_t bge_rx_mini_cons_idx; uint16_t bge_rsvd1; #else uint16_t bge_rx_std_cons_idx; uint16_t bge_rx_jumbo_cons_idx; uint16_t bge_rsvd1; uint16_t bge_rx_mini_cons_idx; #endif struct bge_sts_idx bge_idx[16]; }; #define BGE_STATFLAG_UPDATED 0x00000001 #define BGE_STATFLAG_LINKSTATE_CHANGED 0x00000002 #define BGE_STATFLAG_ERROR 0x00000004 /* * Broadcom Vendor ID * (Note: the BCM570x still defaults to the Alteon PCI vendor ID * even though they're now manufactured by Broadcom) */ #define BCOM_VENDORID 0x14E4 #define BCOM_DEVICEID_BCM5700 0x1644 #define BCOM_DEVICEID_BCM5701 0x1645 #define BCOM_DEVICEID_BCM5702 0x1646 #define BCOM_DEVICEID_BCM5702X 0x16A6 #define BCOM_DEVICEID_BCM5702_ALT 0x16C6 #define BCOM_DEVICEID_BCM5703 0x1647 #define BCOM_DEVICEID_BCM5703X 0x16A7 #define BCOM_DEVICEID_BCM5703_ALT 0x16C7 #define BCOM_DEVICEID_BCM5704C 0x1648 #define BCOM_DEVICEID_BCM5704S 0x16A8 #define BCOM_DEVICEID_BCM5704S_ALT 0x1649 #define BCOM_DEVICEID_BCM5705 0x1653 #define BCOM_DEVICEID_BCM5705K 0x1654 #define BCOM_DEVICEID_BCM5705F 0x166E #define BCOM_DEVICEID_BCM5705M 0x165D #define 
BCOM_DEVICEID_BCM5705M_ALT 0x165E #define BCOM_DEVICEID_BCM5714C 0x1668 #define BCOM_DEVICEID_BCM5714S 0x1669 #define BCOM_DEVICEID_BCM5715 0x1678 #define BCOM_DEVICEID_BCM5715S 0x1679 #define BCOM_DEVICEID_BCM5717 0x1655 #define BCOM_DEVICEID_BCM5717C 0x1665 #define BCOM_DEVICEID_BCM5718 0x1656 #define BCOM_DEVICEID_BCM5719 0x1657 #define BCOM_DEVICEID_BCM5720_PP 0x1658 /* Not released to public. */ #define BCOM_DEVICEID_BCM5720 0x165F #define BCOM_DEVICEID_BCM5721 0x1659 #define BCOM_DEVICEID_BCM5722 0x165A #define BCOM_DEVICEID_BCM5723 0x165B #define BCOM_DEVICEID_BCM5725 0x1643 #define BCOM_DEVICEID_BCM5727 0x16F3 #define BCOM_DEVICEID_BCM5750 0x1676 #define BCOM_DEVICEID_BCM5750M 0x167C #define BCOM_DEVICEID_BCM5751 0x1677 #define BCOM_DEVICEID_BCM5751F 0x167E #define BCOM_DEVICEID_BCM5751M 0x167D #define BCOM_DEVICEID_BCM5752 0x1600 #define BCOM_DEVICEID_BCM5752M 0x1601 #define BCOM_DEVICEID_BCM5753 0x16F7 #define BCOM_DEVICEID_BCM5753F 0x16FE #define BCOM_DEVICEID_BCM5753M 0x16FD #define BCOM_DEVICEID_BCM5754 0x167A #define BCOM_DEVICEID_BCM5754M 0x1672 #define BCOM_DEVICEID_BCM5755 0x167B #define BCOM_DEVICEID_BCM5755M 0x1673 #define BCOM_DEVICEID_BCM5756 0x1674 #define BCOM_DEVICEID_BCM5761 0x1681 #define BCOM_DEVICEID_BCM5761E 0x1680 #define BCOM_DEVICEID_BCM5761S 0x1688 #define BCOM_DEVICEID_BCM5761SE 0x1689 #define BCOM_DEVICEID_BCM5762 0x1687 #define BCOM_DEVICEID_BCM5764 0x1684 #define BCOM_DEVICEID_BCM5780 0x166A #define BCOM_DEVICEID_BCM5780S 0x166B #define BCOM_DEVICEID_BCM5781 0x16DD #define BCOM_DEVICEID_BCM5782 0x1696 #define BCOM_DEVICEID_BCM5784 0x1698 #define BCOM_DEVICEID_BCM5785F 0x16a0 #define BCOM_DEVICEID_BCM5785G 0x1699 #define BCOM_DEVICEID_BCM5786 0x169A #define BCOM_DEVICEID_BCM5787 0x169B #define BCOM_DEVICEID_BCM5787M 0x1693 #define BCOM_DEVICEID_BCM5787F 0x167f #define BCOM_DEVICEID_BCM5788 0x169C #define BCOM_DEVICEID_BCM5789 0x169D #define BCOM_DEVICEID_BCM5901 0x170D #define BCOM_DEVICEID_BCM5901A2 0x170E #define BCOM_DEVICEID_BCM5903M 0x16FF #define BCOM_DEVICEID_BCM5906 0x1712 #define BCOM_DEVICEID_BCM5906M 0x1713 #define BCOM_DEVICEID_BCM57760 0x1690 #define BCOM_DEVICEID_BCM57761 0x16B0 #define BCOM_DEVICEID_BCM57762 0x1682 #define BCOM_DEVICEID_BCM57764 0x1642 #define BCOM_DEVICEID_BCM57765 0x16B4 #define BCOM_DEVICEID_BCM57766 0x1686 #define BCOM_DEVICEID_BCM57767 0x1683 #define BCOM_DEVICEID_BCM57780 0x1692 #define BCOM_DEVICEID_BCM57781 0x16B1 #define BCOM_DEVICEID_BCM57782 0x16B7 #define BCOM_DEVICEID_BCM57785 0x16B5 #define BCOM_DEVICEID_BCM57786 0x16B3 #define BCOM_DEVICEID_BCM57787 0x1641 #define BCOM_DEVICEID_BCM57788 0x1691 #define BCOM_DEVICEID_BCM57790 0x1694 #define BCOM_DEVICEID_BCM57791 0x16B2 #define BCOM_DEVICEID_BCM57795 0x16B6 /* * Alteon AceNIC PCI vendor/device ID. */ #define ALTEON_VENDORID 0x12AE #define ALTEON_DEVICEID_ACENIC 0x0001 #define ALTEON_DEVICEID_ACENIC_COPPER 0x0002 #define ALTEON_DEVICEID_BCM5700 0x0003 #define ALTEON_DEVICEID_BCM5701 0x0004 /* * 3Com 3c996 PCI vendor/device ID. */ #define TC_VENDORID 0x10B7 #define TC_DEVICEID_3C996 0x0003 /* * SysKonnect PCI vendor ID */ #define SK_VENDORID 0x1148 #define SK_DEVICEID_ALTIMA 0x4400 #define SK_SUBSYSID_9D21 0x4421 #define SK_SUBSYSID_9D41 0x4441 /* * Altima PCI vendor/device ID. */ #define ALTIMA_VENDORID 0x173b #define ALTIMA_DEVICE_AC1000 0x03e8 #define ALTIMA_DEVICE_AC1002 0x03e9 #define ALTIMA_DEVICE_AC9100 0x03ea /* * Dell PCI vendor ID */ #define DELL_VENDORID 0x1028 /* * Apple PCI vendor ID. 
*/ #define APPLE_VENDORID 0x106b #define APPLE_DEVICE_BCM5701 0x1645 /* * Sun PCI vendor ID */ #define SUN_VENDORID 0x108e /* * Fujitsu vendor/device IDs */ #define FJTSU_VENDORID 0x10cf #define FJTSU_DEVICEID_PW008GE5 0x11a1 #define FJTSU_DEVICEID_PW008GE4 0x11a2 #define FJTSU_DEVICEID_PP250450 0x11cc /* PRIMEPOWER250/450 LAN */ /* * Offset of MAC address inside EEPROM. */ #define BGE_EE_MAC_OFFSET 0x7C #define BGE_EE_MAC_OFFSET_5906 0x10 #define BGE_EE_HWCFG_OFFSET 0xC8 #define BGE_HWCFG_VOLTAGE 0x00000003 #define BGE_HWCFG_PHYLED_MODE 0x0000000C #define BGE_HWCFG_MEDIA 0x00000030 #define BGE_HWCFG_ASF 0x00000080 #define BGE_VOLTAGE_1POINT3 0x00000000 #define BGE_VOLTAGE_1POINT8 0x00000001 #define BGE_PHYLEDMODE_UNSPEC 0x00000000 #define BGE_PHYLEDMODE_TRIPLELED 0x00000004 #define BGE_PHYLEDMODE_SINGLELED 0x00000008 #define BGE_MEDIA_UNSPEC 0x00000000 #define BGE_MEDIA_COPPER 0x00000010 #define BGE_MEDIA_FIBER 0x00000020 #define BGE_TICKS_PER_SEC 1000000 /* * Ring size constants. */ #define BGE_EVENT_RING_CNT 256 #define BGE_CMD_RING_CNT 64 #define BGE_STD_RX_RING_CNT 512 #define BGE_JUMBO_RX_RING_CNT 256 #define BGE_MINI_RX_RING_CNT 1024 #define BGE_RETURN_RING_CNT 1024 /* 5705 has smaller return ring size */ #define BGE_RETURN_RING_CNT_5705 512 /* * Possible TX ring sizes. */ #define BGE_TX_RING_CNT_128 128 #define BGE_TX_RING_BASE_128 0x3800 #define BGE_TX_RING_CNT_256 256 #define BGE_TX_RING_BASE_256 0x3000 #define BGE_TX_RING_CNT_512 512 #define BGE_TX_RING_BASE_512 0x2000 #define BGE_TX_RING_CNT BGE_TX_RING_CNT_512 #define BGE_TX_RING_BASE BGE_TX_RING_BASE_512 /* * Tigon III statistics counters. */ /* Statistics maintained MAC Receive block. */ struct bge_rx_mac_stats { bge_hostaddr ifHCInOctets; bge_hostaddr Reserved1; bge_hostaddr etherStatsFragments; bge_hostaddr ifHCInUcastPkts; bge_hostaddr ifHCInMulticastPkts; bge_hostaddr ifHCInBroadcastPkts; bge_hostaddr dot3StatsFCSErrors; bge_hostaddr dot3StatsAlignmentErrors; bge_hostaddr xonPauseFramesReceived; bge_hostaddr xoffPauseFramesReceived; bge_hostaddr macControlFramesReceived; bge_hostaddr xoffStateEntered; bge_hostaddr dot3StatsFramesTooLong; bge_hostaddr etherStatsJabbers; bge_hostaddr etherStatsUndersizePkts; bge_hostaddr inRangeLengthError; bge_hostaddr outRangeLengthError; bge_hostaddr etherStatsPkts64Octets; bge_hostaddr etherStatsPkts65Octetsto127Octets; bge_hostaddr etherStatsPkts128Octetsto255Octets; bge_hostaddr etherStatsPkts256Octetsto511Octets; bge_hostaddr etherStatsPkts512Octetsto1023Octets; bge_hostaddr etherStatsPkts1024Octetsto1522Octets; bge_hostaddr etherStatsPkts1523Octetsto2047Octets; bge_hostaddr etherStatsPkts2048Octetsto4095Octets; bge_hostaddr etherStatsPkts4096Octetsto8191Octets; bge_hostaddr etherStatsPkts8192Octetsto9022Octets; }; /* Statistics maintained MAC Transmit block. 
*/ struct bge_tx_mac_stats { bge_hostaddr ifHCOutOctets; bge_hostaddr Reserved2; bge_hostaddr etherStatsCollisions; bge_hostaddr outXonSent; bge_hostaddr outXoffSent; bge_hostaddr flowControlDone; bge_hostaddr dot3StatsInternalMacTransmitErrors; bge_hostaddr dot3StatsSingleCollisionFrames; bge_hostaddr dot3StatsMultipleCollisionFrames; bge_hostaddr dot3StatsDeferredTransmissions; bge_hostaddr Reserved3; bge_hostaddr dot3StatsExcessiveCollisions; bge_hostaddr dot3StatsLateCollisions; bge_hostaddr dot3Collided2Times; bge_hostaddr dot3Collided3Times; bge_hostaddr dot3Collided4Times; bge_hostaddr dot3Collided5Times; bge_hostaddr dot3Collided6Times; bge_hostaddr dot3Collided7Times; bge_hostaddr dot3Collided8Times; bge_hostaddr dot3Collided9Times; bge_hostaddr dot3Collided10Times; bge_hostaddr dot3Collided11Times; bge_hostaddr dot3Collided12Times; bge_hostaddr dot3Collided13Times; bge_hostaddr dot3Collided14Times; bge_hostaddr dot3Collided15Times; bge_hostaddr ifHCOutUcastPkts; bge_hostaddr ifHCOutMulticastPkts; bge_hostaddr ifHCOutBroadcastPkts; bge_hostaddr dot3StatsCarrierSenseErrors; bge_hostaddr ifOutDiscards; bge_hostaddr ifOutErrors; }; /* Stats counters access through registers */ struct bge_mac_stats { /* TX MAC statistics */ uint64_t ifHCOutOctets; uint64_t Reserved0; uint64_t etherStatsCollisions; uint64_t outXonSent; uint64_t outXoffSent; uint64_t Reserved1; uint64_t dot3StatsInternalMacTransmitErrors; uint64_t dot3StatsSingleCollisionFrames; uint64_t dot3StatsMultipleCollisionFrames; uint64_t dot3StatsDeferredTransmissions; uint64_t Reserved2; uint64_t dot3StatsExcessiveCollisions; uint64_t dot3StatsLateCollisions; uint64_t Reserved3[14]; uint64_t ifHCOutUcastPkts; uint64_t ifHCOutMulticastPkts; uint64_t ifHCOutBroadcastPkts; uint64_t Reserved4[2]; /* RX MAC statistics */ uint64_t ifHCInOctets; uint64_t Reserved5; uint64_t etherStatsFragments; uint64_t ifHCInUcastPkts; uint64_t ifHCInMulticastPkts; uint64_t ifHCInBroadcastPkts; uint64_t dot3StatsFCSErrors; uint64_t dot3StatsAlignmentErrors; uint64_t xonPauseFramesReceived; uint64_t xoffPauseFramesReceived; uint64_t macControlFramesReceived; uint64_t xoffStateEntered; uint64_t dot3StatsFramesTooLong; uint64_t etherStatsJabbers; uint64_t etherStatsUndersizePkts; /* Receive List Placement control */ uint64_t FramesDroppedDueToFilters; uint64_t DmaWriteQueueFull; uint64_t DmaWriteHighPriQueueFull; uint64_t NoMoreRxBDs; uint64_t InputDiscards; uint64_t InputErrors; uint64_t RecvThresholdHit; }; struct bge_stats { uint8_t Reserved0[256]; /* Statistics maintained by Receive MAC. */ struct bge_rx_mac_stats rxstats; bge_hostaddr Unused1[37]; /* Statistics maintained by Transmit MAC. */ struct bge_tx_mac_stats txstats; bge_hostaddr Unused2[31]; /* Statistics maintained by Receive List Placement. */ bge_hostaddr COSIfHCInPkts[16]; bge_hostaddr COSFramesDroppedDueToFilters; bge_hostaddr nicDmaWriteQueueFull; bge_hostaddr nicDmaWriteHighPriQueueFull; bge_hostaddr nicNoMoreRxBDs; bge_hostaddr ifInDiscards; bge_hostaddr ifInErrors; bge_hostaddr nicRecvThresholdHit; bge_hostaddr Unused3[9]; /* Statistics maintained by Send Data Initiator. */ bge_hostaddr COSIfHCOutPkts[16]; bge_hostaddr nicDmaReadQueueFull; bge_hostaddr nicDmaReadHighPriQueueFull; bge_hostaddr nicSendDataCompQueueFull; /* Statistics maintained by Host Coalescing. 
*/ bge_hostaddr nicRingSetSendProdIndex; bge_hostaddr nicRingStatusUpdate; bge_hostaddr nicInterrupts; bge_hostaddr nicAvoidedInterrupts; bge_hostaddr nicSendThresholdHit; uint8_t Reserved4[320]; }; /* * Tigon general information block. This resides in host memory * and contains the status counters, ring control blocks and * producer pointers. */ struct bge_gib { struct bge_stats bge_stats; struct bge_rcb bge_tx_rcb[16]; struct bge_rcb bge_std_rx_rcb; struct bge_rcb bge_jumbo_rx_rcb; struct bge_rcb bge_mini_rx_rcb; struct bge_rcb bge_return_rcb; }; #define BGE_FRAMELEN 1518 #define BGE_MAX_FRAMELEN 1536 #define BGE_JUMBO_FRAMELEN 9018 #define BGE_JUMBO_MTU (BGE_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #define BGE_MIN_FRAMELEN 60 /* * Other utility macros. */ #define BGE_INC(x, y) (x) = (x + 1) % y /* * BAR0 MAC register access macros. The Tigon always uses memory mapped register * accesses and all registers must be accessed with 32 bit operations. */ #define CSR_WRITE_4(sc, reg, val) \ bus_write_4(sc->bge_res, reg, val) #define CSR_READ_4(sc, reg) \ bus_read_4(sc->bge_res, reg) #define BGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x))) #define BGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x))) /* BAR2 APE register access macros. */ #define APE_WRITE_4(sc, reg, val) \ bus_write_4(sc->bge_res2, reg, val) #define APE_READ_4(sc, reg) \ bus_read_4(sc->bge_res2, reg) #define APE_SETBIT(sc, reg, x) \ APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) | (x))) #define APE_CLRBIT(sc, reg, x) \ APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) & ~(x))) #define PCI_SETBIT(dev, reg, x, s) \ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) | (x)), s) #define PCI_CLRBIT(dev, reg, x, s) \ pci_write_config(dev, reg, (pci_read_config(dev, reg, s) & ~(x)), s) /* * Memory management stuff. */ #define BGE_NSEG_JUMBO 4 #define BGE_NSEG_NEW 35 #define BGE_TSOSEG_SZ 4096 /* Maximum DMA address for controllers that have 40bit DMA address bug. */ #if (BUS_SPACE_MAXADDR < 0xFFFFFFFFFF) #define BGE_DMA_MAXADDR BUS_SPACE_MAXADDR #else #define BGE_DMA_MAXADDR 0xFFFFFFFFFF #endif /* * Ring structures. Most of these reside in host memory and we tell * the NIC where they are via the ring control blocks. The exceptions * are the tx and command rings, which live in NIC memory and which * we access via the shared memory window. */ struct bge_ring_data { struct bge_rx_bd *bge_rx_std_ring; bus_addr_t bge_rx_std_ring_paddr; struct bge_extrx_bd *bge_rx_jumbo_ring; bus_addr_t bge_rx_jumbo_ring_paddr; struct bge_rx_bd *bge_rx_return_ring; bus_addr_t bge_rx_return_ring_paddr; struct bge_tx_bd *bge_tx_ring; bus_addr_t bge_tx_ring_paddr; struct bge_status_block *bge_status_block; bus_addr_t bge_status_block_paddr; struct bge_stats *bge_stats; bus_addr_t bge_stats_paddr; struct bge_gib bge_info; }; #define BGE_STD_RX_RING_SZ \ (sizeof(struct bge_rx_bd) * BGE_STD_RX_RING_CNT) #define BGE_JUMBO_RX_RING_SZ \ (sizeof(struct bge_extrx_bd) * BGE_JUMBO_RX_RING_CNT) #define BGE_TX_RING_SZ \ (sizeof(struct bge_tx_bd) * BGE_TX_RING_CNT) #define BGE_RX_RTN_RING_SZ(x) \ (sizeof(struct bge_rx_bd) * x->bge_return_ring_cnt) #define BGE_STATUS_BLK_SZ sizeof (struct bge_status_block) #define BGE_STATS_SZ sizeof (struct bge_stats) /* * Mbuf pointers. We need these to keep track of the virtual addresses * of our mbuf chains since we can only convert from virtual to physical, * not the other way around.
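 *
 * A hedged illustration of the lookup this makes possible on the receive
 * path ("rx_bd", "idx" and "m" are assumed local variables; the fields are
 * from the structures defined nearby):
 *
 *	idx = rx_bd->bge_idx;
 *	m = sc->bge_cdata.bge_rx_std_chain[idx];
 *
 * The DMA maps give us the bus address that was handed to the NIC, but only
 * this array lets us get back to the mbuf that was posted at that slot.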
*/ struct bge_chain_data { bus_dma_tag_t bge_parent_tag; bus_dma_tag_t bge_buffer_tag; bus_dma_tag_t bge_rx_std_ring_tag; bus_dma_tag_t bge_rx_jumbo_ring_tag; bus_dma_tag_t bge_rx_return_ring_tag; bus_dma_tag_t bge_tx_ring_tag; bus_dma_tag_t bge_status_tag; bus_dma_tag_t bge_stats_tag; bus_dma_tag_t bge_rx_mtag; /* Rx mbuf mapping tag */ bus_dma_tag_t bge_tx_mtag; /* Tx mbuf mapping tag */ bus_dma_tag_t bge_mtag_jumbo; /* Jumbo mbuf mapping tag */ bus_dmamap_t bge_tx_dmamap[BGE_TX_RING_CNT]; bus_dmamap_t bge_rx_std_sparemap; bus_dmamap_t bge_rx_std_dmamap[BGE_STD_RX_RING_CNT]; bus_dmamap_t bge_rx_jumbo_sparemap; bus_dmamap_t bge_rx_jumbo_dmamap[BGE_JUMBO_RX_RING_CNT]; bus_dmamap_t bge_rx_std_ring_map; bus_dmamap_t bge_rx_jumbo_ring_map; bus_dmamap_t bge_tx_ring_map; bus_dmamap_t bge_rx_return_ring_map; bus_dmamap_t bge_status_map; bus_dmamap_t bge_stats_map; struct mbuf *bge_tx_chain[BGE_TX_RING_CNT]; struct mbuf *bge_rx_std_chain[BGE_STD_RX_RING_CNT]; struct mbuf *bge_rx_jumbo_chain[BGE_JUMBO_RX_RING_CNT]; int bge_rx_std_seglen[BGE_STD_RX_RING_CNT]; int bge_rx_jumbo_seglen[BGE_JUMBO_RX_RING_CNT][4]; }; struct bge_dmamap_arg { bus_addr_t bge_busaddr; }; #define BGE_HWREV_TIGON 0x01 #define BGE_HWREV_TIGON_II 0x02 #define BGE_TIMEOUT 100000 #define BGE_TXCONS_UNSET 0xFFFF /* impossible value */ #define BGE_TX_TIMEOUT 5 struct bge_bcom_hack { int reg; int val; }; #define ASF_ENABLE 1 #define ASF_NEW_HANDSHAKE 2 #define ASF_STACKUP 4 struct bge_softc { struct ifnet *bge_ifp; /* interface info */ device_t bge_dev; struct mtx bge_mtx; device_t bge_miibus; void *bge_intrhand; struct resource *bge_irq; struct resource *bge_res; /* MAC mapped I/O */ struct resource *bge_res2; /* APE mapped I/O */ struct ifmedia bge_ifmedia; /* TBI media info */ int bge_expcap; int bge_expmrq; int bge_msicap; int bge_pcixcap; uint32_t bge_flags; #define BGE_FLAG_TBI 0x00000001 #define BGE_FLAG_JUMBO 0x00000002 #define BGE_FLAG_JUMBO_STD 0x00000004 #define BGE_FLAG_EADDR 0x00000008 #define BGE_FLAG_MII_SERDES 0x00000010 #define BGE_FLAG_CPMU_PRESENT 0x00000020 #define BGE_FLAG_TAGGED_STATUS 0x00000040 #define BGE_FLAG_APE 0x00000080 #define BGE_FLAG_MSI 0x00000100 #define BGE_FLAG_PCIX 0x00000200 #define BGE_FLAG_PCIE 0x00000400 #define BGE_FLAG_TSO 0x00000800 #define BGE_FLAG_TSO3 0x00001000 #define BGE_FLAG_JUMBO_FRAME 0x00002000 #define BGE_FLAG_5700_FAMILY 0x00010000 #define BGE_FLAG_5705_PLUS 0x00020000 #define BGE_FLAG_5714_FAMILY 0x00040000 #define BGE_FLAG_575X_PLUS 0x00080000 #define BGE_FLAG_5755_PLUS 0x00100000 #define BGE_FLAG_5788 0x00200000 #define BGE_FLAG_5717_PLUS 0x00400000 #define BGE_FLAG_57765_PLUS 0x00800000 #define BGE_FLAG_40BIT_BUG 0x01000000 #define BGE_FLAG_4G_BNDRY_BUG 0x02000000 #define BGE_FLAG_RX_ALIGNBUG 0x04000000 #define BGE_FLAG_SHORT_DMA_BUG 0x08000000 #define BGE_FLAG_4K_RDMA_BUG 0x10000000 #define BGE_FLAG_MBOX_REORDER 0x20000000 #define BGE_FLAG_RDMA_BUG 0x40000000 uint32_t bge_mfw_flags; /* Management F/W flags */ #define BGE_MFW_ON_RXCPU 0x00000001 #define BGE_MFW_ON_APE 0x00000002 #define BGE_MFW_TYPE_NCSI 0x00000004 #define BGE_MFW_TYPE_DASH 0x00000008 int bge_phy_ape_lock; int bge_func_addr; int bge_phy_addr; uint32_t bge_phy_flags; #define BGE_PHY_NO_WIRESPEED 0x00000001 #define BGE_PHY_ADC_BUG 0x00000002 #define BGE_PHY_5704_A0_BUG 0x00000004 #define BGE_PHY_JITTER_BUG 0x00000008 #define BGE_PHY_BER_BUG 0x00000010 #define BGE_PHY_ADJUST_TRIM 0x00000020 #define BGE_PHY_CRC_BUG 0x00000040 #define BGE_PHY_NO_3LED 0x00000080 uint32_t bge_chipid; uint32_t bge_asicrev; 
uint32_t bge_chiprev; uint8_t bge_asf_mode; uint8_t bge_asf_count; uint16_t bge_mps; struct bge_ring_data bge_ldata; /* rings */ struct bge_chain_data bge_cdata; /* mbufs */ uint16_t bge_tx_saved_considx; uint16_t bge_rx_saved_considx; uint16_t bge_ev_saved_considx; uint16_t bge_return_ring_cnt; uint16_t bge_std; /* current std ring head */ uint16_t bge_jumbo; /* current jumo ring head */ uint32_t bge_stat_ticks; uint32_t bge_rx_coal_ticks; uint32_t bge_tx_coal_ticks; uint32_t bge_tx_prodidx; uint32_t bge_rx_max_coal_bds; uint32_t bge_tx_max_coal_bds; uint32_t bge_mi_mode; int bge_if_flags; int bge_txcnt; int bge_link; /* link state */ int bge_link_evt; /* pending link event */ int bge_timer; int bge_forced_collapse; int bge_forced_udpcsum; int bge_msi; int bge_csum_features; struct callout bge_stat_ch; uint32_t bge_rx_discards; uint32_t bge_rx_inerrs; uint32_t bge_rx_nobds; uint32_t bge_tx_discards; uint32_t bge_tx_collisions; #ifdef DEVICE_POLLING int rxcycles; #endif /* DEVICE_POLLING */ struct bge_mac_stats bge_mac_stats; struct task bge_intr_task; struct taskqueue *bge_tq; }; #define BGE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->bge_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) #define BGE_LOCK(_sc) mtx_lock(&(_sc)->bge_mtx) #define BGE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->bge_mtx, MA_OWNED) #define BGE_UNLOCK(_sc) mtx_unlock(&(_sc)->bge_mtx) #define BGE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->bge_mtx) + +#ifdef BUS_SPACE_MAXADDR +#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) +#define BGE_DMA_BOUNDARY (0x100000000) +#else +#define BGE_DMA_BOUNDARY 0 +#endif +#endif Index: head/sys/dev/pci/pci.c =================================================================== --- head/sys/dev/pci/pci.c (revision 346385) +++ head/sys/dev/pci/pci.c (revision 346386) @@ -1,6644 +1,6625 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_acpi.h" #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static int pci_probe(device_t dev); static int pci_attach(device_t dev); static int pci_detach(device_t dev); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); #endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq); static void pci_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp); static int pci_reset_post(device_t dev, device_t child); static int pci_reset_prepare(device_t dev, device_t child); static int pci_reset_child(device_t dev, device_t child, int flags); static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *rid); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), 
DEVMETHOD(bus_reset_prepare, pci_reset_prepare), DEVMETHOD(bus_reset_post, pci_reset_post), DEVMETHOD(bus_reset_child, pci_reset_child), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method), DEVMETHOD(bus_child_location_str, pci_child_location_str_method), DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method), DEVMETHOD(pci_find_extcap, pci_find_extcap_method), DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_id, pci_get_id_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc)); static devclass_t pci_devclass; EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL, BUS_PASS_BUS); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define 
PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ #define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). */ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI-X allocation doesn't work properly for devices passed through * by VMware up to at least ESXi 5.1. */ { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */ { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */ /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. */ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit * of the command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ /* * HPE Gen 10 VGA has a memory range that can't be allocated in the * expected place. 
*/ { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 }, { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 1; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means" " disable. 1 means conservatively place devices into D3 state. 2 means" " aggressively place devices into D3 state. 3 means put absolutely" " everything in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_msix_rewrite_table = 0; SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN, &pci_msix_rewrite_table, 0, "Rewrite entire MSI-X table when updating MSI-X entries"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers. 
Disable this if you depend on" " BIOS emulation of USB devices, that is you use USB devices (like" " keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); #endif static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); static int pci_clear_aer_on_attach = 0; SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN, &pci_clear_aer_on_attach, 0, "Clear port and device AER state on driver attach"); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { return (dinfo->cfg.dev); } } return (NULL); } /* Find a device_t by vendor/device ID */ device_t pci_find_device(uint16_t vendor, uint16_t device) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.vendor == vendor) && (dinfo->cfg.device == device)) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class(uint8_t class, uint8_t subclass) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } static int pci_printf(pcicfgregs *cfg, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, cfg->func); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /* return base address of memory or port map */ static pci_addr_t pci_mapbase(uint64_t mapreg) { if (PCI_BAR_MEM(mapreg)) return (mapreg & PCIM_BAR_MEM_BASE); else return (mapreg & PCIM_BAR_IO_BASE); } /* return map type of memory or port map */ static const char * pci_maptype(uint64_t mapreg) { if (PCI_BAR_IO(mapreg)) return ("I/O Port"); if (mapreg & PCIM_BAR_MEM_PREFETCH) return ("Prefetchable Memory"); return ("Memory"); } /* return log2 of map size decoded for memory or port map */ int pci_mapsize(uint64_t testval) { int ln2size; testval = pci_mapbase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return base address of device ROM */ static pci_addr_t pci_rombase(uint64_t mapreg) { return (mapreg & PCIM_BIOS_ADDR_MASK); } /* return log2 of map size decoded for device ROM */ static int pci_romsize(uint64_t testval) { int ln2size; testval = pci_rombase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return log2 of address range supported by map register */ static int pci_maprange(uint64_t mapreg) { int ln2range = 0; if (PCI_BAR_IO(mapreg)) ln2range = 32; else switch (mapreg & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_32: ln2range = 32; break; case PCIM_BAR_MEM_1MB: ln2range = 20; break; case PCIM_BAR_MEM_64: ln2range = 64; break; } return (ln2range); } /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); did = REG(PCIR_DEVICE, 2); if (vid != 0xffff) return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); return (NULL); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return
(malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = cfg->progif; devlist_entry->conf.pc_revid = cfg->revid; pci_numdevs++; pci_generation++; return (devlist_entry); } #undef REG static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ cfg->ea.ea_location + (n), w) int num_ent; int ptr; int a, b; uint32_t val; int ent_size; uint32_t dw[4]; uint64_t base, max_offset; struct pci_ea_entry *eae; if (cfg->ea.ea_location == 0) return; STAILQ_INIT(&cfg->ea.ea_entries); /* Determine the number of entries */ num_ent = REG(PCIR_EA_NUM_ENT, 2); num_ent &= PCIM_EA_NUM_ENT_MASK; /* Find the first entry to care of */ ptr = PCIR_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) ptr += 4; for (a = 0; a < num_ent; a++) { eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO); eae->eae_cfg_offset = cfg->ea.ea_location + ptr; /* Read a number of dwords in the entry */ val = REG(ptr, 4); ptr += 4; ent_size = (val & PCIM_EA_ES); for (b = 0; b < ent_size; b++) { dw[b] = REG(ptr, 4); ptr += 4; } eae->eae_flags = val; eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; base = dw[0] & PCIM_EA_FIELD_MASK; max_offset = dw[1] | ~PCIM_EA_FIELD_MASK; b = 2; if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { base |= (uint64_t)dw[b] << 32UL; b++; } if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { max_offset |= (uint64_t)dw[b] << 32UL; b++; } eae->eae_base = base; eae->eae_max_offset = max_offset; STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); if (bootverbose) { printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n", cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); } } } #undef REG static void pci_read_cap(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) 
PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) uint64_t addr; #endif uint32_t val; int ptr, nextptr, ptrptr; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptrptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ break; default: return; /* no extended capabilities support */ } nextptr = REG(ptrptr, 1); /* sanity check? */ /* * Read capability entries. */ while (nextptr != 0) { /* Sanity check */ if (nextptr > 255) { printf("illegal PCI extended capability offset %d\n", nextptr); return; } /* Find the next entry */ ptr = nextptr; nextptr = REG(ptr + PCICAP_NEXTPTR, 1); /* Process this entry */ switch (REG(ptr + PCICAP_ID, 1)) { case PCIY_PMG: /* PCI power management */ if (cfg->pp.pp_cap == 0) { cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; cfg->pp.pp_bse = ptr + PCIR_POWER_BSE; if ((nextptr - ptr) > PCIR_POWER_DATA) cfg->pp.pp_data = ptr + PCIR_POWER_DATA; } break; case PCIY_HT: /* HyperTransport */ /* Determine HT-specific capability type. */ val = REG(ptr + PCIR_HT_COMMAND, 2); if ((val & 0xe000) == PCIM_HTCAP_SLAVE) cfg->ht.ht_slave = ptr; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) switch (val & PCIM_HTCMD_CAP_MASK) { case PCIM_HTCAP_MSI_MAPPING: if (!(val & PCIM_HTCMD_MSI_FIXED)) { /* Sanity check the mapping window. */ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & PCIM_MSICTRL_MMC_MASK)>>1); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1; val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. 
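 * The pcie_chipset and pcix_chipset hints set in these cases are
 * consulted later by pci_msi_blacklisted(), which refuses MSI on
 * chipsets that are neither PCI-express nor PCI-X unless a
 * virtualization whitelist entry applies.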
*/ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). */ if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { device_printf(pcib, "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, 2); } #endif /* REG and WREG use carry through to next functions */ } /* * PCI Vital Product Data */ #define PCI_VPD_TIMEOUT 1000000 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); return (0); } #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } return (0); } #endif #undef PCI_VPD_TIMEOUT struct vpd_readstate { device_t pcib; pcicfgregs *cfg; uint32_t val; int bytesinval; int off; uint8_t cksum; }; static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) { uint32_t reg; uint8_t byte; if (vrs->bytesinval == 0) { if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®)) return (ENXIO); vrs->val = le32toh(reg); vrs->off += 4; byte = vrs->val & 0xff; vrs->bytesinval = 3; } else { vrs->val = vrs->val >> 8; byte = vrs->val & 0xff; vrs->bytesinval--; } vrs->cksum += byte; *data = byte; return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int state; int name; int remain; int i; int alloc, off; /* alloc/off for RO/W arrays */ int cksumvalid; int dflen; uint8_t byte; uint8_t byte2; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; state = 0; name = remain = i = 0; /* shut up stupid gcc */ alloc = off = 0; /* shut up stupid gcc */ dflen = 0; /* shut up stupid gcc */ cksumvalid = -1; while (state >= 0) { if (vpd_nextbyte(&vrs, &byte)) { state = -2; break; } #if 0 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \ "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val, vrs.off, vrs.bytesinval, byte, state, remain, name, i); #endif switch (state) { case 0: /* item name */ if (byte & 0x80) { if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain |= byte2 << 8; if (remain > (0x7f*4 - vrs.off)) { state = -1; pci_printf(cfg, "invalid VPD data, remain %#x\n", remain); } name = byte & 0x7f; } else { remain = byte & 0x7; name = (byte >> 3) & 0xf; } switch (name) { case 0x2: /* String */ cfg->vpd.vpd_ident = malloc(remain + 1, 
M_DEVBUF, M_WAITOK); i = 0; state = 1; break; case 0xf: /* End */ state = -1; break; case 0x10: /* VPD-R */ alloc = 8; off = 0; cfg->vpd.vpd_ros = malloc(alloc * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 2; break; case 0x11: /* VPD-W */ alloc = 8; off = 0; cfg->vpd.vpd_w = malloc(alloc * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 5; break; default: /* Invalid data, abort */ state = -1; break; } break; case 1: /* Identifier String */ cfg->vpd.vpd_ident[i++] = byte; remain--; if (remain == 0) { cfg->vpd.vpd_ident[i] = '\0'; state = 0; } break; case 2: /* VPD-R Keyword Header */ if (off == alloc) { cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_ros[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].len = dflen = byte2; if (dflen == 0 && strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0) { /* * if this happens, we can't trust the rest * of the VPD. */ pci_printf(cfg, "bad keyword length: %d\n", dflen); cksumvalid = 0; state = -1; break; } else if (dflen == 0) { cfg->vpd.vpd_ros[off].value = malloc(1 * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); cfg->vpd.vpd_ros[off].value[0] = '\x00'; } else cfg->vpd.vpd_ros[off].value = malloc( (dflen + 1) * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 3's transistions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 2; else state = 3; break; case 3: /* VPD-R Keyword Value */ cfg->vpd.vpd_ros[off].value[i++] = byte; if (strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0 && cksumvalid == -1) { if (vrs.cksum == 0) cksumvalid = 1; else { if (bootverbose) pci_printf(cfg, "bad VPD cksum, remain %hhu\n", vrs.cksum); cksumvalid = 0; state = -1; break; } } dflen--; remain--; /* keep in sync w/ state 2's transistions */ if (dflen == 0) cfg->vpd.vpd_ros[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_rocnt = off; cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, off * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 2; break; case 4: remain--; if (remain == 0) state = 0; break; case 5: /* VPD-W Keyword Header */ if (off == alloc) { cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, (alloc *= 2) * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_w[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].len = dflen = byte2; cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval; cfg->vpd.vpd_w[off].value = malloc((dflen + 1) * sizeof(*cfg->vpd.vpd_w[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 6's transistions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 5; else state = 6; break; case 6: /* VPD-W Keyword Value */ cfg->vpd.vpd_w[off].value[i++] = byte; dflen--; remain--; /* keep in sync w/ state 5's transistions */ if (dflen == 0) cfg->vpd.vpd_w[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_wcnt = off; cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, off * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 5; break; default: pci_printf(cfg, "invalid state: 
%d\n", state); state = -1; break; } } if (cksumvalid == 0 || state < -1) { /* read-only data bad, clean up */ if (cfg->vpd.vpd_ros != NULL) { for (off = 0; cfg->vpd.vpd_ros[off].value; off++) free(cfg->vpd.vpd_ros[off].value, M_DEVBUF); free(cfg->vpd.vpd_ros, M_DEVBUF); cfg->vpd.vpd_ros = NULL; } } if (state < -1) { /* I/O error, clean up */ pci_printf(cfg, "failed to read VPD data.\n"); if (cfg->vpd.vpd_ident != NULL) { free(cfg->vpd.vpd_ident, M_DEVBUF); cfg->vpd.vpd_ident = NULL; } if (cfg->vpd.vpd_w != NULL) { for (off = 0; cfg->vpd.vpd_w[off].value; off++) free(cfg->vpd.vpd_w[off].value, M_DEVBUF); free(cfg->vpd.vpd_w, M_DEVBUF); cfg->vpd.vpd_w = NULL; } } cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; } return (ENOENT); } /* * Find the next requested HyperTransport capability after start and return * the offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { int ptr; uint16_t val; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT, ("start capability is not HyperTransport capability")); ptr = start; /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ for (;;) { /* Skip to the next HT capability. 
*/ if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0) break; val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t status; uint8_t ptr; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. */ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. */ while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the next requested capability after start and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg) { uint8_t ptr; KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability, ("start capability is not expected capability")); ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1); while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Find the next requested extended capability after start and return the * offset in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. 
*/ if (cfg->pcie.pcie_location == 0) return (ENXIO); ecap = pci_read_config(child, start, 4); KASSERT(PCI_EXTCAP_ID(ecap) == capability, ("start extended capability is not expected capability")); ptr = PCI_EXTCAP_NEXTPTR(ecap); while (ptr != 0) { ecap = pci_read_config(child, ptr, 4); if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); } return (ENOENT); } /* * Support for MSI-X message interrupts. */ static void pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16; bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); bus_write_4(msix->msix_table_res, offset + 4, address >> 32); bus_write_4(msix->msix_table_res, offset + 8, data); } void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data) { if (pci_msix_rewrite_table) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; /* * Some VM hosts require MSIX to be disabled in the * control register before updating the MSIX table * entries are allowed. It is not enough to only * disable MSIX while updating a single entry. MSIX * must be disabled while updating all entries in the * table. */ pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); pci_resume_msix(child); } else pci_write_msix_entry(child, index, address, data); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_mask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_msgnum > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); if (!(val & PCIM_MSIX_VCTRL_MASK)) { val |= PCIM_MSIX_VCTRL_MASK; bus_write_4(msix->msix_table_res, offset, val); } } void pci_unmask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); if (val & PCIM_MSIX_VCTRL_MASK) { val &= ~PCIM_MSIX_VCTRL_MASK; bus_write_4(msix->msix_table_res, offset, val); } } int pci_pending_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, bit; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_pba_offset + (index / 32) * 4; bit = 1 << index % 32; return (bus_read_4(msix->msix_pba_res, offset) & bit); } /* * Restore MSI-X registers and table during resume. If MSI-X is * enabled then walk the virtual table to restore the actual MSI-X * table. */ static void pci_resume_msix(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct msix_table_entry *mte; struct msix_vector *mv; int i; if (msix->msix_alloc > 0) { /* First, mask all vectors. */ for (i = 0; i < msix->msix_msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. 
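Slots whose mte_handlers count is zero are skipped here and stay masked
from the first pass.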
*/ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_write_msix_entry(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irq, max; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, cfg->msix.msix_msgnum); max = min(*count, cfg->msix.msix_msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { int run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = 0; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = 1; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = 0; } /* Start new range. */ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? */ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* Mask all vectors. */ for (i = 0; i < cfg->msix.msix_msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. 
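The virtual table initially maps message i to vector i + 1 one-to-one;
pci_remap_msix() may later rearrange or sparsify that mapping.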
*/ cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual, M_DEVBUF, M_WAITOK | M_ZERO); cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); cfg->msix.msix_vectors[i].mv_irq = rle->start; cfg->msix.msix_table[i].mte_vector = i + 1; } /* Update control register to enable MSI-X. */ cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, cfg->msix.msix_ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msix.msix_alloc = actual; cfg->msix.msix_table_len = actual; *count = actual; return (0); } /* * By default, pci_alloc_msix() will assign the allocated IRQ * resources consecutively to the first N messages in the MSI-X table. * However, device drivers may want to use different layouts if they * either receive fewer messages than they asked for, or they wish to * populate the MSI-X table sparsely. This method allows the driver * to specify what layout it wants. It must be called after a * successful pci_alloc_msix() but before any of the associated * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). * * The 'vectors' array contains 'count' message vectors. The array * maps directly to the MSI-X table in that index 0 in the array * specifies the vector for the first message in the MSI-X table, etc. * The vector value in each array index can either be 0 to indicate * that no vector should be assigned to a message slot, or it can be a * number from 1 to N (where N is the count returned from a * succcessful call to pci_alloc_msix()) to indicate which message * vector (IRQ) to be used for the corresponding message. * * On successful return, each message with a non-zero vector will have * an associated SYS_RES_IRQ whose rid is equal to the array index + * 1. Additionally, if any of the IRQs allocated via the previous * call to pci_alloc_msix() are not used in the mapping, those IRQs * will be freed back to the system automatically. * * For example, suppose a driver has a MSI-X table with 6 messages and * asks for 6 messages, but pci_alloc_msix() only returns a count of * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and * C. After the call to pci_alloc_msix(), the device will be setup to * have an MSI-X table of ABC--- (where - means no vector assigned). * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, * then the MSI-X table will look like A-AB-B, and the 'C' vector will * be freed back to the system. This device will also have valid * SYS_RES_IRQ rids of 1, 3, 4, and 6. * * In any case, the SYS_RES_IRQ rid X will always map to the message * at MSI-X table index X - 1 and will only be valid if a vector is * assigned to that table entry. */ int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i, irq, j, *used; /* * Have to have at least one message in the table but the * table can't be bigger than the actual MSI-X table in the * device. */ if (count == 0 || count > msix->msix_msgnum) return (EINVAL); /* Sanity check the vectors. */ for (i = 0; i < count; i++) if (vectors[i] > msix->msix_alloc) return (EINVAL); /* * Make sure there aren't any holes in the vectors to be used. * It's a big pain to support it, and it doesn't really make * sense anyway. 
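 * (With three vectors allocated, for instance, a map that references
 * vectors 1 and 3 but never vector 2 is rejected, while one that uses
 * only vectors 1 and 2 is accepted and vector 3 is released back to the
 * system.)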
Also, at least one vector must be used. */ used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = 1; for (i = 0; i < msix->msix_alloc - 1; i++) if (used[i] == 0 && used[i + 1] == 1) { free(used, M_DEVBUF); return (EINVAL); } if (used[0] != 1) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (used[j] == 0) { struct msix_vector *vec; while (used[j] == 0) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. 
*/ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the max supported MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_msgnum); return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping. */ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_max_payload(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_PAYLOAD; val >>= 5; return (1 << (val + 7)); } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in 
(value & mask). Bits not set in mask are * not adjusted. * * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; int error, i, j; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. 
*/ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } } } return (ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. 
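 *
 * A typical driver-side call sequence looks roughly like the sketch below
 * (illustrative only; error handling and the later bus_setup_intr() call
 * are omitted, and 'count' is updated to the number of messages actually
 * granted):
 *
 *	int count = 4, rid = 1;
 *	struct resource *irq;
 *
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *		    RF_ACTIVE);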
*/ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irqs[32]; uint16_t ctrl; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%d supported)\n", *count, cfg->msi.msi_msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, cfg->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { int run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl = cfg->msi.msi_ctrl; ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; int error, i, irqs[32]; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. 
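The KASSERT below expects the caller to have already disabled MSI (as
pci_disable_msi() does) before the messages are released.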
*/ KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), ("%s: MSI still enabled", __func__)); msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); for (i = 0; i < msi->msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ msi->msi_alloc = 0; msi->msi_addr = 0; msi->msi_data = 0; return (0); } /* * Return the max supported MSI messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msi() can return. * Thus, it is subject to the tunables, etc. */ int pci_msi_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; if (pci_do_msi && msi->msi_location != 0) return (msi->msi_msgnum); return (0); } /* free pcicfgregs structure and all dependent data structures */ int pci_freecfg(struct pci_devinfo *dinfo) { struct devlist *devlist_head; struct pci_map *pm, *next; int i; devlist_head = &pci_devq; if (dinfo->cfg.vpd.vpd_reg) { free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++) free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++) free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_w, M_DEVBUF); } STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { free(pm, M_DEVBUF); } STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); free(dinfo, M_DEVBUF); /* increment the generation count */ pci_generation++; /* we're losing one device */ pci_numdevs--; return (0); } /* * PCI power management */ int pci_set_powerstate_method(device_t dev, device_t child, int state) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int oldstate, highest, delay; if (cfg->pp.pp_cap == 0) return (EOPNOTSUPP); /* * Optimize a no state change request away. While it would be OK to * write to the hardware in theory, some devices have shown odd * behavior when going from D3 -> D3. */ oldstate = pci_get_powerstate(child); if (oldstate == state) return (0); /* * The PCI power management specification states that after a state * transition between PCI power states, system software must * guarantee a minimal delay before the function accesses the device. * Compute the worst case delay that we need to guarantee before we * access the device. Many devices will be responsive much more * quickly than this delay, but there are some that don't respond * instantly to state changes. Transitions to/from D3 state require * 10ms, while D2 requires 200us, and D0/1 require none. The delay * is done below with DELAY rather than a sleeper function because * this function can be called from contexts where we cannot sleep. */ highest = (oldstate > state) ?
oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, state); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_cap != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* * Some convenience functions for PCI device drivers. */ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. 
*/ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_cap) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { int ctrl; ctrl = cfg->msi.msi_ctrl; printf("\tMSI supports %d message%s%s%s\n", cfg->msi.msi_msgnum, (cfg->msi.msi_msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { printf("\tMSI-X supports %d message%s ", cfg->msix.msix_msgnum, (cfg->msix.msix_msgnum == 1) ? "" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. * * NB: according to the PCI Local Bus Specification, rev. 3.0: * "Software writes 0FFFFFFFFh to both registers, reads them back, * and combines the result into a 64-bit value." (section 6.2.5.1) * * Writes to both registers must be performed before attempting to * read back the size value. 
*/ testval = 0; pci_write_config(dev, reg, 0xffffffff, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } testval |= pci_read_config(dev, reg, 4); /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. */ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. */ static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch) { struct pci_map *pm; pci_addr_t base, map, testval; pci_addr_t start, end, count; int barlen, basezero, flags, maprange, mapsize, type; uint16_t cmd; struct resource *res; /* * The BAR may already exist if the device is a CardBus card * whose CIS is stored in this BAR. 
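/*
 * Illustrative sketch, not part of this file: the size arithmetic behind
 * pci_read_bar()/pci_mapsize().  After writing all 1s, mask off the low
 * type bits (bits 3:0 for memory BARs, 1:0 for I/O BARs); the lowest bit
 * that still reads back set is the decoded size.  For a 64-bit BAR the two
 * dwords are combined first, exactly as above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
sketch_bar_size(uint64_t testval, int is_io)
{
	uint64_t base = testval & (is_io ? ~0x3ULL : ~0xFULL);

	return (base & -base);		/* isolate the lowest set bit */
}

int
main(void)
{
	/* A 64 KB prefetchable 64-bit memory BAR reads back 0xFFFF000C. */
	printf("size = %#llx\n",
	    (unsigned long long)sketch_bar_size(0xFFFF000CULL, 0));
	return (0);
}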
*/ pm = pci_find_bar(dev, reg); if (pm != NULL) { maprange = pci_maprange(pm->pm_value); barlen = maprange == 64 ? 2 : 1; return (barlen); } pci_read_bar(dev, reg, &map, &testval, NULL); if (PCI_BAR_MEM(map)) { type = SYS_RES_MEMORY; if (map & PCIM_BAR_MEM_PREFETCH) prefetch = 1; } else type = SYS_RES_IOPORT; mapsize = pci_mapsize(testval); base = pci_mapbase(map); #ifdef __PCI_BAR_ZERO_VALID basezero = 0; #else basezero = base == 0; #endif maprange = pci_maprange(map); barlen = maprange == 64 ? 2 : 1; /* * For I/O registers, if bottom bit is set, and the next bit up * isn't clear, we know we have a BAR that doesn't conform to the * spec, so ignore it. Also, sanity check the size of the data * areas to the type of memory involved. Memory must be at least * 16 bytes in size, while I/O ranges must be at least 4. */ if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) return (barlen); if ((type == SYS_RES_MEMORY && mapsize < 4) || (type == SYS_RES_IOPORT && mapsize < 2)) return (barlen); /* Save a record of this BAR. */ pm = pci_add_bar(dev, reg, map, mapsize); if (bootverbose) { printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); if (type == SYS_RES_IOPORT && !pci_porten(dev)) printf(", port disabled\n"); else if (type == SYS_RES_MEMORY && !pci_memen(dev)) printf(", memory disabled\n"); else printf(", enabled\n"); } /* * If base is 0, then we have problems if this architecture does * not allow that. It is best to ignore such entries for the * moment. These will be allocated later if the driver specifically * requests them. However, some removable buses look better when * all resources are allocated, so allow '0' to be overriden. * * Similarly treat maps whose values is the same as the test value * read back. These maps have had all f's written to them by the * BIOS in an attempt to disable the resources. */ if (!force && (basezero || map == testval)) return (barlen); if ((u_long)base != base) { device_printf(bus, "pci%d:%d:%d:%d bar %#x too many address bits", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); return (barlen); } /* * This code theoretically does the right thing, but has * undesirable side effects in some cases where peripherals * respond oddly to having these bits enabled. Let the user * be able to turn them off (since pci_enable_io_modes is 1 by * default). */ if (pci_enable_io_modes) { /* Turn on resources that have been left off by a lazy BIOS */ if (type == SYS_RES_IOPORT && !pci_porten(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } if (type == SYS_RES_MEMORY && !pci_memen(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } } else { if (type == SYS_RES_IOPORT && !pci_porten(dev)) return (barlen); if (type == SYS_RES_MEMORY && !pci_memen(dev)) return (barlen); } count = (pci_addr_t)1 << mapsize; flags = RF_ALIGNMENT_LOG2(mapsize); if (prefetch) flags |= RF_PREFETCHABLE; if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { start = 0; /* Let the parent decide. */ end = ~0; } else { start = base; end = base + count - 1; } resource_list_add(rl, type, reg, start, end, count); /* * Try to allocate the resource for this BAR from our parent * so that this resource range is already reserved. The * driver for this device will later inherit this resource in * pci_alloc_resource(). 
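/*
 * Illustrative sketch, not part of this file: the consumer side of the
 * reservation made below.  A child driver later inherits the reserved
 * range simply by allocating the BAR rid in its attach routine; the bus
 * interfaces (bus_alloc_resource_any(), PCIR_BAR()) are the usual ones,
 * but 'struct sketch_softc' and the function name are hypothetical and
 * error unwinding is trimmed.
 */
struct sketch_softc {
	int		bar_rid;
	struct resource	*bar_res;
};

static int
sketch_attach(device_t dev)
{
	struct sketch_softc *sc = device_get_softc(dev);

	sc->bar_rid = PCIR_BAR(0);
	sc->bar_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->bar_rid, RF_ACTIVE);
	if (sc->bar_res == NULL)
		return (ENXIO);
	/* ... program the device with bus_read_4()/bus_write_4() ... */
	return (0);
}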
*/ res = resource_list_reserve(rl, bus, dev, type, ®, start, end, count, flags); if ((pci_do_realloc_bars || pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_REALLOC_BAR)) && res == NULL && (start != 0 || end != ~0)) { /* * If the allocation fails, try to allocate a resource for * this BAR using any available range. The firmware felt * it was important enough to assign a resource, so don't * disable decoding if we can help it. */ resource_list_delete(rl, type, reg); resource_list_add(rl, type, reg, 0, ~0, count); res = resource_list_reserve(rl, bus, dev, type, ®, 0, ~0, count, flags); } if (res == NULL) { /* * If the allocation fails, delete the resource list entry * and disable decoding for this device. * * If the driver requests this resource in the future, * pci_reserve_map() will try to allocate a fresh * resource range. */ resource_list_delete(rl, type, reg); pci_disable_io(dev, type); if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d bar %#x failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); } else { start = rman_get_start(res); pci_write_bar(dev, pm, start); } return (barlen); } /* * For ATA devices we need to decide early what addressing mode to use. * Legacy demands that the primary and secondary ATA ports sits on the * same addresses that old ISA hardware did. This dictates that we use * those addresses and ignore the BAR's if we cannot set PCI native * addressing mode. */ static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask) { int rid, type, progif; #if 0 /* if this device supports PCI native addressing use it */ progif = pci_read_config(dev, PCIR_PROGIF, 1); if ((progif & 0x8a) == 0x8a) { if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { printf("Trying ATA native PCI addressing mode\n"); pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); } } #endif progif = pci_read_config(dev, PCIR_PROGIF, 1); type = SYS_RES_IOPORT; if (progif & PCIP_STORAGE_IDE_MODEPRIM) { pci_add_map(bus, dev, PCIR_BAR(0), rl, force, prefetchmask & (1 << 0)); pci_add_map(bus, dev, PCIR_BAR(1), rl, force, prefetchmask & (1 << 1)); } else { rid = PCIR_BAR(0); resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 0); rid = PCIR_BAR(1); resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 0); } if (progif & PCIP_STORAGE_IDE_MODESEC) { pci_add_map(bus, dev, PCIR_BAR(2), rl, force, prefetchmask & (1 << 2)); pci_add_map(bus, dev, PCIR_BAR(3), rl, force, prefetchmask & (1 << 3)); } else { rid = PCIR_BAR(2); resource_list_add(rl, type, rid, 0x170, 0x177, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 0); rid = PCIR_BAR(3); resource_list_add(rl, type, rid, 0x376, 0x376, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 0); } pci_add_map(bus, dev, PCIR_BAR(4), rl, force, prefetchmask & (1 << 4)); pci_add_map(bus, dev, PCIR_BAR(5), rl, force, prefetchmask & (1 << 5)); } static void pci_assign_interrupt(device_t bus, device_t dev, int force_route) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; char tunable_name[64]; int irq; /* Has to have an intpin to have an interrupt. */ if (cfg->intpin == 0) return; /* Let the user override the IRQ with a tunable. 
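/*
 * Illustrative sketch, not part of this file: what the tunable mentioned
 * above looks like for a hypothetical device in domain 0, bus 2, slot 3
 * using INTA.  The name is built exactly like the snprintf() below, and a
 * value is only honoured when it lies in the 1..254 range.
 */
#include <stdio.h>

int
main(void)
{
	char name[64];
	int domain = 0, bus = 2, slot = 3, intpin = 1;	/* INTA */

	snprintf(name, sizeof(name), "hw.pci%d.%d.%d.INT%c.irq",
	    domain, bus, slot, intpin + 'A' - 1);
	printf("set %s=\"16\" in loader.conf to force IRQ 16\n", name);
	return (0);
}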
*/ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. */ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us XXX Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. 
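/*
 * Illustrative sketch, not part of this file: the claim-and-poll shape
 * shared by the OHCI takeover above and the EHCI/xHCI takeovers below.
 * Assert the OS ownership request, then poll the BIOS-owned flag a
 * bounded number of times with a fixed delay.  The callbacks are
 * hypothetical stand-ins for the config-space/MMIO accesses of the real
 * routines.
 */
struct sketch_handoff {
	unsigned	(*bios_owned)(void *arg);
	void		(*request_os_ownership)(void *arg);
	void		(*delay_ms)(int ms);
	void		*arg;
};

/* Returns 0 once the BIOS releases the controller, -1 on timeout. */
static int
sketch_bios_handoff(struct sketch_handoff *h, int max_polls)
{
	int i;

	h->request_os_ownership(h->arg);
	for (i = 0; i < max_polls && h->bios_owned(h->arg) != 0; i++)
		h->delay_ms(1);
	return (h->bios_owned(h->arg) != 0 ? -1 : 0);
}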
*/ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_read_config(self, eecp, 4); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { continue; } bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); if (bios_sem == 0) { continue; } if (bootverbose) printf("ehci early: " "SMM active, request owner change\n"); pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); for (i = 0; (i < 100) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); } if (bios_sem != 0) { if (bootverbose) printf("ehci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); bus_write_4(res, offs + EHCI_USBINTR, 0); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early XHCI takeover from SMM. */ static void xhci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, XHCI_HCSPARAMS0); eec = -1; /* Synchronise with the BIOS if it owns the controller. */ for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) { eec = bus_read_4(res, eecp); if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) continue; bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); if (bios_sem == 0) continue; if (bootverbose) printf("xhci early: " "SMM active, request owner change\n"); bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); /* wait a maximum of 5 second */ for (i = 0; (i < 5000) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); } if (bios_sem != 0) { if (bootverbose) printf("xhci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = bus_read_1(res, XHCI_CAPLENGTH); bus_write_4(res, offs + XHCI_USBCMD, 0); bus_read_4(res, offs + XHCI_USBSTS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static void pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, struct resource_list *rl) { struct resource *res; char *cp; rman_res_t start, end, count; int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return; } /* * If the existing bus range is valid, attempt to reserve it * from our parent. If this fails for any reason, clear the * secbus and subbus registers. * * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? * This would at least preserve the existing sec_bus if it is * valid. */ sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); /* Quirk handling. */ switch (pci_get_devid(dev)) { case 0x12258086: /* Intel 82454KX/GX (Orion) */ sup_bus = pci_read_config(dev, 0x41, 1); if (sup_bus != 0xff) { sec_bus = sup_bus + 1; sub_bus = sup_bus + 1; PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; case 0x00dd10de: /* Compaq R3000 BIOS sets wrong subordinate bus number. 
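/*
 * Illustrative sketch, not part of this file: the bus-range bookkeeping
 * used by pci_reserve_secbus() around this point.  A bridge decodes
 * secondary through subordinate bus numbers inclusive, so the window is
 * only meaningful when secbus > 0 and subbus >= secbus, and it covers
 * subbus - secbus + 1 bus numbers.  The example values are made up.
 */
#include <stdio.h>

int
main(void)
{
	int sec_bus = 2, sub_bus = 5;

	if (sec_bus > 0 && sub_bus >= sec_bus)
		printf("reserve %d bus numbers: %d..%d\n",
		    sub_bus - sec_bus + 1, sec_bus, sub_bus);
	return (0);
}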
*/ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size. */ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ? 
"" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } #endif static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore EA-BAR if is not enabled. * Currently the EA implementation supports * only situation, where EA structure contains * predefined entries. In case they are not enabled * leave them unallocated and proceed with * a legacy-BAR mechanism. */ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, disable entry. 
* Another attempt to allocation will be performed * further, but this time using legacy BAR registers */ tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); tmp &= ~PCIM_EA_ENABLE; pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); /* * Disabling entry might fail in case it is hardwired. * Read flags again to match current status. */ ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); continue; } /* As per specification, fill BAR with zeros */ pci_write_config(dev, rid, 0, 4); } } void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; const struct pci_quirk *q; uint32_t devid; int i; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; rl = &dinfo->resources; devid = (cfg->device << 16) | cfg->vendor; /* Allocate resources using Enhanced Allocation */ pci_add_resources_ea(bus, dev, 0); /* ATA devices needs special map treatment */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(2), 4))) ) pci_ata_maps(bus, dev, rl, force, prefetchmask); else for (i = 0; i < cfg->nummaps;) { /* Skip resources already managed by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) || pci_ea_is_enabled(dev, PCIR_BAR(i))) { i++; continue; } /* * Skip quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_UNMAP_REG && q->arg1 == PCIR_BAR(i)) break; if (q->devid != 0) { i++; continue; } i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, prefetchmask & (1 << i)); } /* * Add additional, quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) pci_add_map(bus, dev, q->arg1, rl, force, 0); if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { #ifdef __PCI_REROUTE_INTERRUPT /* * Try to re-route interrupts. Sometimes the BIOS or * firmware may leave bogus values in these registers. * If the re-route fails, then just stick with what we * have. */ pci_assign_interrupt(bus, dev, 1); #else pci_assign_interrupt(bus, dev, 0); #endif } if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) xhci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) ehci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) ohci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) uhci_early_takeover(dev); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * Reserve resources for secondary bus ranges behind bridge * devices. */ pci_reserve_secbus(bus, dev, cfg, rl); #endif } static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } void pci_add_children(device_t dev, int domain, int busno) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; int first_func; /* * Try to detect a device at slot 0, function 0. If it exists, try to * enable ARI. 
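/*
 * Illustrative sketch, not part of this file: the quirk lookup performed
 * by pci_add_resources() above.  The 32-bit key packs the device ID into
 * the high half and the vendor ID into the low half; the table contents
 * and the 'sketch_' names are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define	SKETCH_QUIRK_UNMAP_REG	1

struct sketch_quirk {
	uint32_t	devid;
	int		type;
	int		arg1;
};

static const struct sketch_quirk sketch_quirks[] = {
	{ 0x12345678, SKETCH_QUIRK_UNMAP_REG, 0x14 },
	{ 0, 0, 0 }
};

static const struct sketch_quirk *
sketch_find_quirk(uint16_t vendor, uint16_t device, int type)
{
	uint32_t devid = ((uint32_t)device << 16) | vendor;
	const struct sketch_quirk *q;

	for (q = sketch_quirks; q->devid != 0; q++)
		if (q->devid == devid && q->type == type)
			return (q);
	return (NULL);
}

int
main(void)
{
	printf("%s\n", sketch_find_quirk(0x5678, 0x1234,
	    SKETCH_QUIRK_UNMAP_REG) != NULL ? "quirked" : "clean");
	return (0);
}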
We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == 0xffff) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == 0xffff) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added. 
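/*
 * Illustrative sketch, not part of this file: the slot/function walk
 * carried out by pci_add_children() and pci_rescan_method() above, boiled
 * down to a standalone skeleton.  Function 0 must answer (vendor !=
 * 0xffff), and the remaining functions of a slot are probed only when the
 * multifunction bit (0x80) of the header-type register is set.  The
 * accessor callbacks are hypothetical stand-ins for PCIB_READ_CONFIG().
 */
#include <stdint.h>

#define	SKETCH_REG_VENDOR	0x00
#define	SKETCH_REG_HDRTYPE	0x0e
#define	SKETCH_MFDEV		0x80

struct sketch_bus {
	uint16_t	(*read16)(void *arg, int slot, int func, int reg);
	uint8_t		(*read8)(void *arg, int slot, int func, int reg);
	void		(*found)(void *arg, int slot, int func);
	void		*arg;
};

static void
sketch_scan_bus(struct sketch_bus *b, int maxslots)
{
	int s, f, maxfunc;

	for (s = 0; s <= maxslots; s++) {
		if (b->read16(b->arg, s, 0, SKETCH_REG_VENDOR) == 0xffff)
			continue;
		maxfunc = (b->read8(b->arg, s, 0, SKETCH_REG_HDRTYPE) &
		    SKETCH_MFDEV) ? 7 : 0;
		for (f = 0; f <= maxfunc; f++)
			if (b->read16(b->arg, s, f, SKETCH_REG_VENDOR) !=
			    0xffff)
				b->found(b->arg, s, f);
	}
}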
*/ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *vf_dinfo; device_t pcib; int busno, slot, func; pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo) { int aer; uint32_t r; uint16_t r2; if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, 2); r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR | PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_ROOT_CTL, r2, 2); } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER UC 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4); r &= ~(PCIM_AER_UC_TRAINING_ERROR | PCIM_AER_UC_DL_PROTOCOL_ERROR | PCIM_AER_UC_SURPRISE_LINK_DOWN | PCIM_AER_UC_POISONED_TLP | PCIM_AER_UC_FC_PROTOCOL_ERROR | PCIM_AER_UC_COMPLETION_TIMEOUT | PCIM_AER_UC_COMPLETER_ABORT | PCIM_AER_UC_UNEXPECTED_COMPLETION | PCIM_AER_UC_RECEIVER_OVERFLOW | PCIM_AER_UC_MALFORMED_TLP | PCIM_AER_UC_ECRC_ERROR | PCIM_AER_UC_UNSUPPORTED_REQUEST | PCIM_AER_UC_ACS_VIOLATION | PCIM_AER_UC_INTERNAL_ERROR | PCIM_AER_UC_MC_BLOCKED_TLP | PCIM_AER_UC_ATOMIC_EGRESS_BLK | PCIM_AER_UC_TLP_PREFIX_BLOCKED); pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); if (r != 0 && bootverbose) { pci_printf(&dinfo->cfg, "clearing AER COR 0x%08x -> 0x%08x\n", r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4)); } r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4); r &= ~(PCIM_AER_COR_RECEIVER_ERROR | PCIM_AER_COR_BAD_TLP | PCIM_AER_COR_BAD_DLLP | PCIM_AER_COR_REPLAY_ROLLOVER | PCIM_AER_COR_REPLAY_TIMEOUT | PCIM_AER_COR_ADVISORY_NF_ERROR | PCIM_AER_COR_INTERNAL_ERROR | PCIM_AER_COR_HEADER_LOG_OVFLOW); pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4); r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2); r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE | PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE; pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, r, 2); } } void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { device_t dev; dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1); device_set_ivars(dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dev, dinfo, 0); pci_cfg_restore(dev, dinfo); pci_print_verbose(dinfo); pci_add_resources(bus, 
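/*
 * Illustrative sketch, not part of this file: why pci_add_child_clear_aer()
 * above writes back the value it just read.  The AER status registers are
 * write-1-to-clear, so echoing the read clears exactly the bits that were
 * latched.  The simulated register below makes the semantics explicit.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
sketch_rw1c_write(uint32_t reg, uint32_t value)
{
	return (reg & ~value);		/* each written 1 clears that bit */
}

int
main(void)
{
	uint32_t status = 0x00004001;	/* two latched error bits */

	status = sketch_rw1c_write(status, status);
	printf("after clear: %#x\n", status);	/* prints 0 */
	return (0);
}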
dev, 0, 0); pci_child_added(dinfo->cfg.dev); if (pci_clear_aer_on_attach) pci_add_child_clear_aer(dev, dinfo); EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; -#ifdef PCI_DMA_BOUNDARY - int error, tag_valid; -#endif #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } #endif if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); -#ifdef PCI_DMA_BOUNDARY - tag_valid = 0; - if (device_get_devclass(device_get_parent(device_get_parent(dev))) != - devclass_find("pci")) { - error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, - PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, - NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, - BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag); - if (error) - device_printf(dev, "Failed to create DMA tag: %d\n", - error); - else - tag_valid = 1; - } - if (!tag_valid) -#endif - sc->sc_dma_tag = bus_get_dma_tag(dev); + sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } static int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * buses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. */ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); return (bus_generic_attach(dev)); } static int pci_detach(device_t dev) { #ifdef PCI_RES_BUS struct pci_softc *sc; #endif int error; error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_RES_BUS sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); if (error) return (error); #endif return (device_delete_children(dev)); } static void pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp) { int line, unit; const char *at; char me1[24], me2[32]; uint8_t b, s, f; uint32_t d; d = pci_get_domain(child); b = pci_get_bus(child); s = pci_get_slot(child); f = pci_get_function(child); snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f); snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f); line = 0; while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) { resource_string_value(name, unit, "at", &at); if (strcmp(at, me1) != 0 && strcmp(at, me2) != 0) continue; /* No match, try next candidate */ *unitp = unit; return; } } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. 
*/ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. */ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) { /* * Make sure this device's interrupt handler is not invoked * in the case the device uses a shared interrupt that can * be raised by some other device. * This is applicable only to regular (legacy) PCI interrupts * as MSI/MSI-X interrupts are never shared. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_suspend_intr(child, rle->res); pci_set_power_child(dev, child, PCI_POWERSTATE_D3); } return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list_entry *rle; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); /* * Allow interrupts only after fully resuming the driver and hardware. */ if (pci_do_power_suspend) { /* See pci_suspend_child for details. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) (void)bus_resume_intr(child, rle->res); } return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. 
*/ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. 
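/*
 * Illustrative sketch, not part of this file: the driver-side sequence
 * that ends up in the MSI branch of pci_setup_intr() here.  The bus
 * interfaces (pci_msi_count(), pci_alloc_msi(), bus_alloc_resource_any(),
 * bus_setup_intr()) are the standard ones; the function name is
 * hypothetical and error unwinding is trimmed.  Note that MSI resources
 * use rid 1 and up, while rid 0 remains the legacy INTx interrupt.
 */
static int
sketch_setup_msi(device_t dev, driver_intr_t *handler, void *arg,
    struct resource **irqp, void **cookiep)
{
	int count, rid;

	if (pci_msi_count(dev) == 0)
		return (ENXIO);
	count = 1;
	if (pci_alloc_msi(dev, &count) != 0)
		return (ENXIO);
	rid = 1;
	*irqp = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (*irqp == NULL) {
		pci_release_msi(dev);
		return (ENXIO);
	}
	return (bus_setup_intr(dev, *irqp, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, handler, arg, cookiep));
}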
*/ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } /* * The MSIX table entry must be made valid by * incrementing the mte_handlers before * calling pci_enable_msix() and * pci_resume_msix(). Else the MSIX rewrite * table quirk will not work as expected. */ mte->mte_handlers++; if (mte->mte_handlers == 1) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"}, {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"}, {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? 
scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n"); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); #ifdef PCI_RES_BUS if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); #endif pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
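/*
 * Illustrative sketch, not part of this file: the flat database format
 * described above, exercised on a made-up two-line snippet with the same
 * sscanf() patterns as pci_describe_parse_line().  An untabbed line is a
 * vendor entry; a tab-indented line is a device entry under the preceding
 * vendor.
 */
#include <stdio.h>

int
main(void)
{
	const char *vendor_line = "8086\tIntel Corporation";
	const char *device_line = "\t100e\t82540EM Gigabit Ethernet";
	char desc[81];
	unsigned int id;

	if (vendor_line[0] != '\t' &&
	    sscanf(vendor_line, "%x\t%80[^\n]", &id, desc) == 2)
		printf("vendor 0x%04x: %s\n", id, desc);
	if (device_line[0] == '\t' &&
	    sscanf(device_line, "%x\t%80[^\n]", &id, desc) == 2)
		printf("device 0x%04x: %s\n", id, desc);
	return (0);
}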
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include #include /* * List resources based on pci map registers, used for within ddb */ DB_SHOW_COMMAND(pciregs, db_pci_dump) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); (dinfo 
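/*
 * Illustrative sketch, not part of this file: the ivars returned by
 * pci_read_ivar() above are what a driver sees through the pci_get_*()
 * accessors.  A minimal probe routine matching the example 82540EM IDs
 * used earlier; the function name is hypothetical.
 */
static int
sketch_probe(device_t dev)
{
	if (pci_get_vendor(dev) == 0x8086 && pci_get_device(dev) == 0x100e) {
		device_set_desc(dev, "sketch: example 82540EM match");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}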
!= NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); p = &dinfo->conf; db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " "chip=0x%08x rev=0x%02x hdr=0x%02x\n", (name && *name) ? name : "none", (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, p->pc_sel.pc_func, (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif, (p->pc_subdevice << 16) | p->pc_subvendor, (p->pc_device << 16) | p->pc_vendor, p->pc_revid, p->pc_hdr); } } #endif /* DDB */ static struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags) { struct pci_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; struct resource *res; struct pci_map *pm; uint16_t cmd; pci_addr_t map, testval; int mapsize; res = NULL; /* If rid is managed by EA, ignore it */ if (pci_ea_is_enabled(child, *rid)) goto out; pm = pci_find_bar(child, *rid); if (pm != NULL) { /* This is a BAR that we failed to allocate earlier. */ mapsize = pm->pm_size; map = pm->pm_value; } else { /* * Weed out the bogons, and figure out how large the * BAR/map is. BARs that read back 0 here are bogus * and unimplemented. Note: atapci in legacy mode are * special and handled elsewhere in the code. If you * have a atapci device in legacy mode and it fails * here, that other code is broken. */ pci_read_bar(child, *rid, &map, &testval, NULL); /* * Determine the size of the BAR and ignore BARs with a size * of 0. Device ROM BARs use a different mask value. */ if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) mapsize = pci_romsize(testval); else mapsize = pci_mapsize(testval); if (mapsize == 0) goto out; pm = pci_add_bar(child, *rid, map, mapsize); } if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { if (type != SYS_RES_MEMORY) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an memio\n", device_get_nameunit(child), type, *rid); goto out; } } else { if (type != SYS_RES_IOPORT) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an ioport\n", device_get_nameunit(child), type, *rid); goto out; } } /* * For real BARs, we need to override the size that * the driver requests, because that's what the BAR * actually uses and we would otherwise have a * situation where we might allocate the excess to * another driver, which won't work. */ count = ((pci_addr_t)1 << mapsize) * num; if (RF_ALIGNMENT(flags) < mapsize) flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize); if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) flags |= RF_PREFETCHABLE; /* * Allocate enough resource, and then write back the * appropriate BAR for that resource. 
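 *
 * As a worked example (hypothetical device): if a 32-bit memory BAR
 * reads back 0xfff00000 from the sizing probe above, pci_mapsize()
 * reports a log2 size of 20, so the reservation below is forced to
 * count = 1 << 20 (1MB) with at least 1MB alignment, regardless of
 * any smaller size the child driver may have requested.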
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); /* Disable decoding via the CMD register before updating the BAR */ cmd = pci_read_config(child, PCIR_COMMAND, 2); pci_write_config(child, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); map = rman_get_start(res); pci_write_bar(child, pm, map); /* Restore the original value of the CMD register */ pci_write_config(child, PCIR_COMMAND, cmd, 2); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); #endif case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } #endif /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. 
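 *
 * For reference, a child driver does not normally call this routine
 * directly; it goes through the generic bus interface, e.g. (sketch
 * only, names illustrative):
 *
 *	int rid = PCIR_BAR(0);
 *	struct resource *res;
 *
 *	res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
 *	    RF_ACTIVE);
 *
 * which lands in pci_alloc_resource() and, for a BAR that has not yet
 * been reserved, in pci_reserve_map() above.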
*/ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: return (pci_vf_release_mem_resource(dev, child, rid, r)); } /* Fall through for other types of resource allocations. */ } #endif #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) { switch (rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: return (bus_generic_release_resource(dev, child, type, rid, r)); } } #endif rl = &dinfo->resources; return (resource_list_release(rl, dev, child, type, rid, r)); } int pci_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_activate_resource(dev, child, type, rid, r); if (error) return (error); /* Enable decoding in the command register when activating BARs. */ if (device_get_parent(child) == dev) { /* Device ROMs need their decoding explicitly enabled. */ dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } } return (error); } int pci_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_deactivate_resource(dev, child, type, rid, r); if (error) return (error); /* Disable decoding for device ROMs. */ if (device_get_parent(child) == dev) { dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); } return (0); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; EVENTHANDLER_INVOKE(pci_delete_device, child); /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. 
" "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. " "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } #ifdef ACPI_DMAR bus_dma_tag_t dmar_get_dma_tag(device_t dev, device_t child); bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { bus_dma_tag_t tag; struct pci_softc *sc; if (device_get_parent(dev) == bus) { /* try dmar and return if it works */ tag = dmar_get_dma_tag(bus, dev); } else tag = NULL; if (tag == NULL) { sc = device_get_softc(bus); tag = sc->sc_dma_tag; } return (tag); } #else bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } #endif uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. 
*/ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int pci_child_location_str_method(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "slot=%d function=%d dbsf=pci%d:%d:%d:%d", pci_get_slot(child), pci_get_function(child), pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, size_t buflen) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. */ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags 
& PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. 
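 *
 * Device drivers are not expected to call this routine directly; the
 * usual pattern in a driver's suspend/resume or reset methods is to
 * use the thin wrappers defined further below, e.g. (sketch only):
 *
 *	pci_save_state(dev);
 *	... power the device down, reset it, etc ...
 *	pci_restore_state(dev);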
*/ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); #endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. The COMMAND register is modified by the * bus w/o updating the cache. This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. 
*/ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); #ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); #endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *id) { return (PCIB_GET_ID(device_get_parent(dev), child, type, id)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } /* * Wait for pending transactions to complete on a PCI-express function. * * The maximum delay is specified in milliseconds in max_delay. Note * that this function may sleep. * * Returns true if the function is idle and false if the timeout is * exceeded. If dev is not a PCI-express function, this returns true. */ bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t sta; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (true); sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); while (sta & PCIEM_STA_TRANSACTION_PND) { if (max_delay == 0) return (false); /* Poll once every 100 milliseconds up to the timeout. */ if (max_delay > 100) { pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK); max_delay -= 100; } else { pause_sbt("pcietp", max_delay * SBT_1MS, 0, C_HARDCLOCK); max_delay = 0; } sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2); } return (true); } /* * Determine the maximum Completion Timeout in microseconds. * * For non-PCI-express functions this returns 0. */ int pcie_get_max_completion_timeout(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); /* * Functions using the 1.x spec use the default timeout range of * 50 microseconds to 50 milliseconds. Functions that do not * support programmable timeouts also use this range. 
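 *
 * For instance, a function whose Device Control 2 register selects
 * PCIEM_CTL2_COMP_TIMO_10MS is reported as 10 * 1000 microseconds
 * here; pcie_flr() below then converts that into a 10ms pad (and
 * never less than 10ms) on top of its fixed 100ms post-reset delay
 * when it is forced to reset with transactions still pending.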
*/ if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) & PCIEM_CAP2_COMP_TIMO_RANGES) == 0) return (50 * 1000); switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) & PCIEM_CTL2_COMP_TIMO_VAL) { case PCIEM_CTL2_COMP_TIMO_100US: return (100); case PCIEM_CTL2_COMP_TIMO_10MS: return (10 * 1000); case PCIEM_CTL2_COMP_TIMO_55MS: return (55 * 1000); case PCIEM_CTL2_COMP_TIMO_210MS: return (210 * 1000); case PCIEM_CTL2_COMP_TIMO_900MS: return (900 * 1000); case PCIEM_CTL2_COMP_TIMO_3500MS: return (3500 * 1000); case PCIEM_CTL2_COMP_TIMO_13S: return (13 * 1000 * 1000); case PCIEM_CTL2_COMP_TIMO_64S: return (64 * 1000 * 1000); default: return (50 * 1000); } } /* * Perform a Function Level Reset (FLR) on a device. * * This function first waits for any pending transactions to complete * within the timeout specified by max_delay. If transactions are * still pending, the function will return false without attempting a * reset. * * If dev is not a PCI-express function or does not support FLR, this * function returns false. * * Note that no registers are saved or restored. The caller is * responsible for saving and restoring any registers including * PCI-standard registers via pci_save_state() and * pci_restore_state(). */ bool pcie_flr(device_t dev, u_int max_delay, bool force) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint16_t cmd, ctl; int compl_delay; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (false); if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR)) return (false); /* * Disable busmastering to prevent generation of new * transactions while waiting for the device to go idle. If * the idle timeout fails, the command register is restored * which will re-enable busmastering. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2); if (!pcie_wait_for_pending_transactions(dev, max_delay)) { if (!force) { pci_write_config(dev, PCIR_COMMAND, cmd, 2); return (false); } pci_printf(&dinfo->cfg, "Resetting with transactions pending after %d ms\n", max_delay); /* * Extend the post-FLR delay to cover the maximum * Completion Timeout delay of anything in flight * during the FLR delay. Enforce a minimum delay of * at least 10ms. */ compl_delay = pcie_get_max_completion_timeout(dev) / 1000; if (compl_delay < 10) compl_delay = 10; } else compl_delay = 0; /* Initiate the reset. */ ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl | PCIEM_CTL_INITIATE_FLR, 2); /* Wait for 100ms. */ pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK); if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) & PCIEM_STA_TRANSACTION_PND) pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); return (true); } /* * Attempt a power-management reset by cycling the device in/out of D3 * state. PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ int pci_power_reset(device_t dev) { int ps; ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_set_powerstate(dev, PCI_POWERSTATE_D3); pci_set_powerstate(dev, ps); return (0); } /* * Try link drop and retrain of the downstream port of upstream * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must * cause Conventional Hot reset of the device in the slot. 
* Alternative, for PCIe, could be the secondary bus reset initiatied * on the upstream switch PCIR_BRIDGECTL_1, bit 6. */ int pcie_link_reset(device_t port, int pcie_location) { uint16_t v; v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2); v |= PCIEM_LINK_CTL_LINK_DIS; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier1", mstosbt(20), 0, 0); v &= ~PCIEM_LINK_CTL_LINK_DIS; v |= PCIEM_LINK_CTL_RETRAIN_LINK; pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2); pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */ v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2); return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0); } static int pci_reset_post(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_restore_state(child); return (0); } static int pci_reset_prepare(device_t dev, device_t child) { if (dev == device_get_parent(child)) pci_save_state(child); return (0); } static int pci_reset_child(device_t dev, device_t child, int flags) { int error; if (dev == NULL || device_get_parent(child) != dev) return (0); if ((flags & DEVF_RESET_DETACH) != 0) { error = device_get_state(child) == DS_ATTACHED ? device_detach(child) : 0; } else { error = BUS_SUSPEND_CHILD(dev, child); } if (error == 0) { if (!pcie_flr(child, 1000, false)) { error = BUS_RESET_PREPARE(dev, child); if (error == 0) pci_power_reset(child); BUS_RESET_POST(dev, child); } if ((flags & DEVF_RESET_DETACH) != 0) device_probe_and_attach(child); else BUS_RESUME_CHILD(dev, child); } return (error); } const struct pci_device_table * pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt) { bool match; uint16_t vendor, device, subvendor, subdevice, class, subclass, revid; vendor = pci_get_vendor(child); device = pci_get_device(child); subvendor = pci_get_subvendor(child); subdevice = pci_get_subdevice(child); class = pci_get_class(child); subclass = pci_get_subclass(child); revid = pci_get_revid(child); while (nelt-- > 0) { match = true; if (id->match_flag_vendor) match &= vendor == id->vendor; if (id->match_flag_device) match &= device == id->device; if (id->match_flag_subvendor) match &= subvendor == id->subvendor; if (id->match_flag_subdevice) match &= subdevice == id->subdevice; if (id->match_flag_class) match &= class == id->class_id; if (id->match_flag_subclass) match &= subclass == id->subclass; if (id->match_flag_revid) match &= revid == id->revid; if (match) return (id); id++; } return (NULL); } static void pci_print_faulted_dev_name(const struct pci_devinfo *dinfo) { const char *dev_name; device_t dev; dev = dinfo->cfg.dev; printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func); dev_name = device_get_name(dev); if (dev_name != NULL) printf(" (%s%d)", dev_name, device_get_unit(dev)); } void pci_print_faulted_dev(void) { struct pci_devinfo *dinfo; device_t dev; int aer, i; uint32_t r1, r2; uint16_t status; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status = pci_read_config(dev, PCIR_STATUS, 2); status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status != 0) { pci_print_faulted_dev_name(dinfo); printf(" error 0x%04x\n", status); } if (dinfo->cfg.pcie.pcie_location != 0) { status = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); if ((status & (PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ)) != 
0) { pci_print_faulted_dev_name(dinfo); printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n", pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_CTL, 2), status); } } if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) { r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r1 != 0 || r2 != 0) { pci_print_faulted_dev_name(dinfo); printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n" " COR 0x%08x Mask 0x%08x Ctl 0x%08x\n", r1, pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4), pci_read_config(dev, aer + PCIR_AER_UC_SEVERITY, 4), r2, pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4), pci_read_config(dev, aer + PCIR_AER_CAP_CONTROL, 4)); for (i = 0; i < 4; i++) { r1 = pci_read_config(dev, aer + PCIR_AER_HEADER_LOG + i * 4, 4); printf(" HL%d: 0x%08x\n", i, r1); } } } } } #ifdef DDB DB_SHOW_COMMAND(pcierr, pci_print_faulted_dev_db) { pci_print_faulted_dev(); } static void db_clear_pcie_errors(const struct pci_devinfo *dinfo) { device_t dev; int aer; uint32_t r; dev = dinfo->cfg.dev; r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, 2); pci_write_config(dev, dinfo->cfg.pcie.pcie_location + PCIER_DEVICE_STA, r, 2); if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0) return; r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4); r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4); if (r != 0) pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4); } DB_COMMAND(pci_clearerr, db_pci_clearerr) { struct pci_devinfo *dinfo; device_t dev; uint16_t status, status1; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { dev = dinfo->cfg.dev; status1 = status = pci_read_config(dev, PCIR_STATUS, 2); status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT | PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT | PCIM_STATUS_SERR | PCIM_STATUS_PERR; if (status1 != 0) { status &= ~status1; pci_write_config(dev, PCIR_STATUS, status, 2); } if (dinfo->cfg.pcie.pcie_location != 0) db_clear_pcie_errors(dinfo); } } #endif Index: head/sys/dev/pci/pcivar.h =================================================================== --- head/sys/dev/pci/pcivar.h (revision 346385) +++ head/sys/dev/pci/pcivar.h (revision 346386) @@ -1,739 +1,731 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _PCIVAR_H_ #define _PCIVAR_H_ #include #include /* some PCI bus constants */ #define PCI_MAXMAPS_0 6 /* max. no. of memory/port maps */ #define PCI_MAXMAPS_1 2 /* max. no. of maps for PCI to PCI bridge */ #define PCI_MAXMAPS_2 1 /* max. no. of maps for CardBus bridge */ typedef uint64_t pci_addr_t; /* Config registers for PCI-PCI and PCI-Cardbus bridges. */ struct pcicfg_bridge { uint8_t br_seclat; uint8_t br_subbus; uint8_t br_secbus; uint8_t br_pribus; uint16_t br_control; }; /* Interesting values for PCI power management */ struct pcicfg_pp { uint16_t pp_cap; /* PCI power management capabilities */ uint8_t pp_status; /* conf. space addr. of PM control/status reg */ uint8_t pp_bse; /* conf. space addr. of PM BSE reg */ uint8_t pp_data; /* conf. space addr. of PM data reg */ }; struct pci_map { pci_addr_t pm_value; /* Raw BAR value */ pci_addr_t pm_size; uint16_t pm_reg; STAILQ_ENTRY(pci_map) pm_link; }; struct vpd_readonly { char keyword[2]; char *value; int len; }; struct vpd_write { char keyword[2]; char *value; int start; int len; }; struct pcicfg_vpd { uint8_t vpd_reg; /* base register, + 2 for addr, + 4 data */ char vpd_cached; char *vpd_ident; /* string identifier */ int vpd_rocnt; struct vpd_readonly *vpd_ros; int vpd_wcnt; struct vpd_write *vpd_w; }; /* Interesting values for PCI MSI */ struct pcicfg_msi { uint16_t msi_ctrl; /* Message Control */ uint8_t msi_location; /* Offset of MSI capability registers. */ uint8_t msi_msgnum; /* Number of messages */ int msi_alloc; /* Number of allocated messages. */ uint64_t msi_addr; /* Contents of address register. */ uint16_t msi_data; /* Contents of data register. */ u_int msi_handlers; }; /* Interesting values for PCI MSI-X */ struct msix_vector { uint64_t mv_address; /* Contents of address register. */ uint32_t mv_data; /* Contents of data register. */ int mv_irq; }; struct msix_table_entry { u_int mte_vector; /* 1-based index into msix_vectors array. */ u_int mte_handlers; }; struct pcicfg_msix { uint16_t msix_ctrl; /* Message Control */ uint16_t msix_msgnum; /* Number of messages */ uint8_t msix_location; /* Offset of MSI-X capability registers. */ uint8_t msix_table_bar; /* BAR containing vector table. */ uint8_t msix_pba_bar; /* BAR containing PBA. */ uint32_t msix_table_offset; uint32_t msix_pba_offset; int msix_alloc; /* Number of allocated vectors. */ int msix_table_len; /* Length of virtual table. */ struct msix_table_entry *msix_table; /* Virtual table. */ struct msix_vector *msix_vectors; /* Array of allocated vectors. */ struct resource *msix_table_res; /* Resource containing vector table. */ struct resource *msix_pba_res; /* Resource containing PBA. */ }; /* Interesting values for HyperTransport */ struct pcicfg_ht { uint8_t ht_slave; /* Non-zero if device is an HT slave. */ uint8_t ht_msimap; /* Offset of MSI mapping cap registers. 
*/ uint16_t ht_msictrl; /* MSI mapping control */ uint64_t ht_msiaddr; /* MSI mapping base address */ }; /* Interesting values for PCI-express */ struct pcicfg_pcie { uint8_t pcie_location; /* Offset of PCI-e capability registers. */ uint8_t pcie_type; /* Device type. */ uint16_t pcie_flags; /* Device capabilities register. */ uint16_t pcie_device_ctl; /* Device control register. */ uint16_t pcie_link_ctl; /* Link control register. */ uint16_t pcie_slot_ctl; /* Slot control register. */ uint16_t pcie_root_ctl; /* Root control register. */ uint16_t pcie_device_ctl2; /* Second device control register. */ uint16_t pcie_link_ctl2; /* Second link control register. */ uint16_t pcie_slot_ctl2; /* Second slot control register. */ }; struct pcicfg_pcix { uint16_t pcix_command; uint8_t pcix_location; /* Offset of PCI-X capability registers. */ }; struct pcicfg_vf { int index; }; struct pci_ea_entry { int eae_bei; uint32_t eae_flags; uint64_t eae_base; uint64_t eae_max_offset; uint32_t eae_cfg_offset; STAILQ_ENTRY(pci_ea_entry) eae_link; }; struct pcicfg_ea { int ea_location; /* Structure offset in Configuration Header */ STAILQ_HEAD(, pci_ea_entry) ea_entries; /* EA entries */ }; #define PCICFG_VF 0x0001 /* Device is an SR-IOV Virtual Function */ /* config header information common to all header types */ typedef struct pcicfg { device_t dev; /* device which owns this */ STAILQ_HEAD(, pci_map) maps; /* BARs */ uint16_t subvendor; /* card vendor ID */ uint16_t subdevice; /* card device ID, assigned by card vendor */ uint16_t vendor; /* chip vendor ID */ uint16_t device; /* chip device ID, assigned by chip vendor */ uint16_t cmdreg; /* disable/enable chip and PCI options */ uint16_t statreg; /* supported PCI features and error state */ uint8_t baseclass; /* chip PCI class */ uint8_t subclass; /* chip PCI subclass */ uint8_t progif; /* chip PCI programming interface */ uint8_t revid; /* chip revision ID */ uint8_t hdrtype; /* chip config header type */ uint8_t cachelnsz; /* cache line size in 4byte units */ uint8_t intpin; /* PCI interrupt pin */ uint8_t intline; /* interrupt line (IRQ for PC arch) */ uint8_t mingnt; /* min. useful bus grant time in 250ns units */ uint8_t maxlat; /* max. 
tolerated bus grant latency in 250ns */ uint8_t lattimer; /* latency timer in units of 30ns bus cycles */ uint8_t mfdev; /* multi-function device (from hdrtype reg) */ uint8_t nummaps; /* actual number of PCI maps used */ uint32_t domain; /* PCI domain */ uint8_t bus; /* config space bus address */ uint8_t slot; /* config space slot address */ uint8_t func; /* config space function number */ uint32_t flags; /* flags defined above */ struct pcicfg_bridge bridge; /* Bridges */ struct pcicfg_pp pp; /* Power management */ struct pcicfg_vpd vpd; /* Vital product data */ struct pcicfg_msi msi; /* PCI MSI */ struct pcicfg_msix msix; /* PCI MSI-X */ struct pcicfg_ht ht; /* HyperTransport */ struct pcicfg_pcie pcie; /* PCI Express */ struct pcicfg_pcix pcix; /* PCI-X */ struct pcicfg_iov *iov; /* SR-IOV */ struct pcicfg_vf vf; /* SR-IOV Virtual Function */ struct pcicfg_ea ea; /* Enhanced Allocation */ } pcicfgregs; /* additional type 1 device config header information (PCI to PCI bridge) */ typedef struct { pci_addr_t pmembase; /* base address of prefetchable memory */ pci_addr_t pmemlimit; /* topmost address of prefetchable memory */ uint32_t membase; /* base address of memory window */ uint32_t memlimit; /* topmost address of memory window */ uint32_t iobase; /* base address of port window */ uint32_t iolimit; /* topmost address of port window */ uint16_t secstat; /* secondary bus status register */ uint16_t bridgectl; /* bridge control register */ uint8_t seclat; /* CardBus latency timer */ } pcih1cfgregs; /* additional type 2 device config header information (CardBus bridge) */ typedef struct { uint32_t membase0; /* base address of memory window */ uint32_t memlimit0; /* topmost address of memory window */ uint32_t membase1; /* base address of memory window */ uint32_t memlimit1; /* topmost address of memory window */ uint32_t iobase0; /* base address of port window */ uint32_t iolimit0; /* topmost address of port window */ uint32_t iobase1; /* base address of port window */ uint32_t iolimit1; /* topmost address of port window */ uint32_t pccardif; /* PC Card 16bit IF legacy more base addr. */ uint16_t secstat; /* secondary bus status register */ uint16_t bridgectl; /* bridge control register */ uint8_t seclat; /* CardBus latency timer */ } pcih2cfgregs; extern uint32_t pci_numdevs; /* * The bitfield has to be stable and match the fields below (so that * match_flag_vendor must be bit 0) so we have to do the endian dance. We can't * use enums or #define constants because then the macros for subsetting matches * wouldn't work. These tables are parsed by devmatch and others to connect * modules with devices on the PCI bus. 
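 *
 * A minimal, hypothetical use of these tables in a driver (the IDs
 * are placeholders), built on the PCI_DEV/PCI_DESCR/PCI_PNP_INFO and
 * PCI_MATCH helpers defined below, would look roughly like:
 *
 *	static const struct pci_device_table mydrv_devs[] = {
 *		{ PCI_DEV(0x8086, 0x100e), PCI_DESCR("Example device") },
 *	};
 *	PCI_PNP_INFO(mydrv_devs);
 *
 *	static int
 *	mydrv_probe(device_t dev)
 *	{
 *		const struct pci_device_table *id;
 *
 *		id = PCI_MATCH(dev, mydrv_devs);
 *		if (id == NULL)
 *			return (ENXIO);
 *		device_set_desc(dev, id->descr);
 *		return (BUS_PROBE_DEFAULT);
 *	}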
*/ struct pci_device_table { #if BYTE_ORDER == LITTLE_ENDIAN uint16_t match_flag_vendor:1, match_flag_device:1, match_flag_subvendor:1, match_flag_subdevice:1, match_flag_class:1, match_flag_subclass:1, match_flag_revid:1, match_flag_unused:9; #else uint16_t match_flag_unused:9, match_flag_revid:1, match_flag_subclass:1, match_flag_class:1, match_flag_subdevice:1, match_flag_subvendor:1, match_flag_device:1, match_flag_vendor:1; #endif uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; uint16_t class_id; uint16_t subclass; uint16_t revid; uint16_t unused; uintptr_t driver_data; char *descr; }; #define PCI_DEV(v, d) \ .match_flag_vendor = 1, .vendor = (v), \ .match_flag_device = 1, .device = (d) #define PCI_SUBDEV(sv, sd) \ .match_flag_subvendor = 1, .subvendor = (sv), \ .match_flag_subdevice = 1, .subdevice = (sd) #define PCI_CLASS(x) \ .match_flag_class = 1, .class_id = (x) #define PCI_SUBCLASS(x) \ .match_flag_subclass = 1, .subclass = (x) #define PCI_REVID(x) \ .match_flag_revid = 1, .revid = (x) #define PCI_DESCR(x) \ .descr = (x) #define PCI_PNP_STR \ "M16:mask;U16:vendor;U16:device;U16:subvendor;U16:subdevice;" \ "U16:class;U16:subclass;U16:revid;" #define PCI_PNP_INFO(table) \ MODULE_PNP_INFO(PCI_PNP_STR, pci, table, table, \ sizeof(table) / sizeof(table[0])) const struct pci_device_table *pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt); #define PCI_MATCH(child, table) \ pci_match_device(child, (table), nitems(table)); /* Only if the prerequisites are present */ #if defined(_SYS_BUS_H_) && defined(_SYS_PCIIO_H_) struct pci_devinfo { STAILQ_ENTRY(pci_devinfo) pci_links; struct resource_list resources; pcicfgregs cfg; struct pci_conf conf; }; #endif #ifdef _SYS_BUS_H_ #include "pci_if.h" enum pci_device_ivars { PCI_IVAR_SUBVENDOR, PCI_IVAR_SUBDEVICE, PCI_IVAR_VENDOR, PCI_IVAR_DEVICE, PCI_IVAR_DEVID, PCI_IVAR_CLASS, PCI_IVAR_SUBCLASS, PCI_IVAR_PROGIF, PCI_IVAR_REVID, PCI_IVAR_INTPIN, PCI_IVAR_IRQ, PCI_IVAR_DOMAIN, PCI_IVAR_BUS, PCI_IVAR_SLOT, PCI_IVAR_FUNCTION, PCI_IVAR_ETHADDR, PCI_IVAR_CMDREG, PCI_IVAR_CACHELNSZ, PCI_IVAR_MINGNT, PCI_IVAR_MAXLAT, PCI_IVAR_LATTIMER }; /* * Simplified accessors for pci devices */ #define PCI_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(pci, var, PCI, ivar, type) PCI_ACCESSOR(subvendor, SUBVENDOR, uint16_t) PCI_ACCESSOR(subdevice, SUBDEVICE, uint16_t) PCI_ACCESSOR(vendor, VENDOR, uint16_t) PCI_ACCESSOR(device, DEVICE, uint16_t) PCI_ACCESSOR(devid, DEVID, uint32_t) PCI_ACCESSOR(class, CLASS, uint8_t) PCI_ACCESSOR(subclass, SUBCLASS, uint8_t) PCI_ACCESSOR(progif, PROGIF, uint8_t) PCI_ACCESSOR(revid, REVID, uint8_t) PCI_ACCESSOR(intpin, INTPIN, uint8_t) PCI_ACCESSOR(irq, IRQ, uint8_t) PCI_ACCESSOR(domain, DOMAIN, uint32_t) PCI_ACCESSOR(bus, BUS, uint8_t) PCI_ACCESSOR(slot, SLOT, uint8_t) PCI_ACCESSOR(function, FUNCTION, uint8_t) PCI_ACCESSOR(ether, ETHADDR, uint8_t *) PCI_ACCESSOR(cmdreg, CMDREG, uint8_t) PCI_ACCESSOR(cachelnsz, CACHELNSZ, uint8_t) PCI_ACCESSOR(mingnt, MINGNT, uint8_t) PCI_ACCESSOR(maxlat, MAXLAT, uint8_t) PCI_ACCESSOR(lattimer, LATTIMER, uint8_t) #undef PCI_ACCESSOR /* * Operations on configuration space. */ static __inline uint32_t pci_read_config(device_t dev, int reg, int width) { return PCI_READ_CONFIG(device_get_parent(dev), dev, reg, width); } static __inline void pci_write_config(device_t dev, int reg, uint32_t val, int width) { PCI_WRITE_CONFIG(device_get_parent(dev), dev, reg, val, width); } /* * Ivars for pci bridges. 
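 *
 * (A typical consumer of the config-space helpers above, sketch only:
 *
 *	uint16_t cmd = pci_read_config(dev, PCIR_COMMAND, 2);
 *	pci_write_config(dev, PCIR_COMMAND, cmd | PCIM_CMD_BUSMASTEREN, 2);
 *
 * although pci_enable_busmaster() below is the preferred way to set
 * that particular bit.)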
*/ /*typedef enum pci_device_ivars pcib_device_ivars;*/ enum pcib_device_ivars { PCIB_IVAR_DOMAIN, PCIB_IVAR_BUS }; #define PCIB_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(pcib, var, PCIB, ivar, type) PCIB_ACCESSOR(domain, DOMAIN, uint32_t) PCIB_ACCESSOR(bus, BUS, uint32_t) #undef PCIB_ACCESSOR /* * PCI interrupt validation. Invalid interrupt values such as 0 or 128 * on i386 or other platforms should be mapped out in the MD pcireadconf * code and not here, since the only MI invalid IRQ is 255. */ #define PCI_INVALID_IRQ 255 #define PCI_INTERRUPT_VALID(x) ((x) != PCI_INVALID_IRQ) /* * Convenience functions. * * These should be used in preference to manually manipulating * configuration space. */ static __inline int pci_enable_busmaster(device_t dev) { return(PCI_ENABLE_BUSMASTER(device_get_parent(dev), dev)); } static __inline int pci_disable_busmaster(device_t dev) { return(PCI_DISABLE_BUSMASTER(device_get_parent(dev), dev)); } static __inline int pci_enable_io(device_t dev, int space) { return(PCI_ENABLE_IO(device_get_parent(dev), dev, space)); } static __inline int pci_disable_io(device_t dev, int space) { return(PCI_DISABLE_IO(device_get_parent(dev), dev, space)); } static __inline int pci_get_vpd_ident(device_t dev, const char **identptr) { return(PCI_GET_VPD_IDENT(device_get_parent(dev), dev, identptr)); } static __inline int pci_get_vpd_readonly(device_t dev, const char *kw, const char **vptr) { return(PCI_GET_VPD_READONLY(device_get_parent(dev), dev, kw, vptr)); } /* * Check if the address range falls within the VGA defined address range(s) */ static __inline int pci_is_vga_ioport_range(rman_res_t start, rman_res_t end) { return (((start >= 0x3b0 && end <= 0x3bb) || (start >= 0x3c0 && end <= 0x3df)) ? 1 : 0); } static __inline int pci_is_vga_memory_range(rman_res_t start, rman_res_t end) { return ((start >= 0xa0000 && end <= 0xbffff) ? 1 : 0); } /* * PCI power states are as defined by ACPI: * * D0 State in which device is on and running. It is receiving full * power from the system and delivering full functionality to the user. * D1 Class-specific low-power state in which device context may or may not * be lost. Buses in D1 cannot do anything to the bus that would force * devices on that bus to lose context. * D2 Class-specific low-power state in which device context may or may * not be lost. Attains greater power savings than D1. Buses in D2 * can cause devices on that bus to lose some context. Devices in D2 * must be prepared for the bus to be in D2 or higher. * D3 State in which the device is off and not running. Device context is * lost. Power can be removed from the device. 
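 *
 * A driver that parks an idle device in a low-power state would
 * typically use the helpers defined below along these lines (sketch
 * only, error handling omitted):
 *
 *	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
 *	...
 *	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
 *
 * keeping in mind that a transition into D3 is only valid from D0,
 * which is why pci_cfg_save() in pci.c moves a device through D0
 * first when powering it down.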
*/ #define PCI_POWERSTATE_D0 0 #define PCI_POWERSTATE_D1 1 #define PCI_POWERSTATE_D2 2 #define PCI_POWERSTATE_D3 3 #define PCI_POWERSTATE_UNKNOWN -1 static __inline int pci_set_powerstate(device_t dev, int state) { return PCI_SET_POWERSTATE(device_get_parent(dev), dev, state); } static __inline int pci_get_powerstate(device_t dev) { return PCI_GET_POWERSTATE(device_get_parent(dev), dev); } static __inline int pci_find_cap(device_t dev, int capability, int *capreg) { return (PCI_FIND_CAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_find_next_cap(device_t dev, int capability, int start, int *capreg) { return (PCI_FIND_NEXT_CAP(device_get_parent(dev), dev, capability, start, capreg)); } static __inline int pci_find_extcap(device_t dev, int capability, int *capreg) { return (PCI_FIND_EXTCAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_find_next_extcap(device_t dev, int capability, int start, int *capreg) { return (PCI_FIND_NEXT_EXTCAP(device_get_parent(dev), dev, capability, start, capreg)); } static __inline int pci_find_htcap(device_t dev, int capability, int *capreg) { return (PCI_FIND_HTCAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_find_next_htcap(device_t dev, int capability, int start, int *capreg) { return (PCI_FIND_NEXT_HTCAP(device_get_parent(dev), dev, capability, start, capreg)); } static __inline int pci_alloc_msi(device_t dev, int *count) { return (PCI_ALLOC_MSI(device_get_parent(dev), dev, count)); } static __inline int pci_alloc_msix(device_t dev, int *count) { return (PCI_ALLOC_MSIX(device_get_parent(dev), dev, count)); } static __inline void pci_enable_msi(device_t dev, uint64_t address, uint16_t data) { PCI_ENABLE_MSI(device_get_parent(dev), dev, address, data); } static __inline void pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data) { PCI_ENABLE_MSIX(device_get_parent(dev), dev, index, address, data); } static __inline void pci_disable_msi(device_t dev) { PCI_DISABLE_MSI(device_get_parent(dev), dev); } static __inline int pci_remap_msix(device_t dev, int count, const u_int *vectors) { return (PCI_REMAP_MSIX(device_get_parent(dev), dev, count, vectors)); } static __inline int pci_release_msi(device_t dev) { return (PCI_RELEASE_MSI(device_get_parent(dev), dev)); } static __inline int pci_msi_count(device_t dev) { return (PCI_MSI_COUNT(device_get_parent(dev), dev)); } static __inline int pci_msix_count(device_t dev) { return (PCI_MSIX_COUNT(device_get_parent(dev), dev)); } static __inline int pci_msix_pba_bar(device_t dev) { return (PCI_MSIX_PBA_BAR(device_get_parent(dev), dev)); } static __inline int pci_msix_table_bar(device_t dev) { return (PCI_MSIX_TABLE_BAR(device_get_parent(dev), dev)); } static __inline int pci_get_id(device_t dev, enum pci_id_type type, uintptr_t *id) { return (PCI_GET_ID(device_get_parent(dev), dev, type, id)); } /* * This is the deprecated interface, there is no way to tell the difference * between a failure and a valid value that happens to be the same as the * failure value. 
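 *
 * New code should prefer the explicit interface above, along the
 * lines of (sketch only):
 *
 *	uintptr_t rid;
 *
 *	if (pci_get_id(dev, PCI_ID_RID, &rid) != 0)
 *		return (ENXIO);
 *
 * (or whatever error handling is appropriate), since with
 * pci_get_rid() a return value of 0 can be either a valid RID or a
 * failure.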
*/ static __inline uint16_t pci_get_rid(device_t dev) { uintptr_t rid; if (pci_get_id(dev, PCI_ID_RID, &rid) != 0) return (0); return (rid); } static __inline void pci_child_added(device_t dev) { return (PCI_CHILD_ADDED(device_get_parent(dev), dev)); } device_t pci_find_bsf(uint8_t, uint8_t, uint8_t); device_t pci_find_dbsf(uint32_t, uint8_t, uint8_t, uint8_t); device_t pci_find_device(uint16_t, uint16_t); device_t pci_find_class(uint8_t class, uint8_t subclass); /* Can be used by drivers to manage the MSI-X table. */ int pci_pending_msix(device_t dev, u_int index); int pci_msi_device_blacklisted(device_t dev); int pci_msix_device_blacklisted(device_t dev); void pci_ht_map_msi(device_t dev, uint64_t addr); device_t pci_find_pcie_root_port(device_t dev); int pci_get_max_payload(device_t dev); int pci_get_max_read_req(device_t dev); void pci_restore_state(device_t dev); void pci_save_state(device_t dev); int pci_set_max_read_req(device_t dev, int size); int pci_power_reset(device_t dev); uint32_t pcie_read_config(device_t dev, int reg, int width); void pcie_write_config(device_t dev, int reg, uint32_t value, int width); uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width); bool pcie_flr(device_t dev, u_int max_delay, bool force); int pcie_get_max_completion_timeout(device_t dev); bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay); int pcie_link_reset(device_t port, int pcie_location); void pci_print_faulted_dev(void); -#ifdef BUS_SPACE_MAXADDR -#if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) -#define PCI_DMA_BOUNDARY 0x100000000 -#else -#define PCI_DMA_BOUNDARY 0 -#endif -#endif - #endif /* _SYS_BUS_H_ */ /* * cdev switch for control device, initialised in generic PCI code */ extern struct cdevsw pcicdev; /* * List of all PCI devices, generation count for the list. */ STAILQ_HEAD(devlist, pci_devinfo); extern struct devlist pci_devq; extern uint32_t pci_generation; struct pci_map *pci_find_bar(device_t dev, int reg); int pci_bar_enabled(device_t dev, struct pci_map *pm); struct pcicfg_vpd *pci_fetch_vpd_list(device_t dev); #define VGA_PCI_BIOS_SHADOW_ADDR 0xC0000 #define VGA_PCI_BIOS_SHADOW_SIZE 131072 int vga_pci_is_boot_display(device_t dev); void * vga_pci_map_bios(device_t dev, size_t *size); void vga_pci_unmap_bios(device_t dev, void *bios); int vga_pci_repost(device_t dev); /** * Global eventhandlers invoked when PCI devices are added or removed * from the system. */ typedef void (*pci_event_fn)(void *arg, device_t dev); EVENTHANDLER_DECLARE(pci_add_device, pci_event_fn); EVENTHANDLER_DECLARE(pci_delete_device, pci_event_fn); #endif /* _PCIVAR_H_ */ Index: head/sys/dev/twa/tw_osl.h =================================================================== --- head/sys/dev/twa/tw_osl.h (revision 346385) +++ head/sys/dev/twa/tw_osl.h (revision 346386) @@ -1,320 +1,326 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004-07 Applied Micro Circuits Corporation. * Copyright (c) 2004-05 Vinod Kashyap. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * AMCC'S 3ware driver for 9000 series storage controllers. * * Author: Vinod Kashyap * Modifications by: Adam Radford * Modifications by: Manjunath Ranganathaiah */ #ifndef TW_OSL_H #define TW_OSL_H /* * OS Layer internal macros, structures and functions. */ #define TW_OSLI_DEVICE_NAME "3ware 9000 series Storage Controller" #define TW_OSLI_MALLOC_CLASS M_TWA #define TW_OSLI_MAX_NUM_REQUESTS TW_CL_MAX_SIMULTANEOUS_REQUESTS /* Reserve two command packets. One for ioctls and one for AENs */ #define TW_OSLI_MAX_NUM_IOS (TW_OSLI_MAX_NUM_REQUESTS - 2) #define TW_OSLI_MAX_NUM_AENS 0x100 +#ifdef PAE +#define TW_OSLI_DMA_BOUNDARY (1u << 31) +#else +#define TW_OSLI_DMA_BOUNDARY ((bus_size_t)((uint64_t)1 << 32)) +#endif + /* Possible values of req->state. */ #define TW_OSLI_REQ_STATE_INIT 0x0 /* being initialized */ #define TW_OSLI_REQ_STATE_BUSY 0x1 /* submitted to CL */ #define TW_OSLI_REQ_STATE_PENDING 0x2 /* in pending queue */ #define TW_OSLI_REQ_STATE_COMPLETE 0x3 /* completed by CL */ /* Possible values of req->flags. */ #define TW_OSLI_REQ_FLAGS_DATA_IN (1<<0) /* read request */ #define TW_OSLI_REQ_FLAGS_DATA_OUT (1<<1) /* write request */ #define TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED (1<<2)/* data in ccb is misaligned, have to copy to/from private buffer */ #define TW_OSLI_REQ_FLAGS_MAPPED (1<<3) /* request has been mapped */ #define TW_OSLI_REQ_FLAGS_IN_PROGRESS (1<<4) /* bus_dmamap_load returned EINPROGRESS */ #define TW_OSLI_REQ_FLAGS_PASSTHRU (1<<5) /* pass through request */ #define TW_OSLI_REQ_FLAGS_SLEEPING (1<<6) /* owner sleeping on this cmd */ #define TW_OSLI_REQ_FLAGS_FAILED (1<<7) /* bus_dmamap_load() failed */ #define TW_OSLI_REQ_FLAGS_CCB (1<<8) /* req is ccb. */ #ifdef TW_OSL_DEBUG struct tw_osli_q_stats { TW_UINT32 cur_len; /* current # of items in q */ TW_UINT32 max_len; /* max value reached by q_length */ }; #endif /* TW_OSL_DEBUG */ /* Queues of OSL internal request context packets. */ #define TW_OSLI_FREE_Q 0 /* free q */ #define TW_OSLI_BUSY_Q 1 /* q of reqs submitted to CL */ #define TW_OSLI_Q_COUNT 2 /* total number of queues */ /* Driver's request packet. 
*/ struct tw_osli_req_context { struct tw_cl_req_handle req_handle;/* tag to track req b/w OSL & CL */ struct mtx ioctl_wake_timeout_lock_handle;/* non-spin lock used to detect ioctl timeout */ struct mtx *ioctl_wake_timeout_lock;/* ptr to above lock */ struct twa_softc *ctlr; /* ptr to OSL's controller context */ TW_VOID *data; /* ptr to data being passed to CL */ TW_UINT32 length; /* length of buf being passed to CL */ TW_UINT64 deadline;/* request timeout (in absolute time) */ /* * ptr to, and length of data passed to us from above, in case a buffer * copy was done due to non-compliance to alignment requirements */ TW_VOID *real_data; TW_UINT32 real_length; TW_UINT32 state; /* request state */ TW_UINT32 flags; /* request flags */ /* error encountered before request submission to CL */ TW_UINT32 error_code; /* ptr to orig req for use during callback */ TW_VOID *orig_req; struct tw_cl_link link; /* to link this request in a list */ bus_dmamap_t dma_map;/* DMA map for data */ struct tw_cl_req_packet req_pkt;/* req pkt understood by CL */ }; /* Per-controller structure. */ struct twa_softc { struct tw_cl_ctlr_handle ctlr_handle; struct tw_osli_req_context *req_ctx_buf; /* Controller state. */ TW_UINT8 open; TW_UINT32 flags; TW_INT32 device_id; TW_UINT32 alignment; TW_UINT32 sg_size_factor; TW_VOID *non_dma_mem; TW_VOID *dma_mem; TW_UINT64 dma_mem_phys; /* Request queues and arrays. */ struct tw_cl_link req_q_head[TW_OSLI_Q_COUNT]; struct task deferred_intr_callback;/* taskqueue function */ struct mtx io_lock_handle;/* general purpose lock */ struct mtx *io_lock;/* ptr to general purpose lock */ struct mtx q_lock_handle; /* queue manipulation lock */ struct mtx *q_lock;/* ptr to queue manipulation lock */ struct mtx sim_lock_handle;/* sim lock shared with cam */ struct mtx *sim_lock;/* ptr to sim lock */ struct callout watchdog_callout[2]; /* For command timeout */ TW_UINT32 watchdog_index; #ifdef TW_OSL_DEBUG struct tw_osli_q_stats q_stats[TW_OSLI_Q_COUNT];/* queue statistics */ #endif /* TW_OSL_DEBUG */ device_t bus_dev; /* bus device */ struct cdev *ctrl_dev; /* control device */ struct resource *reg_res; /* register interface window */ TW_INT32 reg_res_id; /* register resource id */ bus_space_handle_t bus_handle; /* bus space handle */ bus_space_tag_t bus_tag; /* bus space tag */ bus_dma_tag_t parent_tag; /* parent DMA tag */ bus_dma_tag_t cmd_tag; /* DMA tag for CL's DMA'able mem */ bus_dma_tag_t dma_tag; /* data buffer DMA tag */ bus_dma_tag_t ioctl_tag; /* ioctl data buffer DMA tag */ bus_dmamap_t cmd_map; /* DMA map for CL's DMA'able mem */ bus_dmamap_t ioctl_map; /* DMA map for ioctl data buffers */ struct resource *irq_res; /* interrupt resource */ TW_INT32 irq_res_id; /* register resource id */ TW_VOID *intr_handle; /* interrupt handle */ struct sysctl_ctx_list sysctl_ctxt; /* sysctl context */ struct sysctl_oid *sysctl_tree; /* sysctl oid */ struct cam_sim *sim; /* sim for this controller */ struct cam_path *path; /* peripheral, path, tgt, lun associated with this controller */ }; /* * Queue primitives. 
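 *
 * Typical life cycle, condensed from the users of these primitives in
 * tw_osl_freebsd.c (sketch only, error handling omitted): a request is
 * taken off the free queue, parked on the busy queue while the controller
 * owns it, and returned to the free queue on completion.
 *
 *	req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q);
 *	...
 *	tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q);
 *	...
 *	tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q);
 *	tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q);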
*/ #ifdef TW_OSL_DEBUG #define TW_OSLI_Q_INIT(sc, q_type) do { \ (sc)->q_stats[q_type].cur_len = 0; \ (sc)->q_stats[q_type].max_len = 0; \ } while(0) #define TW_OSLI_Q_INSERT(sc, q_type) do { \ struct tw_osli_q_stats *q_stats = &((sc)->q_stats[q_type]); \ \ if (++(q_stats->cur_len) > q_stats->max_len) \ q_stats->max_len = q_stats->cur_len; \ } while(0) #define TW_OSLI_Q_REMOVE(sc, q_type) \ (sc)->q_stats[q_type].cur_len-- #else /* TW_OSL_DEBUG */ #define TW_OSLI_Q_INIT(sc, q_index) #define TW_OSLI_Q_INSERT(sc, q_index) #define TW_OSLI_Q_REMOVE(sc, q_index) #endif /* TW_OSL_DEBUG */ /* Initialize a queue of requests. */ static __inline TW_VOID tw_osli_req_q_init(struct twa_softc *sc, TW_UINT8 q_type) { TW_CL_Q_INIT(&(sc->req_q_head[q_type])); TW_OSLI_Q_INIT(sc, q_type); } /* Insert the given request at the head of the given queue (q_type). */ static __inline TW_VOID tw_osli_req_q_insert_head(struct tw_osli_req_context *req, TW_UINT8 q_type) { mtx_lock_spin(req->ctlr->q_lock); TW_CL_Q_INSERT_HEAD(&(req->ctlr->req_q_head[q_type]), &(req->link)); TW_OSLI_Q_INSERT(req->ctlr, q_type); mtx_unlock_spin(req->ctlr->q_lock); } /* Insert the given request at the tail of the given queue (q_type). */ static __inline TW_VOID tw_osli_req_q_insert_tail(struct tw_osli_req_context *req, TW_UINT8 q_type) { mtx_lock_spin(req->ctlr->q_lock); TW_CL_Q_INSERT_TAIL(&(req->ctlr->req_q_head[q_type]), &(req->link)); TW_OSLI_Q_INSERT(req->ctlr, q_type); mtx_unlock_spin(req->ctlr->q_lock); } /* Remove and return the request at the head of the given queue (q_type). */ static __inline struct tw_osli_req_context * tw_osli_req_q_remove_head(struct twa_softc *sc, TW_UINT8 q_type) { struct tw_osli_req_context *req = NULL; struct tw_cl_link *link; mtx_lock_spin(sc->q_lock); if ((link = TW_CL_Q_FIRST_ITEM(&(sc->req_q_head[q_type]))) != TW_CL_NULL) { req = TW_CL_STRUCT_HEAD(link, struct tw_osli_req_context, link); TW_CL_Q_REMOVE_ITEM(&(sc->req_q_head[q_type]), &(req->link)); TW_OSLI_Q_REMOVE(sc, q_type); } mtx_unlock_spin(sc->q_lock); return(req); } /* Remove the given request from the given queue (q_type). */ static __inline TW_VOID tw_osli_req_q_remove_item(struct tw_osli_req_context *req, TW_UINT8 q_type) { mtx_lock_spin(req->ctlr->q_lock); TW_CL_Q_REMOVE_ITEM(&(req->ctlr->req_q_head[q_type]), &(req->link)); TW_OSLI_Q_REMOVE(req->ctlr, q_type); mtx_unlock_spin(req->ctlr->q_lock); } #ifdef TW_OSL_DEBUG extern TW_INT32 TW_DEBUG_LEVEL_FOR_OSL; #define tw_osli_dbg_dprintf(dbg_level, sc, fmt, args...) \ if (dbg_level <= TW_DEBUG_LEVEL_FOR_OSL) \ device_printf(sc->bus_dev, "%s: " fmt "\n", \ __func__, ##args) #define tw_osli_dbg_printf(dbg_level, fmt, args...) \ if (dbg_level <= TW_DEBUG_LEVEL_FOR_OSL) \ printf("%s: " fmt "\n", __func__, ##args) #else /* TW_OSL_DEBUG */ #define tw_osli_dbg_dprintf(dbg_level, sc, fmt, args...) #define tw_osli_dbg_printf(dbg_level, fmt, args...) #endif /* TW_OSL_DEBUG */ /* For regular printing. */ #define twa_printf(sc, fmt, args...) \ device_printf(((struct twa_softc *)(sc))->bus_dev, fmt, ##args) /* For printing in the "consistent error reporting" format. */ #define tw_osli_printf(sc, err_specific_desc, args...) 
\ device_printf((sc)->bus_dev, \ "%s: (0x%02X: 0x%04X): %s: " err_specific_desc "\n", ##args) #endif /* TW_OSL_H */ Index: head/sys/dev/twa/tw_osl_freebsd.c =================================================================== --- head/sys/dev/twa/tw_osl_freebsd.c (revision 346385) +++ head/sys/dev/twa/tw_osl_freebsd.c (revision 346386) @@ -1,1714 +1,1714 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004-07 Applied Micro Circuits Corporation. * Copyright (c) 2004-05 Vinod Kashyap. * Copyright (c) 2000 Michael Smith * Copyright (c) 2000 BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AMCC'S 3ware driver for 9000 series storage controllers. * * Author: Vinod Kashyap * Modifications by: Adam Radford * Modifications by: Manjunath Ranganathaiah */ /* * FreeBSD specific functions not related to CAM, and other * miscellaneous functions. */ #include #include #include #include #ifdef TW_OSL_DEBUG TW_INT32 TW_DEBUG_LEVEL_FOR_OSL = TW_OSL_DEBUG; TW_INT32 TW_OSL_DEBUG_LEVEL_FOR_CL = TW_OSL_DEBUG; #endif /* TW_OSL_DEBUG */ static MALLOC_DEFINE(TW_OSLI_MALLOC_CLASS, "twa_commands", "twa commands"); static d_open_t twa_open; static d_close_t twa_close; static d_ioctl_t twa_ioctl; static struct cdevsw twa_cdevsw = { .d_version = D_VERSION, .d_open = twa_open, .d_close = twa_close, .d_ioctl = twa_ioctl, .d_name = "twa", }; static devclass_t twa_devclass; /* * Function name: twa_open * Description: Called when the controller is opened. * Simply marks the controller as open. * * Input: dev -- control device corresponding to the ctlr * flags -- mode of open * fmt -- device type (character/block etc.) * proc -- current process * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_open(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); tw_osli_dbg_dprintf(5, sc, "entered"); sc->open = TW_CL_TRUE; return(0); } /* * Function name: twa_close * Description: Called when the controller is closed. * Simply marks the controller as not open. * * Input: dev -- control device corresponding to the ctlr * flags -- mode of corresponding open * fmt -- device type (character/block etc.) 
* proc -- current process * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_close(struct cdev *dev, TW_INT32 flags, TW_INT32 fmt, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); tw_osli_dbg_dprintf(5, sc, "entered"); sc->open = TW_CL_FALSE; return(0); } /* * Function name: twa_ioctl * Description: Called when an ioctl is posted to the controller. * Handles any OS Layer specific cmds, passes the rest * on to the Common Layer. * * Input: dev -- control device corresponding to the ctlr * cmd -- ioctl cmd * buf -- ptr to buffer in kernel memory, which is * a copy of the input buffer in user-space * flags -- mode of corresponding open * proc -- current process * Output: buf -- ptr to buffer in kernel memory, which will * be copied to the output buffer in user-space * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_ioctl(struct cdev *dev, u_long cmd, caddr_t buf, TW_INT32 flags, struct thread *proc) { struct twa_softc *sc = (struct twa_softc *)(dev->si_drv1); TW_INT32 error; tw_osli_dbg_dprintf(5, sc, "entered"); switch (cmd) { case TW_OSL_IOCTL_FIRMWARE_PASS_THROUGH: tw_osli_dbg_dprintf(6, sc, "ioctl: fw_passthru"); error = tw_osli_fw_passthru(sc, (TW_INT8 *)buf); break; case TW_OSL_IOCTL_SCAN_BUS: /* Request CAM for a bus scan. */ tw_osli_dbg_dprintf(6, sc, "ioctl: scan bus"); error = tw_osli_request_bus_scan(sc); break; default: tw_osli_dbg_dprintf(6, sc, "ioctl: 0x%lx", cmd); error = tw_cl_ioctl(&sc->ctlr_handle, cmd, buf); break; } return(error); } static TW_INT32 twa_probe(device_t dev); static TW_INT32 twa_attach(device_t dev); static TW_INT32 twa_detach(device_t dev); static TW_INT32 twa_shutdown(device_t dev); static TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op); static TW_VOID twa_pci_intr(TW_VOID *arg); static TW_VOID twa_watchdog(TW_VOID *arg); int twa_setup_intr(struct twa_softc *sc); int twa_teardown_intr(struct twa_softc *sc); static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc); static TW_VOID tw_osli_free_resources(struct twa_softc *sc); static TW_VOID twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); static TW_VOID twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error); static device_method_t twa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, twa_probe), DEVMETHOD(device_attach, twa_attach), DEVMETHOD(device_detach, twa_detach), DEVMETHOD(device_shutdown, twa_shutdown), DEVMETHOD_END }; static driver_t twa_pci_driver = { "twa", twa_methods, sizeof(struct twa_softc) }; DRIVER_MODULE(twa, pci, twa_pci_driver, twa_devclass, 0, 0); MODULE_DEPEND(twa, cam, 1, 1, 1); MODULE_DEPEND(twa, pci, 1, 1, 1); /* * Function name: twa_probe * Description: Called at driver load time. Claims 9000 ctlrs. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: <= 0 -- success * > 0 -- failure */ static TW_INT32 twa_probe(device_t dev) { static TW_UINT8 first_ctlr = 1; tw_osli_dbg_printf(3, "entered"); if (tw_cl_ctlr_supported(pci_get_vendor(dev), pci_get_device(dev))) { device_set_desc(dev, TW_OSLI_DEVICE_NAME); /* Print the driver version only once. 
*/ if (first_ctlr) { printf("3ware device driver for 9000 series storage " "controllers, version: %s\n", TW_OSL_DRIVER_VERSION_STRING); first_ctlr = 0; } return(0); } return(ENXIO); } int twa_setup_intr(struct twa_softc *sc) { int error = 0; if (!(sc->intr_handle) && (sc->irq_res)) { error = bus_setup_intr(sc->bus_dev, sc->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, twa_pci_intr, sc, &sc->intr_handle); } return( error ); } int twa_teardown_intr(struct twa_softc *sc) { int error = 0; if ((sc->intr_handle) && (sc->irq_res)) { error = bus_teardown_intr(sc->bus_dev, sc->irq_res, sc->intr_handle); sc->intr_handle = NULL; } return( error ); } /* * Function name: twa_attach * Description: Allocates pci resources; updates sc; adds a node to the * sysctl tree to expose the driver version; makes calls * (to the Common Layer) to initialize ctlr, and to * attach to CAM. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_attach(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 bar_num; TW_INT32 bar0_offset; TW_INT32 bar_size; TW_INT32 error; tw_osli_dbg_dprintf(3, sc, "entered"); sc->ctlr_handle.osl_ctlr_ctxt = sc; /* Initialize the softc structure. */ sc->bus_dev = dev; sc->device_id = pci_get_device(dev); /* Initialize the mutexes right here. */ sc->io_lock = &(sc->io_lock_handle); mtx_init(sc->io_lock, "tw_osl_io_lock", NULL, MTX_SPIN); sc->q_lock = &(sc->q_lock_handle); mtx_init(sc->q_lock, "tw_osl_q_lock", NULL, MTX_SPIN); sc->sim_lock = &(sc->sim_lock_handle); mtx_init(sc->sim_lock, "tw_osl_sim_lock", NULL, MTX_DEF | MTX_RECURSE); sysctl_ctx_init(&sc->sysctl_ctxt); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctxt, SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, device_get_nameunit(dev), CTLFLAG_RD, 0, ""); if (sc->sysctl_tree == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2000, "Cannot add sysctl tree node", ENXIO); return(ENXIO); } SYSCTL_ADD_STRING(&sc->sysctl_ctxt, SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version"); /* Force the busmaster enable bit on, in case the BIOS forgot. */ pci_enable_busmaster(dev); /* Allocate the PCI register window. */ if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM, &bar_num, &bar0_offset, &bar_size))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201F, "Can't get PCI BAR info", error); tw_osli_free_resources(sc); return(error); } sc->reg_res_id = PCIR_BARS + bar0_offset; if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2002, "Can't allocate register window", ENXIO); tw_osli_free_resources(sc); return(ENXIO); } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); /* Allocate and register our interrupt. 
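The sysctl node registered just above exports the driver version under the
controller's name; a minimal userland sketch (hypothetical, assuming the
first controller attaches as twa0, so the node is hw.twa0.driver_version):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	char version[64];
	size_t len = sizeof(version);

	/* Read the string exposed by SYSCTL_ADD_STRING() in twa_attach(). */
	if (sysctlbyname("hw.twa0.driver_version", version, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("twa driver version: %.*s\n", (int)len, version);
	return (0);
}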
*/ sc->irq_res_id = 0; if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ, &(sc->irq_res_id), RF_SHAREABLE | RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2003, "Can't allocate interrupt", ENXIO); tw_osli_free_resources(sc); return(ENXIO); } if ((error = twa_setup_intr(sc))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2004, "Can't set up interrupt", error); tw_osli_free_resources(sc); return(error); } if ((error = tw_osli_alloc_mem(sc))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2005, "Memory allocation failure", error); tw_osli_free_resources(sc); return(error); } /* Initialize the Common Layer for this controller. */ if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags, sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, sc->non_dma_mem, sc->dma_mem, sc->dma_mem_phys ))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2006, "Failed to initialize Common Layer/controller", error); tw_osli_free_resources(sc); return(error); } /* Create the control device. */ sc->ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->bus_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "twa%d", device_get_unit(sc->bus_dev)); sc->ctrl_dev->si_drv1 = sc; if ((error = tw_osli_cam_attach(sc))) { tw_osli_free_resources(sc); tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2007, "Failed to initialize CAM", error); return(error); } sc->watchdog_index = 0; callout_init(&(sc->watchdog_callout[0]), 1); callout_init(&(sc->watchdog_callout[1]), 1); callout_reset(&(sc->watchdog_callout[0]), 5*hz, twa_watchdog, &sc->ctlr_handle); return(0); } static TW_VOID twa_watchdog(TW_VOID *arg) { struct tw_cl_ctlr_handle *ctlr_handle = (struct tw_cl_ctlr_handle *)arg; struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt; int i; int i_need_a_reset = 0; int driver_is_active = 0; int my_watchdog_was_pending = 1234; TW_UINT64 current_time; struct tw_osli_req_context *my_req; //============================================================================== current_time = (TW_UINT64) (tw_osl_get_local_time()); for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { my_req = &(sc->req_ctx_buf[i]); if ((my_req->state == TW_OSLI_REQ_STATE_BUSY) && (my_req->deadline) && (my_req->deadline < current_time)) { tw_cl_set_reset_needed(ctlr_handle); #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Request %d timed out! 
d = %llu, c = %llu\n", i, my_req->deadline, current_time); #else /* TW_OSL_DEBUG */ device_printf((sc)->bus_dev, "Request %d timed out!\n", i); #endif /* TW_OSL_DEBUG */ break; } } //============================================================================== i_need_a_reset = tw_cl_is_reset_needed(ctlr_handle); i = (int) ((sc->watchdog_index++) & 1); driver_is_active = tw_cl_is_active(ctlr_handle); if (i_need_a_reset) { #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Watchdog rescheduled in 70 seconds\n"); #endif /* TW_OSL_DEBUG */ my_watchdog_was_pending = callout_reset(&(sc->watchdog_callout[i]), 70*hz, twa_watchdog, &sc->ctlr_handle); tw_cl_reset_ctlr(ctlr_handle); #ifdef TW_OSL_DEBUG device_printf((sc)->bus_dev, "Watchdog reset completed!\n"); #endif /* TW_OSL_DEBUG */ } else if (driver_is_active) { my_watchdog_was_pending = callout_reset(&(sc->watchdog_callout[i]), 5*hz, twa_watchdog, &sc->ctlr_handle); } #ifdef TW_OSL_DEBUG if (i_need_a_reset || my_watchdog_was_pending) device_printf((sc)->bus_dev, "i_need_a_reset = %d, " "driver_is_active = %d, my_watchdog_was_pending = %d\n", i_need_a_reset, driver_is_active, my_watchdog_was_pending); #endif /* TW_OSL_DEBUG */ } /* * Function name: tw_osli_alloc_mem * Description: Allocates memory needed both by CL and OSL. * * Input: sc -- OSL internal controller context * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 tw_osli_alloc_mem(struct twa_softc *sc) { struct tw_osli_req_context *req; TW_UINT32 max_sg_elements; TW_UINT32 non_dma_mem_size; TW_UINT32 dma_mem_size; TW_INT32 error; TW_INT32 i; tw_osli_dbg_dprintf(3, sc, "entered"); sc->flags |= (sizeof(bus_addr_t) == 8) ? TW_CL_64BIT_ADDRESSES : 0; sc->flags |= (sizeof(bus_size_t) == 8) ? TW_CL_64BIT_SG_LENGTH : 0; max_sg_elements = (sizeof(bus_addr_t) == 8) ? TW_CL_MAX_64BIT_SG_ELEMENTS : TW_CL_MAX_32BIT_SG_ELEMENTS; if ((error = tw_cl_get_mem_requirements(&sc->ctlr_handle, sc->flags, sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS, &(sc->alignment), &(sc->sg_size_factor), &non_dma_mem_size, &dma_mem_size ))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2008, "Can't get Common Layer's memory requirements", error); return(error); } if ((sc->non_dma_mem = malloc(non_dma_mem_size, TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2009, "Can't allocate non-dma memory", ENOMEM); return(ENOMEM); } /* Create the parent dma tag. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->bus_dev), /* parent */ sc->alignment, /* alignment */ - 0, /* boundary */ + TW_OSLI_DMA_BOUNDARY, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->parent_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200A, "Can't allocate parent DMA tag", ENOMEM); return(ENOMEM); } /* Create a dma tag for Common Layer's DMA'able memory (dma_mem). 
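The parent DMA tag above now passes TW_OSLI_DMA_BOUNDARY (defined in
tw_osl.h earlier in this change) as its boundary argument, so busdma will
never hand the controller a segment that crosses a 4GB line, or a 2GB line
under PAE, presumably because bus_size_t cannot express 4GB there.
bus_dma(9) requires the boundary to be zero or a power of two. A minimal
sketch of what the constraint means (hypothetical helper, not part of the
driver):

/*
 * True if [addr, addr + len) crosses a TW_OSLI_DMA_BOUNDARY line; busdma
 * would split such a transfer into two segments rather than create it.
 */
static __inline int
tw_example_crosses_boundary(bus_addr_t addr, bus_size_t len)
{
	bus_addr_t mask = ~(bus_addr_t)(TW_OSLI_DMA_BOUNDARY - 1);

	return (len != 0 && (addr & mask) != ((addr + len - 1) & mask));
}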
*/ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_mem_size, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->cmd_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200B, "Can't allocate DMA tag for Common Layer's " "DMA'able memory", ENOMEM); return(ENOMEM); } if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { /* Try a second time. */ if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem, BUS_DMA_NOWAIT, &sc->cmd_map)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200C, "Can't allocate DMA'able memory for the" "Common Layer", ENOMEM); return(ENOMEM); } } bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem, dma_mem_size, twa_map_load_callback, &sc->dma_mem_phys, 0); /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ twa_busdma_lock, /* lockfunc */ sc->io_lock, /* lockfuncarg */ &sc->dma_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x200F, "Can't allocate DMA tag for data buffers", ENOMEM); return(ENOMEM); } /* * Create a dma tag for ioctl data buffers; size will be the maximum * possible I/O size (128kB). */ if (bus_dma_tag_create(sc->parent_tag, /* parent */ sc->alignment, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ TW_CL_MAX_IO_SIZE, /* maxsize */ max_sg_elements, /* nsegments */ TW_CL_MAX_IO_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ twa_busdma_lock, /* lockfunc */ sc->io_lock, /* lockfuncarg */ &sc->ioctl_tag /* tag */)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2010, "Can't allocate DMA tag for ioctl data buffers", ENOMEM); return(ENOMEM); } /* Create just one map for all ioctl request data buffers. */ if (bus_dmamap_create(sc->ioctl_tag, 0, &sc->ioctl_map)) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2011, "Can't create ioctl map", ENOMEM); return(ENOMEM); } /* Initialize request queues. 
*/ tw_osli_req_q_init(sc, TW_OSLI_FREE_Q); tw_osli_req_q_init(sc, TW_OSLI_BUSY_Q); if ((sc->req_ctx_buf = (struct tw_osli_req_context *) malloc((sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS), TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2012, "Failed to allocate request packets", ENOMEM); return(ENOMEM); } bzero(sc->req_ctx_buf, sizeof(struct tw_osli_req_context) * TW_OSLI_MAX_NUM_REQUESTS); for (i = 0; i < TW_OSLI_MAX_NUM_REQUESTS; i++) { req = &(sc->req_ctx_buf[i]); req->ctlr = sc; if (bus_dmamap_create(sc->dma_tag, 0, &req->dma_map)) { tw_osli_printf(sc, "request # = %d, error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2013, "Can't create dma map", i, ENOMEM); return(ENOMEM); } /* Initialize the ioctl wakeup/ timeout mutex */ req->ioctl_wake_timeout_lock = &(req->ioctl_wake_timeout_lock_handle); mtx_init(req->ioctl_wake_timeout_lock, "tw_ioctl_wake_timeout_lock", NULL, MTX_DEF); /* Insert request into the free queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); } return(0); } /* * Function name: tw_osli_free_resources * Description: Performs clean-up at the time of going down. * * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: None */ static TW_VOID tw_osli_free_resources(struct twa_softc *sc) { struct tw_osli_req_context *req; TW_INT32 error = 0; tw_osli_dbg_dprintf(3, sc, "entered"); /* Detach from CAM */ tw_osli_cam_detach(sc); if (sc->req_ctx_buf) while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) != NULL) { mtx_destroy(req->ioctl_wake_timeout_lock); if ((error = bus_dmamap_destroy(sc->dma_tag, req->dma_map))) tw_osli_dbg_dprintf(1, sc, "dmamap_destroy(dma) returned %d", error); } if ((sc->ioctl_tag) && (sc->ioctl_map)) if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map))) tw_osli_dbg_dprintf(1, sc, "dmamap_destroy(ioctl) returned %d", error); /* Free all memory allocated so far. */ if (sc->req_ctx_buf) free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS); if (sc->non_dma_mem) free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS); if (sc->dma_mem) { bus_dmamap_unload(sc->cmd_tag, sc->cmd_map); bus_dmamem_free(sc->cmd_tag, sc->dma_mem, sc->cmd_map); } if (sc->cmd_tag) if ((error = bus_dma_tag_destroy(sc->cmd_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(cmd) returned %d", error); if (sc->dma_tag) if ((error = bus_dma_tag_destroy(sc->dma_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(dma) returned %d", error); if (sc->ioctl_tag) if ((error = bus_dma_tag_destroy(sc->ioctl_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(ioctl) returned %d", error); if (sc->parent_tag) if ((error = bus_dma_tag_destroy(sc->parent_tag))) tw_osli_dbg_dprintf(1, sc, "dma_tag_destroy(parent) returned %d", error); /* Disconnect the interrupt handler. */ if ((error = twa_teardown_intr(sc))) tw_osli_dbg_dprintf(1, sc, "teardown_intr returned %d", error); if (sc->irq_res != NULL) if ((error = bus_release_resource(sc->bus_dev, SYS_RES_IRQ, sc->irq_res_id, sc->irq_res))) tw_osli_dbg_dprintf(1, sc, "release_resource(irq) returned %d", error); /* Release the register window mapping. */ if (sc->reg_res != NULL) if ((error = bus_release_resource(sc->bus_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))) tw_osli_dbg_dprintf(1, sc, "release_resource(io) returned %d", error); /* Destroy the control device. 
*/ if (sc->ctrl_dev != (struct cdev *)NULL) destroy_dev(sc->ctrl_dev); if ((error = sysctl_ctx_free(&sc->sysctl_ctxt))) tw_osli_dbg_dprintf(1, sc, "sysctl_ctx_free returned %d", error); } /* * Function name: twa_detach * Description: Called when the controller is being detached from * the pci bus. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_detach(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 error; tw_osli_dbg_dprintf(3, sc, "entered"); error = EBUSY; if (sc->open) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2014, "Device open", error); goto out; } /* Shut the controller down. */ if ((error = twa_shutdown(dev))) goto out; /* Free all resources associated with this controller. */ tw_osli_free_resources(sc); error = 0; out: return(error); } /* * Function name: twa_shutdown * Description: Called at unload/shutdown time. Lets the controller * know that we are going down. * * Input: dev -- bus device corresponding to the ctlr * Output: None * Return value: 0 -- success * non-zero-- failure */ static TW_INT32 twa_shutdown(device_t dev) { struct twa_softc *sc = device_get_softc(dev); TW_INT32 error = 0; tw_osli_dbg_dprintf(3, sc, "entered"); /* Disconnect interrupts. */ error = twa_teardown_intr(sc); /* Stop watchdog task. */ callout_drain(&(sc->watchdog_callout[0])); callout_drain(&(sc->watchdog_callout[1])); /* Disconnect from the controller. */ if ((error = tw_cl_shutdown_ctlr(&(sc->ctlr_handle), 0))) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2015, "Failed to shutdown Common Layer/controller", error); } return(error); } /* * Function name: twa_busdma_lock * Description: Function to provide synchronization during busdma_swi. * * Input: lock_arg -- lock mutex sent as argument * op -- operation (lock/unlock) expected of the function * Output: None * Return value: None */ TW_VOID twa_busdma_lock(TW_VOID *lock_arg, bus_dma_lock_op_t op) { struct mtx *lock; lock = (struct mtx *)lock_arg; switch (op) { case BUS_DMA_LOCK: mtx_lock_spin(lock); break; case BUS_DMA_UNLOCK: mtx_unlock_spin(lock); break; default: panic("Unknown operation 0x%x for twa_busdma_lock!", op); } } /* * Function name: twa_pci_intr * Description: Interrupt handler. Wrapper for twa_interrupt. * * Input: arg -- ptr to OSL internal ctlr context * Output: None * Return value: None */ static TW_VOID twa_pci_intr(TW_VOID *arg) { struct twa_softc *sc = (struct twa_softc *)arg; tw_osli_dbg_dprintf(10, sc, "entered"); tw_cl_interrupt(&(sc->ctlr_handle)); } /* * Function name: tw_osli_fw_passthru * Description: Builds a fw passthru cmd pkt, and submits it to CL. 
* * Input: sc -- ptr to OSL internal ctlr context * buf -- ptr to ioctl pkt understood by CL * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_fw_passthru(struct twa_softc *sc, TW_INT8 *buf) { struct tw_osli_req_context *req; struct tw_osli_ioctl_no_data_buf *user_buf = (struct tw_osli_ioctl_no_data_buf *)buf; TW_TIME end_time; TW_UINT32 timeout = 60; TW_UINT32 data_buf_size_adjusted; struct tw_cl_req_packet *req_pkt; struct tw_cl_passthru_req_packet *pt_req; TW_INT32 error; tw_osli_dbg_dprintf(5, sc, "ioctl: passthru"); if ((req = tw_osli_get_request(sc)) == NULL) return(EBUSY); req->req_handle.osl_req_ctxt = req; req->orig_req = buf; req->flags |= TW_OSLI_REQ_FLAGS_PASSTHRU; req_pkt = &(req->req_pkt); req_pkt->status = 0; req_pkt->tw_osl_callback = tw_osl_complete_passthru; /* Let the Common Layer retry the request on cmd queue full. */ req_pkt->flags |= TW_CL_REQ_RETRY_ON_BUSY; pt_req = &(req_pkt->gen_req_pkt.pt_req); /* * Make sure that the data buffer sent to firmware is a * 512 byte multiple in size. */ data_buf_size_adjusted = (user_buf->driver_pkt.buffer_length + (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1); if ((req->length = data_buf_size_adjusted)) { if ((req->data = malloc(data_buf_size_adjusted, TW_OSLI_MALLOC_CLASS, M_WAITOK)) == NULL) { error = ENOMEM; tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2016, "Could not alloc mem for " "fw_passthru data_buf", error); goto fw_passthru_err; } /* Copy the payload. */ if ((error = copyin((TW_VOID *)(user_buf->pdata), req->data, user_buf->driver_pkt.buffer_length)) != 0) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2017, "Could not copyin fw_passthru data_buf", error); goto fw_passthru_err; } pt_req->sgl_entries = 1; /* will be updated during mapping */ req->flags |= (TW_OSLI_REQ_FLAGS_DATA_IN | TW_OSLI_REQ_FLAGS_DATA_OUT); } else pt_req->sgl_entries = 0; /* no payload */ pt_req->cmd_pkt = (TW_VOID *)(&(user_buf->cmd_pkt)); pt_req->cmd_pkt_length = sizeof(struct tw_cl_command_packet); if ((error = tw_osli_map_request(req))) goto fw_passthru_err; end_time = tw_osl_get_local_time() + timeout; while (req->state != TW_OSLI_REQ_STATE_COMPLETE) { mtx_lock(req->ioctl_wake_timeout_lock); req->flags |= TW_OSLI_REQ_FLAGS_SLEEPING; error = mtx_sleep(req, req->ioctl_wake_timeout_lock, 0, "twa_passthru", timeout*hz); mtx_unlock(req->ioctl_wake_timeout_lock); if (!(req->flags & TW_OSLI_REQ_FLAGS_SLEEPING)) error = 0; req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; if (! error) { if (((error = req->error_code)) || ((error = (req->state != TW_OSLI_REQ_STATE_COMPLETE))) || ((error = req_pkt->status))) goto fw_passthru_err; break; } if (req_pkt->status) { error = req_pkt->status; goto fw_passthru_err; } if (error == EWOULDBLOCK) { /* Time out! 
*/ if ((!(req->error_code)) && (req->state == TW_OSLI_REQ_STATE_COMPLETE) && (!(req_pkt->status)) ) { #ifdef TW_OSL_DEBUG tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x7777, "FALSE Passthru timeout!", req); #endif /* TW_OSL_DEBUG */ error = 0; /* False error */ break; } if (!(tw_cl_is_reset_needed(&(req->ctlr->ctlr_handle)))) { #ifdef TW_OSL_DEBUG tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2018, "Passthru request timed out!", req); #else /* TW_OSL_DEBUG */ device_printf((sc)->bus_dev, "Passthru request timed out!\n"); #endif /* TW_OSL_DEBUG */ tw_cl_reset_ctlr(&(req->ctlr->ctlr_handle)); } error = 0; end_time = tw_osl_get_local_time() + timeout; continue; /* * Don't touch req after a reset. It (and any * associated data) will be * unmapped by the callback. */ } /* * Either the request got completed, or we were woken up by a * signal. Calculate the new timeout, in case it was the latter. */ timeout = (end_time - tw_osl_get_local_time()); } /* End of while loop */ /* If there was a payload, copy it back. */ if ((!error) && (req->length)) if ((error = copyout(req->data, user_buf->pdata, user_buf->driver_pkt.buffer_length))) tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x2019, "Could not copyout fw_passthru data_buf", error); fw_passthru_err: if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) error = EBUSY; user_buf->driver_pkt.os_status = error; /* Free resources. */ if (req->data) free(req->data, TW_OSLI_MALLOC_CLASS); tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); return(error); } /* * Function name: tw_osl_complete_passthru * Description: Called to complete passthru requests. * * Input: req_handle -- ptr to request handle * Output: None * Return value: None */ TW_VOID tw_osl_complete_passthru(struct tw_cl_req_handle *req_handle) { struct tw_osli_req_context *req = req_handle->osl_req_ctxt; struct tw_cl_req_packet *req_pkt = (struct tw_cl_req_packet *)(&req->req_pkt); struct twa_softc *sc = req->ctlr; tw_osli_dbg_dprintf(5, sc, "entered"); if (req->state != TW_OSLI_REQ_STATE_BUSY) { tw_osli_printf(sc, "request = %p, status = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201B, "Unposted command completed!!", req, req->state); } /* * Remove request from the busy queue. Just mark it complete. * There's no need to move it into the complete queue as we are * going to be done with it right now. */ req->state = TW_OSLI_REQ_STATE_COMPLETE; tw_osli_req_q_remove_item(req, TW_OSLI_BUSY_Q); tw_osli_unmap_request(req); /* * Don't do a wake up if there was an error even before the request * was sent down to the Common Layer, and we hadn't gotten an * EINPROGRESS. The request originator will then be returned an * error, and he can do the clean-up. */ if ((req->error_code) && (!(req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS))) return; if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { if (req->flags & TW_OSLI_REQ_FLAGS_SLEEPING) { /* Wake up the sleeping command originator. */ tw_osli_dbg_dprintf(5, sc, "Waking up originator of request %p", req); req->flags &= ~TW_OSLI_REQ_FLAGS_SLEEPING; wakeup_one(req); } else { /* * If the request completed even before mtx_sleep * was called, simply return. 
*/ if (req->flags & TW_OSLI_REQ_FLAGS_MAPPED) return; if (req_pkt->status == TW_CL_ERR_REQ_BUS_RESET) return; tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201C, "Passthru callback called, " "and caller not sleeping", req); } } else { tw_osli_printf(sc, "request = %p", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201D, "Passthru callback called for non-passthru request", req); } } /* * Function name: tw_osli_get_request * Description: Gets a request pkt from the free queue. * * Input: sc -- ptr to OSL internal ctlr context * Output: None * Return value: ptr to request pkt -- success * NULL -- failure */ struct tw_osli_req_context * tw_osli_get_request(struct twa_softc *sc) { struct tw_osli_req_context *req; tw_osli_dbg_dprintf(4, sc, "entered"); /* Get a free request packet. */ req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q); /* Initialize some fields to their defaults. */ if (req) { req->req_handle.osl_req_ctxt = NULL; req->req_handle.cl_req_ctxt = NULL; req->req_handle.is_io = 0; req->data = NULL; req->length = 0; req->deadline = 0; req->real_data = NULL; req->real_length = 0; req->state = TW_OSLI_REQ_STATE_INIT;/* req being initialized */ req->flags = 0; req->error_code = 0; req->orig_req = NULL; bzero(&(req->req_pkt), sizeof(struct tw_cl_req_packet)); } return(req); } /* * Function name: twa_map_load_data_callback * Description: Callback of bus_dmamap_load for the buffer associated * with data. Updates the cmd pkt (size/sgl_entries * fields, as applicable) to reflect the number of sg * elements. * * Input: arg -- ptr to OSL internal request context * segs -- ptr to a list of segment descriptors * nsegments--# of segments * error -- 0 if no errors encountered before callback, * non-zero if errors were encountered * Output: None * Return value: None */ static TW_VOID twa_map_load_data_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error) { struct tw_osli_req_context *req = (struct tw_osli_req_context *)arg; struct twa_softc *sc = req->ctlr; struct tw_cl_req_packet *req_pkt = &(req->req_pkt); tw_osli_dbg_dprintf(10, sc, "entered"); if (error == EINVAL) { req->error_code = error; return; } /* Mark the request as currently being processed. */ req->state = TW_OSLI_REQ_STATE_BUSY; /* Move the request into the busy queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); req->flags |= TW_OSLI_REQ_FLAGS_MAPPED; if (error == EFBIG) { req->error_code = error; goto out; } if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { struct tw_cl_passthru_req_packet *pt_req; if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_PREREAD); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { /* * If we're using an alignment buffer, and we're * writing data, copy the real data out. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->real_data, req->data, req->real_length); bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_PREWRITE); } pt_req = &(req_pkt->gen_req_pkt.pt_req); pt_req->sg_list = (TW_UINT8 *)segs; pt_req->sgl_entries += (nsegments - 1); error = tw_cl_fw_passthru(&(sc->ctlr_handle), req_pkt, &(req->req_handle)); } else { struct tw_cl_scsi_req_packet *scsi_req; if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_PREREAD); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) { /* * If we're using an alignment buffer, and we're * writing data, copy the real data out. 
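 *
 * The alignment buffer is the bounce buffer that tw_osli_map_request()
 * (below) sets up whenever req->data or req->length is not a multiple of
 * sc->sg_size_factor; the length is padded with the usual roundup2()
 * idiom from <sys/param.h>. A worked example, assuming the typical
 * factor of 512: a 1000-byte request is bounced through a buffer of
 *
 *	(1000 + (512 - 1)) & ~(512 - 1) = 1024 bytes.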
*/ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->real_data, req->data, req->real_length); bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_PREWRITE); } scsi_req = &(req_pkt->gen_req_pkt.scsi_req); scsi_req->sg_list = (TW_UINT8 *)segs; scsi_req->sgl_entries += (nsegments - 1); error = tw_cl_start_io(&(sc->ctlr_handle), req_pkt, &(req->req_handle)); } out: if (error) { req->error_code = error; req_pkt->tw_osl_callback(&(req->req_handle)); /* * If the caller had been returned EINPROGRESS, and he has * registered a callback for handling completion, the callback * will never get called because we were unable to submit the * request. So, free up the request right here. */ if (req->flags & TW_OSLI_REQ_FLAGS_IN_PROGRESS) tw_osli_req_q_insert_tail(req, TW_OSLI_FREE_Q); } } /* * Function name: twa_map_load_callback * Description: Callback of bus_dmamap_load for the buffer associated * with a cmd pkt. * * Input: arg -- ptr to variable to hold phys addr * segs -- ptr to a list of segment descriptors * nsegments--# of segments * error -- 0 if no errors encountered before callback, * non-zero if errors were encountered * Output: None * Return value: None */ static TW_VOID twa_map_load_callback(TW_VOID *arg, bus_dma_segment_t *segs, TW_INT32 nsegments, TW_INT32 error) { *((bus_addr_t *)arg) = segs[0].ds_addr; } /* * Function name: tw_osli_map_request * Description: Maps a cmd pkt and data associated with it, into * DMA'able memory. * * Input: req -- ptr to request pkt * Output: None * Return value: 0 -- success * non-zero-- failure */ TW_INT32 tw_osli_map_request(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; TW_INT32 error = 0; tw_osli_dbg_dprintf(10, sc, "entered"); /* If the command involves data, map that too. */ if (req->data != NULL) { /* * It's sufficient for the data pointer to be 4-byte aligned * to work with 9000. However, if 4-byte aligned addresses * are passed to bus_dmamap_load, we can get back sg elements * that are not 512-byte multiples in size. So, we will let * only those buffers that are 512-byte aligned to pass * through, and bounce the rest, so as to make sure that we * always get back sg elements that are 512-byte multiples * in size. */ if (((vm_offset_t)req->data % sc->sg_size_factor) || (req->length % sc->sg_size_factor)) { req->flags |= TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED; /* Save original data pointer and length. */ req->real_data = req->data; req->real_length = req->length; req->length = (req->length + (sc->sg_size_factor - 1)) & ~(sc->sg_size_factor - 1); req->data = malloc(req->length, TW_OSLI_MALLOC_CLASS, M_NOWAIT); if (req->data == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x201E, "Failed to allocate memory " "for bounce buffer", ENOMEM); /* Restore original data pointer and length. */ req->data = req->real_data; req->length = req->real_length; return(ENOMEM); } } /* * Map the data buffer into bus space and build the SG list. */ if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { /* Lock against multiple simultaneous ioctl calls. */ mtx_lock_spin(sc->io_lock); error = bus_dmamap_load(sc->ioctl_tag, sc->ioctl_map, req->data, req->length, twa_map_load_data_callback, req, BUS_DMA_WAITOK); mtx_unlock_spin(sc->io_lock); } else if (req->flags & TW_OSLI_REQ_FLAGS_CCB) { error = bus_dmamap_load_ccb(sc->dma_tag, req->dma_map, req->orig_req, twa_map_load_data_callback, req, BUS_DMA_WAITOK); } else { /* * There's only one CAM I/O thread running at a time. 
* So, there's no need to hold the io_lock. */ error = bus_dmamap_load(sc->dma_tag, req->dma_map, req->data, req->length, twa_map_load_data_callback, req, BUS_DMA_WAITOK); } if (!error) error = req->error_code; else { if (error == EINPROGRESS) { /* * Specifying sc->io_lock as the lockfuncarg * in ...tag_create should protect the access * of ...FLAGS_MAPPED from the callback. */ mtx_lock_spin(sc->io_lock); if (!(req->flags & TW_OSLI_REQ_FLAGS_MAPPED)) req->flags |= TW_OSLI_REQ_FLAGS_IN_PROGRESS; tw_osli_disallow_new_requests(sc, &(req->req_handle)); mtx_unlock_spin(sc->io_lock); error = 0; } else { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER, 0x9999, "Failed to map DMA memory " "for I/O request", error); req->flags |= TW_OSLI_REQ_FLAGS_FAILED; /* Free alignment buffer if it was used. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { free(req->data, TW_OSLI_MALLOC_CLASS); /* * Restore original data pointer * and length. */ req->data = req->real_data; req->length = req->real_length; } } } } else { /* Mark the request as currently being processed. */ req->state = TW_OSLI_REQ_STATE_BUSY; /* Move the request into the busy queue. */ tw_osli_req_q_insert_tail(req, TW_OSLI_BUSY_Q); if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) error = tw_cl_fw_passthru(&sc->ctlr_handle, &(req->req_pkt), &(req->req_handle)); else error = tw_cl_start_io(&sc->ctlr_handle, &(req->req_pkt), &(req->req_handle)); if (error) { req->error_code = error; req->req_pkt.tw_osl_callback(&(req->req_handle)); } } return(error); } /* * Function name: tw_osli_unmap_request * Description: Undoes the mapping done by tw_osli_map_request. * * Input: req -- ptr to request pkt * Output: None * Return value: None */ TW_VOID tw_osli_unmap_request(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; tw_osli_dbg_dprintf(10, sc, "entered"); /* If the command involved data, unmap that too. */ if (req->data != NULL) { if (req->flags & TW_OSLI_REQ_FLAGS_PASSTHRU) { /* Lock against multiple simultaneous ioctl calls. */ mtx_lock_spin(sc->io_lock); if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_POSTREAD); /* * If we are using a bounce buffer, and we are * reading data, copy the real data in. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->data, req->real_data, req->real_length); } if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) bus_dmamap_sync(sc->ioctl_tag, sc->ioctl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ioctl_tag, sc->ioctl_map); mtx_unlock_spin(sc->io_lock); } else { if (req->flags & TW_OSLI_REQ_FLAGS_DATA_IN) { bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_POSTREAD); /* * If we are using a bounce buffer, and we are * reading data, copy the real data in. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) bcopy(req->data, req->real_data, req->real_length); } if (req->flags & TW_OSLI_REQ_FLAGS_DATA_OUT) bus_dmamap_sync(sc->dma_tag, req->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dma_tag, req->dma_map); } } /* Free alignment buffer if it was used. */ if (req->flags & TW_OSLI_REQ_FLAGS_DATA_COPY_NEEDED) { free(req->data, TW_OSLI_MALLOC_CLASS); /* Restore original data pointer and length. 
*/ req->data = req->real_data; req->length = req->real_length; } } #ifdef TW_OSL_DEBUG TW_VOID twa_report_stats(TW_VOID); TW_VOID twa_reset_stats(TW_VOID); TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc); TW_VOID twa_print_req_info(struct tw_osli_req_context *req); /* * Function name: twa_report_stats * Description: For being called from ddb. Calls functions that print * OSL and CL internal stats for the controller. * * Input: None * Output: None * Return value: None */ TW_VOID twa_report_stats(TW_VOID) { struct twa_softc *sc; TW_INT32 i; for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { tw_osli_print_ctlr_stats(sc); tw_cl_print_ctlr_stats(&sc->ctlr_handle); } } /* * Function name: tw_osli_print_ctlr_stats * Description: For being called from ddb. Prints OSL controller stats * * Input: sc -- ptr to OSL internal controller context * Output: None * Return value: None */ TW_VOID tw_osli_print_ctlr_stats(struct twa_softc *sc) { twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc); twa_printf(sc, "OSLq type current max\n"); twa_printf(sc, "free %04d %04d\n", sc->q_stats[TW_OSLI_FREE_Q].cur_len, sc->q_stats[TW_OSLI_FREE_Q].max_len); twa_printf(sc, "busy %04d %04d\n", sc->q_stats[TW_OSLI_BUSY_Q].cur_len, sc->q_stats[TW_OSLI_BUSY_Q].max_len); } /* * Function name: twa_print_req_info * Description: For being called from ddb. Calls functions that print * OSL and CL internal details for the request. * * Input: req -- ptr to OSL internal request context * Output: None * Return value: None */ TW_VOID twa_print_req_info(struct tw_osli_req_context *req) { struct twa_softc *sc = req->ctlr; twa_printf(sc, "OSL details for request:\n"); twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n" "data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n" "state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n" "next_req = %p, prev_req = %p, dma_map = %p\n", req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt, req->data, req->length, req->real_data, req->real_length, req->state, req->flags, req->error_code, req->orig_req, req->link.next, req->link.prev, req->dma_map); tw_cl_print_req_info(&(req->req_handle)); } /* * Function name: twa_reset_stats * Description: For being called from ddb. * Resets some OSL controller stats. * * Input: None * Output: None * Return value: None */ TW_VOID twa_reset_stats(TW_VOID) { struct twa_softc *sc; TW_INT32 i; for (i = 0; (sc = devclass_get_softc(twa_devclass, i)) != NULL; i++) { sc->q_stats[TW_OSLI_FREE_Q].max_len = 0; sc->q_stats[TW_OSLI_BUSY_Q].max_len = 0; tw_cl_reset_stats(&sc->ctlr_handle); } } #endif /* TW_OSL_DEBUG */ Index: head/sys/x86/iommu/intel_ctx.c =================================================================== --- head/sys/x86/iommu/intel_ctx.c (revision 346385) +++ head/sys/x86/iommu/intel_ctx.c (revision 346386) @@ -1,846 +1,846 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Konstantin Belousov * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_DMAR_CTX, "dmar_ctx", "Intel DMAR Context"); static MALLOC_DEFINE(M_DMAR_DOMAIN, "dmar_dom", "Intel DMAR Domain"); static void dmar_domain_unload_task(void *arg, int pending); static void dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain); static void dmar_domain_destroy(struct dmar_domain *domain); static void dmar_ensure_ctx_page(struct dmar_unit *dmar, int bus) { struct sf_buf *sf; dmar_root_entry_t *re; vm_page_t ctxm; /* * Allocated context page must be linked. */ ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_NOALLOC); if (ctxm != NULL) return; /* * Page not present, allocate and link. Note that other * thread might execute this sequence in parallel. This * should be safe, because the context entries written by both * threads are equal. 
*/ TD_PREP_PINNED_ASSERT; ctxm = dmar_pgalloc(dmar->ctx_obj, 1 + bus, DMAR_PGF_ZERO | DMAR_PGF_WAITOK); re = dmar_map_pgtbl(dmar->ctx_obj, 0, DMAR_PGF_NOALLOC, &sf); re += bus; dmar_pte_store(&re->r1, DMAR_ROOT_R1_P | (DMAR_ROOT_R1_CTP_MASK & VM_PAGE_TO_PHYS(ctxm))); dmar_flush_root_to_ram(dmar, re); dmar_unmap_pgtbl(sf); TD_PINNED_ASSERT; } static dmar_ctx_entry_t * dmar_map_ctx_entry(struct dmar_ctx *ctx, struct sf_buf **sfp) { dmar_ctx_entry_t *ctxp; ctxp = dmar_map_pgtbl(ctx->domain->dmar->ctx_obj, 1 + PCI_RID2BUS(ctx->rid), DMAR_PGF_NOALLOC | DMAR_PGF_WAITOK, sfp); ctxp += ctx->rid & 0xff; return (ctxp); } static void ctx_tag_init(struct dmar_ctx *ctx, device_t dev) { bus_addr_t maxaddr; maxaddr = MIN(ctx->domain->end, BUS_SPACE_MAXADDR); ctx->ctx_tag.common.ref_count = 1; /* Prevent free */ ctx->ctx_tag.common.impl = &bus_dma_dmar_impl; - ctx->ctx_tag.common.boundary = PCI_DMA_BOUNDARY; + ctx->ctx_tag.common.boundary = 0; ctx->ctx_tag.common.lowaddr = maxaddr; ctx->ctx_tag.common.highaddr = maxaddr; ctx->ctx_tag.common.maxsize = maxaddr; ctx->ctx_tag.common.nsegments = BUS_SPACE_UNRESTRICTED; ctx->ctx_tag.common.maxsegsz = maxaddr; ctx->ctx_tag.ctx = ctx; ctx->ctx_tag.owner = dev; } static void ctx_id_entry_init(struct dmar_ctx *ctx, dmar_ctx_entry_t *ctxp, bool move) { struct dmar_unit *unit; struct dmar_domain *domain; vm_page_t ctx_root; domain = ctx->domain; unit = domain->dmar; KASSERT(move || (ctxp->ctx1 == 0 && ctxp->ctx2 == 0), ("dmar%d: initialized ctx entry %d:%d:%d 0x%jx 0x%jx", unit->unit, pci_get_bus(ctx->ctx_tag.owner), pci_get_slot(ctx->ctx_tag.owner), pci_get_function(ctx->ctx_tag.owner), ctxp->ctx1, ctxp->ctx2)); /* * For update due to move, the store is not atomic. It is * possible that DMAR read upper doubleword, while low * doubleword is not yet updated. The domain id is stored in * the upper doubleword, while the table pointer in the lower. * * There is no good solution, for the same reason it is wrong * to clear P bit in the ctx entry for update. */ dmar_pte_store1(&ctxp->ctx2, DMAR_CTX2_DID(domain->domain) | domain->awlvl); if ((domain->flags & DMAR_DOMAIN_IDMAP) != 0 && (unit->hw_ecap & DMAR_ECAP_PT) != 0) { KASSERT(domain->pgtbl_obj == NULL, ("ctx %p non-null pgtbl_obj", ctx)); dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_PASS | DMAR_CTX1_P); } else { ctx_root = dmar_pgalloc(domain->pgtbl_obj, 0, DMAR_PGF_NOALLOC); dmar_pte_store1(&ctxp->ctx1, DMAR_CTX1_T_UNTR | (DMAR_CTX1_ASR_MASK & VM_PAGE_TO_PHYS(ctx_root)) | DMAR_CTX1_P); } dmar_flush_ctx_to_ram(unit, ctxp); } static int dmar_flush_for_ctx_entry(struct dmar_unit *dmar, bool force) { int error; /* * If dmar declares Caching Mode as Set, follow 11.5 "Caching * Mode Consideration" and do the (global) invalidation of the * negative TLB entries. 
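 *
 * Put the other way around (illustrative restatement, not driver code),
 * the global invalidation below is required exactly when the unit reports
 * Caching Mode or when the caller forces it:
 *
 *	need_inval = (dmar->hw_cap & DMAR_CAP_CM) != 0 || force;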
	if ((dmar->hw_cap & DMAR_CAP_CM) == 0 && !force)
		return (0);
	if (dmar->qi_enabled) {
		dmar_qi_invalidate_ctx_glob_locked(dmar);
		if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		return (0);
	}
	error = dmar_inv_ctx_glob(dmar);
	if (error == 0 && ((dmar->hw_ecap & DMAR_ECAP_DI) != 0 || force))
		error = dmar_inv_iotlb_glob(dmar);
	return (error);
}

static int
domain_init_rmrr(struct dmar_domain *domain, device_t dev, int bus,
    int slot, int func, int dev_domain, int dev_busno,
    const void *dev_path, int dev_path_len)
{
	struct dmar_map_entries_tailq rmrr_entries;
	struct dmar_map_entry *entry, *entry1;
	vm_page_t *ma;
	dmar_gaddr_t start, end;
	vm_pindex_t size, i;
	int error, error1;

	error = 0;
	TAILQ_INIT(&rmrr_entries);
	dmar_dev_parse_rmrr(domain, dev_domain, dev_busno, dev_path,
	    dev_path_len, &rmrr_entries);
	TAILQ_FOREACH_SAFE(entry, &rmrr_entries, unroll_link, entry1) {
		/*
		 * VT-d specification requires that the start of an
		 * RMRR entry is 4k-aligned.  Buggy BIOSes put
		 * anything into the start and end fields.  Truncate
		 * and round as necessary.
		 *
		 * We also allow overlapping RMRR entries, see
		 * dmar_gas_alloc_region().
		 */
		start = entry->start;
		end = entry->end;
		if (bootverbose)
			printf("dmar%d ctx pci%d:%d:%d RMRR [%#jx, %#jx]\n",
			    domain->dmar->unit, bus, slot, func,
			    (uintmax_t)start, (uintmax_t)end);
		entry->start = trunc_page(start);
		entry->end = round_page(end);
		if (entry->start == entry->end) {
			/* Workaround for some AMI (?) BIOSes */
			if (bootverbose) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf("BIOS bug: dmar%d RMRR "
				    "region (%jx, %jx) corrected\n",
				    domain->dmar->unit, start, end);
			}
			entry->end += DMAR_PAGE_SIZE * 0x20;
		}
		size = OFF_TO_IDX(entry->end - entry->start);
		ma = malloc(sizeof(vm_page_t) * size, M_TEMP, M_WAITOK);
		for (i = 0; i < size; i++) {
			ma[i] = vm_page_getfake(entry->start + PAGE_SIZE * i,
			    VM_MEMATTR_DEFAULT);
		}
		error1 = dmar_gas_map_region(domain, entry,
		    DMAR_MAP_ENTRY_READ | DMAR_MAP_ENTRY_WRITE,
		    DMAR_GM_CANWAIT, ma);
		/*
		 * Non-failed RMRR entries are owned by the context rb
		 * tree.  Get rid of the failed entry, but do not stop
		 * the loop.  The rest of the parsed RMRR entries are
		 * loaded and removed on the context destruction.
		 */
		if (error1 == 0 && entry->end != entry->start) {
			DMAR_LOCK(domain->dmar);
			domain->refs++; /* XXXKIB prevent free */
			domain->flags |= DMAR_DOMAIN_RMRR;
			DMAR_UNLOCK(domain->dmar);
		} else {
			if (error1 != 0) {
				if (dev != NULL)
					device_printf(dev, "");
				printf("pci%d:%d:%d ", bus, slot, func);
				printf(
			"dmar%d failed to map RMRR region (%jx, %jx) %d\n",
				    domain->dmar->unit, start, end, error1);
				error = error1;
			}
			TAILQ_REMOVE(&rmrr_entries, entry, unroll_link);
			dmar_gas_free_entry(domain, entry);
		}
		for (i = 0; i < size; i++)
			vm_page_putfake(ma[i]);
		free(ma, M_TEMP);
	}
	return (error);
}

static struct dmar_domain *
dmar_domain_alloc(struct dmar_unit *dmar, bool id_mapped)
{
	struct dmar_domain *domain;
	int error, id, mgaw;

	id = alloc_unr(dmar->domids);
	if (id == -1)
		return (NULL);
	domain = malloc(sizeof(*domain), M_DMAR_DOMAIN, M_WAITOK | M_ZERO);
	domain->domain = id;
	LIST_INIT(&domain->contexts);
	RB_INIT(&domain->rb_root);
	TAILQ_INIT(&domain->unload_entries);
	TASK_INIT(&domain->unload_task, 0, dmar_domain_unload_task, domain);
	mtx_init(&domain->lock, "dmardom", NULL, MTX_DEF);
	domain->dmar = dmar;

	/*
	 * For now, use the maximal usable physical address of the
	 * installed memory to calculate the mgaw on an id_mapped domain.
	 * It is useful for the identity mapping, and less so for the
	 * virtualized bus address space.
	 */
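	/*
	 * Note: mgaw is the guest address width needed to cover
	 * domain->end; domain_set_agaw() presumably rounds it to an
	 * adjusted guest address width (agaw) supported by this unit,
	 * which in turn fixes the number of page-table levels used for
	 * the domain.
	 */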
	domain->end = id_mapped ? ptoa(Maxmem) : BUS_SPACE_MAXADDR;
	mgaw = dmar_maxaddr2mgaw(dmar, domain->end, !id_mapped);
	error = domain_set_agaw(domain, mgaw);
	if (error != 0)
		goto fail;
	if (!id_mapped)
		/* Use all supported address space for remapping. */
		domain->end = 1ULL << (domain->agaw - 1);

	dmar_gas_init_domain(domain);

	if (id_mapped) {
		if ((dmar->hw_ecap & DMAR_ECAP_PT) == 0) {
			domain->pgtbl_obj = domain_get_idmap_pgtbl(domain,
			    domain->end);
		}
		domain->flags |= DMAR_DOMAIN_IDMAP;
	} else {
		error = domain_alloc_pgtbl(domain);
		if (error != 0)
			goto fail;
		/* Disable local apic region access */
		error = dmar_gas_reserve_region(domain, 0xfee00000,
		    0xfeefffff + 1);
		if (error != 0)
			goto fail;
	}
	return (domain);

fail:
	dmar_domain_destroy(domain);
	return (NULL);
}

static struct dmar_ctx *
dmar_ctx_alloc(struct dmar_domain *domain, uint16_t rid)
{
	struct dmar_ctx *ctx;

	ctx = malloc(sizeof(*ctx), M_DMAR_CTX, M_WAITOK | M_ZERO);
	ctx->domain = domain;
	ctx->rid = rid;
	ctx->refs = 1;
	return (ctx);
}

static void
dmar_ctx_link(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = ctx->domain;
	DMAR_ASSERT_LOCKED(domain->dmar);
	KASSERT(domain->refs >= domain->ctx_cnt,
	    ("dom %p ref underflow %d %d", domain, domain->refs,
	    domain->ctx_cnt));
	domain->refs++;
	domain->ctx_cnt++;
	LIST_INSERT_HEAD(&domain->contexts, ctx, link);
}

static void
dmar_ctx_unlink(struct dmar_ctx *ctx)
{
	struct dmar_domain *domain;

	domain = ctx->domain;
	DMAR_ASSERT_LOCKED(domain->dmar);
	KASSERT(domain->refs > 0,
	    ("domain %p ctx dtr refs %d", domain, domain->refs));
	KASSERT(domain->ctx_cnt >= domain->refs,
	    ("domain %p ctx dtr refs %d ctx_cnt %d", domain,
	    domain->refs, domain->ctx_cnt));
	domain->refs--;
	domain->ctx_cnt--;
	LIST_REMOVE(ctx, link);
}

static void
dmar_domain_destroy(struct dmar_domain *domain)
{

	KASSERT(TAILQ_EMPTY(&domain->unload_entries),
	    ("unfinished unloads %p", domain));
	KASSERT(LIST_EMPTY(&domain->contexts),
	    ("destroying dom %p with contexts", domain));
	KASSERT(domain->ctx_cnt == 0,
	    ("destroying dom %p with ctx_cnt %d", domain, domain->ctx_cnt));
	KASSERT(domain->refs == 0,
	    ("destroying dom %p with refs %d", domain, domain->refs));
	if ((domain->flags & DMAR_DOMAIN_GAS_INITED) != 0) {
		DMAR_DOMAIN_LOCK(domain);
		dmar_gas_fini_domain(domain);
		DMAR_DOMAIN_UNLOCK(domain);
	}
	if ((domain->flags & DMAR_DOMAIN_PGTBL_INITED) != 0) {
		if (domain->pgtbl_obj != NULL)
			DMAR_DOMAIN_PGLOCK(domain);
		domain_free_pgtbl(domain);
	}
	mtx_destroy(&domain->lock);
	free_unr(domain->dmar->domids, domain->domain);
	free(domain, M_DMAR_DOMAIN);
}

static struct dmar_ctx *
dmar_get_ctx_for_dev1(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{
	struct dmar_domain *domain, *domain1;
	struct dmar_ctx *ctx, *ctx1;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int bus, slot, func, error;
	bool enable;

	if (dev != NULL) {
		bus = pci_get_bus(dev);
		slot = pci_get_slot(dev);
		func = pci_get_function(dev);
	} else {
		bus = PCI_RID2BUS(rid);
		slot = PCI_RID2SLOT(rid);
		func = PCI_RID2FUNC(rid);
	}
	enable = false;
	TD_PREP_PINNED_ASSERT;
	DMAR_LOCK(dmar);
	ctx = dmar_find_ctx_locked(dmar, rid);
	error = 0;
	if (ctx == NULL) {
		/*
		 * Perform the allocations which require sleep or have
		 * a higher chance to succeed if the sleep is allowed.
		 */
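		/*
		 * Note: the dmar lock is dropped for the sleepable
		 * allocations below and re-taken afterwards; the
		 * "Recheck the contexts" step further down handles the
		 * case where another thread created the context while
		 * the lock was dropped.
		 */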
		DMAR_UNLOCK(dmar);
		dmar_ensure_ctx_page(dmar, PCI_RID2BUS(rid));
		domain1 = dmar_domain_alloc(dmar, id_mapped);
		if (domain1 == NULL) {
			TD_PINNED_ASSERT;
			return (NULL);
		}
		if (!id_mapped) {
			error = domain_init_rmrr(domain1, dev, bus,
			    slot, func, dev_domain, dev_busno, dev_path,
			    dev_path_len);
			if (error != 0) {
				dmar_domain_destroy(domain1);
				TD_PINNED_ASSERT;
				return (NULL);
			}
		}
		ctx1 = dmar_ctx_alloc(domain1, rid);
		ctxp = dmar_map_ctx_entry(ctx1, &sf);
		DMAR_LOCK(dmar);

		/*
		 * Recheck the contexts, another thread might have
		 * already allocated the needed one.
		 */
		ctx = dmar_find_ctx_locked(dmar, rid);
		if (ctx == NULL) {
			domain = domain1;
			ctx = ctx1;
			dmar_ctx_link(ctx);
			ctx->ctx_tag.owner = dev;
			ctx_tag_init(ctx, dev);

			/*
			 * This is the first activated context for the
			 * DMAR unit.  Enable the translation after
			 * everything is set up.
			 */
			if (LIST_EMPTY(&dmar->domains))
				enable = true;
			LIST_INSERT_HEAD(&dmar->domains, domain, link);
			ctx_id_entry_init(ctx, ctxp, false);
			if (dev != NULL) {
				device_printf(dev,
			    "dmar%d pci%d:%d:%d:%d rid %x domain %d mgaw %d "
				    "agaw %d %s-mapped\n",
				    dmar->unit, dmar->segment, bus, slot,
				    func, rid, domain->domain, domain->mgaw,
				    domain->agaw, id_mapped ? "id" : "re");
			}
			dmar_unmap_pgtbl(sf);
		} else {
			dmar_unmap_pgtbl(sf);
			dmar_domain_destroy(domain1);
			/* Nothing needs to be done to destroy ctx1. */
			free(ctx1, M_DMAR_CTX);
			domain = ctx->domain;
			ctx->refs++; /* tag referenced us */
		}
	} else {
		domain = ctx->domain;
		if (ctx->ctx_tag.owner == NULL)
			ctx->ctx_tag.owner = dev;
		ctx->refs++; /* tag referenced us */
	}
	error = dmar_flush_for_ctx_entry(dmar, enable);
	if (error != 0) {
		dmar_free_ctx_locked(dmar, ctx);
		TD_PINNED_ASSERT;
		return (NULL);
	}

	/*
	 * The dmar lock was potentially dropped between the check for the
	 * empty context list and now.  Recheck the state of GCMD_TE to
	 * avoid an unneeded command.
	 */
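	/*
	 * Note: when rmrr_init is set, translation is deliberately left
	 * disabled here; the caller is expected to enable it later, once
	 * the RMRR regions have been mapped, so that devices with
	 * BIOS-defined DMA regions keep working.
	 */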
	if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
		error = dmar_enable_translation(dmar);
		if (error == 0) {
			if (bootverbose) {
				printf("dmar%d: enabled translation\n",
				    dmar->unit);
			}
		} else {
			printf("dmar%d: enabling translation failed, "
			    "error %d\n", dmar->unit, error);
			dmar_free_ctx_locked(dmar, ctx);
			TD_PINNED_ASSERT;
			return (NULL);
		}
	}
	DMAR_UNLOCK(dmar);
	TD_PINNED_ASSERT;
	return (ctx);
}

struct dmar_ctx *
dmar_get_ctx_for_dev(struct dmar_unit *dmar, device_t dev, uint16_t rid,
    bool id_mapped, bool rmrr_init)
{
	int dev_domain, dev_path_len, dev_busno;

	dev_domain = pci_get_domain(dev);
	dev_path_len = dmar_dev_depth(dev);
	ACPI_DMAR_PCI_PATH dev_path[dev_path_len];
	dmar_dev_path(dev, &dev_busno, dev_path, dev_path_len);
	return (dmar_get_ctx_for_dev1(dmar, dev, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

struct dmar_ctx *
dmar_get_ctx_for_devpath(struct dmar_unit *dmar, uint16_t rid,
    int dev_domain, int dev_busno, const void *dev_path, int dev_path_len,
    bool id_mapped, bool rmrr_init)
{

	return (dmar_get_ctx_for_dev1(dmar, NULL, rid, dev_domain, dev_busno,
	    dev_path, dev_path_len, id_mapped, rmrr_init));
}

int
dmar_move_ctx_to_domain(struct dmar_domain *domain, struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;
	struct dmar_domain *old_domain;
	dmar_ctx_entry_t *ctxp;
	struct sf_buf *sf;
	int error;

	dmar = domain->dmar;
	old_domain = ctx->domain;
	if (domain == old_domain)
		return (0);
	KASSERT(old_domain->dmar == dmar,
	    ("domain %p %u moving between dmars %u %u", domain,
	    domain->domain, old_domain->dmar->unit, domain->dmar->unit));
	TD_PREP_PINNED_ASSERT;

	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	dmar_ctx_unlink(ctx);
	ctx->domain = domain;
	dmar_ctx_link(ctx);
	ctx_id_entry_init(ctx, ctxp, true);
	dmar_unmap_pgtbl(sf);
	error = dmar_flush_for_ctx_entry(dmar, true);
	/* If the flush failed, rolling back would not work either. */
	printf("dmar%d rid %x domain %d->%d %s-mapped\n",
	    dmar->unit, ctx->rid, old_domain->domain, domain->domain,
	    (domain->flags & DMAR_DOMAIN_IDMAP) != 0 ? "id" : "re");
	dmar_unref_domain_locked(dmar, old_domain);
	TD_PINNED_ASSERT;
	return (error);
}

static void
dmar_unref_domain_locked(struct dmar_unit *dmar, struct dmar_domain *domain)
{

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(domain->refs >= 1,
	    ("dmar %d domain %p refs %u", dmar->unit, domain, domain->refs));
	KASSERT(domain->refs > domain->ctx_cnt,
	    ("dmar %d domain %p refs %d ctx_cnt %d", dmar->unit, domain,
	    domain->refs, domain->ctx_cnt));

	if (domain->refs > 1) {
		domain->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((domain->flags & DMAR_DOMAIN_RMRR) == 0,
	    ("lost ref on RMRR domain %p", domain));

	LIST_REMOVE(domain, link);
	DMAR_UNLOCK(dmar);

	taskqueue_drain(dmar->delayed_taskqueue, &domain->unload_task);
	dmar_domain_destroy(domain);
}

void
dmar_free_ctx_locked(struct dmar_unit *dmar, struct dmar_ctx *ctx)
{
	struct sf_buf *sf;
	dmar_ctx_entry_t *ctxp;
	struct dmar_domain *domain;

	DMAR_ASSERT_LOCKED(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * If our reference is not last, only the dereference should
	 * be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Otherwise, the context entry must be cleared before the
	 * page table is destroyed.  The mapping of the context
	 * entries page could require sleep; unlock the dmar.
	 */
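	/*
	 * Note: this is the same unlock / map / relock / recheck dance
	 * as in the allocation path: mapping the context entry page may
	 * sleep, so the reference count is re-examined after the lock
	 * is re-acquired.
	 */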
	DMAR_UNLOCK(dmar);
	TD_PREP_PINNED_ASSERT;
	ctxp = dmar_map_ctx_entry(ctx, &sf);
	DMAR_LOCK(dmar);
	KASSERT(ctx->refs >= 1,
	    ("dmar %p ctx %p refs %u", dmar, ctx, ctx->refs));

	/*
	 * Another thread might have referenced the context, in which
	 * case again only the dereference should be performed.
	 */
	if (ctx->refs > 1) {
		ctx->refs--;
		DMAR_UNLOCK(dmar);
		dmar_unmap_pgtbl(sf);
		TD_PINNED_ASSERT;
		return;
	}

	KASSERT((ctx->flags & DMAR_CTX_DISABLED) == 0,
	    ("lost ref on disabled ctx %p", ctx));

	/*
	 * Clear the context pointer and flush the caches.
	 * XXXKIB: cannot do this if any RMRR entries are still present.
	 */
	dmar_pte_clear(&ctxp->ctx1);
	ctxp->ctx2 = 0;
	dmar_flush_ctx_to_ram(dmar, ctxp);
	dmar_inv_ctx_glob(dmar);
	if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
		if (dmar->qi_enabled)
			dmar_qi_invalidate_iotlb_glob_locked(dmar);
		else
			dmar_inv_iotlb_glob(dmar);
	}
	dmar_unmap_pgtbl(sf);
	domain = ctx->domain;
	dmar_ctx_unlink(ctx);
	free(ctx, M_DMAR_CTX);
	dmar_unref_domain_locked(dmar, domain);
	TD_PINNED_ASSERT;
}

void
dmar_free_ctx(struct dmar_ctx *ctx)
{
	struct dmar_unit *dmar;

	dmar = ctx->domain->dmar;
	DMAR_LOCK(dmar);
	dmar_free_ctx_locked(dmar, ctx);
}

/*
 * Returns with the domain locked.
 */
struct dmar_ctx *
dmar_find_ctx_locked(struct dmar_unit *dmar, uint16_t rid)
{
	struct dmar_domain *domain;
	struct dmar_ctx *ctx;

	DMAR_ASSERT_LOCKED(dmar);

	LIST_FOREACH(domain, &dmar->domains, link) {
		LIST_FOREACH(ctx, &domain->contexts, link) {
			if (ctx->rid == rid)
				return (ctx);
		}
	}
	return (NULL);
}

void
dmar_domain_free_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_domain *domain;

	domain = entry->domain;
	DMAR_DOMAIN_LOCK(domain);
	if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
		dmar_gas_free_region(domain, entry);
	else
		dmar_gas_free_space(domain, entry);
	DMAR_DOMAIN_UNLOCK(domain);
	if (free)
		dmar_gas_free_entry(domain, entry);
	else
		entry->flags = 0;
}

void
dmar_domain_unload_entry(struct dmar_map_entry *entry, bool free)
{
	struct dmar_unit *unit;

	unit = entry->domain->dmar;
	if (unit->qi_enabled) {
		DMAR_LOCK(unit);
		dmar_qi_invalidate_locked(entry->domain, entry->start,
		    entry->end - entry->start, &entry->gseq, true);
		if (!free)
			entry->flags |= DMAR_MAP_ENTRY_QI_NF;
		TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry,
		    dmamap_link);
		DMAR_UNLOCK(unit);
	} else {
		domain_flush_iotlb_sync(entry->domain, entry->start,
		    entry->end - entry->start);
		dmar_domain_free_entry(entry, free);
	}
}

static bool
dmar_domain_unload_emit_wait(struct dmar_domain *domain,
    struct dmar_map_entry *entry)
{

	if (TAILQ_NEXT(entry, dmamap_link) == NULL)
		return (true);
	return (domain->batch_no++ % dmar_batch_coalesce == 0);
}
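/*
 * Note: when queued invalidation is available, the IOTLB flushes for the
 * unloaded entries are batched; dmar_domain_unload_emit_wait() appears to
 * request a completion wait descriptor only for the last entry and roughly
 * every dmar_batch_coalesce-th entry, and the entries are parked on
 * tlb_flush_entries until the invalidation completion processing frees them.
 */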
void
dmar_domain_unload(struct dmar_domain *domain,
    struct dmar_map_entries_tailq *entries, bool cansleep)
{
	struct dmar_unit *unit;
	struct dmar_map_entry *entry, *entry1;
	int error;

	unit = domain->dmar;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", domain, entry));
		error = domain_unmap_buf(domain, entry->start, entry->end -
		    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", domain, error));
		if (!unit->qi_enabled) {
			domain_flush_iotlb_sync(domain, entry->start,
			    entry->end - entry->start);
			TAILQ_REMOVE(entries, entry, dmamap_link);
			dmar_domain_free_entry(entry, true);
		}
	}
	if (TAILQ_EMPTY(entries))
		return;

	KASSERT(unit->qi_enabled, ("loaded entry left"));
	DMAR_LOCK(unit);
	TAILQ_FOREACH(entry, entries, dmamap_link) {
		dmar_qi_invalidate_locked(domain, entry->start, entry->end -
		    entry->start, &entry->gseq,
		    dmar_domain_unload_emit_wait(domain, entry));
	}
	TAILQ_CONCAT(&unit->tlb_flush_entries, entries, dmamap_link);
	DMAR_UNLOCK(unit);
}

static void
dmar_domain_unload_task(void *arg, int pending)
{
	struct dmar_domain *domain;
	struct dmar_map_entries_tailq entries;

	domain = arg;
	TAILQ_INIT(&entries);
	for (;;) {
		DMAR_DOMAIN_LOCK(domain);
		TAILQ_SWAP(&domain->unload_entries, &entries,
		    dmar_map_entry, dmamap_link);
		DMAR_DOMAIN_UNLOCK(domain);
		if (TAILQ_EMPTY(&entries))
			break;
		dmar_domain_unload(domain, &entries, true);
	}
}