Index: head/sys/contrib/octeon-sdk/cvmx-dma-engine.h
===================================================================
--- head/sys/contrib/octeon-sdk/cvmx-dma-engine.h (revision 307222)
+++ head/sys/contrib/octeon-sdk/cvmx-dma-engine.h (revision 307223)
@@ -1,378 +1,378 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the PCI / PCIe DMA engines. These are only avialable
* on chips with PCI / PCIe.
*
*
$Revision: 70030 $
*/
#ifndef __CVMX_DMA_ENGINES_H__
#define __CVMX_DMA_ENGINES_H__
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include
#else
#include "cvmx-dpi-defs.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef enum
{
CVMX_DMA_ENGINE_TRANSFER_OUTBOUND = 0, /**< OUTBOUND (read from L2/DRAM, write into PCI / PCIe memory space) */
CVMX_DMA_ENGINE_TRANSFER_INBOUND = 1, /**< INBOUND (read from PCI / PCIe memory space, write into L2/DRAM) */
CVMX_DMA_ENGINE_TRANSFER_INTERNAL = 2, /**< INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM). Only available on chips with PCIe */
CVMX_DMA_ENGINE_TRANSFER_EXTERNAL = 3, /**< EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). Only available on chips with PCIe */
} cvmx_dma_engine_transfer_t;
typedef union
{
uint64_t u64;
struct
{
uint64_t reserved_60_63 : 4; /**< Must be zero */
uint64_t fport : 2; /**< First port. FPort indicates the physical PCIe port used for the
PCIe memory space pointers in the FIRST POINTERS block in the
EXTERNAL-ONLY case. Must be zero in the OUTBOUND, INBOUND and
INTERNAL-ONLY cases. Must be zero on chips with PCI */
uint64_t lport : 2; /**< Last port. LPort indicates the physical PCIe port used for the
PCIe memory space pointers in the LAST POINTERS block in the
OUTBOUND, INBOUND, and EXTERNAL-ONLY cases. Must be zero in the
INTERNAL-ONLY case. Must be zero on chips with PCI */
- cvmx_dma_engine_transfer_t type : 2; /**< Type A given PCI DMA transfer is either OUTBOUND (read from L2/DRAM,
+ cvmx_dma_engine_transfer_t type : 2; /**< Type - A given PCI DMA transfer is either OUTBOUND (read from L2/DRAM,
write into PCI / PCIe memory space), INBOUND (read from PCI / PCIe memory space, write
into L2/DRAM), INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM), or
EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). */
uint64_t wqp : 1; /**< Work-queue pointer. When WQP = 1, PTR (if non-zero) is a pointer to a
work-queue entry that is submitted by the hardware after completing the DMA;
when WQP = 0, PTR (if non-zero) is a pointer to a byte in local memory that
is written to 0 by the hardware after completing the DMA. */
- uint64_t c : 1; /**< C Counter. 1 = use counter 1, 0 = use counter 0.
+ uint64_t c : 1; /**< C - Counter. 1 = use counter 1, 0 = use counter 0.
The C bit selects between the two counters (NPEI_DMA_CNTS[DMA0,DMA1])
that can optionally be updated after an OUTBOUND or EXTERNAL-ONLY
transfer, and also selects between the two forced-interrupt bits
(NPEI_INT_SUMn[DMA0_FI, DMA1_FI]) that can optionally be set after an
OUTBOUND or EXTERNAL-ONLY transfer. C must be zero for INBOUND or
INTERNAL-ONLY transfers. */
- uint64_t ca : 1; /**< CA Counter add.
+ uint64_t ca : 1; /**< CA - Counter add.
When CA = 1, the hardware updates the selected counter after it completes the
PCI DMA OUTBOUND or EXTERNAL-ONLY Instruction.
- If C = 0, PCIE_DMA_CNT0 is updated
- If C = 1, PCIE_DMA_CNT1 is updated.
Note that this update may indirectly cause
NPEI_INT_SUM[DCNT0,DCNT1,DTIME0,DTIME1] to become set (depending
on the NPEI_DMA*_INT_LEVEL settings), so may cause interrupts to occur on a
remote PCI host.
- If NPEI_DMA_CONTROL[O_ADD1] = 1, the counter is updated by 1.
- If NPEI_DMA_CONTROL[O_ADD1] = 0, the counter is updated by the total
bytes in the transfer.
When CA = 0, the hardware does not update any counters.
For an INBOUND or INTERNAL-ONLY PCI DMA transfer, CA must never be
set, and the hardware never adds to the counters. */
- uint64_t fi : 1; /**< FI Force interrupt.
+ uint64_t fi : 1; /**< FI - Force interrupt.
When FI is set for an OUTBOUND or EXTERNAL-ONLY transfer, the hardware
sets a forced interrupt bit after it completes the PCI DMA Instruction. If C = 0,
NPEI_INT_SUMn[DMA0_FI] is set, else NPEI_INT_SUMn[DMA1_FI] is set. For
an INBOUND or INTERNAL-ONLY PCI DMA operation, FI must never be set,
and the hardware never generates interrupts. */
- uint64_t ii : 1; /**< II Ignore the I bit (i.e. the I bit of the PCI DMA instruction local pointer).
+ uint64_t ii : 1; /**< II - Ignore the I bit (i.e. the I bit of the PCI DMA instruction local pointer).
For OUTBOUND transfers when II = 1, ignore the I bit and the FL bit in the
DMA HDR alone determines whether the hardware frees any/all of the local
buffers in the FIRST POINTERS area:
- when FL = 1, the hardware frees the local buffer when II=1.
- when FL = 0, the hardware does not free the local buffer when II=1.
For OUTBOUND transfers when II = 0, the I bit in the local pointer selects
whether local buffers are freed on a pointer-by-pointer basis:
- when (FL I) is true, the hardware frees the local buffer when II=0.
For INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY PCI DMA transfers,
II must never be set, and local buffers are never freed. */
- uint64_t fl : 1; /**< FL Free local buffer.
+ uint64_t fl : 1; /**< FL - Free local buffer.
When FL = 1, for an OUTBOUND operation, it indicates that the local buffers in
the FIRST BUFFERS area should be freed.
If II = 1, the FL bit alone indicates whether the local buffer should be freed:
- when FL = 1, the hardware frees the local buffer when II=1.
- when FL = 0, the hardware does not free the local buffer when II=1.
If II = 0, the I bit in the local pointer (refer to Section 9.5.2) determines whether
the local buffer is freed:
- when (FL I) is true, the hardware frees the local buffer when II=0.
For an INBOUND, INTERNAL-ONLY, or EXTERNAL-ONLY PCI DMA transfer,
FL must never be set, and local buffers are never freed. */
- uint64_t nlst : 4; /**< NLST Number Last pointers.
+ uint64_t nlst : 4; /**< NLST - Number Last pointers.
The number of pointers in the LAST POINTERS area.
In the INBOUND, OUTBOUND, and EXTERNAL-ONLY cases, the LAST
POINTERS area contains PCI components, and the number of 64-bit words
required in the LAST POINTERS area is:
- HDR.NLST + ((HDR.NLST + 3)/4) where the division removes the fraction.
In the INTERNAL-ONLY case, the LAST POINTERS area contains local
pointers, and the number of 64-bit words required in the LAST POINTERS area is:
- HDR.NLST
Note that the sum of the number of 64-bit words in the LAST POINTERS and
FIRST POINTERS area must never exceed 31. */
- uint64_t nfst : 4; /**< NFST Number First pointers.
+ uint64_t nfst : 4; /**< NFST - Number First pointers.
The number of pointers in the FIRST POINTERS area.
In the INBOUND, OUTBOUND, and INTERNAL-ONLY cases, the FIRST
POINTERS area contains local pointers, and the number of 64-bit words required
in the FIRST POINTERS area is:
- HDR.NFST
In the EXTERNAL-ONLY case, the FIRST POINTERS area contains PCI
components, and the number of 64-bit words required in the FIRST POINTERS
area is:
- HDR.NFST + ((HDR.NFST + 3)/4) where the division removes the fraction. */
- uint64_t addr : 40; /**< PTR Pointer, either a work-queue-entry pointer (when WQP = 1) or a local
+ uint64_t addr : 40; /**< PTR - Pointer, either a work-queue-entry pointer (when WQP = 1) or a local
memory pointer (WQP = 0).
When WQP = 1 and PTR 0x0, the hardware inserts the work-queue entry
indicated by PTR into a POW input queue after the PCI DMA operation is
complete. (Section 5.4 describes the work queue entry requirements in this
case.) When WQP = 1, PTR<2:0> must be 0x0.
When WQP = 0 and PTR 0x0, the hardware writes the single byte in local
memory indicated by PTR to 0x0 after the PCI DMA operation is complete.
NPEI_DMA_CONTROL[B0_LEND] selects the endian-ness of PTR in this
case.
When PTR = 0x0, the hardware performs no operation after the PCI DMA
operation is complete. */
} s;
} cvmx_dma_engine_header_t;
typedef union
{
uint64_t u64;
struct
{
- uint64_t i : 1; /**< I Invert free.
+ uint64_t i : 1; /**< I - Invert free.
This bit gives the software the ability to free buffers independently for an
OUTBOUND PCI DMA transfer. I is not used by the hardware when II is set. I
must not be set, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
and EXTERNAL-ONLY PCI DMA transfers. */
- uint64_t back : 4; /**< Back Backup amount.
+ uint64_t back : 4; /**< Back - Backup amount.
Allows the start of a buffer that is to be freed during an OUTBOUND transfer to
be different from the ptr value. Back specifies the amount to subtract from the
pointer to reach the start when freeing a buffer.
The address that is the start of the buffer being freed is:
- Buffer start address = ((ptr >> 7) - Back) << 7.
Back is only used by the hardware when the buffer corresponding to ptr is freed.
Back must be 0x0, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
and EXTERNAL-ONLY PCI DMA transfers. */
- uint64_t pool : 3; /**< Pool Free pool.
+ uint64_t pool : 3; /**< Pool - Free pool.
Specifies which pool (of the eight hardware-managed FPA free pools) receives the
buffer associated with ptr when freed during an OUTBOUND transfer.
Pool is only used when the buffer corresponding to ptr is freed. Pool must be 0x0,
and buffers are never freed, for INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY
PCI DMA transfers. */
- uint64_t f : 1; /**< F Full-block writes are allowed.
+ uint64_t f : 1; /**< F - Full-block writes are allowed.
When set, the hardware is permitted to write all the bytes in the cache blocks
covered by ptr, ptr + Size - 1. This can improve memory system performance
when the write misses in the L2 cache.
F can only be set for local pointers that can be written to:
- The local pointers in the FIRST POINTERS area that are write pointers for
INBOUND transfers.
- The local pointers in the LAST POINTERS area that are always write
pointers (when present for INTERNAL-ONLY transfers).
F must not be set for local pointers that are not written to:
- The local pointers in the FIRST POINTERS area for OUTBOUND and
INTERNAL-ONLY transfers. */
- uint64_t a : 1; /**< A Allocate L2.
+ uint64_t a : 1; /**< A - Allocate L2.
This is a hint to the hardware that the cache blocks should be allocated in the L2
cache (if they were not already). */
- uint64_t l : 1; /**< L Little-endian.
+ uint64_t l : 1; /**< L - Little-endian.
When L is set, the data at ptr is in little-endian format rather than big-endian. */
- uint64_t size : 13; /**< Size Size in bytes of the contiguous space specified by ptr. A Size value of 0 is
+ uint64_t size : 13; /**< Size - Size in bytes of the contiguous space specified by ptr. A Size value of 0 is
illegal. Note that the sum of the sizes in the FIRST POINTERS area must always
exactly equal the sum of the sizes/lengths in the LAST POINTERS area:
- In the OUTBOUND and INBOUND cases, the HDR.NFST size fields in the
local pointers in the FIRST POINTERS area must exactly equal the lengths
of the HDR.NLST fragments in the PCI components in the LAST POINTERS
area.
- In the INTERNAL-ONLY case, the HDR.NFST size fields in the local
pointers in the FIRST POINTERS area must equal the HDR.NLST size
fields in the local pointers in the LAST POINTERS area. */
uint64_t reserved_36_39 : 4; /**< Must be zero */
uint64_t addr : 36; /**< L2/DRAM byte pointer. Points to where the packet data starts.
Ptr can be any byte alignment. Note that ptr is interpreted as a big-endian byte
pointer when L is clear, a little-endian byte pointer when L is set. */
} internal;
struct
{
uint64_t len0 : 16; /**< Length of PCI / PCIe memory for address 0 */
uint64_t len1 : 16; /**< Length of PCI / PCIe memory for address 1 */
uint64_t len2 : 16; /**< Length of PCI / PCIe memory for address 2 */
uint64_t len3 : 16; /**< Length of PCI / PCIe memory for address 3 */
} pcie_length;
} cvmx_dma_engine_buffer_t;
/**
* Initialize the DMA engines for use
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_initialize(void);
/**
* Shutdown all DMA engines. The engeines must be idle when this
* function is called.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_shutdown(void);
/**
* Return the number of DMA engimes supported by this chip
*
* @return Number of DMA engines
*/
int cvmx_dma_engine_get_num(void);
/**
* Submit a series of DMA command to the DMA engines.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header Command header
* @param num_buffers
* The number of data pointers
* @param buffers Command data pointers
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[]);
/**
* Build the first and last pointers based on a DMA engine header
* and submit them to the engine. The purpose of this function is
* to simplify the building of DMA engine commands by automatically
* converting a simple address and size into the apropriate internal
* or PCI / PCIe address list. This function does not support gather lists,
* so you will need to build your own lists in that case.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header DMA Command header. Note that the nfst and nlst fields do not
* need to be filled in. All other fields must be set properly.
* @param first_address
* Address to use for the first pointers. In the case of INTERNAL,
* INBOUND, and OUTBOUND this is an Octeon memory address. In the
* case of EXTERNAL, this is the source PCI / PCIe address.
* @param last_address
* Address to use for the last pointers. In the case of EXTERNAL,
* INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
* case of INTERNAL, this is the Octeon memory destination address.
* @param size Size of the transfer to perform.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
uint64_t first_address, uint64_t last_address,
int size);
/**
* Simplified interface to the DMA engines to emulate memcpy()
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
* used to turn this into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param source Pointer to the source memory.
* cvmx_ptr_to_phys() will be used to turn this
* into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param length Number of bytes to copy
*
* @return Zero on success, negative on failure
*/
static inline int cvmx_dma_engine_memcpy(int engine, void *dest, void *source, int length)
{
cvmx_dma_engine_header_t header;
header.u64 = 0;
header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
return cvmx_dma_engine_transfer(engine, header, cvmx_ptr_to_phys(source),
cvmx_ptr_to_phys(dest), length);
}
/**
* Simplified interface to the DMA engines to emulate memcpy()
* When dici_mode is enabled, send zero byte.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
* used to turn this into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param source Pointer to the source memory.
* cvmx_ptr_to_phys() will be used to turn this
* into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param length Number of bytes to copy
* @param core core number for zero byte write
*
* @return Zero on success, negative on failure
*/
static inline int cvmx_dma_engine_memcpy_zero_byte(int engine, void *dest, void *source, int length, int core)
{
cvmx_dma_engine_header_t header;
header.u64 = 0;
header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
/* If dici_mode is set, DPI increments the DPI_DMA_PPn_CNT[CNT], where the
value of core n is PTR<5:0>-1 when WQP=0 and PTR != 0 && PTR < 64. */
if (octeon_has_feature(OCTEON_FEATURE_DICI_MODE))
{
cvmx_dpi_dma_control_t dma_control;
dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
if (dma_control.s.dici_mode)
{
header.s.wqp = 0; // local memory pointer
header.s.addr = core + 1;
}
}
return cvmx_dma_engine_transfer(engine, header, cvmx_ptr_to_phys(source),
cvmx_ptr_to_phys(dest), length);
}
#ifdef __cplusplus
}
#endif
#endif // __CVMX_CMD_QUEUE_H__
Index: head/sys/contrib/octeon-sdk/cvmx-higig.h
===================================================================
--- head/sys/contrib/octeon-sdk/cvmx-higig.h (revision 307222)
+++ head/sys/contrib/octeon-sdk/cvmx-higig.h (revision 307223)
@@ -1,418 +1,418 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions and typedefs for using Octeon in HiGig/HiGig+/HiGig2 mode over
* XAUI.
*
*
$Revision: 70030 $
*/
#ifndef __CVMX_HIGIG_H__
#define __CVMX_HIGIG_H__
#include "cvmx-wqe.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct
{
union
{
uint32_t u32;
struct
{
uint32_t start : 8; /**< 8-bits of Preamble indicating start of frame */
uint32_t hgi : 2; /**< HiGig interface format indicator
00 = Reserved
01 = Pure preamble - IEEE standard framing of 10GE
10 = XGS header - framing based on XGS family definition In this
format, the default length of the header is 12 bytes and additional
bytes are indicated by the HDR_EXT_LEN field
11 = Reserved */
uint32_t cng_high : 1; /**< Congestion Bit High flag */
uint32_t hdr_ext_len : 3; /**< This field is valid only if the HGI field is a b'10' and it indicates the extension
to the standard 12-bytes of XGS HiGig header. Each unit represents 4
bytes, giving a total of 16 additional extension bytes. Value of b'101', b'110'
and b'111' are reserved. For HGI field value of b'01' this field should be
b'01'. For all other values of HGI it is don't care. */
uint32_t src_modid_6 : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
SRC_MODID (bits 4:0 are in Byte 4 and bit 5 is in Byte 9). For HGI field
value of b'01' this field should be b'0'. For all other values of HGI it is don't
care. */
uint32_t dst_modid_6 : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
DST_MODID (bits 4:0 are in Byte 7 and bit 5 is in Byte 9). ). For HGI field
value of b'01' this field should be b'1'. For all other values of HGI it is don't
care. */
uint32_t vid_high : 8; /**< 8-bits of the VLAN tag information */
uint32_t vid_low : 8; /**< 8 bits LSB of the VLAN tag information */
} s;
} dw0;
union
{
uint32_t u32;
struct
{
uint32_t src_modid_low : 5; /**< Bits 4:0 of Module ID of the source module on which the packet ingress (bit
5 is in Byte 9 and bit 6 Is in Byte 1) */
uint32_t opcode : 3; /**< XGS HiGig op-code, indicating the type of packet
000 = Control frames used for CPU to CPU communications
001 = Unicast packet with destination resolved; The packet can be
either Layer 2 unicast packet or L3 unicast packet that was
routed in the ingress chip.
010 = Broadcast or unknown Unicast packet or unknown multicast,
destined to all members of the VLAN
011 = L2 Multicast packet, destined to all ports of the group indicated
in the L2MC_INDEX which is overlayed on DST_PORT/DST_MODID fields
100 = IP Multicast packet, destined to all ports of the group indicated
in the IPMC_INDEX which is overlayed on DST_PORT/DST_MODID fields
101 = Reserved
110 = Reserved
111 = Reserved */
uint32_t pfm : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registed/unregistered
multicast (unknown L2 multicast and IPMC) packets. This field is used
when OPCODE is 011 or 100 Semantics of PFM bits are as follows;
For registered L2 multicast packets:
- PFM= 0 Flood to VLAN
- PFM= 1 or 2 Send to group members in the L2MC table
+ PFM= 0 - Flood to VLAN
+ PFM= 1 or 2 - Send to group members in the L2MC table
For unregistered L2 multicast packets:
- PFM= 0 or 1 Flood to VLAN
- PFM= 2 Drop the packet */
+ PFM= 0 or 1 - Flood to VLAN
+ PFM= 2 - Drop the packet */
uint32_t src_port_tgid : 6; /**< If the MSB of this field is set, then it indicates the LAG the packet ingressed
on, else it represents the physical port the packet ingressed on. */
uint32_t dst_port : 5; /**< Port number of destination port on which the packet needs to egress. */
uint32_t priority : 3; /**< This is the internal priority of the packet. This internal priority will go through
COS_SEL mapping registers to map to the actual MMU queues. */
uint32_t header_type : 2; /**< Indicates the format of the next 4 bytes of the XGS HiGig header
00 = Overlay 1 (default)
01 = Overlay 2 (Classification Tag)
10 = Reserved
11 = Reserved */
uint32_t cng_low : 1; /**< Semantics of CNG_HIGH and CNG_LOW are as follows: The following
encodings are to make it backward compatible:
[CNG_HIGH, CNG_LOW] - COLOR
- [0, 0] Packet is green
- [0, 1] Packet is red
- [1, 1] Packet is yellow
- [1, 0] Undefined */
+ [0, 0] - Packet is green
+ [0, 1] - Packet is red
+ [1, 1] - Packet is yellow
+ [1, 0] - Undefined */
uint32_t dst_modid_low : 5; /**< Bits [4-: 0] of Module ID of the destination port on which the packet needs to egress. */
} s;
} dw1;
union
{
uint32_t u32;
struct
{
uint32_t dst_t : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
group. */
uint32_t dst_tgid : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
DO_NOT_LEARN bit is overlaid on the second bit of this field. */
uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
ingressed the system. */
uint32_t mirror_only : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
needs to be mirrored. */
uint32_t mirror_done : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
may still need to be switched. */
uint32_t mirror : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
packet was switched and only needs to be mirrored. */
uint32_t src_modid_5 : 1; /**< Source Module ID: Bit 5 of Src_ModID (bits 4:0 are in byte 4 and bit 6 is in
byte 1) */
uint32_t dst_modid_5 : 1; /**< Destination Module ID: Bit 5 of Dst_ModID (bits 4:0 are in byte 7 and bit 6
is in byte 1) */
uint32_t l3 : 1; /**< L3: Indicates that the packet is L3 switched */
uint32_t label_present : 1; /**< Label Present: Indicates that header contains a 20-bit VC label: HiGig+
added field. */
uint32_t vc_label_16_19 : 4; /**< VC Label: Bits 19:16 of VC label: HiGig+ added field */
uint32_t vc_label_0_15 : 16;/**< VC Label: Bits 15:0 of VC label: HiGig+ added field */
} o1;
struct
{
uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
uint32_t reserved_0_15 : 16;
} o2;
} dw2;
} cvmx_higig_header_t;
typedef struct
{
union
{
uint32_t u32;
struct
{
uint32_t k_sop : 8; /**< The delimiter indicating the start of a packet transmission */
uint32_t reserved_21_23 : 3;
uint32_t mcst : 1; /**< MCST indicates whether the packet should be unicast or
multicast forwarded through the XGS switching fabric
- 0: Unicast
- 1: Mulitcast */
uint32_t tc : 4; /**< Traffic Class [3:0] indicates the distinctive Quality of Service (QoS)
the switching fabric will provide when forwarding the packet
through the fabric */
uint32_t dst_modid_mgid : 8; /**< When MCST=0, this field indicates the destination XGS module to
which the packet will be delivered. When MCST=1, this field indicates
higher order bits of the Multicast Group ID. */
uint32_t dst_pid_mgid : 8; /**< When MCST=0, this field indicates a port associated with the
module indicated by the DST_MODID, through which the packet
will exit the system. When MCST=1, this field indicates lower order
bits of the Multicast Group ID */
} s;
} dw0;
union
{
uint32_t u32;
struct
{
uint32_t src_modid : 8; /**< Source Module ID indicates the source XGS module from which
the packet is originated. (It can also be used for the fabric multicast
load balancing purpose.) */
uint32_t src_pid : 8; /**< Source Port ID indicates a port associated with the module
indicated by the SRC_MODID, through which the packet has
entered the system */
uint32_t lbid : 8; /**< Load Balancing ID indicates a packet flow hashing index
computed by the ingress XGS module for statistical distribution of
packet flows through a multipath fabric */
uint32_t dp : 2; /**< Drop Precedence indicates the traffic rate violation status of the
packet measured by the ingress module.
- 00: GREEN
- 01: RED
- 10: Reserved
- 11: Yellow */
uint32_t reserved_3_5 : 3;
uint32_t ppd_type : 3; /**< Packet Processing Descriptor Type
- 000: PPD Overlay1
- 001: PPD Overlay2
- 010~111: Reserved */
} s;
} dw1;
union
{
uint32_t u32;
struct
{
uint32_t dst_t : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
group. */
uint32_t dst_tgid : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
DO_NOT_LEARN bit is overlaid on the second bit of this field. */
uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
ingressed the system. */
uint32_t mirror_only : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
needs to be mirrored. */
uint32_t mirror_done : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
may still need to be switched. */
uint32_t mirror : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
packet was switched and only needs to be mirrored. */
uint32_t reserved_22_23 : 2;
uint32_t l3 : 1; /**< L3: Indicates that the packet is L3 switched */
uint32_t label_present : 1; /**< Label Present: Indicates that header contains a 20-bit VC label: HiGig+
added field. */
uint32_t vc_label : 20; /**< Refer to the HiGig+ Architecture Specification */
} o1;
struct
{
uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
uint32_t reserved_0_15 : 16;
} o2;
} dw2;
union
{
uint32_t u32;
struct
{
uint32_t vid : 16; /**< VLAN tag information */
uint32_t pfm : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registed/unregistered
multicast (unknown L2 multicast and IPMC) packets. This field is used
when OPCODE is 011 or 100 Semantics of PFM bits are as follows;
For registered L2 multicast packets:
- PFM= 0 Flood to VLAN
- PFM= 1 or 2 Send to group members in the L2MC table
+ PFM= 0 - Flood to VLAN
+ PFM= 1 or 2 - Send to group members in the L2MC table
For unregistered L2 multicast packets:
- PFM= 0 or 1 Flood to VLAN
- PFM= 2 Drop the packet */
+ PFM= 0 or 1 - Flood to VLAN
+ PFM= 2 - Drop the packet */
uint32_t src_t : 1; /**< If the MSB of this field is set, then it indicates the LAG the packet ingressed
on, else it represents the physical port the packet ingressed on. */
uint32_t reserved_11_12 : 2;
uint32_t opcode : 3; /**< XGS HiGig op-code, indicating the type of packet
000 = Control frames used for CPU to CPU communications
001 = Unicast packet with destination resolved; The packet can be
either Layer 2 unicast packet or L3 unicast packet that was
routed in the ingress chip.
010 = Broadcast or unknown Unicast packet or unknown multicast,
destined to all members of the VLAN
011 = L2 Multicast packet, destined to all ports of the group indicated
in the L2MC_INDEX which is overlayed on DST_PORT/DST_MODID fields
100 = IP Multicast packet, destined to all ports of the group indicated
in the IPMC_INDEX which is overlayed on DST_PORT/DST_MODID fields
101 = Reserved
110 = Reserved
111 = Reserved */
uint32_t hdr_ext_len : 3; /**< This field is valid only if the HGI field is a b'10' and it indicates the extension
to the standard 12-bytes of XGS HiGig header. Each unit represents 4
bytes, giving a total of 16 additional extension bytes. Value of b'101', b'110'
and b'111' are reserved. For HGI field value of b'01' this field should be
b'01'. For all other values of HGI it is don't care. */
uint32_t reserved_0_4 : 5;
} s;
} dw3;
} cvmx_higig2_header_t;
/**
 * Initialize the HiGig aspects of a XAUI interface. This function
 * should be called before the cvmx-helper generic init.
 *
 * @param interface Interface to initialize HiGig on (0-1)
 * @param enable_higig2
 *               Non zero to enable HiGig2 support. Zero to support HiGig
 *               and HiGig+.
 *
 * @return Zero on success, negative on failure
 */
static inline int cvmx_higig_initialize(int interface, int enable_higig2)
{
    cvmx_pip_prt_cfgx_t pip_prt_cfg;
    cvmx_gmxx_rxx_udd_skp_t gmx_rx_udd_skp;
    cvmx_gmxx_txx_min_pkt_t gmx_tx_min_pkt;
    cvmx_gmxx_txx_append_t gmx_tx_append;
    cvmx_gmxx_tx_ifg_t gmx_tx_ifg;
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
    cvmx_gmxx_rxx_frm_ctl_t gmx_rx_frm_ctl;
    cvmx_gmxx_tx_xaui_ctl_t gmx_tx_xaui_ctl;
    int i, pknd;
    /* HiGig2 carries a 16-byte header; HiGig/HiGig+ carry 12 bytes */
    int header_size = (enable_higig2) ? 16 : 12;

    /* Setup PIP to handle HiGig: skip over the HiGig header and use its
       priority bits for QoS instead of DSA parsing */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
        pknd = cvmx_helper_get_pknd(interface, 0);
    else
        pknd = interface*16; /* legacy chips: 16 ports per interface */
    pip_prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
    pip_prt_cfg.s.dsa_en = 0;
    pip_prt_cfg.s.higig_en = 1;
    pip_prt_cfg.s.hg_qos = 1;
    pip_prt_cfg.s.skip = header_size;
    cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), pip_prt_cfg.u64);

    /* Setup some sample QoS defaults. These can be changed later */
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        /* Map each of the 64 HiGig priorities to QoS level (priority & 7) */
        for (i=0; i<64; i++)
        {
            cvmx_pip_hg_pri_qos_t pip_hg_pri_qos;
            pip_hg_pri_qos.u64 = 0;
            pip_hg_pri_qos.s.up_qos = 1;
            pip_hg_pri_qos.s.pri = i;
            pip_hg_pri_qos.s.qos = i&7;
            cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, pip_hg_pri_qos.u64);
        }
    }

    /* Setup GMX RX to treat the HiGig header as user data to ignore */
    gmx_rx_udd_skp.u64 = cvmx_read_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface));
    gmx_rx_udd_skp.s.len = header_size;
    gmx_rx_udd_skp.s.fcssel = 0;
    cvmx_write_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface), gmx_rx_udd_skp.u64);

    /* Disable GMX preamble checking; HiGig frames have no preamble */
    gmx_rx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface));
    gmx_rx_frm_ctl.s.pre_chk = 0;
    cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface), gmx_rx_frm_ctl.u64);

    /* Setup GMX TX to pad properly min sized packets. The minimum grows by
       the header size since the HiGig header doesn't count as payload */
    gmx_tx_min_pkt.u64 = cvmx_read_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface));
    gmx_tx_min_pkt.s.min_size = 59 + header_size;
    cvmx_write_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface), gmx_tx_min_pkt.u64);

    /* Setup GMX TX to not add a preamble */
    gmx_tx_append.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(0, interface));
    gmx_tx_append.s.preamble = 0;
    cvmx_write_csr(CVMX_GMXX_TXX_APPEND(0, interface), gmx_tx_append.u64);

    /* Reduce the inter frame gap to 8 bytes (ifg1 + ifg2 = 4 + 4) */
    gmx_tx_ifg.u64 = cvmx_read_csr(CVMX_GMXX_TX_IFG(interface));
    gmx_tx_ifg.s.ifg1 = 4;
    gmx_tx_ifg.s.ifg2 = 4;
    cvmx_write_csr(CVMX_GMXX_TX_IFG(interface), gmx_tx_ifg.u64);

    /* Disable GMX backpressure */
    gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    gmx_tx_ovr_bp.s.bp = 0;
    gmx_tx_ovr_bp.s.en = 0xf;
    gmx_tx_ovr_bp.s.ign_full = 0xf;
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);

    if (enable_higig2)
    {
        /* Enable HiGig2 support and forwarding of virtual port backpressure
           to PKO */
        cvmx_gmxx_hg2_control_t gmx_hg2_control;
        gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
        gmx_hg2_control.s.hg2rx_en = 1;
        gmx_hg2_control.s.hg2tx_en = 1;
        gmx_hg2_control.s.logl_en = 0xffff;
        gmx_hg2_control.s.phys_en = 1;
        cvmx_write_csr(CVMX_GMXX_HG2_CONTROL(interface), gmx_hg2_control.u64);
    }

    /* Enable HiGig on the XAUI transmitter (done last, after all other
       registers are configured) */
    gmx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
    gmx_tx_xaui_ctl.s.hg_en = 1;
    cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmx_tx_xaui_ctl.u64);

    return 0;
}
#ifdef __cplusplus
}
#endif
#endif // __CVMX_HIGIG_H__
Index: head/sys/contrib/octeon-sdk/cvmx-pcie.c
===================================================================
--- head/sys/contrib/octeon-sdk/cvmx-pcie.c (revision 307222)
+++ head/sys/contrib/octeon-sdk/cvmx-pcie.c (revision 307223)
@@ -1,1702 +1,1702 @@
/***********************license start***************
* Copyright (c) 2003-2011 Cavium, Inc. . All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to PCIe as a host(RC) or target(EP)
*
*
$Revision: 70030 $
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef CONFIG_CAVIUM_DECODE_RSL
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#else
#include "cvmx.h"
#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "cvmx-csr-db.h"
#endif
#include "cvmx-pcie.h"
#include "cvmx-sysinfo.h"
#include "cvmx-swap.h"
#include "cvmx-wqe.h"
#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "cvmx-error.h"
#endif
#include "cvmx-helper-errata.h"
#include "cvmx-qlm.h"
#endif
#define MRRS_CN5XXX 0 /* 128 byte Max Read Request Size */
#define MPS_CN5XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
#define MRRS_CN6XXX 3 /* 1024 byte Max Read Request Size */
#define MPS_CN6XXX 0 /* 128 byte Max Packet Size (Limit of most PCs) */
/**
 * Return the Core virtual base address for PCIe IO access. IOs are
 * read/written as an offset from this address.
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return 64bit Octeon IO base address for read/write
 */
uint64_t cvmx_pcie_get_io_base_address(int pcie_port)
{
    cvmx_pcie_address_t base;

    /* Build the address field by field; start from all zeros */
    base.u64 = 0;
    base.io.port = pcie_port;
    base.io.es = 1;
    base.io.subdid = 2;
    base.io.did = 3;
    base.io.io = 1;
    base.io.upper = 0;
    return base.u64;
}
/**
 * Size of the IO address region returned at address
 * cvmx_pcie_get_io_base_address()
 *
 * @param pcie_port PCIe port the IO is for (unused; every port has the
 *                  same sized window)
 *
 * @return Size of the IO window (4GB)
 */
uint64_t cvmx_pcie_get_io_size(int pcie_port)
{
    (void)pcie_port;
    return 0x100000000ull; /* 2^32 bytes */
}
/**
 * Return the Core virtual base address for PCIe MEM access. Memory is
 * read/written as an offset from this address.
 *
 * @param pcie_port PCIe port the IO is for
 *
 * @return 64bit Octeon IO base address for read/write
 */
uint64_t cvmx_pcie_get_mem_base_address(int pcie_port)
{
    cvmx_pcie_address_t base;

    /* Build the address field by field; the sub-DID selects the port */
    base.u64 = 0;
    base.mem.subdid = 3 + pcie_port;
    base.mem.did = 3;
    base.mem.io = 1;
    base.mem.upper = 0;
    return base.u64;
}
/**
 * Size of the Mem address region returned at address
 * cvmx_pcie_get_mem_base_address()
 *
 * @param pcie_port PCIe port the IO is for (unused; every port has the
 *                  same sized window)
 *
 * @return Size of the Mem window (64GB)
 */
uint64_t cvmx_pcie_get_mem_size(int pcie_port)
{
    (void)pcie_port;
    return 0x1000000000ull; /* 2^36 bytes */
}
/**
 * @INTERNAL
 * Initialize the RC config space CSRs
 *
 * @param pcie_port PCIe port to initialize
 */
static void __cvmx_pcie_rc_initialize_config_space(int pcie_port)
{
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pciercx_cfg030_t pciercx_cfg030;
        pciercx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG030(pcie_port));
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pciercx_cfg030.s.mps = MPS_CN5XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pciercx_cfg030.s.mps = MPS_CN6XXX;
            pciercx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pciercx_cfg030.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
        pciercx_cfg030.s.ns_en = 1; /* Enable no snoop processing. Not used by Octeon */
        pciercx_cfg030.s.ce_en = 1; /* Correctable error reporting enable. */
        pciercx_cfg030.s.nfe_en = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg030.s.fe_en = 1; /* Fatal error reporting enable. */
        pciercx_cfg030.s.ur_en = 1; /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG030(pcie_port), pciercx_cfg030.u32);
    }

    /* NPEI-based chips (CN5XXX family) and DPI/SLI-based chips (CN6XXX
       family) keep the matching MPS/MRRS settings in different CSRs */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX; /* Max payload size = 128 bytes for best Octeon DMA performance */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX; /* Max read request size = 128 bytes for best Octeon DMA performance */
        if (pcie_port)
            npei_ctl_status2.s.c1_b1_s = 3; /* Port1 BAR1 Size 256MB */
        else
            npei_ctl_status2.s.c0_b1_s = 3; /* Port0 BAR1 Size 256MB */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);

        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }

    /* ECRC Generation (PCIE*_CFG070[GE,CE]) */
    {
        cvmx_pciercx_cfg070_t pciercx_cfg070;
        pciercx_cfg070.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG070(pcie_port));
        pciercx_cfg070.s.ge = 1; /* ECRC generation enable. */
        pciercx_cfg070.s.ce = 1; /* ECRC check enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG070(pcie_port), pciercx_cfg070.u32);
    }

    /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
    /* ME and MSAE should always be set. */
    /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
    /* System Error Message Enable (PCIE*_CFG001[SEE]) */
    {
        cvmx_pciercx_cfg001_t pciercx_cfg001;
        pciercx_cfg001.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG001(pcie_port));
        pciercx_cfg001.s.msae = 1; /* Memory space enable. */
        pciercx_cfg001.s.me = 1; /* Bus master enable. */
        pciercx_cfg001.s.i_dis = 1; /* INTx assertion disable. */
        pciercx_cfg001.s.see = 1; /* SERR# enable */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG001(pcie_port), pciercx_cfg001.u32);
    }

    /* Advanced Error Recovery Message Enables */
    /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG066(pcie_port), 0);
    /* Use CVMX_PCIERCX_CFG067 hardware default */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG069(pcie_port), 0);

    /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
    {
        cvmx_pciercx_cfg032_t pciercx_cfg032;
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
        pciercx_cfg032.s.aslpc = 0; /* Active state Link PM control. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG032(pcie_port), pciercx_cfg032.u32);
    }

    /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during cvmx_pcie_rc_initialize_link() */
    /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
    {
        /* We set the primary bus number to 1 so IDT bridges are happy. They don't like zero */
        cvmx_pciercx_cfg006_t pciercx_cfg006;
        pciercx_cfg006.u32 = 0;
        pciercx_cfg006.s.pbnum = 1;
        pciercx_cfg006.s.sbnum = 1;
        pciercx_cfg006.s.subbnum = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG006(pcie_port), pciercx_cfg006.u32);
    }

    /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
    /* Most applications should disable the memory-mapped I/O BAR by */
    /* setting PCIERCn_CFG008[ML_ADDR] < PCIERCn_CFG008[MB_ADDR] */
    {
        cvmx_pciercx_cfg008_t pciercx_cfg008;
        pciercx_cfg008.u32 = 0;
        pciercx_cfg008.s.mb_addr = 0x100;
        pciercx_cfg008.s.ml_addr = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG008(pcie_port), pciercx_cfg008.u32);
    }

    /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
    /* Most applications should disable the prefetchable BAR by setting */
    /* PCIERCn_CFG011[UMEM_LIMIT],PCIERCn_CFG009[LMEM_LIMIT] < */
    /* PCIERCn_CFG010[UMEM_BASE],PCIERCn_CFG009[LMEM_BASE] */
    {
        cvmx_pciercx_cfg009_t pciercx_cfg009;
        cvmx_pciercx_cfg010_t pciercx_cfg010;
        cvmx_pciercx_cfg011_t pciercx_cfg011;
        pciercx_cfg009.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG009(pcie_port));
        pciercx_cfg010.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG010(pcie_port));
        pciercx_cfg011.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG011(pcie_port));
        pciercx_cfg009.s.lmem_base = 0x100;
        pciercx_cfg009.s.lmem_limit = 0;
        pciercx_cfg010.s.umem_base = 0x100;
        pciercx_cfg011.s.umem_limit = 0;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG009(pcie_port), pciercx_cfg009.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG010(pcie_port), pciercx_cfg010.u32);
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG011(pcie_port), pciercx_cfg011.u32);
    }

    /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
    /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
    {
        cvmx_pciercx_cfg035_t pciercx_cfg035;
        pciercx_cfg035.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG035(pcie_port));
        pciercx_cfg035.s.secee = 1; /* System error on correctable error enable. */
        pciercx_cfg035.s.sefee = 1; /* System error on fatal error enable. */
        pciercx_cfg035.s.senfee = 1; /* System error on non-fatal error enable. */
        pciercx_cfg035.s.pmeie = 1; /* PME interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG035(pcie_port), pciercx_cfg035.u32);
    }

    /* Advanced Error Recovery Interrupt Enables */
    /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
    {
        cvmx_pciercx_cfg075_t pciercx_cfg075;
        pciercx_cfg075.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG075(pcie_port));
        pciercx_cfg075.s.cere = 1; /* Correctable error reporting enable. */
        pciercx_cfg075.s.nfere = 1; /* Non-fatal error reporting enable. */
        pciercx_cfg075.s.fere = 1; /* Fatal error reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG075(pcie_port), pciercx_cfg075.u32);
    }

    /* HP Interrupt Enables (PCIERCn_CFG034[HPINT_EN], */
    /* PCIERCn_CFG034[DLLS_EN,CCINT_EN]) */
    {
        cvmx_pciercx_cfg034_t pciercx_cfg034;
        pciercx_cfg034.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG034(pcie_port));
        pciercx_cfg034.s.hpint_en = 1; /* Hot-plug interrupt enable. */
        pciercx_cfg034.s.dlls_en = 1; /* Data Link Layer state changed enable */
        pciercx_cfg034.s.ccint_en = 1; /* Command completed interrupt enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG034(pcie_port), pciercx_cfg034.u32);
    }
}
/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 1 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the link never came up
 */
static int __cvmx_pcie_rc_initialize_link_gen1(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pescx_ctl_status_t pescx_ctl_status;
    cvmx_pciercx_cfg452_t pciercx_cfg452;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;

    /* Set the lane width based on the QLM configuration */
    pciercx_cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG452(pcie_port));
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    if (pescx_ctl_status.s.qlm_cfg == 0)
    {
        /* We're in 8 lane (56XX) or 4 lane (54XX) mode */
        pciercx_cfg452.s.lme = 0xf;
    }
    else
    {
        /* We're in 4 lane (56XX) or 2 lane (52XX) mode */
        pciercx_cfg452.s.lme = 0x7;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG452(pcie_port), pciercx_cfg452.u32);

    /* CN52XX pass 1.x has an errata where length mismatches on UR responses can
       cause bus errors on 64bit memory reads. Turning off length error
       checking fixes this */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_pciercx_cfg455_t pciercx_cfg455;
        pciercx_cfg455.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG455(pcie_port));
        pciercx_cfg455.s.m_cpl_len_err = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG455(pcie_port), pciercx_cfg455.u32);
    }

    /* Lane swap needs to be manually enabled for CN52XX */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX) && (pcie_port == 1))
    {
        switch (cvmx_sysinfo_get()->board_type)
        {
#if defined(OCTEON_VENDOR_LANNER)
            /* This board is wired without the lane swap */
            case CVMX_BOARD_TYPE_CUST_LANNER_MR730:
                break;
#endif
            default:
                pescx_ctl_status.s.lane_swp = 1;
                break;
        }
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port),pescx_ctl_status.u64);
    }

    /* Bring up the link */
    pescx_ctl_status.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS(pcie_port));
    pescx_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PESCX_CTL_STATUS(pcie_port), pescx_ctl_status.u64);

    /* CN52XX pass 1.0: Due to a bug in 2nd order CDR, it needs to be disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
        __cvmx_helper_errata_qlm_disable_2nd_order_cdr(0);

    /* Wait for the link to come up, polling the data link layer active bit.
       Give up after 100 core-clock-seconds worth of cycles */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > 100*cvmx_clock_get_rate(CVMX_CLOCK_CORE))
        {
            cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
            return -1;
        }
        cvmx_wait(50000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while (pciercx_cfg032.s.dlla == 0);

    /* Clear all pending errors (write-one-to-clear by writing back the read value) */
    cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM));

    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
       little longer to respond than expected under load. As a workaround for
       this we configure the Replay Time Limit to the value expected for a 512
       byte MPS instead of our actual 256 byte MPS. The numbers below are
       directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);

    return 0;
}
/* Increment the BA (PCIe bus address bits) field of a SLI_MEM_ACCESS_SUBIDX
   register image. The field lives in a different layout on CN68XX than on
   the CN63XX-style chips, so pick the matching union member. */
static inline void __cvmx_increment_ba(cvmx_sli_mem_access_subidx_t *pmas)
{
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        pmas->cn63xx.ba++;
    else
        pmas->cn68xx.ba++;
}
/**
 * Initialize a PCIe gen 1 port for use in host(RC) mode. It doesn't enumerate
 * the bus.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the port can't be brought up
 */
static int __cvmx_pcie_rc_initialize_gen1(int pcie_port)
{
    int i;
    int base;
    uint64_t addr_swizzle;
    cvmx_ciu_soft_prst_t ciu_soft_prst;
    cvmx_pescx_bist_status_t pescx_bist_status;
    cvmx_pescx_bist_status2_t pescx_bist_status2;
    cvmx_npei_ctl_status_t npei_ctl_status;
    cvmx_npei_mem_access_ctl_t npei_mem_access_ctl;
    cvmx_npei_mem_access_subidx_t mem_access_subid;
    cvmx_npei_dbg_data_t npei_dbg_data;
    cvmx_pescx_ctl_status2_t pescx_ctl_status2;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_npei_bar1_indexx_t bar1_index;

retry: /* re-entered below if the TLP counter alignment workaround ran */
    /* Make sure we aren't trying to setup a target mode interface in host mode */
    npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
    if ((pcie_port==0) && !npei_ctl_status.s.host_mode)
    {
        cvmx_dprintf("PCIe: Port %d in endpoint mode\n", pcie_port);
        return -1;
    }

    /* Make sure a CN52XX isn't trying to bring up port 1 when it is disabled */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
    {
        npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        if ((pcie_port==1) && npei_dbg_data.cn52xx.qlm0_link_width)
        {
            cvmx_dprintf("PCIe: ERROR: cvmx_pcie_rc_initialize() called on port1, but port1 is disabled\n");
            return -1;
        }
    }

    /* Make sure a CN56XX pass 1 isn't trying to do anything; errata for PASS 1 */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) {
        cvmx_dprintf ("PCIe port %d: CN56XX_PASS_1, skipping\n", pcie_port);
        return -1;
    }

    /* PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
    npei_ctl_status.s.arb = 1;
    /* Allow up to 0x20 config retries */
    npei_ctl_status.s.cfg_rtry = 0x20;
    /* CN52XX pass1.x has an errata where P0_NTAGS and P1_NTAGS don't reset */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        npei_ctl_status.s.p0_ntags = 0x20;
        npei_ctl_status.s.p1_ntags = 0x20;
    }
    cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS, npei_ctl_status.u64);

    /* Bring the PCIe out of reset */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200)
    {
        /* The EBH5200 board swapped the PCIe reset lines on the board. As a
           workaround for this bug, we bring both PCIe ports out of reset at
           the same time instead of on separate calls. So for port 0, we bring
           both out of reset and do nothing on port 1 */
        if (pcie_port == 0)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            /* After a chip reset the PCIe will also be in reset. If it isn't,
               most likely someone is trying to init it again without a proper
               PCIe reset */
            if (ciu_soft_prst.s.soft_prst == 0)
            {
                /* Reset the ports */
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
                ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
                ciu_soft_prst.s.soft_prst = 1;
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
                /* Wait until pcie resets the ports. */
                cvmx_wait_usec(2000);
            }
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }
    else
    {
        /* The normal case: The PCIe ports are completely separate and can be
           brought out of reset independently */
        if (pcie_port)
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        else
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        /* After a chip reset the PCIe will also be in reset. If it isn't,
           most likely someone is trying to init it again without a proper
           PCIe reset */
        if (ciu_soft_prst.s.soft_prst == 0)
        {
            /* Reset the port */
            ciu_soft_prst.s.soft_prst = 1;
            if (pcie_port)
                cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
            else
                cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
            /* Wait until pcie resets the ports. */
            cvmx_wait_usec(2000);
        }
        if (pcie_port)
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
        }
        else
        {
            ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
            ciu_soft_prst.s.soft_prst = 0;
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        }
    }

    /* Wait for PCIe reset to complete. Due to errata PCIE-700, we don't poll
       PESCX_CTL_STATUS2[PCIERST], but simply wait a fixed number of cycles */
    cvmx_wait(400000);

    /* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of CN56XX and
       CN52XX, so we only probe it on newer chips */
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        /* Clear PCLK_RUN so we can check if the clock is running */
        pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
        pescx_ctl_status2.s.pclk_run = 1;
        cvmx_write_csr(CVMX_PESCX_CTL_STATUS2(pcie_port), pescx_ctl_status2.u64);
        /* Now that we cleared PCLK_RUN, wait for it to be set again telling
           us the clock is running */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CTL_STATUS2(pcie_port),
            cvmx_pescx_ctl_status2_t, pclk_run, ==, 1, 10000))
        {
            cvmx_dprintf("PCIe: Port %d isn't clocked, skipping.\n", pcie_port);
            return -1;
        }
    }

    /* Check and make sure PCIe came out of reset. If it doesn't the board
       probably hasn't wired the clocks up and the interface should be
       skipped */
    pescx_ctl_status2.u64 = cvmx_read_csr(CVMX_PESCX_CTL_STATUS2(pcie_port));
    if (pescx_ctl_status2.s.pcierst)
    {
        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST2 status. If any bits are set skip this interface. This
       is an attempt to catch PCIE-813 on pass 1 parts */
    pescx_bist_status2.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS2(pcie_port));
    if (pescx_bist_status2.u64)
    {
        cvmx_dprintf("PCIe: Port %d BIST2 failed. Most likely this port isn't hooked up, skipping.\n", pcie_port);
        return -1;
    }

    /* Check BIST status. Non-zero is logged but not fatal */
    pescx_bist_status.u64 = cvmx_read_csr(CVMX_PESCX_BIST_STATUS(pcie_port));
    if (pescx_bist_status.u64)
        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pescx_bist_status.u64));

    /* Initialize the config space CSRs */
    __cvmx_pcie_rc_initialize_config_space(pcie_port);

    /* Bring the link up */
    if (__cvmx_pcie_rc_initialize_link_gen1(pcie_port))
    {
        cvmx_dprintf("PCIe: Failed to initialize port %d, probably the slot is empty\n", pcie_port);
        return -1;
    }

    /* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
    npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
    npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
    npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
    cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);

    /* Setup Mem access SubDIDs */
    mem_access_subid.u64 = 0;
    mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
    mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
    mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
    mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
    mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
    mem_access_subid.s.nsw = 0; /* Enable Snoop for Writes. */
    mem_access_subid.s.ror = 0; /* Disable Relaxed Ordering for Reads. */
    mem_access_subid.s.row = 0; /* Disable Relaxed Ordering for Writes. */
    mem_access_subid.s.ba = 0; /* PCIe Address Bits <63:34>. */

    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
    for (i=12 + pcie_port*4; i<16 + pcie_port*4; i++)
    {
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
        mem_access_subid.s.ba += 1; /* Set each SUBID to extend the addressable range */
    }

    /* Disable the peer to peer forwarding register. This must be setup
       by the OS after it enumerates the bus and assigns addresses to the
       PCIe busses */
    for (i=0; i<4; i++)
    {
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_START(i, pcie_port), -1);
        cvmx_write_csr(CVMX_PESCX_P2P_BARX_END(i, pcie_port), -1);
    }

    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR0_START(pcie_port), 0);

    /* BAR1 follows BAR2 with a gap so it has the same address as for gen2. */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);

    bar1_index.u32 = 0;
    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
    bar1_index.s.ca = 1; /* Not Cached */
    bar1_index.s.end_swp = 1; /* Endian Swap mode */
    bar1_index.s.addr_v = 1; /* Valid entry */

    base = pcie_port ? 16 : 0;

    /* Big endian swizzle for 32-bit PEXP_NCB register. */
#ifdef __MIPSEB__
    addr_swizzle = 4;
#else
    addr_swizzle = 0;
#endif
    /* Program the 16 BAR1 index entries, each mapping 16MB of the 256MB BAR1 */
    for (i = 0; i < 16; i++) {
        cvmx_write64_uint32((CVMX_PEXP_NPEI_BAR1_INDEXX(base) ^ addr_swizzle), bar1_index.u32);
        base++;
        /* 256MB / 16 >> 22 == 4 */
        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
    }

    /* Set Octeon's BAR2 to decode 0-2^39. Bar0 and Bar1 take precedence
       where they overlap. It also overlaps with the device addresses, so
       make sure the peer to peer forwarding is set right */
    cvmx_write_csr(CVMX_PESCX_P2N_BAR2_START(pcie_port), 0);

    /* Setup BAR2 attributes */
    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
- /* PTLP_RO,CTLP_RO should normally be set (except for debug). */
- /* WAIT_COM=0 will likely work for all applications. */
+ /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
+ /* - WAIT_COM=0 will likely work for all applications. */
    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
    if (pcie_port)
    {
        cvmx_npei_ctl_port1_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT1);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT1, npei_ctl_port.u64);
    }
    else
    {
        cvmx_npei_ctl_port0_t npei_ctl_port;
        npei_ctl_port.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_PORT0);
        npei_ctl_port.s.bar2_enb = 1;
        npei_ctl_port.s.bar2_esx = 1;
        npei_ctl_port.s.bar2_cax = 0;
        npei_ctl_port.s.ptlp_ro = 1;
        npei_ctl_port.s.ctlp_ro = 1;
        npei_ctl_port.s.wait_com = 0;
        npei_ctl_port.s.waitl_com = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_PORT0, npei_ctl_port.u64);
    }

    /* Both pass 1 and pass 2 of CN52XX and CN56XX have an errata that causes
       TLP ordering to not be preserved after multiple PCIe port resets. This
       code detects this fault and corrects it by aligning the TLP counters
       properly. Another link reset is then performed. See PCIE-13340 */
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
        OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
    {
        cvmx_npei_dbg_data_t dbg_data;
        int old_in_fif_p_count;
        int in_fif_p_count;
        int out_p_count;
        int in_p_offset = (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)) ? 4 : 1;
        int i; /* NOTE(review): shadows the function-scope 'i'; harmless here */
        /* Choose a write address of 1MB. It should be harmless as all bars
           haven't been setup */
        uint64_t write_address = (cvmx_pcie_get_mem_base_address(pcie_port) + 0x100000) | (1ull<<63);

        /* Make sure at least in_p_offset have been executed before we try and
           read in_fif_p_count */
        i = in_p_offset;
        while (i--)
        {
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
        }

        /* Read the IN_FIF_P_COUNT from the debug select. IN_FIF_P_COUNT can be
           unstable sometimes so read it twice with a write between the reads.
           This way we can tell the value is good as it will increment by one
           due to the write */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd7fc : 0xcffc);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        do
        {
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            old_in_fif_p_count = dbg_data.s.data & 0xff;
            cvmx_write64_uint32(write_address, 0);
            cvmx_wait(10000);
            dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
            in_fif_p_count = dbg_data.s.data & 0xff;
        } while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));

        /* Update in_fif_p_count for it's offset with respect to out_p_count */
        in_fif_p_count = (in_fif_p_count + in_p_offset) & 0xff;

        /* Read the OUT_P_COUNT from the debug select */
        cvmx_write_csr(CVMX_PEXP_NPEI_DBG_SELECT, (pcie_port) ? 0xd00f : 0xc80f);
        cvmx_read_csr(CVMX_PEXP_NPEI_DBG_SELECT);
        dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
        out_p_count = (dbg_data.s.data>>1) & 0xff;

        /* Check that the two counters are aligned */
        if (out_p_count != in_fif_p_count)
        {
            cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
            /* Issue extra writes until the 8-bit counter wraps to zero */
            while (in_fif_p_count != 0)
            {
                cvmx_write64_uint32(write_address, 0);
                cvmx_wait(10000);
                in_fif_p_count = (in_fif_p_count + 1) & 0xff;
            }
            /* The EBH5200 board swapped the PCIe reset lines on the board. This
               means we must bring both links down and up, which will cause the
               PCIe0 to need alignment again. Lots of messages will be displayed,
               but everything should work */
            if ((cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBH5200) &&
                (pcie_port == 1))
                cvmx_pcie_rc_initialize(0);
            /* Retry bringing this port up */
            goto retry;
        }
    }

    /* Display the link status */
    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    cvmx_dprintf("PCIe: Port %d link active, %d lanes\n", pcie_port, pciercx_cfg032.s.nlw);

    return 0;
}
/**
* @INTERNAL
* Initialize a host mode PCIe gen 2 link. This function takes a PCIe
* port from reset to a link up state. Software can then begin
* configuring the rest of the link.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
/**
 * @INTERNAL
 * Initialize a host mode PCIe gen 2 link. This function takes a PCIe
 * port from reset to a link up state. Software can then begin
 * configuring the rest of the link.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the link failed to come up within the
 *         timeout
 */
static int __cvmx_pcie_rc_initialize_link_gen2(int pcie_port)
{
    uint64_t start_cycle;
    cvmx_pemx_ctl_status_t pem_ctl_status;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg448_t pciercx_cfg448;
    /* Bring up the link by setting PEM(x)_CTL_STATUS[LNK_ENB] */
    pem_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
    pem_ctl_status.s.lnk_enb = 1;
    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pem_ctl_status.u64);
    /* Wait for the link to come up: poll until the data link layer is
       active (DLLA=1) and link training is no longer in progress (LT=0).
       The timeout is one core clock's worth of cycles, i.e. roughly one
       second of wall time. */
    start_cycle = cvmx_get_cycle();
    do
    {
        if (cvmx_get_cycle() - start_cycle > cvmx_clock_get_rate(CVMX_CLOCK_CORE))
            return -1;
        cvmx_wait(10000);
        pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    } while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
    /* Update the Replay Time Limit. Empirically, some PCIe devices take a
       little longer to respond than expected under load. As a workaround for
       this we configure the Replay Time Limit to the value expected for a 512
       byte MPS instead of our actual 256 byte MPS. The numbers below are
       directly from the PCIe spec table 3-4 */
    pciercx_cfg448.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG448(pcie_port));
    /* Negotiated link width (nlw) selects the replay timer value; any other
       width falls through and leaves the reset value of RTL unchanged */
    switch (pciercx_cfg032.s.nlw)
    {
        case 1: /* 1 lane */
            pciercx_cfg448.s.rtl = 1677;
            break;
        case 2: /* 2 lanes */
            pciercx_cfg448.s.rtl = 867;
            break;
        case 4: /* 4 lanes */
            pciercx_cfg448.s.rtl = 462;
            break;
        case 8: /* 8 lanes */
            pciercx_cfg448.s.rtl = 258;
            break;
    }
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG448(pcie_port), pciercx_cfg448.u32);
    return 0;
}
/**
* Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
* the bus.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
/**
 * Initialize a PCIe gen 2 port for use in host(RC) mode. It doesn't enumerate
 * the bus.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, negative if the port is unusable (SERDES lanes
 *         configured for another protocol, endpoint mode, stuck in reset,
 *         or the link never trains)
 */
static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
{
    int i;
    cvmx_ciu_soft_prst_t ciu_soft_prst;
    cvmx_mio_rst_ctlx_t mio_rst_ctl;
    cvmx_pemx_bar_ctl_t pemx_bar_ctl;
    cvmx_pemx_ctl_status_t pemx_ctl_status;
    cvmx_pemx_bist_status_t pemx_bist_status;
    cvmx_pemx_bist_status2_t pemx_bist_status2;
    cvmx_pciercx_cfg032_t pciercx_cfg032;
    cvmx_pciercx_cfg515_t pciercx_cfg515;
    cvmx_sli_ctl_portx_t sli_ctl_portx;
    cvmx_sli_mem_access_ctl_t sli_mem_access_ctl;
    cvmx_sli_mem_access_subidx_t mem_access_subid;
    cvmx_pemx_bar1_indexx_t bar1_index;
    int ep_mode;
    /* Make sure this interface is PCIe */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
    {
        /* Requires reading the MIO_QLMX_CFG register to figure
           out the port type. */
        int qlm = pcie_port;
        int status;
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            qlm = 3 - (pcie_port * 2);
        else if (OCTEON_IS_MODEL(OCTEON_CN61XX))
        {
            cvmx_mio_qlmx_cfg_t qlm_cfg;
            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(1));
            if (qlm_cfg.s.qlm_cfg == 1)
                qlm = 1;
        }
        /* PCIe is allowed only in QLM1, 1 PCIe port in x2 or
           2 PCIe ports in x1 */
        else if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
            qlm = 1;
        /* Skip the port if the QLM is running a non-PCIe protocol */
        status = cvmx_qlm_get_status(qlm);
        if (status == 4 || status == 5)
        {
            cvmx_dprintf("PCIe: Port %d is SRIO, skipping.\n", pcie_port);
            return -1;
        }
        if (status == 1)
        {
            cvmx_dprintf("PCIe: Port %d is SGMII, skipping.\n", pcie_port);
            return -1;
        }
        if (status == 2)
        {
            cvmx_dprintf("PCIe: Port %d is XAUI, skipping.\n", pcie_port);
            return -1;
        }
        if (status == -1)
        {
            cvmx_dprintf("PCIe: Port %d is unknown, skipping.\n", pcie_port);
            return -1;
        }
    }
#if 0
    /* This code is so that the PCIe analyzer is able to see 63XX traffic */
    cvmx_dprintf("PCIE : init for pcie analyzer.\n");
    cvmx_helper_qlm_jtag_init();
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 85);
    cvmx_helper_qlm_jtag_shift(pcie_port, 1, 1);
    cvmx_helper_qlm_jtag_shift_zeros(pcie_port, 300-86);
    cvmx_helper_qlm_jtag_update(pcie_port);
#endif
    /* Make sure we aren't trying to setup a target mode interface in host
       mode. On CN61XX/CNF71XX the PRTMODE field selects the mode; on other
       models HOST_MODE does.
       BUG FIX: the model test previously read
       OCTEON_IS_MODEL(OCTEON_CN61XX || OCTEON_IS_MODEL(OCTEON_CNF71XX)) --
       a misplaced parenthesis that OR'ed a model constant into the macro
       argument, so the wrong field could be checked on CNF71XX parts. */
    mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
    ep_mode = ((OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) ? (mio_rst_ctl.s.prtmode != 1) : (!mio_rst_ctl.s.host_mode));
    if (ep_mode)
    {
        cvmx_dprintf("PCIe: Port %d in endpoint mode.\n", pcie_port);
        return -1;
    }
    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }
    /* Bring the PCIe out of reset */
    if (pcie_port)
        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
    else
        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
    /* After a chip reset the PCIe will also be in reset. If it isn't,
       most likely someone is trying to init it again without a proper
       PCIe reset */
    if (ciu_soft_prst.s.soft_prst == 0)
    {
        /* Reset the port */
        ciu_soft_prst.s.soft_prst = 1;
        if (pcie_port)
            cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
        else
            cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
        /* Wait until pcie resets the ports. */
        cvmx_wait_usec(2000);
    }
    /* Deassert the soft reset to release the port */
    if (pcie_port)
    {
        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        ciu_soft_prst.s.soft_prst = 0;
        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, ciu_soft_prst.u64);
    }
    else
    {
        ciu_soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        ciu_soft_prst.s.soft_prst = 0;
        cvmx_write_csr(CVMX_CIU_SOFT_PRST, ciu_soft_prst.u64);
    }
    /* Wait for PCIe reset to complete */
    cvmx_wait_usec(1000);
    /* Check and make sure PCIe came out of reset. If it doesn't the board
       probably hasn't wired the clocks up and the interface should be
       skipped */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_MIO_RST_CTLX(pcie_port), cvmx_mio_rst_ctlx_t, rst_done, ==, 1, 10000))
    {
        cvmx_dprintf("PCIe: Port %d stuck in reset, skipping.\n", pcie_port);
        return -1;
    }
    /* Check BIST status */
    pemx_bist_status.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS(pcie_port));
    if (pemx_bist_status.u64)
        cvmx_dprintf("PCIe: BIST FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status.u64));
    pemx_bist_status2.u64 = cvmx_read_csr(CVMX_PEMX_BIST_STATUS2(pcie_port));
    /* Errata PCIE-14766 may cause the lower 6 bits to be randomly set on CN63XXp1 */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
        pemx_bist_status2.u64 &= ~0x3full;
    if (pemx_bist_status2.u64)
        cvmx_dprintf("PCIe: BIST2 FAILED for port %d (0x%016llx)\n", pcie_port, CAST64(pemx_bist_status2.u64));
    /* Initialize the config space CSRs */
    __cvmx_pcie_rc_initialize_config_space(pcie_port);
    /* Enable gen2 speed selection */
    pciercx_cfg515.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG515(pcie_port));
    pciercx_cfg515.s.dsc = 1;
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG515(pcie_port), pciercx_cfg515.u32);
    /* Bring the link up */
    if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
    {
        /* Some gen1 devices don't handle the gen 2 training correctly. Disable
           gen2 and try again with only gen1 */
        cvmx_pciercx_cfg031_t pciercx_cfg031;
        pciercx_cfg031.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG031(pcie_port));
        pciercx_cfg031.s.mls = 1;
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u32);
        if (__cvmx_pcie_rc_initialize_link_gen2(pcie_port))
        {
            cvmx_dprintf("PCIe: Link timeout on port %d, probably the slot is empty\n", pcie_port);
            return -1;
        }
    }
    /* Store merge control (SLI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
    sli_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL);
    sli_mem_access_ctl.s.max_word = 0;      /* Allow 16 words to combine */
    sli_mem_access_ctl.s.timer = 127;       /* Wait up to 127 cycles for more data */
    cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_CTL, sli_mem_access_ctl.u64);
    /* Setup Mem access SubDIDs */
    mem_access_subid.u64 = 0;
    mem_access_subid.s.port = pcie_port;    /* Port the request is sent to. */
    mem_access_subid.s.nmerge = 0;          /* Allow merging as it works on CN6XXX. */
    mem_access_subid.s.esr = 1;             /* Endian-swap for Reads. */
    mem_access_subid.s.esw = 1;             /* Endian-swap for Writes. */
    mem_access_subid.s.wtype = 0;           /* "No snoop" and "Relaxed ordering" are not set */
    mem_access_subid.s.rtype = 0;           /* "No snoop" and "Relaxed ordering" are not set */
    /* PCIe Address Bits <63:34>. */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        mem_access_subid.cn68xx.ba = 0;
    else
        mem_access_subid.cn63xx.ba = 0;
    /* Setup mem access 12-15 for port 0, 16-19 for port 1, supplying 36 bits of address space */
    for (i = 12 + pcie_port * 4; i < 16 + pcie_port * 4; i++)
    {
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(i), mem_access_subid.u64);
        /* Set each SUBID to extend the addressable range */
        __cvmx_increment_ba(&mem_access_subid);
    }
    if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
    {
        /* Disable the peer to peer forwarding register. This must be setup
           by the OS after it enumerates the bus and assigns addresses to the
           PCIe busses */
        for (i = 0; i < 4; i++)
        {
            cvmx_write_csr(CVMX_PEMX_P2P_BARX_START(i, pcie_port), -1);
            cvmx_write_csr(CVMX_PEMX_P2P_BARX_END(i, pcie_port), -1);
        }
    }
    /* Set Octeon's BAR0 to decode 0-16KB. It overlaps with Bar2 */
    cvmx_write_csr(CVMX_PEMX_P2N_BAR0_START(pcie_port), 0);
    /* Set Octeon's BAR2 to decode 0-2^41. Bar0 and Bar1 take precedence
       where they overlap. It also overlaps with the device addresses, so
       make sure the peer to peer forwarding is set right */
    cvmx_write_csr(CVMX_PEMX_P2N_BAR2_START(pcie_port), 0);
    /* Setup BAR2 attributes */
    /* Relaxed Ordering (NPEI_CTL_PORTn[PTLP_RO,CTLP_RO, WAIT_COM]) */
    /* - PTLP_RO,CTLP_RO should normally be set (except for debug). */
    /* - WAIT_COM=0 will likely work for all applications. */
    /* Load completion relaxed ordering (NPEI_CTL_PORTn[WAITL_COM]) */
    pemx_bar_ctl.u64 = cvmx_read_csr(CVMX_PEMX_BAR_CTL(pcie_port));
    pemx_bar_ctl.s.bar1_siz = 3;            /* 256MB BAR1 */
    pemx_bar_ctl.s.bar2_enb = 1;
    pemx_bar_ctl.s.bar2_esx = 1;
    pemx_bar_ctl.s.bar2_cax = 0;
    cvmx_write_csr(CVMX_PEMX_BAR_CTL(pcie_port), pemx_bar_ctl.u64);
    sli_ctl_portx.u64 = cvmx_read_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port));
    sli_ctl_portx.s.ptlp_ro = 1;
    sli_ctl_portx.s.ctlp_ro = 1;
    sli_ctl_portx.s.wait_com = 0;
    sli_ctl_portx.s.waitl_com = 0;
    cvmx_write_csr(CVMX_PEXP_SLI_CTL_PORTX(pcie_port), sli_ctl_portx.u64);
    /* BAR1 follows BAR2 */
    cvmx_write_csr(CVMX_PEMX_P2N_BAR1_START(pcie_port), CVMX_PCIE_BAR1_RC_BASE);
    bar1_index.u64 = 0;
    bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
    bar1_index.s.ca = 1;                    /* Not Cached */
    bar1_index.s.end_swp = 1;               /* Endian Swap mode */
    bar1_index.s.addr_v = 1;                /* Valid entry */
    for (i = 0; i < 16; i++) {
        cvmx_write_csr(CVMX_PEMX_BAR1_INDEXX(i, pcie_port), bar1_index.u64);
        /* 256MB / 16 >> 22 == 4 */
        bar1_index.s.addr_idx += (((1ull << 28) / 16ull) >> 22);
    }
    /* Allow config retries for 250ms. Count is based off the 5Ghz SERDES
       clock */
    pemx_ctl_status.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(pcie_port));
    pemx_ctl_status.s.cfg_rtry = 250 * 5000000 / 0x10000;
    cvmx_write_csr(CVMX_PEMX_CTL_STATUS(pcie_port), pemx_ctl_status.u64);
    /* Display the link status */
    pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
    cvmx_dprintf("PCIe: Port %d link active, %d lanes, speed gen%d\n", pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);
    return 0;
}
/**
* Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
/**
 * Initialize a PCIe port for use in host(RC) mode. It doesn't enumerate the bus.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
int cvmx_pcie_rc_initialize(int pcie_port)
{
    /* NPEI-based chips use the gen1 bringup sequence; PEM-based chips use
       the gen2 sequence */
    int rc = octeon_has_feature(OCTEON_FEATURE_NPEI)
                 ? __cvmx_pcie_rc_initialize_gen1(pcie_port)
                 : __cvmx_pcie_rc_initialize_gen2(pcie_port);
#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
    /* Hook the port into the error reporting framework once it is up */
    if (rc == 0)
        cvmx_error_enable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
#endif
    return rc;
}
/**
* Shutdown a PCIe port and put it in reset
*
* @param pcie_port PCIe port to shutdown
*
* @return Zero on success
*/
/**
 * Shutdown a PCIe port and put it in reset
 *
 * @param pcie_port PCIe port to shutdown
 *
 * @return Zero on success
 */
int cvmx_pcie_rc_shutdown(int pcie_port)
{
    cvmx_ciu_soft_prst_t soft_prst;
#if (!defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)) || defined(CONFIG_CAVIUM_DECODE_RSL)
    cvmx_error_disable_group(CVMX_ERROR_GROUP_PCI, pcie_port);
#endif
    /* Wait (up to 2ms) for all pending operations to complete. The
       completion LUT register differs between NPEI and PEM based chips. */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PESCX_CPL_LUT_VALID(pcie_port), cvmx_pescx_cpl_lut_valid_t, tag, ==, 0, 2000))
            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
    }
    else
    {
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PEMX_CPL_LUT_VALID(pcie_port), cvmx_pemx_cpl_lut_valid_t, tag, ==, 0, 2000))
            cvmx_dprintf("PCIe: Port %d shutdown timeout\n", pcie_port);
    }
    /* Force the port back into reset via the CIU soft reset register for
       this port */
    if (pcie_port)
    {
        soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST1);
        soft_prst.s.soft_prst = 1;
        cvmx_write_csr(CVMX_CIU_SOFT_PRST1, soft_prst.u64);
    }
    else
    {
        soft_prst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_PRST);
        soft_prst.s.soft_prst = 1;
        cvmx_write_csr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
    }
    return 0;
}
/**
* @INTERNAL
* Build a PCIe config space request address for a device
*
* @param pcie_port PCIe port to access
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return 64bit Octeon IO address
*/
/**
 * @INTERNAL
 * Build a PCIe config space request address for a device
 *
 * @param pcie_port PCIe port to access
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return 64bit Octeon IO address, or 0 if the bus/device combination is
 *         not addressable
 */
static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus, int dev, int fn, int reg)
{
    cvmx_pcie_address_t address;
    cvmx_pciercx_cfg006_t cfg006;
    int primary_bus;

    cfg006.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
    primary_bus = cfg006.s.pbnum;
    /* Only device 0 exists up to and including the primary bus number */
    if ((bus <= primary_bus) && (dev != 0))
        return 0;

    address.u64 = 0;
    address.config.upper = 2;
    address.config.io = 1;
    address.config.did = 3;
    address.config.subdid = 1;
    address.config.es = 1;
    address.config.port = pcie_port;
    /* Busses beyond the primary need type-1 config cycles */
    address.config.ty = (bus > primary_bus);
    address.config.bus = bus;
    address.config.dev = dev;
    address.config.func = fn;
    address.config.reg = reg;
    return address.u64;
}
/**
* Read 8bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
/**
 * Read 8bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or 0xff if the address is invalid
 */
uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!io_addr)
        return 0xff;
    return cvmx_read64_uint8(io_addr);
}
/**
* Read 16bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
/**
 * Read 16bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or 0xffff if the address is invalid
 */
uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!io_addr)
        return 0xffff;
    /* Config space is little-endian on the wire */
    return cvmx_le16_to_cpu(cvmx_read64_uint16(io_addr));
}
/**
* Read 32bits from a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
*
* @return Result of the read
*/
/**
 * Read 32bits from a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return Result of the read, or 0xffffffff if the address is invalid
 */
uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev, int fn, int reg)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* An invalid target reads as all ones, like a PCI master abort */
    if (!io_addr)
        return 0xffffffff;
    /* Config space is little-endian on the wire */
    return cvmx_le32_to_cpu(cvmx_read64_uint32(io_addr));
}
/**
* Write 8bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
/**
 * Write 8bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!io_addr)
        return;
    cvmx_write64_uint8(io_addr, val);
}
/**
* Write 16bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
/**
 * Write 16bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!io_addr)
        return;
    /* Config space is little-endian on the wire */
    cvmx_write64_uint16(io_addr, cvmx_cpu_to_le16(val));
}
/**
* Write 32bits to a Device's config space
*
* @param pcie_port PCIe port the device is on
* @param bus Sub bus
* @param dev Device ID
* @param fn Device sub function
* @param reg Register to access
* @param val Value to write
*/
/**
 * Write 32bits to a Device's config space
 *
 * @param pcie_port PCIe port the device is on
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 * @param val       Value to write
 */
void cvmx_pcie_config_write32(int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
{
    uint64_t io_addr = __cvmx_pcie_build_config_addr(pcie_port, bus, dev, fn, reg);
    /* Writes to an invalid target are silently dropped */
    if (!io_addr)
        return;
    /* Config space is little-endian on the wire */
    cvmx_write64_uint32(io_addr, cvmx_cpu_to_le32(val));
}
/**
* Read a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @param pcie_port PCIe port to read from
* @param cfg_offset Address to read
*
* @return Value read
*/
/**
 * Read a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @param pcie_port  PCIe port to read from
 * @param cfg_offset Address to read
 *
 * @return Value read
 */
uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
{
    /* Newer (PEM based) chips use the PEM indirect access registers;
       older NPEI based chips use the PESC equivalents. In both cases a
       write of the offset triggers the access and a read back returns
       the data. */
    if (!octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_pemx_cfg_rd_t cfg_rd;
        cfg_rd.u64 = 0;
        cfg_rd.s.addr = cfg_offset;
        cvmx_write_csr(CVMX_PEMX_CFG_RD(pcie_port), cfg_rd.u64);
        cfg_rd.u64 = cvmx_read_csr(CVMX_PEMX_CFG_RD(pcie_port));
        return cfg_rd.s.data;
    }
    else
    {
        cvmx_pescx_cfg_rd_t cfg_rd;
        cfg_rd.u64 = 0;
        cfg_rd.s.addr = cfg_offset;
        cvmx_write_csr(CVMX_PESCX_CFG_RD(pcie_port), cfg_rd.u64);
        cfg_rd.u64 = cvmx_read_csr(CVMX_PESCX_CFG_RD(pcie_port));
        return cfg_rd.s.data;
    }
}
/**
* Write a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
* @param pcie_port PCIe port to write to
* @param cfg_offset Address to write
* @param val Value to write
*/
/**
 * Write a PCIe config space register indirectly. This is used for
 * registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
 *
 * @param pcie_port  PCIe port to write to
 * @param cfg_offset Address to write
 * @param val        Value to write
 */
void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset, uint32_t val)
{
    /* Newer (PEM based) chips use the PEM indirect access registers;
       older NPEI based chips use the PESC equivalents. A single CSR
       write carrying both offset and data performs the access. */
    if (!octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_pemx_cfg_wr_t cfg_wr;
        cfg_wr.u64 = 0;
        cfg_wr.s.addr = cfg_offset;
        cfg_wr.s.data = val;
        cvmx_write_csr(CVMX_PEMX_CFG_WR(pcie_port), cfg_wr.u64);
    }
    else
    {
        cvmx_pescx_cfg_wr_t cfg_wr;
        cfg_wr.u64 = 0;
        cfg_wr.s.addr = cfg_offset;
        cfg_wr.s.data = val;
        cvmx_write_csr(CVMX_PESCX_CFG_WR(pcie_port), cfg_wr.u64);
    }
}
/**
* Initialize a PCIe port for use in target(EP) mode.
*
* @param pcie_port PCIe port to initialize
*
* @return Zero on success
*/
/**
 * Initialize a PCIe port for use in target(EP) mode.
 *
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, -1 if the port is configured for host(RC) mode
 */
int cvmx_pcie_ep_initialize(int pcie_port)
{
    /* Refuse to run if the port is strapped for host mode */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_ctl_status_t npei_ctl_status;
        npei_ctl_status.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS);
        if (npei_ctl_status.s.host_mode)
            return -1;
    }
    else
    {
        cvmx_mio_rst_ctlx_t mio_rst_ctl;
        int ep_mode;
        mio_rst_ctl.u64 = cvmx_read_csr(CVMX_MIO_RST_CTLX(pcie_port));
        /* NOTE(review): only CN61XX uses the PRTMODE field here, while the
           RC init path also includes CNF71XX in the PRTMODE check — confirm
           whether CNF71XX needs the same treatment in EP mode */
        ep_mode = (OCTEON_IS_MODEL(OCTEON_CN61XX) ? (mio_rst_ctl.s.prtmode != 0) : mio_rst_ctl.s.host_mode);
        if (ep_mode)
            return -1;
    }
    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        if (pcie_port)
        {
            cvmx_ciu_qlm1_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM1);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM1, ciu_qlm.u64);
        }
        else
        {
            cvmx_ciu_qlm0_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM0);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 5;
            ciu_qlm.s.txmargin = 0x17;
            cvmx_write_csr(CVMX_CIU_QLM0, ciu_qlm.u64);
        }
    }
    /* Enable bus master and memory (command register = MEM_EN|BUS_MASTER) */
    cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG001(pcie_port), 0x6);
    /* Max Payload Size (PCIE*_CFG030[MPS]) */
    /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
    /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
    /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
    {
        cvmx_pcieepx_cfg030_t pcieepx_cfg030;
        pcieepx_cfg030.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port));
        /* MPS/MRRS settings differ between CN5XXX and CN6XXX families */
        if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        {
            pcieepx_cfg030.s.mps = MPS_CN5XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN5XXX;
        }
        else
        {
            pcieepx_cfg030.s.mps = MPS_CN6XXX;
            pcieepx_cfg030.s.mrrs = MRRS_CN6XXX;
        }
        pcieepx_cfg030.s.ro_en = 1;     /* Enable relaxed ordering. */
        pcieepx_cfg030.s.ns_en = 1;     /* Enable no snoop. */
        pcieepx_cfg030.s.ce_en = 1;     /* Correctable error reporting enable. */
        pcieepx_cfg030.s.nfe_en = 1;    /* Non-fatal error reporting enable. */
        pcieepx_cfg030.s.fe_en = 1;     /* Fatal error reporting enable. */
        pcieepx_cfg030.s.ur_en = 1;     /* Unsupported request reporting enable. */
        cvmx_pcie_cfgx_write(pcie_port, CVMX_PCIEEPX_CFG030(pcie_port), pcieepx_cfg030.u32);
    }
    /* Mirror the MPS/MRRS choice into the DMA engine side so both agree */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        /* Max Payload Size (NPEI_CTL_STATUS2[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (NPEI_CTL_STATUS2[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_npei_ctl_status2_t npei_ctl_status2;
        npei_ctl_status2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_CTL_STATUS2);
        npei_ctl_status2.s.mps = MPS_CN5XXX;    /* Max payload size = 128 bytes (Limit of most PCs) */
        npei_ctl_status2.s.mrrs = MRRS_CN5XXX;  /* Max read request size = 128 bytes for best Octeon DMA performance */
        cvmx_write_csr(CVMX_PEXP_NPEI_CTL_STATUS2, npei_ctl_status2.u64);
    }
    else
    {
        /* Max Payload Size (DPI_SLI_PRTX_CFG[MPS]) must match PCIE*_CFG030[MPS] */
        /* Max Read Request Size (DPI_SLI_PRTX_CFG[MRRS]) must not exceed PCIE*_CFG030[MRRS] */
        cvmx_dpi_sli_prtx_cfg_t prt_cfg;
        cvmx_sli_s2m_portx_ctl_t sli_s2m_portx_ctl;
        prt_cfg.u64 = cvmx_read_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port));
        prt_cfg.s.mps = MPS_CN6XXX;
        prt_cfg.s.mrrs = MRRS_CN6XXX;
        /* Max outstanding load request. */
        prt_cfg.s.molr = 32;
        cvmx_write_csr(CVMX_DPI_SLI_PRTX_CFG(pcie_port), prt_cfg.u64);
        sli_s2m_portx_ctl.u64 = cvmx_read_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port));
        sli_s2m_portx_ctl.s.mrrs = MRRS_CN6XXX;
        cvmx_write_csr(CVMX_PEXP_SLI_S2M_PORTX_CTL(pcie_port), sli_s2m_portx_ctl.u64);
    }
    /* Setup Mem access SubDID 12 to access Host memory */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port;    /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 1;          /* Merging is not allowed in this window. */
        mem_access_subid.s.esr = 0;             /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;             /* Endian-swap for Writes. */
        mem_access_subid.s.nsr = 0;             /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
        mem_access_subid.s.nsw = 0;             /* Enable Snoop for Writes. */
        mem_access_subid.s.ror = 0;             /* Disable Relaxed Ordering for Reads. */
        mem_access_subid.s.row = 0;             /* Disable Relaxed Ordering for Writes. */
        mem_access_subid.s.ba = 0;              /* PCIe Address Bits <63:34>. */
        cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_SUBIDX(12), mem_access_subid.u64);
    }
    else
    {
        cvmx_sli_mem_access_subidx_t mem_access_subid;
        mem_access_subid.u64 = 0;
        mem_access_subid.s.port = pcie_port;    /* Port the request is sent to. */
        mem_access_subid.s.nmerge = 0;          /* Merging is allowed in this window. */
        mem_access_subid.s.esr = 0;             /* Endian-swap for Reads. */
        mem_access_subid.s.esw = 0;             /* Endian-swap for Writes. */
        mem_access_subid.s.wtype = 0;           /* "No snoop" and "Relaxed ordering" are not set */
        mem_access_subid.s.rtype = 0;           /* "No snoop" and "Relaxed ordering" are not set */
        /* PCIe Address Bits <63:34>. */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            mem_access_subid.cn68xx.ba = 0;
        else
            mem_access_subid.cn63xx.ba = 0;
        cvmx_write_csr(CVMX_PEXP_SLI_MEM_ACCESS_SUBIDX(12 + pcie_port*4), mem_access_subid.u64);
    }
    return 0;
}
/**
* Wait for posted PCIe read/writes to reach the other side of
* the internal PCIe switch. This will insure that core
* read/writes are posted before anything after this function
* is called. This may be necessary when writing to memory that
* will later be read using the DMA/PKT engines.
*
* @param pcie_port PCIe port to wait for
*/
void cvmx_pcie_wait_for_pending(int pcie_port)
{
if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
cvmx_npei_data_out_cnt_t npei_data_out_cnt;
int a;
int b;
int c;
/* See section 9.8, PCIe Core-initiated Requests, in the manual for a
description of how this code works */
npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
if (pcie_port)
{
if (!npei_data_out_cnt.s.p1_fcnt)
return;
a = npei_data_out_cnt.s.p1_ucnt;
b = (a + npei_data_out_cnt.s.p1_fcnt-1) & 0xffff;
}
else
{
if (!npei_data_out_cnt.s.p0_fcnt)
return;
a = npei_data_out_cnt.s.p0_ucnt;
b = (a + npei_data_out_cnt.s.p0_fcnt-1) & 0xffff;
}
while (1)
{
npei_data_out_cnt.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DATA_OUT_CNT);
c = (pcie_port) ? npei_data_out_cnt.s.p1_ucnt : npei_data_out_cnt.s.p0_ucnt;
if (a<=b)
{
if ((cb))
return;
}
else
{
if ((c>b) && (cb))
return;
}
else
{
if ((c>b) && (c$Revision: 70030 $
*/
#ifndef __CVMX_RAID_H__
#define __CVMX_RAID_H__
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* This structure defines the type of command words the RAID block
* will accept.
*/
/**
 * This structure defines the type of command words the RAID block
 * will accept. The same 64-bit word is interpreted three ways:
 * as the instruction control word (cword), an output descriptor
 * (oword), or an input descriptor (iword).
 */
typedef union
{
    uint64_t u64;
    /* Instruction control word */
    struct
    {
        uint64_t reserved_37_63 : 27; /**< Must be zero */
        uint64_t q_cmp          : 1;  /**< Indicates whether the Q pipe is in normal mode (CWORD[Q_CMP]=0) or in non-zero
                                           byte detect mode (CWORD[Q_CMP]=1).
                                           In non-zero byte detect mode, the Q OWORD[PTR] result is the non-zero detect
                                           result, which indicates the position of the first non-zero byte in the pipe result bytes.
                                           CWORD[Q_CMP] must not be set when CWORD[QOUT]=0, and must not be set
                                           when CWORD[Q_XOR] is set. */
        uint64_t p_cmp          : 1;  /**< Indicates whether the P pipe is in normal mode (CWORD[P_CMP]=0) or in non-zero
                                           byte detect mode (CWORD[P_CMP]=1).
                                           In non-zero byte detect mode, the P OWORD[PTR] result is the non-zero detect
                                           result, which indicates the position of the first non-zero byte in the pipe result bytes.
                                           CWORD[P_CMP] must not be set when CWORD[POUT]=0, and must not be set
                                           when CWORD[P_XOR] is set. */
        uint64_t q_xor          : 1;  /**< Indicates whether the Q output buffer bytes are the normal Q pipe result or the
                                           normal Q pipe result exclusive-OR'ed with the P pipe result.
                                           When CWORD[Q_XOR]=0 (and CWORD[Q_CMP]=0), the Q output buffer bytes are
                                           the normal Q pipe result, which does not include the P pipe result in any way.
                                           When CWORD[Q_XOR]=1, the Q output buffer bytes are the normal Q pipe result
                                           exclusive-OR'ed with the P pipe result, as if the P pipe result were another Q IWORD
                                           for the Q pipe with QMULT=1.
                                           CWORD[Q_XOR] must not be set unless both CWORD[POUT,QOUT] are set, and
                                           must not be set when CWORD[Q_CMP] is set. */
        uint64_t p_xor          : 1;  /**< Indicates whether the P output buffer bytes are the normal P pipe result or the
                                           normal P pipe result exclusive-OR'ed with the Q pipe result.
                                           When CWORD[P_XOR]=0 (and CWORD[P_CMP]=0), the P output buffer bytes are
                                           the normal P pipe result, which does not include the Q pipe result in any way.
                                           When CWORD[P_XOR]=1, the P output buffer bytes are the normal P pipe result
                                           exclusive-OR'ed with the Q pipe result, as if the Q pipe result were another P
                                           IWORD for the P pipe.
                                           CWORD[P_XOR] must not be set unless both CWORD[POUT,QOUT] are set, and
                                           must not be set when CWORD[P_CMP] is set. */
        uint64_t wqe            : 1;  /**< Indicates whether RAD submits a work queue entry or writes an L2/DRAM byte to
                                           zero after completing the instruction.
                                           When CWORD[WQE] is set and RESP[PTR]!=0, RAD adds the work queue entry
                                           indicated by RESP[PTR] to the selected POW input queue after completing the
                                           instruction.
                                           When CWORD[WQE] is clear and RESP[PTR]!=0, RAD writes the L2/DRAM byte
                                           indicated by RESP[PTR] to zero after completing the instruction. */
        uint64_t qout           : 1;  /**< Indicates whether the Q pipe is used by this instruction.
                                           If CWORD[QOUT] is set, IWORD[QEN] must be set for at least one IWORD.
                                           At least one of CWORD[QOUT,POUT] must be set. */
        uint64_t pout           : 1;  /**< Indicates whether the P pipe is used by this instruction.
                                           If CWORD[POUT] is set, IWORD[PEN] must be set for at least one IWORD.
                                           At least one of CWORD[QOUT,POUT] must be set. */
        uint64_t iword          : 6;  /**< Indicates the number of input buffers used.
                                           1 <= CWORD[IWORD] <= 32. */
        uint64_t size           : 24; /**< Indicates the size in bytes of all input buffers. When CWORD[Q_CMP,P_CMP]=0,
                                           also indicates the size of the Q/P output buffers.
                                           CWORD[SIZE] must be a multiple of 8B (i.e. <2:0> must be zero). */
    } cword;
    /* Output buffer descriptor word */
    struct
    {
        uint64_t reserved_58_63 : 6;  /**< Must be zero */
        uint64_t fw             : 1;  /**< When set, indicates that RAD can modify any byte in any (128B) cache line touched
                                           by L2/DRAM addresses OWORD[PTR] through OWORD[PTR]+CWORD[SIZE]-1.
                                           Setting OWORD[FW] can improve hardware performance, as some DRAM loads can
                                           be avoided on L2 cache misses. The Q OWORD[FW] must not be set when
                                           CWORD[Q_CMP] is set, and the P OWORD[FW] must not be set when
                                           CWORD[P_CMP] is set. */
        uint64_t nc             : 1;  /**< When set, indicates that RAD should not allocate L2 cache space for the P/Q data on
                                           L2 cache misses.
                                           OWORD[NC] should typically be clear, though setting OWORD[NC] can improve
                                           performance in some circumstances, as the L2 cache will not be polluted by P/Q data.
                                           The Q OWORD[NC] must not be set when CWORD[Q_CMP] is set, and the P
                                           OWORD[NC] must not be set when CWORD[P_CMP] is set. */
        uint64_t reserved_40_55 : 16; /**< Must be zero */
        uint64_t addr           : 40; /**< When CWORD[P_CMP,Q_CMP]=0, OWORD[PTR] indicates the starting address of
                                           the L2/DRAM buffer that will receive the P/Q data. In the non-compare mode, the
                                           output buffer receives all of the output buffer bytes.
                                           When CWORD[P_CMP,Q_CMP]=1, the corresponding P/Q pipe is in compare mode,
                                           and the only output of the pipe is the non-zero detect result. In this case,
                                           OWORD[PTR] indicates the 8-byte location of the non-zero detect result. */
    } oword;
    /* Input buffer descriptor word */
    struct
    {
        uint64_t reserved_57_63 : 7;  /**< Must be zero */
        uint64_t nc             : 1;  /**< When set, indicates that RAD should not allocate L2 cache space for this input buffer
                                           data on L2 cache misses.
                                           Setting IWORD[NC] may improve performance in some circumstances, as the L2
                                           cache may not be polluted with input buffer data. */
        uint64_t reserved_50_55 : 6;  /**< Must be zero */
        uint64_t qen            : 1;  /**< Indicates that this input buffer data should participate in the Q pipe result.
                                           The Q pipe hardware multiplies each participating input byte by IWORD[QMULT]
                                           before accumulating them by exclusive-OR'ing.
                                           IWORD[QEN] must not be set when CWORD[QOUT] is not set.
                                           If CWORD[QOUT] is set, IWORD[QEN] must be set for at least one IWORD. */
        uint64_t pen            : 1;  /**< Indicates that this input buffer data should participate in the P pipe result.
                                           The P pipe hardware accumulates each participating input byte by bit-wise
                                           exclusive-OR'ing it.
                                           IWORD[PEN] must not be set when CWORD[POUT] is not set.
                                           If CWORD[POUT] is set, IWORD[PEN] must be set for at least one IWORD. */
        uint64_t qmult          : 8;  /**< The Q pipe multiplier for the input buffer. Section 26.1 above describes the GF(2^8)
                                           multiplication algorithm.
                                           IWORD[QMULT] must be zero when IWORD[QEN] is not set.
                                           IWORD[QMULT] must not be zero when IWORD[QEN] is set.
                                           When IWORD[QMULT] is 1, the multiplication simplifies to the identity function,
                                           and the Q pipe performs the same XOR function as the P pipe. */
        uint64_t addr           : 40; /**< The starting address of the input buffer in L2/DRAM.
                                           IWORD[PTR] must be naturally-aligned on an 8 byte boundary (i.e. <2:0> must be
                                           zero). */
    } iword;
} cvmx_raid_word_t;
/**
* Initialize the RAID block
*
* @param polynomial Coefficients for the RAID polynomial
*
* @return Zero on success, negative on failure
*/
int cvmx_raid_initialize(cvmx_rad_reg_polynomial_t polynomial);
/**
* Shutdown the RAID block. RAID must be idle when
* this function is called.
*
* @return Zero on success, negative on failure
*/
int cvmx_raid_shutdown(void);
/**
* Submit a command to the RAID block
*
* @param num_words Number of command words to submit
* @param words Command words
*
* @return Zero on success, negative on failure
*/
int cvmx_raid_submit(int num_words, cvmx_raid_word_t words[]);
#ifdef __cplusplus
}
#endif
#endif // __CVMX_RAID_H__