Index: head/sys/contrib/ncsw/Peripherals/FM/HC/hc.c
===================================================================
--- head/sys/contrib/ncsw/Peripherals/FM/HC/hc.c	(revision 351321)
+++ head/sys/contrib/ncsw/Peripherals/FM/HC/hc.c	(revision 351322)
@@ -1,1231 +1,1231 @@
/*
 * Copyright 2008-2012 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * Neither the name of Freescale Semiconductor nor the
 * names of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* FMan Host-Command (HC) driver: builds HC frames and enqueues them to the
 * FMan through a dedicated Offline-Host-Command port via QMan. */

#include "std_ext.h"
#include "error_ext.h"
#include "sprint_ext.h"
#include "string_ext.h"

#include "fm_common.h"
#include "fm_hc.h"

/**************************************************************************//**
 @Description       defaults
*//***************************************************************************/
#define DEFAULT_dataMemId                                 0

/* HC operation codes placed in t_HcFrame.opcode (low bits; OR'ed with
 * HC_HCOR_GBL). */
#define HC_HCOR_OPCODE_PLCR_PRFL                          0x0
#define HC_HCOR_OPCODE_KG_SCM                             0x1
#define HC_HCOR_OPCODE_SYNC                               0x2
#define HC_HCOR_OPCODE_CC                                 0x3
#define HC_HCOR_OPCODE_CC_AGE_MASK                        0x4
#define HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT           0x5
#define HC_HCOR_OPCODE_CC_REASSM_TIMEOUT                  0x10
#define HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION          0x11
#define HC_HCOR_OPCODE_CC_UPDATE_WITH_AGING               0x13

/* Bit positions / masks inside actionReg and extraReg for the opcodes above. */
#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT    24
#define HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT       24
#define HC_HCOR_EXTRA_REG_CC_AGING_ADD                    0x80000000
#define HC_HCOR_EXTRA_REG_CC_AGING_REMOVE                 0x40000000
#define HC_HCOR_EXTRA_REG_CC_AGING_CHANGE_MASK            0xC0000000
#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_SHIFT            24
#define HC_HCOR_EXTRA_REG_CC_REMOVE_INDX_MASK             0x1F000000
#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT       16
#define HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK        0xF
#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT 24
#define HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID      16

#define HC_HCOR_GBL                         0x20000000

#define HC_HCOR_KG_SCHEME_COUNTER           0x00000400

/* KeyGen scheme register write-mask differs between DPAA revisions. */
#if (DPAA_VERSION == 10)
#define HC_HCOR_KG_SCHEME_REGS_MASK         0xFFFFF800
#else
#define HC_HCOR_KG_SCHEME_REGS_MASK         0xFFFFFE00
#endif /* (DPAA_VERSION == 10) */

/* Frame-descriptor lengths for the various HC commands; some commands only
 * need the 16-byte header, others the full frame. */
#define SIZE_OF_HC_FRAME_PORT_REGS          (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdKgPortRegs))
#define SIZE_OF_HC_FRAME_SCHEME_REGS        sizeof(t_HcFrame)
#define SIZE_OF_HC_FRAME_PROFILES_REGS      (sizeof(t_HcFrame)-sizeof(struct fman_kg_scheme_regs)+sizeof(t_FmPcdPlcrProfileRegs))
#define SIZE_OF_HC_FRAME_PROFILE_CNT        (sizeof(t_HcFrame)-sizeof(t_FmPcdPlcrProfileRegs)+sizeof(uint32_t))
#define SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC 16

/* One command buffer per core may be outstanding at a time. */
#define HC_CMD_POOL_SIZE                    (INTG_MAX_NUM_OF_CORES)

/* Build the QMan frame descriptor `fmFd` around the local `p_HcFrame`;
 * requires both names in the enclosing scope. */
#define BUILD_FD(len)                     \
do {                                      \
    memset(&fmFd, 0, sizeof(t_DpaaFD));   \
    DPAA_FD_SET_ADDR(&fmFd, p_HcFrame);   \
    DPAA_FD_SET_OFFSET(&fmFd, 0);         \
    DPAA_FD_SET_LENGTH(&fmFd, len);       \
} while (0)


#if defined(__MWERKS__) && !defined(__GNUC__)
#pragma pack(push,1)
#endif /* defined(__MWERKS__) && ... */

/* Port scheme-bind (SP) and cls-plan-bind (CPP) registers, read as a pair. */
typedef struct t_FmPcdKgPortRegs {
    volatile uint32_t                       spReg;
    volatile uint32_t                       cppReg;
} t_FmPcdKgPortRegs;

/* Layout of a host-command frame as consumed by FMan hardware: a 16-byte
 * header (opcode/action/extra/sequence) followed by a command-specific
 * payload union. Fields are volatile because hardware writes results back. */
typedef struct t_HcFrame {
    volatile uint32_t                       opcode;
    volatile uint32_t                       actionReg;
    volatile uint32_t                       extraReg;
    volatile uint32_t                       commandSequence;
    union {
        struct fman_kg_scheme_regs          schemeRegs;
        struct fman_kg_scheme_regs          schemeRegsWithoutCounter;
        t_FmPcdPlcrProfileRegs              profileRegs;
        volatile uint32_t                   singleRegForWrite;    /* for writing SP, CPP, profile counter */
        t_FmPcdKgPortRegs                   portRegsForRead;
        volatile uint32_t                   clsPlanEntries[CLS_PLAN_NUM_PER_GRP];
        t_FmPcdCcCapwapReassmTimeoutParams  ccCapwapReassmTimeout;
        t_FmPcdCcReassmTimeoutParams        ccReassmTimeout;
    } hcSpecificData;
} t_HcFrame;

#if defined(__MWERKS__) && !defined(__GNUC__)
#pragma pack(pop)
#endif /* defined(__MWERKS__) && ...
*/ typedef struct t_FmHc { t_Handle h_FmPcd; t_Handle h_HcPortDev; t_FmPcdQmEnqueueCallback *f_QmEnqueue; /**< A callback for enqueuing frames to the QM */ t_Handle h_QmArg; /**< A handle to the QM module */ uint8_t dataMemId; /**< Memory partition ID for data buffers */ uint32_t seqNum[HC_CMD_POOL_SIZE]; /* FIFO of seqNum to use when taking buffer */ uint32_t nextSeqNumLocation; /* seqNum location in seqNum[] for next buffer */ volatile bool enqueued[HC_CMD_POOL_SIZE]; /* HC is active - frame is enqueued and not confirmed yet */ t_HcFrame *p_Frm[HC_CMD_POOL_SIZE]; } t_FmHc; static t_Error FillBufPool(t_FmHc *p_FmHc) { uint32_t i; ASSERT_COND(p_FmHc); for (i = 0; i < HC_CMD_POOL_SIZE; i++) { #ifdef FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart((sizeof(t_HcFrame) + (16 - (sizeof(t_FmHc) % 16))), p_FmHc->dataMemId, 16); #else p_FmHc->p_Frm[i] = (t_HcFrame *)XX_MallocSmart(sizeof(t_HcFrame), p_FmHc->dataMemId, 16); #endif /* FM_LOCKUP_ALIGNMENT_ERRATA_FMAN_SW004 */ if (!p_FmHc->p_Frm[i]) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FM HC frames!")); } /* Initialize FIFO of seqNum to use during GetBuf */ for (i = 0; i < HC_CMD_POOL_SIZE; i++) { p_FmHc->seqNum[i] = i; } p_FmHc->nextSeqNumLocation = 0; return E_OK; } static __inline__ t_HcFrame * GetBuf(t_FmHc *p_FmHc, uint32_t *p_SeqNum) { uint32_t intFlags; ASSERT_COND(p_FmHc); intFlags = FmPcdLock(p_FmHc->h_FmPcd); if (p_FmHc->nextSeqNumLocation == HC_CMD_POOL_SIZE) { /* No more buffers */ FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); return NULL; } *p_SeqNum = p_FmHc->seqNum[p_FmHc->nextSeqNumLocation]; p_FmHc->nextSeqNumLocation++; FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); return p_FmHc->p_Frm[*p_SeqNum]; } static __inline__ void PutBuf(t_FmHc *p_FmHc, t_HcFrame *p_Buf, uint32_t seqNum) { uint32_t intFlags; UNUSED(p_Buf); intFlags = FmPcdLock(p_FmHc->h_FmPcd); ASSERT_COND(p_FmHc->nextSeqNumLocation); p_FmHc->nextSeqNumLocation--; p_FmHc->seqNum[p_FmHc->nextSeqNumLocation] = seqNum; 
FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); } static __inline__ t_Error EnQFrm(t_FmHc *p_FmHc, t_DpaaFD *p_FmFd, uint32_t seqNum) { t_Error err = E_OK; uint32_t intFlags; uint32_t timeout=100; intFlags = FmPcdLock(p_FmHc->h_FmPcd); ASSERT_COND(!p_FmHc->enqueued[seqNum]); p_FmHc->enqueued[seqNum] = TRUE; FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); DBG(TRACE, ("Send Hc, SeqNum %d, buff@0x%x, fd offset 0x%x", seqNum, DPAA_FD_GET_ADDR(p_FmFd), DPAA_FD_GET_OFFSET(p_FmFd))); err = p_FmHc->f_QmEnqueue(p_FmHc->h_QmArg, (void *)p_FmFd); if (err) RETURN_ERROR(MINOR, err, ("HC enqueue failed")); while (p_FmHc->enqueued[seqNum] && --timeout) XX_UDelay(100); if (!timeout) RETURN_ERROR(MINOR, E_TIMEOUT, ("HC Callback, timeout exceeded")); return err; } t_Handle FmHcConfigAndInit(t_FmHcParams *p_FmHcParams) { t_FmHc *p_FmHc; t_FmPortParams fmPortParam; t_Error err; p_FmHc = (t_FmHc *)XX_Malloc(sizeof(t_FmHc)); if (!p_FmHc) { REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC obj")); return NULL; } memset(p_FmHc,0,sizeof(t_FmHc)); p_FmHc->h_FmPcd = p_FmHcParams->h_FmPcd; p_FmHc->f_QmEnqueue = p_FmHcParams->params.f_QmEnqueue; p_FmHc->h_QmArg = p_FmHcParams->params.h_QmArg; p_FmHc->dataMemId = DEFAULT_dataMemId; err = FillBufPool(p_FmHc); if (err != E_OK) { REPORT_ERROR(MAJOR, err, NO_MSG); FmHcFree(p_FmHc); return NULL; } if (!FmIsMaster(p_FmHcParams->h_Fm)) return (t_Handle)p_FmHc; memset(&fmPortParam, 0, sizeof(fmPortParam)); fmPortParam.baseAddr = p_FmHcParams->params.portBaseAddr; fmPortParam.portType = e_FM_PORT_TYPE_OH_HOST_COMMAND; fmPortParam.portId = p_FmHcParams->params.portId; fmPortParam.liodnBase = p_FmHcParams->params.liodnBase; fmPortParam.h_Fm = p_FmHcParams->h_Fm; fmPortParam.specificParams.nonRxParams.errFqid = p_FmHcParams->params.errFqid; fmPortParam.specificParams.nonRxParams.dfltFqid = p_FmHcParams->params.confFqid; fmPortParam.specificParams.nonRxParams.qmChannel = p_FmHcParams->params.qmChannel; p_FmHc->h_HcPortDev = FM_PORT_Config(&fmPortParam); if (!p_FmHc->h_HcPortDev) { 
REPORT_ERROR(MAJOR, E_INVALID_HANDLE, ("FM HC port!")); XX_Free(p_FmHc); return NULL; } err = FM_PORT_ConfigMaxFrameLength(p_FmHc->h_HcPortDev, (uint16_t)sizeof(t_HcFrame)); if (err != E_OK) { REPORT_ERROR(MAJOR, err, ("FM HC port init!")); FmHcFree(p_FmHc); return NULL; } /* final init */ err = FM_PORT_Init(p_FmHc->h_HcPortDev); if (err != E_OK) { REPORT_ERROR(MAJOR, err, ("FM HC port init!")); FmHcFree(p_FmHc); return NULL; } err = FM_PORT_Enable(p_FmHc->h_HcPortDev); if (err != E_OK) { REPORT_ERROR(MAJOR, err, ("FM HC port enable!")); FmHcFree(p_FmHc); return NULL; } return (t_Handle)p_FmHc; } void FmHcFree(t_Handle h_FmHc) { t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; int i; if (!p_FmHc) return; for (i=0; ip_Frm[i]) XX_FreeSmart(p_FmHc->p_Frm[i]); else break; if (p_FmHc->h_HcPortDev) FM_PORT_Free(p_FmHc->h_HcPortDev); XX_Free(p_FmHc); } /*****************************************************************************/ t_Error FmHcSetFramesDataMemory(t_Handle h_FmHc, uint8_t memId) { t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; int i; SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE); p_FmHc->dataMemId = memId; for (i=0; ip_Frm[i]) XX_FreeSmart(p_FmHc->p_Frm[i]); return FillBufPool(p_FmHc); } void FmHcTxConf(t_Handle h_FmHc, t_DpaaFD *p_Fd) { t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; t_HcFrame *p_HcFrame; uint32_t intFlags; ASSERT_COND(p_FmHc); intFlags = FmPcdLock(p_FmHc->h_FmPcd); p_HcFrame = (t_HcFrame *)PTR_MOVE(DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd)); DBG(TRACE, ("Hc Conf, SeqNum %d, FD@0x%x, fd offset 0x%x", p_HcFrame->commandSequence, DPAA_FD_GET_ADDR(p_Fd), DPAA_FD_GET_OFFSET(p_Fd))); if (!(p_FmHc->enqueued[p_HcFrame->commandSequence])) REPORT_ERROR(MINOR, E_INVALID_FRAME, ("Not an Host-Command frame received!")); else p_FmHc->enqueued[p_HcFrame->commandSequence] = FALSE; FmPcdUnlock(p_FmHc->h_FmPcd, intFlags); } t_Error FmHcPcdKgSetScheme(t_Handle h_FmHc, t_Handle h_Scheme, struct fman_kg_scheme_regs *p_SchemeRegs, bool updateCounter) { t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; 
t_Error err = E_OK; t_HcFrame *p_HcFrame; t_DpaaFD fmFd; uint8_t physicalSchemeId; uint32_t seqNum; p_HcFrame = GetBuf(p_FmHc, &seqNum); if (!p_HcFrame) RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object")); physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme); memset(p_HcFrame, 0, sizeof(t_HcFrame)); p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, updateCounter); p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK; memcpy(&p_HcFrame->hcSpecificData.schemeRegs, p_SchemeRegs, sizeof(struct fman_kg_scheme_regs)); if (!updateCounter) { p_HcFrame->hcSpecificData.schemeRegs.kgse_dv0 = p_SchemeRegs->kgse_dv0; p_HcFrame->hcSpecificData.schemeRegs.kgse_dv1 = p_SchemeRegs->kgse_dv1; p_HcFrame->hcSpecificData.schemeRegs.kgse_ccbs = p_SchemeRegs->kgse_ccbs; p_HcFrame->hcSpecificData.schemeRegs.kgse_mv = p_SchemeRegs->kgse_mv; } p_HcFrame->commandSequence = seqNum; BUILD_FD(sizeof(t_HcFrame)); err = EnQFrm(p_FmHc, &fmFd, seqNum); PutBuf(p_FmHc, p_HcFrame, seqNum); if (err != E_OK) RETURN_ERROR(MINOR, err, NO_MSG); return E_OK; } t_Error FmHcPcdKgDeleteScheme(t_Handle h_FmHc, t_Handle h_Scheme) { t_FmHc *p_FmHc = (t_FmHc*)h_FmHc; t_Error err = E_OK; t_HcFrame *p_HcFrame; t_DpaaFD fmFd; uint8_t physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme); uint32_t seqNum; p_HcFrame = GetBuf(p_FmHc, &seqNum); if (!p_HcFrame) RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object")); memset(p_HcFrame, 0, sizeof(t_HcFrame)); p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM); p_HcFrame->actionReg = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE); p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK; memset(&p_HcFrame->hcSpecificData.schemeRegs, 0, sizeof(struct fman_kg_scheme_regs)); p_HcFrame->commandSequence = seqNum; BUILD_FD(sizeof(t_HcFrame)); err = EnQFrm(p_FmHc, &fmFd, seqNum); PutBuf(p_FmHc, p_HcFrame, seqNum); if (err != E_OK) RETURN_ERROR(MINOR, err, NO_MSG); return E_OK; } 
/* Apply CC-required updates (NIA / opt-mode changes) to a KeyGen scheme via
 * read-modify-write HC frames; delegates to the policer path when the scheme
 * feeds a shared policer profile. Scheme lock is held by the caller. */
t_Error FmHcPcdKgCcGetSetParams(t_Handle h_FmHc, t_Handle  h_Scheme, uint32_t requiredAction, uint32_t value)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_Error     err = E_OK;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint8_t     relativeSchemeId;
    uint8_t     physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
    uint32_t    tmpReg32 = 0;
    uint32_t    seqNum;

    /* Scheme is locked by calling routine */
    /* WARNING - this lock will not be efficient if other HC routine will attempt to change
     * "kgse_mode" or "kgse_om" without locking scheme !
     */

    relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
    if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);

    if (!FmPcdKgGetRequiredActionFlag(p_FmHc->h_FmPcd, relativeSchemeId) ||
        !(FmPcdKgGetRequiredAction(p_FmHc->h_FmPcd, relativeSchemeId) & requiredAction))
    {
        if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
            (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_PLCR))
        {
            if ((FmPcdKgIsDirectPlcr(p_FmHc->h_FmPcd, relativeSchemeId) == FALSE) ||
                (FmPcdKgIsDistrOnPlcrProfile(p_FmHc->h_FmPcd, relativeSchemeId) == TRUE))
                RETURN_ERROR(MAJOR, E_NOT_SUPPORTED, ("In this situation PP can not be with distribution and has to be shared"));
            err = FmPcdPlcrCcGetSetParams(p_FmHc->h_FmPcd, FmPcdKgGetRelativeProfileId(p_FmHc->h_FmPcd, relativeSchemeId), requiredAction);
            if (err)
                RETURN_ERROR(MAJOR, err, NO_MSG);
        }
        else /* From here we deal with KG-Schemes only */
        {
            /* Pre change general code */
            p_HcFrame = GetBuf(p_FmHc, &seqNum);
            if (!p_HcFrame)
                RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
            memset(p_HcFrame, 0, sizeof(t_HcFrame));
            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
            p_HcFrame->actionReg  = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
            p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
            p_HcFrame->commandSequence = seqNum;
            BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);
            if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MINOR, err, NO_MSG);
            }

            /* specific change */
            if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) &&
                ((FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_DONE) &&
                 (FmPcdKgGetDoneAction(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_ENQ_FRAME)))
            {
                tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
                ASSERT_COND(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME));
                p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;
            }

            if ((requiredAction & UPDATE_KG_NIA_CC_WA) &&
                (FmPcdKgGetNextEngine(p_FmHc->h_FmPcd, relativeSchemeId) == e_FM_PCD_CC))
            {
                tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
                ASSERT_COND(tmpReg32 & (NIA_ENG_FM_CTL | NIA_FM_CTL_AC_CC));
                tmpReg32 &= ~NIA_FM_CTL_AC_CC;
                p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32 | NIA_FM_CTL_AC_PRE_CC;
            }

            if (requiredAction & UPDATE_KG_OPT_MODE)
                p_HcFrame->hcSpecificData.schemeRegs.kgse_om = value;

            if (requiredAction & UPDATE_KG_NIA)
            {
                tmpReg32 = p_HcFrame->hcSpecificData.schemeRegs.kgse_mode;
                tmpReg32 &= ~(NIA_ENG_MASK | NIA_AC_MASK);
                tmpReg32 |= value;
                p_HcFrame->hcSpecificData.schemeRegs.kgse_mode = tmpReg32;
            }

            /* Post change general code */
            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
            p_HcFrame->actionReg  = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, FALSE);
            p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;

            BUILD_FD(sizeof(t_HcFrame));
            err = EnQFrm(p_FmHc, &fmFd, seqNum);

            PutBuf(p_FmHc, p_HcFrame, seqNum);

            if (err != E_OK)
                RETURN_ERROR(MINOR, err, NO_MSG);
        }
    }

    return E_OK;
}

/* Read a KeyGen scheme's packet counter (kgse_spc) via an HC read frame;
 * returns 0 on any error (error already reported). */
uint32_t FmHcPcdKgGetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_Error     err;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint32_t    retVal;
    uint8_t     relativeSchemeId;
    uint8_t     physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
    uint32_t    seqNum;

    relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
    if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
    {
        REPORT_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);
        return 0;
    }

    /* first read scheme and check that it is valid */
    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
    {
        REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
        return 0;
    }
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
    p_HcFrame->actionReg  = FmPcdKgBuildReadSchemeActionReg(physicalSchemeId);
    p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);

    err = EnQFrm(p_FmHc, &fmFd, seqNum);
    if (err != E_OK)
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        REPORT_ERROR(MINOR, err, NO_MSG);
        return 0;
    }

    if (!FmPcdKgHwSchemeIsValid(p_HcFrame->hcSpecificData.schemeRegs.kgse_mode))
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        REPORT_ERROR(MAJOR, E_ALREADY_EXISTS, ("Scheme is invalid"));
        return 0;
    }

    retVal = p_HcFrame->hcSpecificData.schemeRegs.kgse_spc;
    PutBuf(p_FmHc, p_HcFrame, seqNum);

    return retVal;
}

/* Write a KeyGen scheme's packet counter (counter-only write mask). */
t_Error FmHcPcdKgSetSchemeCounter(t_Handle h_FmHc, t_Handle h_Scheme, uint32_t value)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_Error     err = E_OK;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint8_t     relativeSchemeId, physicalSchemeId;
    uint32_t    seqNum;

    physicalSchemeId = FmPcdKgGetSchemeId(h_Scheme);
    relativeSchemeId = FmPcdKgGetRelativeSchemeId(p_FmHc->h_FmPcd, physicalSchemeId);
    if ( relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES)
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG);

    /* first read scheme and check that it is valid */
    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
    p_HcFrame->actionReg  = FmPcdKgBuildWriteSchemeActionReg(physicalSchemeId, TRUE);
    p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_COUNTER;
    /* write counter */
    p_HcFrame->hcSpecificData.singleRegForWrite = value;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return err;
}

/* Write a range of classification-plan entries, one 8-entry group per HC
 * frame. (The -/+ pair below is the r351322 change being reviewed: the memcpy
 * destination is volatile-qualified and needs __DEVOLATILE.) */
t_Error FmHcPcdKgSetClsPlan(t_Handle h_FmHc, t_FmPcdKgInterModuleClsPlanSet *p_Set)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint8_t     i, idx;
    uint32_t    seqNum;
    t_Error     err = E_OK;

    ASSERT_COND(p_FmHc);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));

    for (i = p_Set->baseEntry; i < (p_Set->baseEntry+p_Set->numOfClsPlanEntries); i+=8)
    {
        memset(p_HcFrame, 0, sizeof(t_HcFrame));
        p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
        p_HcFrame->actionReg  = FmPcdKgBuildWriteClsPlanBlockActionReg((uint8_t)(i / CLS_PLAN_NUM_PER_GRP));
        p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;

        idx = (uint8_t)(i - p_Set->baseEntry);
-        memcpy(&p_HcFrame->hcSpecificData.clsPlanEntries, &p_Set->vectors[idx], CLS_PLAN_NUM_PER_GRP*sizeof(uint32_t));
+        memcpy(__DEVOLATILE(uint32_t *, &p_HcFrame->hcSpecificData.clsPlanEntries), &p_Set->vectors[idx], CLS_PLAN_NUM_PER_GRP*sizeof(uint32_t));
        p_HcFrame->commandSequence = seqNum;

        BUILD_FD(sizeof(t_HcFrame));

        if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
        {
            PutBuf(p_FmHc, p_HcFrame, seqNum);
            RETURN_ERROR(MINOR, err, NO_MSG);
        }
    }

    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return err;
}

/* Zero a cls-plan group's entries, then destroy the group bookkeeping. */
t_Error FmHcPcdKgDeleteClsPlan(t_Handle h_FmHc, uint8_t grpId)
{
    t_FmHc                          *p_FmHc = (t_FmHc*)h_FmHc;
    t_FmPcdKgInterModuleClsPlanSet  *p_ClsPlanSet;

    p_ClsPlanSet = (t_FmPcdKgInterModuleClsPlanSet *)XX_Malloc(sizeof(t_FmPcdKgInterModuleClsPlanSet));
    if (!p_ClsPlanSet)
        RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Classification plan set"));

    memset(p_ClsPlanSet, 0, sizeof(t_FmPcdKgInterModuleClsPlanSet));

    p_ClsPlanSet->baseEntry = FmPcdKgGetClsPlanGrpBase(p_FmHc->h_FmPcd, grpId);
    p_ClsPlanSet->numOfClsPlanEntries = FmPcdKgGetClsPlanGrpSize(p_FmHc->h_FmPcd, grpId);
    ASSERT_COND(p_ClsPlanSet->numOfClsPlanEntries <= FM_PCD_MAX_NUM_OF_CLS_PLANS);

    if (FmHcPcdKgSetClsPlan(p_FmHc, p_ClsPlanSet) != E_OK)
    {
        XX_Free(p_ClsPlanSet);
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);
    }

    XX_Free(p_ClsPlanSet);
    FmPcdKgDestroyClsPlanGrp(p_FmHc->h_FmPcd, grpId);
    return E_OK;
}

/* Send CAPWAP reassembly timeout parameters to the FMan controller. */
t_Error FmHcPcdCcCapwapTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcCapwapReassmTimeoutParams *p_CcCapwapReassmTimeoutParams )
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_CAPWAP_REASSM_TIMEOUT);
    memcpy(&p_HcFrame->hcSpecificData.ccCapwapReassmTimeout, p_CcCapwapReassmTimeoutParams, sizeof(t_FmPcdCcCapwapReassmTimeoutParams));
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return err;
}

/* Fill (fill==TRUE) or empty the IP-fragmentation scratch buffer pool; on
 * return numOfBuffers is updated from the hardware's extraReg reply. */
t_Error FmHcPcdCcIpFragScratchPollCmd(t_Handle h_FmHc, bool fill, t_FmPcdCcFragScratchPoolCmdParams *p_FmPcdCcFragScratchPoolCmdParams)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));

    memset(p_HcFrame, 0, sizeof(t_HcFrame));

    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_IP_FRAG_INITIALIZATION);
    p_HcFrame->actionReg = (uint32_t)(((fill == TRUE) ? 0 : 1) << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_CMD_SHIFT);
    p_HcFrame->actionReg |= p_FmPcdCcFragScratchPoolCmdParams->bufferPoolId << HC_HCOR_ACTION_REG_IP_FRAG_SCRATCH_POOL_BPID;
    if (fill == TRUE)
    {
        p_HcFrame->extraReg = p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers;
    }
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));
    if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        RETURN_ERROR(MINOR, err, NO_MSG);
    }

    p_FmPcdCcFragScratchPoolCmdParams->numOfBuffers = p_HcFrame->extraReg;
    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return E_OK;
}

/* Activate/deactivate IP reassembly timeout; hardware result nibble is
 * returned through *p_Result. */
t_Error FmHcPcdCcTimeoutReassm(t_Handle h_FmHc, t_FmPcdCcReassmTimeoutParams *p_CcReassmTimeoutParams, uint8_t *p_Result)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC_REASSM_TIMEOUT);
    p_HcFrame->actionReg = (uint32_t)((p_CcReassmTimeoutParams->activate ? 0 : 1) << HC_HCOR_ACTION_REG_REASSM_TIMEOUT_ACTIVE_SHIFT);
    p_HcFrame->extraReg = (p_CcReassmTimeoutParams->tsbs << HC_HCOR_EXTRA_REG_REASSM_TIMEOUT_TSBS_SHIFT) | p_CcReassmTimeoutParams->iprcpt;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));
    if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        RETURN_ERROR(MINOR, err, NO_MSG);
    }

    *p_Result = (uint8_t)((p_HcFrame->actionReg >> HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_SHIFT) & HC_HCOR_ACTION_REG_REASSM_TIMEOUT_RES_MASK);
    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return E_OK;
}

/* Patch a policer profile's green/yellow/red NIA registers (one
 * read-modify-write HC sequence per color) for the enqueue-without-DMA
 * workaround. Profile lock is held by the caller. */
t_Error FmHcPcdPlcrCcGetSetParams(t_Handle h_FmHc,uint16_t absoluteProfileId, uint32_t requiredAction)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err;
    uint32_t    tmpReg32 = 0;
    uint32_t    requiredActionTmp, requiredActionFlag;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);

    /* Profile is locked by calling routine */
    /* WARNING - this lock will not be efficient if other HC routine will attempt to change
     * "fmpl_pegnia" "fmpl_peynia" or "fmpl_pernia" without locking Profile !
     */

    requiredActionTmp = FmPcdPlcrGetRequiredAction(p_FmHc->h_FmPcd, absoluteProfileId);
    requiredActionFlag = FmPcdPlcrGetRequiredActionFlag(p_FmHc->h_FmPcd, absoluteProfileId);

    if (!requiredActionFlag || !(requiredActionTmp & requiredAction))
    {
        if (requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA)
        {
            p_HcFrame = GetBuf(p_FmHc, &seqNum);
            if (!p_HcFrame)
                RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
            /* first read scheme and check that it is valid */
            memset(p_HcFrame, 0, sizeof(t_HcFrame));
            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
            p_HcFrame->actionReg  = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
            p_HcFrame->extraReg = 0x00008000;
            p_HcFrame->commandSequence = seqNum;

            BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);

            if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MINOR, err, NO_MSG);
            }

            /* green NIA */
            tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegnia;
            if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MAJOR, E_INVALID_STATE,
                             ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
            }

            tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;

            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
            p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
            p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(TRUE, FALSE, FALSE);
            p_HcFrame->extraReg = 0x00008000;
            p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;

            BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);

            if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MINOR, err, NO_MSG);
            }

            /* yellow NIA */
            tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_peynia;
            if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MAJOR, E_INVALID_STATE,
                             ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
            }

            tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;

            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
            p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
            p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, TRUE, FALSE);
            p_HcFrame->extraReg = 0x00008000;
            p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;

            BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);

            if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MINOR, err, NO_MSG);
            }

            /* red NIA */
            tmpReg32 = p_HcFrame->hcSpecificData.profileRegs.fmpl_pernia;
            if (!(tmpReg32 & (NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME)))
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MAJOR, E_INVALID_STATE,
                             ("Next engine of this policer profile has to be assigned to FM_PCD_DONE"));
            }

            tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA;

            p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
            p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
            p_HcFrame->actionReg |= FmPcdPlcrBuildNiaProfileReg(FALSE, FALSE, TRUE);
            p_HcFrame->extraReg = 0x00008000;
            p_HcFrame->hcSpecificData.singleRegForWrite = tmpReg32;

            BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);

            if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
            {
                PutBuf(p_FmHc, p_HcFrame, seqNum);
                RETURN_ERROR(MINOR, err, NO_MSG);
            }

            PutBuf(p_FmHc, p_HcFrame, seqNum);
        }
    }

    return E_OK;
}

/* Write a full policer profile register image via an HC frame. */
t_Error FmHcPcdPlcrSetProfile(t_Handle h_FmHc, t_Handle h_Profile, t_FmPcdPlcrProfileRegs *p_PlcrRegs)
{
    t_FmHc                  *p_FmHc = (t_FmHc*)h_FmHc;
    t_Error                 err = E_OK;
    uint16_t                profileIndx;
    t_HcFrame               *p_HcFrame;
    t_DpaaFD                fmFd;
    uint32_t                seqNum;

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));

    profileIndx = FmPcdPlcrProfileGetAbsoluteId(h_Profile);

    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
    p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionRegs(profileIndx);
    p_HcFrame->extraReg = 0x00008000;
    memcpy(&p_HcFrame->hcSpecificData.profileRegs, p_PlcrRegs, sizeof(t_FmPcdPlcrProfileRegs));
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Invalidate a policer profile by writing a zeroed register image. */
t_Error FmHcPcdPlcrDeleteProfile(t_Handle h_FmHc, t_Handle h_Profile)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    uint16_t    absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
    t_Error     err = E_OK;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint32_t    seqNum;

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
    p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
    p_HcFrame->actionReg |= 0x00008000;
    p_HcFrame->extraReg = 0x00008000;
    memset(&p_HcFrame->hcSpecificData.profileRegs, 0, sizeof(t_FmPcdPlcrProfileRegs));
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Write one selected policer profile counter register. */
t_Error  FmHcPcdPlcrSetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter, uint32_t value)
{

    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    uint16_t    absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
    t_Error     err = E_OK;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint32_t    seqNum;

    /* first read scheme and check that it is valid */
    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
    p_HcFrame->actionReg  = FmPcdPlcrBuildWritePlcrActionReg(absoluteProfileId);
    p_HcFrame->actionReg |= FmPcdPlcrBuildCounterProfileReg(counter);
    p_HcFrame->extraReg = 0x00008000;
    p_HcFrame->hcSpecificData.singleRegForWrite = value;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(SIZE_OF_HC_FRAME_PROFILE_CNT);

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Read one policer profile counter; returns 0 on error or unknown counter
 * selector (error already reported). */
uint32_t FmHcPcdPlcrGetProfileCounter(t_Handle h_FmHc, t_Handle h_Profile, e_FmPcdPlcrProfileCounters counter)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    uint16_t    absoluteProfileId = FmPcdPlcrProfileGetAbsoluteId(h_Profile);
    t_Error     err;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    uint32_t    retVal = 0;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_VALUE(h_FmHc, E_INVALID_HANDLE,0);

    /* first read scheme and check that it is valid */
    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
    {
        REPORT_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
        return 0;
    }
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_PLCR_PRFL);
    p_HcFrame->actionReg  = FmPcdPlcrBuildReadPlcrActionReg(absoluteProfileId);
    p_HcFrame->extraReg = 0x00008000;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);

    err = EnQFrm(p_FmHc, &fmFd, seqNum);
    if (err != E_OK)
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        REPORT_ERROR(MINOR, err, NO_MSG);
        return 0;
    }

    switch (counter)
    {
        case e_FM_PCD_PLCR_PROFILE_GREEN_PACKET_TOTAL_COUNTER:
            retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_pegpc;
            break;
        case e_FM_PCD_PLCR_PROFILE_YELLOW_PACKET_TOTAL_COUNTER:
            retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_peypc;
            break;
        case e_FM_PCD_PLCR_PROFILE_RED_PACKET_TOTAL_COUNTER:
            retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perpc;
            break;
        case e_FM_PCD_PLCR_PROFILE_RECOLOURED_YELLOW_PACKET_TOTAL_COUNTER:
            retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perypc;
            break;
        case e_FM_PCD_PLCR_PROFILE_RECOLOURED_RED_PACKET_TOTAL_COUNTER:
            retVal = p_HcFrame->hcSpecificData.profileRegs.fmpl_perrpc;
            break;
        default:
            REPORT_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG);
    }

    PutBuf(p_FmHc, p_HcFrame, seqNum);
    return retVal;
}

/* Read-modify-write a port's scheme-bind (SP) register: set bits when
 * `add`, clear them otherwise. */
t_Error FmHcKgWriteSp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t spReg, bool add)
{
    t_FmHc                  *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame               *p_HcFrame;
    t_DpaaFD                fmFd;
    t_Error                 err = E_OK;
    uint32_t                seqNum;

    ASSERT_COND(p_FmHc);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    /* first read SP register */
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
    p_HcFrame->actionReg  = FmPcdKgBuildReadPortSchemeBindActionReg(hardwarePortId);
    p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(SIZE_OF_HC_FRAME_PORT_REGS);

    if ((err = EnQFrm(p_FmHc, &fmFd, seqNum)) != E_OK)
    {
        PutBuf(p_FmHc, p_HcFrame, seqNum);
        RETURN_ERROR(MINOR, err, NO_MSG);
    }

    /* spReg is the first reg, so we can use it both for read and for write */
    if (add)
        p_HcFrame->hcSpecificData.portRegsForRead.spReg |= spReg;
    else
        p_HcFrame->hcSpecificData.portRegsForRead.spReg &= ~spReg;

    p_HcFrame->actionReg  = FmPcdKgBuildWritePortSchemeBindActionReg(hardwarePortId);

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Write a port's cls-plan-bind (CPP) register. */
t_Error FmHcKgWriteCpp(t_Handle h_FmHc, uint8_t hardwarePortId, uint32_t cppReg)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err = E_OK;
    uint32_t    seqNum;

    ASSERT_COND(p_FmHc);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    /* first read SP register */
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_KG_SCM);
    p_HcFrame->actionReg  = FmPcdKgBuildWritePortClsPlanBindActionReg(hardwarePortId);
    p_HcFrame->extraReg = HC_HCOR_KG_SCHEME_REGS_MASK;
    p_HcFrame->hcSpecificData.singleRegForWrite = cppReg;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Atomically swap a CC action-descriptor pointer in the FMan (old offset in
 * extraReg, new offset + change-command bits in actionReg). */
t_Error FmHcPcdCcDoDynamicChange(t_Handle h_FmHc, uint32_t oldAdAddrOffset, uint32_t newAdAddrOffset)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err = E_OK;
    uint32_t    seqNum;

    SANITY_CHECK_RETURN_ERROR(p_FmHc, E_INVALID_HANDLE);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));

    memset(p_HcFrame, 0, sizeof(t_HcFrame));

    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_CC);
    p_HcFrame->actionReg = newAdAddrOffset;
    p_HcFrame->actionReg |= 0xc0000000;
    p_HcFrame->extraReg = oldAdAddrOffset;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(SIZE_OF_HC_FRAME_READ_OR_CC_DYNAMIC);

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    return E_OK;
}

/* Issue a SYNC command: completes only after all previously enqueued HC
 * commands have been executed by the FMan. */
t_Error FmHcPcdSync(t_Handle h_FmHc)
{
    t_FmHc      *p_FmHc = (t_FmHc*)h_FmHc;
    t_HcFrame   *p_HcFrame;
    t_DpaaFD    fmFd;
    t_Error     err = E_OK;
    uint32_t    seqNum;

    ASSERT_COND(p_FmHc);

    p_HcFrame = GetBuf(p_FmHc, &seqNum);
    if (!p_HcFrame)
        RETURN_ERROR(MINOR, E_NO_MEMORY, ("HC Frame object"));
    memset(p_HcFrame, 0, sizeof(t_HcFrame));
    /* first read SP register */
    p_HcFrame->opcode = (uint32_t)(HC_HCOR_GBL | HC_HCOR_OPCODE_SYNC);
    p_HcFrame->actionReg = 0;
    p_HcFrame->extraReg = 0;
    p_HcFrame->commandSequence = seqNum;

    BUILD_FD(sizeof(t_HcFrame));

    err = EnQFrm(p_FmHc, &fmFd, seqNum);

    PutBuf(p_FmHc, p_HcFrame, seqNum);

    if (err != E_OK)
        RETURN_ERROR(MINOR, err, NO_MSG);

    return E_OK;
}

/* Accessor: the OH host-command port handle owned by this HC object. */
t_Handle    FmHcGetPort(t_Handle h_FmHc)
{
    t_FmHc *p_FmHc = (t_FmHc*)h_FmHc;
    return p_FmHc->h_HcPortDev;
}
Index: head/sys/contrib/ncsw/Peripherals/FM/Pcd/fm_cc.c
===================================================================
--- head/sys/contrib/ncsw/Peripherals/FM/Pcd/fm_cc.c	(revision 351321)
+++ head/sys/contrib/ncsw/Peripherals/FM/Pcd/fm_cc.c	(revision 351322)
@@ -1,7551 +1,7551 @@
/*
 * Copyright 2008-2012 Freescale Semiconductor Inc.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /****************************************************************************** @File fm_cc.c @Description FM Coarse Classifier implementation *//***************************************************************************/ #include #include #include "std_ext.h" #include "error_ext.h" #include "string_ext.h" #include "debug_ext.h" #include "fm_pcd_ext.h" #include "fm_muram_ext.h" #include "fm_common.h" #include "fm_pcd.h" #include "fm_hc.h" #include "fm_cc.h" #include "crc64.h" /****************************************/ /* static functions */ /****************************************/ static t_Error CcRootTryLock(t_Handle h_FmPcdCcTree) { t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; ASSERT_COND(h_FmPcdCcTree); if (FmPcdLockTryLock(p_FmPcdCcTree->p_Lock)) return E_OK; return ERROR_CODE(E_BUSY); } static void CcRootReleaseLock(t_Handle h_FmPcdCcTree) { t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree; ASSERT_COND(h_FmPcdCcTree); FmPcdLockUnlock(p_FmPcdCcTree->p_Lock); } static void UpdateNodeOwner(t_FmPcdCcNode *p_CcNode, bool add) { uint32_t intFlags; ASSERT_COND(p_CcNode); intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); if (add) p_CcNode->owners++; else { ASSERT_COND(p_CcNode->owners); p_CcNode->owners--; } XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); } static __inline__ t_FmPcdStatsObj* DequeueStatsObj(t_List *p_List) { t_FmPcdStatsObj *p_StatsObj = NULL; t_List *p_Next; if (!NCSW_LIST_IsEmpty(p_List)) { p_Next = NCSW_LIST_FIRST(p_List); p_StatsObj = NCSW_LIST_OBJECT(p_Next, t_FmPcdStatsObj, node); ASSERT_COND(p_StatsObj); NCSW_LIST_DelAndInit(p_Next); } return p_StatsObj; } static __inline__ void EnqueueStatsObj(t_List *p_List, t_FmPcdStatsObj *p_StatsObj) { NCSW_LIST_AddToTail(&p_StatsObj->node, p_List); } static void FreeStatObjects(t_List *p_List, t_Handle h_FmMuram) { t_FmPcdStatsObj *p_StatsObj; while (!NCSW_LIST_IsEmpty(p_List)) { p_StatsObj = DequeueStatsObj(p_List); ASSERT_COND(p_StatsObj); 
FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters); XX_Free(p_StatsObj); } } static t_FmPcdStatsObj* GetStatsObj(t_FmPcdCcNode *p_CcNode) { t_FmPcdStatsObj* p_StatsObj; t_Handle h_FmMuram; ASSERT_COND(p_CcNode); /* If 'maxNumOfKeys' was passed, all statistics object were preallocated upon node initialization */ if (p_CcNode->maxNumOfKeys) { p_StatsObj = DequeueStatsObj(&p_CcNode->availableStatsLst); } else { h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram; ASSERT_COND(h_FmMuram); p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj)); if (!p_StatsObj) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("statistics object")); return NULL; } p_StatsObj->h_StatsAd = (t_Handle)FM_MURAM_AllocMem( h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); if (!p_StatsObj->h_StatsAd) { XX_Free(p_StatsObj); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs")); return NULL; } MemSet8(p_StatsObj->h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); p_StatsObj->h_StatsCounters = (t_Handle)FM_MURAM_AllocMem( h_FmMuram, p_CcNode->countersArraySize, FM_PCD_CC_AD_TABLE_ALIGN); if (!p_StatsObj->h_StatsCounters) { FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); XX_Free(p_StatsObj); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics counters")); return NULL; } MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); } return p_StatsObj; } static void PutStatsObj(t_FmPcdCcNode *p_CcNode, t_FmPcdStatsObj *p_StatsObj) { t_Handle h_FmMuram; ASSERT_COND(p_CcNode); ASSERT_COND(p_StatsObj); /* If 'maxNumOfKeys' was passed, all statistics object were preallocated upon node initialization and now will be enqueued back to the list */ if (p_CcNode->maxNumOfKeys) { /* Nullify counters */ MemSet8(p_StatsObj->h_StatsCounters, 0, p_CcNode->countersArraySize); EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj); } else { h_FmMuram = ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram; ASSERT_COND(h_FmMuram); 
FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsAd); FM_MURAM_FreeMem(h_FmMuram, p_StatsObj->h_StatsCounters); XX_Free(p_StatsObj); } } static void SetStatsCounters(t_AdOfTypeStats *p_StatsAd, uint32_t statsCountersAddr) { uint32_t tmp = (statsCountersAddr & FM_PCD_AD_STATS_COUNTERS_ADDR_MASK); WRITE_UINT32(p_StatsAd->statsTableAddr, tmp); } static void UpdateStatsAd(t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, t_Handle h_Ad, uint64_t physicalMuramBase) { t_AdOfTypeStats *p_StatsAd; uint32_t statsCountersAddr, nextActionAddr, tmp; #if (DPAA_VERSION >= 11) uint32_t frameLengthRangesAddr; #endif /* (DPAA_VERSION >= 11) */ p_StatsAd = (t_AdOfTypeStats *)p_FmPcdCcStatsParams->h_StatsAd; tmp = FM_PCD_AD_STATS_TYPE; #if (DPAA_VERSION >= 11) if (p_FmPcdCcStatsParams->h_StatsFLRs) { frameLengthRangesAddr = (uint32_t)((XX_VirtToPhys( p_FmPcdCcStatsParams->h_StatsFLRs) - physicalMuramBase)); tmp |= (frameLengthRangesAddr & FM_PCD_AD_STATS_FLR_ADDR_MASK); } #endif /* (DPAA_VERSION >= 11) */ WRITE_UINT32(p_StatsAd->profileTableAddr, tmp); nextActionAddr = (uint32_t)((XX_VirtToPhys(h_Ad) - physicalMuramBase)); tmp = 0; tmp |= (uint32_t)((nextActionAddr << FM_PCD_AD_STATS_NEXT_ACTION_SHIFT) & FM_PCD_AD_STATS_NEXT_ACTION_MASK); tmp |= (FM_PCD_AD_STATS_NAD_EN | FM_PCD_AD_STATS_OP_CODE); #if (DPAA_VERSION >= 11) if (p_FmPcdCcStatsParams->h_StatsFLRs) tmp |= FM_PCD_AD_STATS_FLR_EN; #endif /* (DPAA_VERSION >= 11) */ WRITE_UINT32(p_StatsAd->nextActionIndx, tmp); statsCountersAddr = (uint32_t)((XX_VirtToPhys( p_FmPcdCcStatsParams->h_StatsCounters) - physicalMuramBase)); SetStatsCounters(p_StatsAd, statsCountersAddr); } static void FillAdOfTypeContLookup(t_Handle h_Ad, t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, t_Handle h_FmPcd, t_Handle p_CcNode, t_Handle h_Manip, t_Handle h_FrmReplic) { t_FmPcdCcNode *p_Node = (t_FmPcdCcNode *)p_CcNode; t_AdOfTypeContLookup *p_AdContLookup = (t_AdOfTypeContLookup *)h_Ad; t_Handle h_TmpAd; t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd; uint32_t tmpReg32; 
t_Handle p_AdNewPtr = NULL; UNUSED(h_Manip); UNUSED(h_FrmReplic); /* there are 3 cases handled in this routine of building a "Continue lookup" type AD. * Case 1: No Manip. The action descriptor is built within the match table. * p_AdResult = p_AdNewPtr; * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized * either in the FmPcdManipUpdateAdResultForCc routine or it was already * initialized and returned here. * p_AdResult (within the match table) will be initialized after * this routine returns and point to the existing AD. * Case 3: Manip exists. The action descriptor is built within the match table. * FmPcdManipUpdateAdContLookupForCc returns a NULL p_AdNewPtr. */ /* As default, the "new" ptr is the current one. i.e. the content of the result * AD will be written into the match table itself (case (1))*/ p_AdNewPtr = p_AdContLookup; /* Initialize an action descriptor, if current statistics mode requires an Ad */ if (p_FmPcdCcStatsParams) { ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd); ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters); /* Swapping addresses between statistics Ad and the current lookup AD */ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd; p_FmPcdCcStatsParams->h_StatsAd = h_Ad; h_Ad = h_TmpAd; p_AdNewPtr = h_Ad; p_AdContLookup = h_Ad; /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase); } #if DPAA_VERSION >= 11 if (h_Manip && h_FrmReplic) FmPcdManipUpdateAdContLookupForCc( h_Manip, h_Ad, &p_AdNewPtr, (uint32_t)((XX_VirtToPhys( FrmReplicGroupGetSourceTableDescriptor(h_FrmReplic)) - p_FmPcd->physicalMuramBase))); else if (h_FrmReplic) FrmReplicGroupUpdateAd(h_FrmReplic, h_Ad, &p_AdNewPtr); else #endif /* (DPAA_VERSION >= 11) */ if (h_Manip) FmPcdManipUpdateAdContLookupForCc( h_Manip, h_Ad, &p_AdNewPtr, #ifdef FM_CAPWAP_SUPPORT /*no check for opcode of manip - this step can be reached only with capwap_applic_specific*/ 
(uint32_t)((XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase)) #else /* not FM_CAPWAP_SUPPORT */ (uint32_t)((XX_VirtToPhys(p_Node->h_Ad) - p_FmPcd->physicalMuramBase)) #endif /* not FM_CAPWAP_SUPPORT */ ); /* if (p_AdNewPtr = NULL) --> Done. (case (3)) */ if (p_AdNewPtr) { /* cases (1) & (2) */ tmpReg32 = 0; tmpReg32 |= FM_PCD_AD_CONT_LOOKUP_TYPE; tmpReg32 |= p_Node->sizeOfExtraction ? ((p_Node->sizeOfExtraction - 1) << 24) : 0; tmpReg32 |= (uint32_t)(XX_VirtToPhys(p_Node->h_AdTable) - p_FmPcd->physicalMuramBase); WRITE_UINT32(p_AdContLookup->ccAdBase, tmpReg32); tmpReg32 = 0; tmpReg32 |= p_Node->numOfKeys << 24; tmpReg32 |= (p_Node->lclMask ? FM_PCD_AD_CONT_LOOKUP_LCL_MASK : 0); tmpReg32 |= p_Node->h_KeysMatchTable ? (uint32_t)(XX_VirtToPhys( p_Node->h_KeysMatchTable) - p_FmPcd->physicalMuramBase) : 0; WRITE_UINT32(p_AdContLookup->matchTblPtr, tmpReg32); tmpReg32 = 0; tmpReg32 |= p_Node->prsArrayOffset << 24; tmpReg32 |= p_Node->offset << 16; tmpReg32 |= p_Node->parseCode; WRITE_UINT32(p_AdContLookup->pcAndOffsets, tmpReg32); MemCpy8((void*)&p_AdContLookup->gmask, p_Node->p_GlblMask, CC_GLBL_MASK_SIZE); } } static t_Error AllocAndFillAdForContLookupManip(t_Handle h_CcNode) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint32_t intFlags; ASSERT_COND(p_CcNode); intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); if (!p_CcNode->h_Ad) { if (p_CcNode->maxNumOfKeys) p_CcNode->h_Ad = p_CcNode->h_TmpAd; else p_CcNode->h_Ad = (t_Handle)FM_MURAM_AllocMem( ((t_FmPcd *)(p_CcNode->h_FmPcd))->h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); if (!p_CcNode->h_Ad) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); MemSet8(p_CcNode->h_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE); FillAdOfTypeContLookup(p_CcNode->h_Ad, NULL, p_CcNode->h_FmPcd, p_CcNode, NULL, NULL); } else XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); return E_OK; } static t_Error 
SetRequiredAction1( t_Handle h_FmPcd, uint32_t requiredAction, t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp, t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree) { t_AdOfTypeResult *p_AdTmp = (t_AdOfTypeResult *)h_AdTmp; uint32_t tmpReg32; t_Error err; t_FmPcdCcNode *p_CcNode; int i = 0; uint16_t tmp = 0; uint16_t profileId; uint8_t relativeSchemeId, physicalSchemeId; t_CcNodeInformation ccNodeInfo; for (i = 0; i < numOfEntries; i++) { if (i == 0) h_AdTmp = PTR_MOVE(h_AdTmp, i*FM_PCD_CC_AD_ENTRY_SIZE); else h_AdTmp = PTR_MOVE(h_AdTmp, FM_PCD_CC_AD_ENTRY_SIZE); switch (p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.nextEngine) { case (e_FM_PCD_CC): if (requiredAction) { p_CcNode = p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.ccParams.h_CcNode; ASSERT_COND(p_CcNode); if (p_CcNode->shadowAction == requiredAction) break; if ((requiredAction & UPDATE_CC_WITH_TREE) && !(p_CcNode->shadowAction & UPDATE_CC_WITH_TREE)) { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = h_Tree; EnqueueNodeInfoToRelevantLst(&p_CcNode->ccTreesLst, &ccNodeInfo, NULL); p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_TREE; } if ((requiredAction & UPDATE_CC_SHADOW_CLEAR) && !(p_CcNode->shadowAction & UPDATE_CC_SHADOW_CLEAR)) { p_CcNode->shadowAction = 0; } if ((requiredAction & UPDATE_CC_WITH_DELETE_TREE) && !(p_CcNode->shadowAction & UPDATE_CC_WITH_DELETE_TREE)) { DequeueNodeInfoFromRelevantLst(&p_CcNode->ccTreesLst, h_Tree, NULL); p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= UPDATE_CC_WITH_DELETE_TREE; } if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) tmp = (uint8_t)(p_CcNode->numOfKeys + 1); else tmp = p_CcNode->numOfKeys; err = SetRequiredAction1(h_FmPcd, requiredAction, p_CcNode->keyAndNextEngineParams, p_CcNode->h_AdTable, tmp, h_Tree); if (err != E_OK) return err; if (requiredAction != UPDATE_CC_SHADOW_CLEAR) p_CcNode->shadowAction |= requiredAction; 
} break; case (e_FM_PCD_KG): if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) { physicalSchemeId = FmPcdKgGetSchemeId( p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme); relativeSchemeId = FmPcdKgGetRelativeSchemeId( h_FmPcd, physicalSchemeId); if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, NO_MSG); if (!FmPcdKgIsSchemeValidSw( p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid direct scheme.")); if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("For this action scheme has to be direct.")); err = FmPcdKgCcGetSetParams( h_FmPcd, p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.kgParams.h_DirectScheme, requiredAction, 0); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; } break; case (e_FM_PCD_PLCR): if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) { if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.overrideParams) RETURN_ERROR( MAJOR, E_NOT_SUPPORTED, ("In this initialization only overrideFqid can be initialized")); if (!p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.sharedProfile) RETURN_ERROR( MAJOR, E_NOT_SUPPORTED, ("In this initialization only overrideFqid can be initialized")); err = FmPcdPlcrGetAbsoluteIdByProfileParams( h_FmPcd, e_FM_PCD_PLCR_SHARED, NULL, p_CcKeyAndNextEngineParamsTmp[i].nextEngineParams.params.plcrParams.newRelativeProfileId, &profileId); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); err = FmPcdPlcrCcGetSetParams(h_FmPcd, profileId, requiredAction); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; 
} break; case (e_FM_PCD_DONE): if ((requiredAction & UPDATE_NIA_ENQ_WITHOUT_DMA) && !(p_CcKeyAndNextEngineParamsTmp[i].shadowAction & UPDATE_NIA_ENQ_WITHOUT_DMA)) { tmpReg32 = GET_UINT32(p_AdTmp->nia); if ((tmpReg32 & GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd)) != GET_NIA_BMI_AC_ENQ_FRAME(h_FmPcd)) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("Next engine was previously assigned not as PCD_DONE")); tmpReg32 |= NIA_BMI_AC_ENQ_FRAME_WITHOUT_DMA; WRITE_UINT32(p_AdTmp->nia, tmpReg32); p_CcKeyAndNextEngineParamsTmp[i].shadowAction |= requiredAction; } break; default: break; } } return E_OK; } static t_Error SetRequiredAction( t_Handle h_FmPcd, uint32_t requiredAction, t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParamsTmp, t_Handle h_AdTmp, uint16_t numOfEntries, t_Handle h_Tree) { t_Error err = SetRequiredAction1(h_FmPcd, requiredAction, p_CcKeyAndNextEngineParamsTmp, h_AdTmp, numOfEntries, h_Tree); if (err != E_OK) return err; return SetRequiredAction1(h_FmPcd, UPDATE_CC_SHADOW_CLEAR, p_CcKeyAndNextEngineParamsTmp, h_AdTmp, numOfEntries, h_Tree); } static t_Error ReleaseModifiedDataStructure( t_Handle h_FmPcd, t_List *h_FmPcdOldPointersLst, t_List *h_FmPcdNewPointersLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, bool useShadowStructs) { t_List *p_Pos; t_Error err = E_OK; t_CcNodeInformation ccNodeInfo, *p_CcNodeInformation; t_Handle h_Muram; t_FmPcdCcNode *p_FmPcdCcNextNode, *p_FmPcdCcWorkingOnNode; t_List *p_UpdateLst; uint32_t intFlags; SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_AdditionalParams->h_CurrentNode, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(h_FmPcdOldPointersLst, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(h_FmPcdNewPointersLst, E_INVALID_HANDLE); /* We don't update subtree of the new node with new tree because it was done in the previous stage */ if (p_AdditionalParams->h_NodeForAdd) { p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForAdd; if (!p_AdditionalParams->tree) p_UpdateLst = 
&p_FmPcdCcNextNode->ccPrevNodesLst; else p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; p_CcNodeInformation = FindNodeInfoInReleventLst( p_UpdateLst, p_AdditionalParams->h_CurrentNode, p_FmPcdCcNextNode->h_Spinlock); if (p_CcNodeInformation) p_CcNodeInformation->index++; else { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode; ccNodeInfo.index = 1; EnqueueNodeInfoToRelevantLst(p_UpdateLst, &ccNodeInfo, p_FmPcdCcNextNode->h_Spinlock); } if (p_AdditionalParams->h_ManipForAdd) { p_CcNodeInformation = FindNodeInfoInReleventLst( FmPcdManipGetNodeLstPointedOnThisManip( p_AdditionalParams->h_ManipForAdd), p_AdditionalParams->h_CurrentNode, FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForAdd)); if (p_CcNodeInformation) p_CcNodeInformation->index++; else { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = (t_Handle)p_AdditionalParams->h_CurrentNode; ccNodeInfo.index = 1; EnqueueNodeInfoToRelevantLst( FmPcdManipGetNodeLstPointedOnThisManip( p_AdditionalParams->h_ManipForAdd), &ccNodeInfo, FmPcdManipGetSpinlock( p_AdditionalParams->h_ManipForAdd)); } } } if (p_AdditionalParams->h_NodeForRmv) { p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_AdditionalParams->h_NodeForRmv; if (!p_AdditionalParams->tree) { p_UpdateLst = &p_FmPcdCcNextNode->ccPrevNodesLst; p_FmPcdCcWorkingOnNode = (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode); for (p_Pos = NCSW_LIST_FIRST(&p_FmPcdCcWorkingOnNode->ccTreesLst); p_Pos != (&p_FmPcdCcWorkingOnNode->ccTreesLst); p_Pos = NCSW_LIST_NEXT(p_Pos)) { p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); ASSERT_COND(p_CcNodeInformation->h_CcNode); err = SetRequiredAction( h_FmPcd, UPDATE_CC_WITH_DELETE_TREE, &((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex], PTR_MOVE(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable, p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE), 1, 
p_CcNodeInformation->h_CcNode); } } else { p_UpdateLst = &p_FmPcdCcNextNode->ccTreeIdLst; err = SetRequiredAction( h_FmPcd, UPDATE_CC_WITH_DELETE_TREE, &((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams[p_AdditionalParams->savedKeyIndex], UINT_TO_PTR(((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->ccTreeBaseAddr + p_AdditionalParams->savedKeyIndex*FM_PCD_CC_AD_ENTRY_SIZE), 1, p_AdditionalParams->h_CurrentNode); } if (err) return err; /* We remove from the subtree of the removed node tree because it wasn't done in the previous stage Update ccPrevNodesLst or ccTreeIdLst of the removed node Update of the node owner */ p_CcNodeInformation = FindNodeInfoInReleventLst( p_UpdateLst, p_AdditionalParams->h_CurrentNode, p_FmPcdCcNextNode->h_Spinlock); ASSERT_COND(p_CcNodeInformation); ASSERT_COND(p_CcNodeInformation->index); p_CcNodeInformation->index--; if (p_CcNodeInformation->index == 0) DequeueNodeInfoFromRelevantLst(p_UpdateLst, p_AdditionalParams->h_CurrentNode, p_FmPcdCcNextNode->h_Spinlock); UpdateNodeOwner(p_FmPcdCcNextNode, FALSE); if (p_AdditionalParams->h_ManipForRmv) { p_CcNodeInformation = FindNodeInfoInReleventLst( FmPcdManipGetNodeLstPointedOnThisManip( p_AdditionalParams->h_ManipForRmv), p_AdditionalParams->h_CurrentNode, FmPcdManipGetSpinlock(p_AdditionalParams->h_ManipForRmv)); ASSERT_COND(p_CcNodeInformation); ASSERT_COND(p_CcNodeInformation->index); p_CcNodeInformation->index--; if (p_CcNodeInformation->index == 0) DequeueNodeInfoFromRelevantLst( FmPcdManipGetNodeLstPointedOnThisManip( p_AdditionalParams->h_ManipForRmv), p_AdditionalParams->h_CurrentNode, FmPcdManipGetSpinlock( p_AdditionalParams->h_ManipForRmv)); } } if (p_AdditionalParams->h_ManipForRmv) FmPcdManipUpdateOwner(p_AdditionalParams->h_ManipForRmv, FALSE); if (p_AdditionalParams->p_StatsObjForRmv) PutStatsObj((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode), p_AdditionalParams->p_StatsObjForRmv); #if (DPAA_VERSION >= 11) if 
(p_AdditionalParams->h_FrmReplicForRmv) FrmReplicGroupUpdateOwner(p_AdditionalParams->h_FrmReplicForRmv, FALSE/* remove */); #endif /* (DPAA_VERSION >= 11) */ if (!useShadowStructs) { h_Muram = FmPcdGetMuramHandle(h_FmPcd); ASSERT_COND(h_Muram); if ((p_AdditionalParams->tree && !((t_FmPcd *)h_FmPcd)->p_CcShadow) || (!p_AdditionalParams->tree && !((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->maxNumOfKeys)) { /* We release new AD which was allocated and updated for copy from to actual AD */ for (p_Pos = NCSW_LIST_FIRST(h_FmPcdNewPointersLst); p_Pos != (h_FmPcdNewPointersLst); p_Pos = NCSW_LIST_NEXT(p_Pos)) { p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos); ASSERT_COND(p_CcNodeInformation->h_CcNode); FM_MURAM_FreeMem(h_Muram, p_CcNodeInformation->h_CcNode); } } /* Free Old data structure if it has to be freed - new data structure was allocated*/ if (p_AdditionalParams->p_AdTableOld) FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_AdTableOld); if (p_AdditionalParams->p_KeysMatchTableOld) FM_MURAM_FreeMem(h_Muram, p_AdditionalParams->p_KeysMatchTableOld); } /* Update current modified node with changed fields if it's required*/ if (!p_AdditionalParams->tree) { if (p_AdditionalParams->p_AdTableNew) ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_AdTable = p_AdditionalParams->p_AdTableNew; if (p_AdditionalParams->p_KeysMatchTableNew) ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_KeysMatchTable = p_AdditionalParams->p_KeysMatchTableNew; /* Locking node's spinlock before updating 'keys and next engine' structure, as it maybe used to retrieve keys statistics */ intFlags = XX_LockIntrSpinlock( ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock); ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->numOfKeys = p_AdditionalParams->numOfKeys; memcpy(((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams, &p_AdditionalParams->keyAndNextEngineParams, sizeof(t_FmPcdCcKeyAndNextEngineParams) * 
(CC_MAX_NUM_OF_KEYS)); XX_UnlockIntrSpinlock( ((t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode))->h_Spinlock, intFlags); } else { uint8_t numEntries = ((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->numOfEntries; ASSERT_COND(numEntries < FM_PCD_MAX_NUM_OF_CC_GROUPS); memcpy(&((t_FmPcdCcTree *)(p_AdditionalParams->h_CurrentNode))->keyAndNextEngineParams, &p_AdditionalParams->keyAndNextEngineParams, sizeof(t_FmPcdCcKeyAndNextEngineParams) * numEntries); } ReleaseLst(h_FmPcdOldPointersLst); ReleaseLst(h_FmPcdNewPointersLst); XX_Free(p_AdditionalParams); return E_OK; } static t_Handle BuildNewAd( t_Handle h_Ad, t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, t_FmPcdCcNode *p_CcNode, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_FmPcdCcNodeTmp; t_Handle h_OrigAd = NULL; p_FmPcdCcNodeTmp = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); if (!p_FmPcdCcNodeTmp) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_FmPcdCcNodeTmp")); return NULL; } memset(p_FmPcdCcNodeTmp, 0, sizeof(t_FmPcdCcNode)); p_FmPcdCcNodeTmp->numOfKeys = p_FmPcdModifyCcKeyAdditionalParams->numOfKeys; p_FmPcdCcNodeTmp->h_KeysMatchTable = p_FmPcdModifyCcKeyAdditionalParams->p_KeysMatchTableNew; p_FmPcdCcNodeTmp->h_AdTable = p_FmPcdModifyCcKeyAdditionalParams->p_AdTableNew; p_FmPcdCcNodeTmp->lclMask = p_CcNode->lclMask; p_FmPcdCcNodeTmp->parseCode = p_CcNode->parseCode; p_FmPcdCcNodeTmp->offset = p_CcNode->offset; p_FmPcdCcNodeTmp->prsArrayOffset = p_CcNode->prsArrayOffset; p_FmPcdCcNodeTmp->ctrlFlow = p_CcNode->ctrlFlow; p_FmPcdCcNodeTmp->ccKeySizeAccExtraction = p_CcNode->ccKeySizeAccExtraction; p_FmPcdCcNodeTmp->sizeOfExtraction = p_CcNode->sizeOfExtraction; p_FmPcdCcNodeTmp->glblMaskSize = p_CcNode->glblMaskSize; p_FmPcdCcNodeTmp->p_GlblMask = p_CcNode->p_GlblMask; if (p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_CC) { if (p_FmPcdCcNextEngineParams->h_Manip) { h_OrigAd = p_CcNode->h_Ad; if (AllocAndFillAdForContLookupManip( 
p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode) != E_OK) { REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); XX_Free(p_FmPcdCcNodeTmp); return NULL; } } FillAdOfTypeContLookup(h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp, h_OrigAd ? NULL : p_FmPcdCcNextEngineParams->h_Manip, NULL); } #if (DPAA_VERSION >= 11) if ((p_FmPcdCcNextEngineParams->nextEngine == e_FM_PCD_FR) && (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)) { FillAdOfTypeContLookup( h_Ad, NULL, p_CcNode->h_FmPcd, p_FmPcdCcNodeTmp, p_FmPcdCcNextEngineParams->h_Manip, p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic); } #endif /* (DPAA_VERSION >= 11) */ XX_Free(p_FmPcdCcNodeTmp); - return E_OK; + return NULL; } static t_Error DynamicChangeHc( t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, bool useShadowStructs) { t_List *p_PosOld, *p_PosNew; uint32_t oldAdAddrOffset, newAdAddrOffset; uint16_t i = 0; t_Error err = E_OK; uint8_t numOfModifiedPtr; ASSERT_COND(h_FmPcd); ASSERT_COND(h_OldPointersLst); ASSERT_COND(h_NewPointersLst); numOfModifiedPtr = (uint8_t)NCSW_LIST_NumOfObjs(h_OldPointersLst); if (numOfModifiedPtr) { p_PosNew = NCSW_LIST_FIRST(h_NewPointersLst); p_PosOld = NCSW_LIST_FIRST(h_OldPointersLst); /* Retrieve address of new AD */ newAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd, p_PosNew); if (newAdAddrOffset == (uint32_t)ILLEGAL_BASE) { ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("New AD address")); } for (i = 0; i < numOfModifiedPtr; i++) { /* Retrieve address of current AD */ oldAdAddrOffset = FmPcdCcGetNodeAddrOffsetFromNodeInfo(h_FmPcd, p_PosOld); if (oldAdAddrOffset == (uint32_t)ILLEGAL_BASE) { ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Old AD address")); } /* Invoke 
host command to copy from new AD to old AD */ err = FmHcPcdCcDoDynamicChange(((t_FmPcd *)h_FmPcd)->h_Hc, oldAdAddrOffset, newAdAddrOffset); if (err) { ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); RETURN_ERROR( MAJOR, err, ("For part of nodes changes are done - situation is danger")); } p_PosOld = NCSW_LIST_NEXT(p_PosOld); } } return E_OK; } static t_Error DoDynamicChange( t_Handle h_FmPcd, t_List *h_OldPointersLst, t_List *h_NewPointersLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalParams, bool useShadowStructs) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)(p_AdditionalParams->h_CurrentNode); t_List *p_PosNew; t_CcNodeInformation *p_CcNodeInfo; t_FmPcdCcNextEngineParams nextEngineParams; t_Handle h_Ad; uint32_t keySize; t_Error err = E_OK; uint8_t numOfModifiedPtr; ASSERT_COND(h_FmPcd); memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams)); numOfModifiedPtr = (uint8_t)NCSW_LIST_NumOfObjs(h_OldPointersLst); if (numOfModifiedPtr) { p_PosNew = NCSW_LIST_FIRST(h_NewPointersLst); /* Invoke host-command to copy from the new Ad to existing Ads */ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); if (useShadowStructs) { /* When the host-command above has ended, the old structures are 'free'and we can update them by copying from the new shadow structures. 
*/ if (p_CcNode->lclMask) keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); else keySize = p_CcNode->ccKeySizeAccExtraction; MemCpy8(p_AdditionalParams->p_KeysMatchTableOld, p_AdditionalParams->p_KeysMatchTableNew, p_CcNode->maxNumOfKeys * keySize * sizeof(uint8_t)); MemCpy8( p_AdditionalParams->p_AdTableOld, p_AdditionalParams->p_AdTableNew, (uint32_t)((p_CcNode->maxNumOfKeys + 1) * FM_PCD_CC_AD_ENTRY_SIZE)); /* Retrieve the address of the allocated Ad */ p_CcNodeInfo = CC_NODE_F_OBJECT(p_PosNew); h_Ad = p_CcNodeInfo->h_CcNode; /* Build a new Ad that holds the old (now updated) structures */ p_AdditionalParams->p_KeysMatchTableNew = p_AdditionalParams->p_KeysMatchTableOld; p_AdditionalParams->p_AdTableNew = p_AdditionalParams->p_AdTableOld; nextEngineParams.nextEngine = e_FM_PCD_CC; nextEngineParams.params.ccParams.h_CcNode = (t_Handle)p_CcNode; BuildNewAd(h_Ad, p_AdditionalParams, p_CcNode, &nextEngineParams); /* HC to copy from the new Ad (old updated structures) to current Ad (uses shadow structures) */ err = DynamicChangeHc(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } } err = ReleaseModifiedDataStructure(h_FmPcd, h_OldPointersLst, h_NewPointersLst, p_AdditionalParams, useShadowStructs); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } #ifdef FM_CAPWAP_SUPPORT static bool IsCapwapApplSpecific(t_Handle h_Node) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_Node; bool isManipForCapwapApplSpecificBuild = FALSE; int i = 0; ASSERT_COND(h_Node); /* assumption that this function called only for INDEXED_FLOW_ID - so no miss*/ for (i = 0; i < p_CcNode->numOfKeys; i++) { if ( p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip && FmPcdManipIsCapwapApplSpecific(p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)) { isManipForCapwapApplSpecificBuild = TRUE; break; } } return isManipForCapwapApplSpecificBuild; } #endif /* FM_CAPWAP_SUPPORT */ static 
t_Error CcUpdateParam( t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort, t_FmPcdCcKeyAndNextEngineParams *p_CcKeyAndNextEngineParams, uint16_t numOfEntries, t_Handle h_Ad, bool validate, uint16_t level, t_Handle h_FmTree, bool modify) { t_FmPcdCcNode *p_CcNode; t_Error err; uint16_t tmp = 0; int i = 0; t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree; level++; if (p_CcTree->h_IpReassemblyManip) { err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort, p_CcTree->h_IpReassemblyManip, NULL, validate, level, h_FmTree, modify); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } if (p_CcTree->h_CapwapReassemblyManip) { err = FmPcdManipUpdate(h_FmPcd, h_PcdParams, h_FmPort, p_CcTree->h_CapwapReassemblyManip, NULL, validate, level, h_FmTree, modify); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } if (numOfEntries) { for (i = 0; i < numOfEntries; i++) { if (i == 0) h_Ad = PTR_MOVE(h_Ad, i*FM_PCD_CC_AD_ENTRY_SIZE); else h_Ad = PTR_MOVE(h_Ad, FM_PCD_CC_AD_ENTRY_SIZE); if (p_CcKeyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) { p_CcNode = p_CcKeyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; ASSERT_COND(p_CcNode); if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip) { err = FmPcdManipUpdate( h_FmPcd, NULL, h_FmPort, p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip, h_Ad, validate, level, h_FmTree, modify); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } if (p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine != e_FM_PCD_INVALID) tmp = (uint8_t)(p_CcNode->numOfKeys + 1); else tmp = p_CcNode->numOfKeys; err = CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort, p_CcNode->keyAndNextEngineParams, tmp, p_CcNode->h_AdTable, validate, level, h_FmTree, modify); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } else { if (p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip) { err = FmPcdManipUpdate( h_FmPcd, NULL, h_FmPort, p_CcKeyAndNextEngineParams[i].nextEngineParams.h_Manip, h_Ad, validate, level, h_FmTree, 
                            modify);
                    if (err)
                        RETURN_ERROR(MAJOR, err, NO_MSG);
                }
            }
        }
    }

    return E_OK;
}

/* Maps a non-header ("internal context") extraction spec to the private
 * info-code used internally by the CC logic. Only EXACT_MATCH (from key or
 * hash) and INDEXED_LOOKUP (from hash or flow-id) combinations are
 * recognized; every other combination yields CC_PRIVATE_INFO_NONE. */
static ccPrivateInfo_t IcDefineCode(t_FmPcdCcNodeParams *p_CcNodeParam)
{
    switch (p_CcNodeParam->extractCcParams.extractNonHdr.action)
    {
        case (e_FM_PCD_ACTION_EXACT_MATCH):
            /* every path in the inner switch returns, so no 'break' needed */
            switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
            {
                case (e_FM_PCD_EXTRACT_FROM_KEY):
                    return CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH;
                case (e_FM_PCD_EXTRACT_FROM_HASH):
                    return CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH;
                default:
                    return CC_PRIVATE_INFO_NONE;
            }
        case (e_FM_PCD_ACTION_INDEXED_LOOKUP):
            switch (p_CcNodeParam->extractCcParams.extractNonHdr.src)
            {
                case (e_FM_PCD_EXTRACT_FROM_HASH):
                    return CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP;
                case (e_FM_PCD_EXTRACT_FROM_FLOW_ID):
                    return CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP;
                default:
                    return CC_PRIVATE_INFO_NONE;
            }
        default:
            break;
    }

    return CC_PRIVATE_INFO_NONE;
}

/* Pops the first t_CcNodeInformation object off p_List (unlinking it from
 * the list) and returns it; returns NULL when the list is empty. The caller
 * owns the returned object and is expected to XX_Free() it (see
 * ReleaseLst()). */
static t_CcNodeInformation * DequeueAdditionalInfoFromRelevantLst(
        t_List *p_List)
{
    t_CcNodeInformation *p_CcNodeInfo = NULL;

    if (!NCSW_LIST_IsEmpty(p_List))
    {
        p_CcNodeInfo = CC_NODE_F_OBJECT(p_List->p_Next);
        NCSW_LIST_DelAndInit(&p_CcNodeInfo->node);
    }

    return p_CcNodeInfo;
}

/* Drains p_List, freeing every queued t_CcNodeInformation object, then
 * unlinks the list head itself. */
void ReleaseLst(t_List *p_List)
{
    t_CcNodeInformation *p_CcNodeInfo = NULL;

    if (!NCSW_LIST_IsEmpty(p_List))
    {
        p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);

        while (p_CcNodeInfo)
        {
            XX_Free(p_CcNodeInfo);
            p_CcNodeInfo = DequeueAdditionalInfoFromRelevantLst(p_List);
        }
    }

    NCSW_LIST_Del(p_List);
}

/* Tears down a CC node: releases its MURAM tables (keys-match table, AD
 * table, statistics FLRs, single AD), its global mask buffer, its spinlock,
 * its statistics objects (including the 'miss' entry) and its bookkeeping
 * lists, and finally the node structure itself. Safe to call with NULL. */
static void DeleteNode(t_FmPcdCcNode *p_CcNode)
{
    uint32_t i;

    if (!p_CcNode)
        return;

    if (p_CcNode->p_GlblMask)
    {
        XX_Free(p_CcNode->p_GlblMask);
        p_CcNode->p_GlblMask = NULL;
    }

    if (p_CcNode->h_KeysMatchTable)
    {
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
                         p_CcNode->h_KeysMatchTable);
        p_CcNode->h_KeysMatchTable = NULL;
    }

    if (p_CcNode->h_AdTable)
    {
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
                         p_CcNode->h_AdTable);
        p_CcNode->h_AdTable = NULL;
    }

    if (p_CcNode->h_Ad)
    {
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
                         p_CcNode->h_Ad);
        p_CcNode->h_Ad = NULL;
        /* NOTE(review): h_TmpAd is cleared without an explicit free here -
         * presumably it never owns a separate MURAM allocation at this
         * point; confirm against the allocation sites. */
        p_CcNode->h_TmpAd = NULL;
    }

    if (p_CcNode->h_StatsFLRs)
    {
        FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
                         p_CcNode->h_StatsFLRs);
        p_CcNode->h_StatsFLRs = NULL;
    }

    if (p_CcNode->h_Spinlock)
    {
        XX_FreeSpinlock(p_CcNode->h_Spinlock);
        p_CcNode->h_Spinlock = NULL;
    }

    /* Restore the original counters pointer instead of the mutual pointer
     (mutual to all hash buckets) */
    if (p_CcNode->isHashBucket
            && (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE))
        p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].p_StatsObj->h_StatsCounters =
                p_CcNode->h_PrivMissStatsCounters;

    /* Releasing all currently used statistics objects, including 'miss'
     entry */
    for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
        if (p_CcNode->keyAndNextEngineParams[i].p_StatsObj)
            PutStatsObj(p_CcNode,
                        p_CcNode->keyAndNextEngineParams[i].p_StatsObj);

    if (!NCSW_LIST_IsEmpty(&p_CcNode->availableStatsLst))
    {
        t_Handle h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd);
        ASSERT_COND(h_FmMuram);

        FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram);
    }

    NCSW_LIST_Del(&p_CcNode->availableStatsLst);

    ReleaseLst(&p_CcNode->ccPrevNodesLst);
    ReleaseLst(&p_CcNode->ccTreeIdLst);
    ReleaseLst(&p_CcNode->ccTreesLst);

    XX_Free(p_CcNode);
}

/* Tears down a CC tree: frees its MURAM descriptor area and its ports list,
 * then the tree structure itself. Safe to call with NULL. */
static void DeleteTree(t_FmPcdCcTree *p_FmPcdTree, t_FmPcd *p_FmPcd)
{
    if (p_FmPcdTree)
    {
        if (p_FmPcdTree->ccTreeBaseAddr)
        {
            FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd),
                             UINT_TO_PTR(p_FmPcdTree->ccTreeBaseAddr));
            p_FmPcdTree->ccTreeBaseAddr = 0;
        }

        ReleaseLst(&p_FmPcdTree->fmPortsLst);

        XX_Free(p_FmPcdTree);
    }
}

/* Rounds the real size of an extracted key up to the next size the CC
 * keys-match table works with (1, 2, 4, 8, 16, 24, 32, 40, 48 or 56 bytes -
 * presumably a hardware-dictated granularity; confirm against the FMan
 * reference manual). Writes 0 for sizes outside the supported range. */
static void GetCcExtractKeySize(uint8_t parseCodeRealSize,
                                uint8_t *parseCodeCcSize)
{
    if ((parseCodeRealSize > 0) && (parseCodeRealSize < 2))
        *parseCodeCcSize = 1;
    else if (parseCodeRealSize == 2)
        *parseCodeCcSize = 2;
    else if ((parseCodeRealSize > 2) && (parseCodeRealSize <= 4))
        *parseCodeCcSize = 4;
    else if ((parseCodeRealSize > 4) && (parseCodeRealSize <= 8))
        *parseCodeCcSize = 8;
    else if ((parseCodeRealSize > 8) && (parseCodeRealSize
<= 16)) *parseCodeCcSize = 16; else if ((parseCodeRealSize > 16) && (parseCodeRealSize <= 24)) *parseCodeCcSize = 24; else if ((parseCodeRealSize > 24) && (parseCodeRealSize <= 32)) *parseCodeCcSize = 32; else if ((parseCodeRealSize > 32) && (parseCodeRealSize <= 40)) *parseCodeCcSize = 40; else if ((parseCodeRealSize > 40) && (parseCodeRealSize <= 48)) *parseCodeCcSize = 48; else if ((parseCodeRealSize > 48) && (parseCodeRealSize <= 56)) *parseCodeCcSize = 56; else *parseCodeCcSize = 0; } static void GetSizeHeaderField(e_NetHeaderType hdr, t_FmPcdFields field, uint8_t *parseCodeRealSize) { switch (hdr) { case (HEADER_TYPE_ETH): switch (field.eth) { case (NET_HEADER_FIELD_ETH_DA): *parseCodeRealSize = 6; break; case (NET_HEADER_FIELD_ETH_SA): *parseCodeRealSize = 6; break; case (NET_HEADER_FIELD_ETH_TYPE): *parseCodeRealSize = 2; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_PPPoE): switch (field.pppoe) { case (NET_HEADER_FIELD_PPPoE_PID): *parseCodeRealSize = 2; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported1")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_VLAN): switch (field.vlan) { case (NET_HEADER_FIELD_VLAN_TCI): *parseCodeRealSize = 2; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported2")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_MPLS): switch (field.mpls) { case (NET_HEADER_FIELD_MPLS_LABEL_STACK): *parseCodeRealSize = 4; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported3")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_IPv4): switch (field.ipv4) { case (NET_HEADER_FIELD_IPv4_DST_IP): case (NET_HEADER_FIELD_IPv4_SRC_IP): *parseCodeRealSize = 4; break; case (NET_HEADER_FIELD_IPv4_TOS): case (NET_HEADER_FIELD_IPv4_PROTO): *parseCodeRealSize = 1; break; case 
(NET_HEADER_FIELD_IPv4_DST_IP | NET_HEADER_FIELD_IPv4_SRC_IP): *parseCodeRealSize = 8; break; case (NET_HEADER_FIELD_IPv4_TTL): *parseCodeRealSize = 1; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported4")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_IPv6): switch (field.ipv6) { case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): *parseCodeRealSize = 4; break; case (NET_HEADER_FIELD_IPv6_NEXT_HDR): case (NET_HEADER_FIELD_IPv6_HOP_LIMIT): *parseCodeRealSize = 1; break; case (NET_HEADER_FIELD_IPv6_DST_IP): case (NET_HEADER_FIELD_IPv6_SRC_IP): *parseCodeRealSize = 16; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_IP): switch (field.ip) { case (NET_HEADER_FIELD_IP_DSCP): case (NET_HEADER_FIELD_IP_PROTO): *parseCodeRealSize = 1; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported5")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_GRE): switch (field.gre) { case (NET_HEADER_FIELD_GRE_TYPE): *parseCodeRealSize = 2; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported6")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_MINENCAP): switch (field.minencap) { case (NET_HEADER_FIELD_MINENCAP_TYPE): *parseCodeRealSize = 1; break; case (NET_HEADER_FIELD_MINENCAP_DST_IP): case (NET_HEADER_FIELD_MINENCAP_SRC_IP): *parseCodeRealSize = 4; break; case (NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): *parseCodeRealSize = 8; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported7")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_TCP): switch (field.tcp) { case (NET_HEADER_FIELD_TCP_PORT_SRC): case (NET_HEADER_FIELD_TCP_PORT_DST): *parseCodeRealSize = 2; break; case (NET_HEADER_FIELD_TCP_PORT_SRC | 
NET_HEADER_FIELD_TCP_PORT_DST): *parseCodeRealSize = 4; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported8")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; case (HEADER_TYPE_UDP): switch (field.udp) { case (NET_HEADER_FIELD_UDP_PORT_SRC): case (NET_HEADER_FIELD_UDP_PORT_DST): *parseCodeRealSize = 2; break; case (NET_HEADER_FIELD_UDP_PORT_SRC | NET_HEADER_FIELD_UDP_PORT_DST): *parseCodeRealSize = 4; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported9")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported10")); *parseCodeRealSize = CC_SIZE_ILLEGAL; break; } } t_Error ValidateNextEngineParams( t_Handle h_FmPcd, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams, e_FmPcdCcStatsMode statsMode) { uint16_t absoluteProfileId; t_Error err = E_OK; uint8_t relativeSchemeId; if ((statsMode == e_FM_PCD_CC_STATS_MODE_NONE) && (p_FmPcdCcNextEngineParams->statisticsEn)) RETURN_ERROR( MAJOR, E_CONFLICT, ("Statistics are requested for a key, but statistics mode was set" "to 'NONE' upon initialization")); switch (p_FmPcdCcNextEngineParams->nextEngine) { case (e_FM_PCD_INVALID): err = E_NOT_SUPPORTED; break; case (e_FM_PCD_DONE): if ((p_FmPcdCcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME) && p_FmPcdCcNextEngineParams->params.enqueueParams.overrideFqid) { if (!p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid) RETURN_ERROR( MAJOR, E_CONFLICT, ("When overrideFqid is set, newFqid must not be zero")); if (p_FmPcdCcNextEngineParams->params.enqueueParams.newFqid & ~0x00FFFFFF) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("fqidForCtrlFlow must be between 1 and 2^24-1")); } break; case (e_FM_PCD_KG): relativeSchemeId = FmPcdKgGetRelativeSchemeId( h_FmPcd, FmPcdKgGetSchemeId( p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme)); if (relativeSchemeId == FM_PCD_KG_NUM_OF_SCHEMES) RETURN_ERROR(MAJOR, E_NOT_IN_RANGE, 
NO_MSG); if (!FmPcdKgIsSchemeValidSw( p_FmPcdCcNextEngineParams->params.kgParams.h_DirectScheme)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("not valid schemeIndex in KG next engine param")); if (!KgIsSchemeAlwaysDirect(h_FmPcd, relativeSchemeId)) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("CC Node may point only to a scheme that is always direct.")); break; case (e_FM_PCD_PLCR): if (p_FmPcdCcNextEngineParams->params.plcrParams.overrideParams) { /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */ if (p_FmPcdCcNextEngineParams->params.plcrParams.sharedProfile) { err = FmPcdPlcrGetAbsoluteIdByProfileParams( h_FmPcd, e_FM_PCD_PLCR_SHARED, NULL, p_FmPcdCcNextEngineParams->params.plcrParams.newRelativeProfileId, &absoluteProfileId); if (err) RETURN_ERROR(MAJOR, err, ("Shared profile offset is out of range")); if (!FmPcdPlcrIsProfileValid(h_FmPcd, absoluteProfileId)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Invalid profile")); } } break; case (e_FM_PCD_HASH): p_FmPcdCcNextEngineParams->nextEngine = e_FM_PCD_CC; case (e_FM_PCD_CC): if (!p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode) RETURN_ERROR(MAJOR, E_NULL_POINTER, ("handler to next Node is NULL")); break; #if (DPAA_VERSION >= 11) case (e_FM_PCD_FR): if (!p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic) err = E_NOT_SUPPORTED; break; #endif /* (DPAA_VERSION >= 11) */ default: RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Next engine is not correct")); } return err; } static uint8_t GetGenParseCode(e_FmPcdExtractFrom src, uint32_t offset, bool glblMask, uint8_t *parseArrayOffset, bool fromIc, ccPrivateInfo_t icCode) { if (!fromIc) { switch (src) { case (e_FM_PCD_EXTRACT_FROM_FRAME_START): if (glblMask) return CC_PC_GENERIC_WITH_MASK; else return CC_PC_GENERIC_WITHOUT_MASK; case (e_FM_PCD_EXTRACT_FROM_CURR_END_OF_PARSE): *parseArrayOffset = CC_PC_PR_NEXT_HEADER_OFFSET; if (offset) return CC_PR_OFFSET; else return CC_PR_WITHOUT_OFFSET; default: REPORT_ERROR(MAJOR, 
E_INVALID_VALUE, ("Illegal 'extract from' src")); return CC_PC_ILLEGAL; } } else { switch (icCode) { case (CC_PRIVATE_INFO_IC_KEY_EXACT_MATCH): *parseArrayOffset = 0x50; return CC_PC_GENERIC_IC_GMASK; case (CC_PRIVATE_INFO_IC_HASH_EXACT_MATCH): *parseArrayOffset = 0x48; return CC_PC_GENERIC_IC_GMASK; case (CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP): *parseArrayOffset = 0x48; return CC_PC_GENERIC_IC_HASH_INDEXED; case (CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP): *parseArrayOffset = 0x16; return CC_PC_GENERIC_IC_HASH_INDEXED; default: REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal 'extract from' src")); break; } } return CC_PC_ILLEGAL; } static uint8_t GetFullFieldParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex index, t_FmPcdFields field) { switch (hdr) { case (HEADER_TYPE_NONE): ASSERT_COND(FALSE); return CC_PC_ILLEGAL; case (HEADER_TYPE_ETH): switch (field.eth) { case (NET_HEADER_FIELD_ETH_DA): return CC_PC_FF_MACDST; case (NET_HEADER_FIELD_ETH_SA): return CC_PC_FF_MACSRC; case (NET_HEADER_FIELD_ETH_TYPE): return CC_PC_FF_ETYPE; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_VLAN): switch (field.vlan) { case (NET_HEADER_FIELD_VLAN_TCI): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_TCI1; if (index == e_FM_PCD_HDR_INDEX_LAST) return CC_PC_FF_TCI2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_MPLS): switch (field.mpls) { case (NET_HEADER_FIELD_MPLS_LABEL_STACK): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_MPLS1; if (index == e_FM_PCD_HDR_INDEX_LAST) return CC_PC_FF_MPLS_LAST; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS index")); return CC_PC_ILLEGAL; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return 
CC_PC_ILLEGAL; } case (HEADER_TYPE_IPv4): switch (field.ipv4) { case (NET_HEADER_FIELD_IPv4_DST_IP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV4DST1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV4DST2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv4_TOS): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV4IPTOS_TC1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV4IPTOS_TC2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv4_PROTO): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV4PTYPE1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV4PTYPE2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv4_SRC_IP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV4SRC1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV4SRC2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv4_SRC_IP | NET_HEADER_FIELD_IPv4_DST_IP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV4SRC1_IPV4DST1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV4SRC2_IPV4DST2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv4 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv4_TTL): return CC_PC_FF_IPV4TTL; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_IPv6): switch (field.ipv6) { case (NET_HEADER_FIELD_IPv6_VER | NET_HEADER_FIELD_IPv6_FL | NET_HEADER_FIELD_IPv6_TC): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1; if (index == e_FM_PCD_HDR_INDEX_2) return 
CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv6_NEXT_HDR): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV6PTYPE1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV6PTYPE2; if (index == e_FM_PCD_HDR_INDEX_LAST) return CC_PC_FF_IPPID; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv6_DST_IP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV6DST1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV6DST2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv6_SRC_IP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPV6SRC1; if (index == e_FM_PCD_HDR_INDEX_2) return CC_PC_FF_IPV6SRC2; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IPv6 index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IPv6_HOP_LIMIT): return CC_PC_FF_IPV6HOP_LIMIT; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_IP): switch (field.ip) { case (NET_HEADER_FIELD_IP_DSCP): if ((index == e_FM_PCD_HDR_INDEX_NONE) || (index == e_FM_PCD_HDR_INDEX_1)) return CC_PC_FF_IPDSCP; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index")); return CC_PC_ILLEGAL; case (NET_HEADER_FIELD_IP_PROTO): if (index == e_FM_PCD_HDR_INDEX_LAST) return CC_PC_FF_IPPID; REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP index")); return CC_PC_ILLEGAL; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_GRE): switch (field.gre) { case (NET_HEADER_FIELD_GRE_TYPE): return CC_PC_FF_GREPTYPE; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_MINENCAP): switch (field.minencap) 
{ case (NET_HEADER_FIELD_MINENCAP_TYPE): return CC_PC_FF_MINENCAP_PTYPE; case (NET_HEADER_FIELD_MINENCAP_DST_IP): return CC_PC_FF_MINENCAP_IPDST; case (NET_HEADER_FIELD_MINENCAP_SRC_IP): return CC_PC_FF_MINENCAP_IPSRC; case (NET_HEADER_FIELD_MINENCAP_SRC_IP | NET_HEADER_FIELD_MINENCAP_DST_IP): return CC_PC_FF_MINENCAP_IPSRC_IPDST; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_TCP): switch (field.tcp) { case (NET_HEADER_FIELD_TCP_PORT_SRC): return CC_PC_FF_L4PSRC; case (NET_HEADER_FIELD_TCP_PORT_DST): return CC_PC_FF_L4PDST; case (NET_HEADER_FIELD_TCP_PORT_DST | NET_HEADER_FIELD_TCP_PORT_SRC): return CC_PC_FF_L4PSRC_L4PDST; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_PPPoE): switch (field.pppoe) { case (NET_HEADER_FIELD_PPPoE_PID): return CC_PC_FF_PPPPID; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } case (HEADER_TYPE_UDP): switch (field.udp) { case (NET_HEADER_FIELD_UDP_PORT_SRC): return CC_PC_FF_L4PSRC; case (NET_HEADER_FIELD_UDP_PORT_DST): return CC_PC_FF_L4PDST; case (NET_HEADER_FIELD_UDP_PORT_DST | NET_HEADER_FIELD_UDP_PORT_SRC): return CC_PC_FF_L4PSRC_L4PDST; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } } static uint8_t GetPrParseCode(e_NetHeaderType hdr, e_FmPcdHdrIndex hdrIndex, uint32_t offset, bool glblMask, uint8_t *parseArrayOffset) { bool offsetRelevant = FALSE; if (offset) offsetRelevant = TRUE; switch (hdr) { case (HEADER_TYPE_NONE): ASSERT_COND(FALSE); return CC_PC_ILLEGAL; case (HEADER_TYPE_ETH): *parseArrayOffset = (uint8_t)CC_PC_PR_ETH_OFFSET; break; case (HEADER_TYPE_USER_DEFINED_SHIM1): if (offset || glblMask) *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM1_OFFSET; else return 
CC_PC_PR_SHIM1; break; case (HEADER_TYPE_USER_DEFINED_SHIM2): if (offset || glblMask) *parseArrayOffset = (uint8_t)CC_PC_PR_USER_DEFINED_SHIM2_OFFSET; else return CC_PC_PR_SHIM2; break; case (HEADER_TYPE_LLC_SNAP): *parseArrayOffset = CC_PC_PR_USER_LLC_SNAP_OFFSET; break; case (HEADER_TYPE_PPPoE): *parseArrayOffset = CC_PC_PR_PPPOE_OFFSET; break; case (HEADER_TYPE_MPLS): if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) *parseArrayOffset = CC_PC_PR_MPLS1_OFFSET; else if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) *parseArrayOffset = CC_PC_PR_MPLS_LAST_OFFSET; else { REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal MPLS header index")); return CC_PC_ILLEGAL; } break; case (HEADER_TYPE_IPv4): case (HEADER_TYPE_IPv6): if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) *parseArrayOffset = CC_PC_PR_IP1_OFFSET; else if (hdrIndex == e_FM_PCD_HDR_INDEX_2) *parseArrayOffset = CC_PC_PR_IP_LAST_OFFSET; else { REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header index")); return CC_PC_ILLEGAL; } break; case (HEADER_TYPE_MINENCAP): *parseArrayOffset = CC_PC_PR_MINENC_OFFSET; break; case (HEADER_TYPE_GRE): *parseArrayOffset = CC_PC_PR_GRE_OFFSET; break; case (HEADER_TYPE_TCP): case (HEADER_TYPE_UDP): case (HEADER_TYPE_IPSEC_AH): case (HEADER_TYPE_IPSEC_ESP): case (HEADER_TYPE_DCCP): case (HEADER_TYPE_SCTP): *parseArrayOffset = CC_PC_PR_L4_OFFSET; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal IP header for this type of operation")); return CC_PC_ILLEGAL; } if (offsetRelevant) return CC_PR_OFFSET; else return CC_PR_WITHOUT_OFFSET; } static uint8_t GetFieldParseCode(e_NetHeaderType hdr, t_FmPcdFields field, uint32_t offset, uint8_t *parseArrayOffset, e_FmPcdHdrIndex hdrIndex) { bool offsetRelevant = FALSE; if (offset) offsetRelevant = TRUE; switch (hdr) { case (HEADER_TYPE_NONE): ASSERT_COND(FALSE); break; case (HEADER_TYPE_ETH): switch (field.eth) { case (NET_HEADER_FIELD_ETH_TYPE): *parseArrayOffset = 
CC_PC_PR_ETYPE_LAST_OFFSET; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } break; case (HEADER_TYPE_VLAN): switch (field.vlan) { case (NET_HEADER_FIELD_VLAN_TCI): if ((hdrIndex == e_FM_PCD_HDR_INDEX_NONE) || (hdrIndex == e_FM_PCD_HDR_INDEX_1)) *parseArrayOffset = CC_PC_PR_VLAN1_OFFSET; else if (hdrIndex == e_FM_PCD_HDR_INDEX_LAST) *parseArrayOffset = CC_PC_PR_VLAN2_OFFSET; break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Extraction not supported")); return CC_PC_ILLEGAL; } break; default: REPORT_ERROR(MAJOR, E_NOT_SUPPORTED, ("Illegal header ")); return CC_PC_ILLEGAL; } if (offsetRelevant) return CC_PR_OFFSET; else return CC_PR_WITHOUT_OFFSET; } static void FillAdOfTypeResult(t_Handle h_Ad, t_FmPcdCcStatsParams *p_FmPcdCcStatsParams, t_FmPcd *p_FmPcd, t_FmPcdCcNextEngineParams *p_CcNextEngineParams) { t_AdOfTypeResult *p_AdResult = (t_AdOfTypeResult *)h_Ad; t_Handle h_TmpAd; uint32_t tmp = 0, tmpNia = 0; uint16_t profileId; t_Handle p_AdNewPtr = NULL; t_Error err = E_OK; /* There are 3 cases handled in this routine of building a "result" type AD. * Case 1: No Manip. The action descriptor is built within the match table. * Case 2: Manip exists. A new AD is created - p_AdNewPtr. It is initialized * either in the FmPcdManipUpdateAdResultForCc routine or it was already * initialized and returned here. * p_AdResult (within the match table) will be initialized after * this routine returns and point to the existing AD. * Case 3: Manip exists. The action descriptor is built within the match table. * FmPcdManipUpdateAdResultForCc returns a NULL p_AdNewPtr. * * If statistics were enabled and the statistics mode of this node requires * a statistics Ad, it will be placed after the result Ad and before the * manip Ad, if manip Ad exists here. */ /* As default, the "new" ptr is the current one. i.e. 
the content of the result * AD will be written into the match table itself (case (1))*/ p_AdNewPtr = p_AdResult; /* Initialize an action descriptor, if current statistics mode requires an Ad */ if (p_FmPcdCcStatsParams) { ASSERT_COND(p_FmPcdCcStatsParams->h_StatsAd); ASSERT_COND(p_FmPcdCcStatsParams->h_StatsCounters); /* Swapping addresses between statistics Ad and the current lookup AD addresses */ h_TmpAd = p_FmPcdCcStatsParams->h_StatsAd; p_FmPcdCcStatsParams->h_StatsAd = h_Ad; h_Ad = h_TmpAd; p_AdNewPtr = h_Ad; p_AdResult = h_Ad; /* Init statistics Ad and connect current lookup AD as 'next action' from statistics Ad */ UpdateStatsAd(p_FmPcdCcStatsParams, h_Ad, p_FmPcd->physicalMuramBase); } /* Create manip and return p_AdNewPtr to either a new descriptor or NULL */ if (p_CcNextEngineParams->h_Manip) FmPcdManipUpdateAdResultForCc(p_CcNextEngineParams->h_Manip, p_CcNextEngineParams, h_Ad, &p_AdNewPtr); /* if (p_AdNewPtr = NULL) --> Done. (case (3)) */ if (p_AdNewPtr) { /* case (1) and (2) */ switch (p_CcNextEngineParams->nextEngine) { case (e_FM_PCD_DONE): if (p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_ENQ_FRAME) { if (p_CcNextEngineParams->params.enqueueParams.overrideFqid) { tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; tmp |= p_CcNextEngineParams->params.enqueueParams.newFqid; #if (DPAA_VERSION >= 11) tmp |= (p_CcNextEngineParams->params.enqueueParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK) << FM_PCD_AD_RESULT_VSP_SHIFT; #endif /* (DPAA_VERSION >= 11) */ } else { tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; tmp |= FM_PCD_AD_RESULT_PLCR_DIS; } } if (p_CcNextEngineParams->params.enqueueParams.action == e_FM_PCD_DROP_FRAME) tmpNia |= GET_NIA_BMI_AC_DISCARD_FRAME(p_FmPcd); else tmpNia |= GET_NIA_BMI_AC_ENQ_FRAME(p_FmPcd); break; case (e_FM_PCD_KG): if (p_CcNextEngineParams->params.kgParams.overrideFqid) { tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; tmp |= p_CcNextEngineParams->params.kgParams.newFqid; #if (DPAA_VERSION >= 11) tmp |= 
(p_CcNextEngineParams->params.kgParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK) << FM_PCD_AD_RESULT_VSP_SHIFT; #endif /* (DPAA_VERSION >= 11) */ } else { tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; tmp |= FM_PCD_AD_RESULT_PLCR_DIS; } tmpNia = NIA_KG_DIRECT; tmpNia |= NIA_ENG_KG; tmpNia |= NIA_KG_CC_EN; tmpNia |= FmPcdKgGetSchemeId( p_CcNextEngineParams->params.kgParams.h_DirectScheme); break; case (e_FM_PCD_PLCR): if (p_CcNextEngineParams->params.plcrParams.overrideParams) { tmp = FM_PCD_AD_RESULT_CONTRL_FLOW_TYPE; /* if private policer profile, it may be uninitialized yet, therefore no checks are done at this stage */ if (p_CcNextEngineParams->params.plcrParams.sharedProfile) { tmpNia |= NIA_PLCR_ABSOLUTE; err = FmPcdPlcrGetAbsoluteIdByProfileParams( (t_Handle)p_FmPcd, e_FM_PCD_PLCR_SHARED, NULL, p_CcNextEngineParams->params.plcrParams.newRelativeProfileId, &profileId); if (err != E_OK) return; } else profileId = p_CcNextEngineParams->params.plcrParams.newRelativeProfileId; tmp |= p_CcNextEngineParams->params.plcrParams.newFqid; #if (DPAA_VERSION >= 11) tmp |= (p_CcNextEngineParams->params.plcrParams.newRelativeStorageProfileId & FM_PCD_AD_RESULT_VSP_MASK) << FM_PCD_AD_RESULT_VSP_SHIFT; #endif /* (DPAA_VERSION >= 11) */ WRITE_UINT32( p_AdResult->plcrProfile, (uint32_t)((uint32_t)profileId << FM_PCD_AD_PROFILEID_FOR_CNTRL_SHIFT)); } else tmp = FM_PCD_AD_RESULT_DATA_FLOW_TYPE; tmpNia |= NIA_ENG_PLCR | p_CcNextEngineParams->params.plcrParams.newRelativeProfileId; break; default: return; }WRITE_UINT32(p_AdResult->fqid, tmp); if (p_CcNextEngineParams->h_Manip) { tmp = GET_UINT32(p_AdResult->plcrProfile); tmp |= (uint32_t)(XX_VirtToPhys(p_AdNewPtr) - (p_FmPcd->physicalMuramBase)) >> 4; WRITE_UINT32(p_AdResult->plcrProfile, tmp); tmpNia |= FM_PCD_AD_RESULT_EXTENDED_MODE; tmpNia |= FM_PCD_AD_RESULT_NADEN; } #if (DPAA_VERSION >= 11) tmpNia |= FM_PCD_AD_RESULT_NO_OM_VSPE; #endif /* (DPAA_VERSION >= 11) */ WRITE_UINT32(p_AdResult->nia, tmpNia); } } static t_Error 
CcUpdateParams(t_Handle h_FmPcd, t_Handle h_PcdParams, t_Handle h_FmPort, t_Handle h_FmTree, bool validate) { t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_FmTree; return CcUpdateParam(h_FmPcd, h_PcdParams, h_FmPort, p_CcTree->keyAndNextEngineParams, p_CcTree->numOfEntries, UINT_TO_PTR(p_CcTree->ccTreeBaseAddr), validate, 0, h_FmTree, FALSE); } static void ReleaseNewNodeCommonPart( t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo) { if (p_AdditionalInfo->p_AdTableNew) FM_MURAM_FreeMem( FmPcdGetMuramHandle( ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd), p_AdditionalInfo->p_AdTableNew); if (p_AdditionalInfo->p_KeysMatchTableNew) FM_MURAM_FreeMem( FmPcdGetMuramHandle( ((t_FmPcdCcNode *)(p_AdditionalInfo->h_CurrentNode))->h_FmPcd), p_AdditionalInfo->p_KeysMatchTableNew); } static t_Error UpdateGblMask(t_FmPcdCcNode *p_CcNode, uint8_t keySize, uint8_t *p_Mask) { uint8_t prvGlblMaskSize = p_CcNode->glblMaskSize; if (p_Mask && !p_CcNode->glblMaskUpdated && (keySize <= 4) && !p_CcNode->lclMask) { if (p_CcNode->parseCode && (p_CcNode->parseCode != CC_PC_FF_TCI1) && (p_CcNode->parseCode != CC_PC_FF_TCI2) && (p_CcNode->parseCode != CC_PC_FF_MPLS1) && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST) && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && (p_CcNode->parseCode != CC_PC_FF_IPDSCP) && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2)) { p_CcNode->glblMaskSize = 0; p_CcNode->lclMask = TRUE; } else { memcpy(p_CcNode->p_GlblMask, p_Mask, (sizeof(uint8_t)) * keySize); p_CcNode->glblMaskUpdated = TRUE; p_CcNode->glblMaskSize = 4; } } else if (p_Mask && (keySize <= 4) && !p_CcNode->lclMask) { if (memcmp(p_CcNode->p_GlblMask, p_Mask, keySize) != 0) { p_CcNode->lclMask = TRUE; p_CcNode->glblMaskSize = 0; } } else if (!p_Mask && p_CcNode->glblMaskUpdated && (keySize <= 4)) { uint32_t tmpMask = 0xffffffff; if (memcmp(p_CcNode->p_GlblMask, 
&tmpMask, 4) != 0)
        {
            p_CcNode->lclMask = TRUE;
            p_CcNode->glblMaskSize = 0;
        }
    }
    else if (p_Mask)
    {
        p_CcNode->lclMask = TRUE;
        p_CcNode->glblMaskSize = 0;
    }

    /* In static mode (maxNumOfKeys > 0), local mask is supported only if mask
     support was enabled at initialization */
    if (p_CcNode->maxNumOfKeys && (!p_CcNode->maskSupport) && p_CcNode->lclMask)
    {
        /* Roll back the mask state before failing. */
        p_CcNode->lclMask = FALSE;
        p_CcNode->glblMaskSize = prvGlblMaskSize;
        return ERROR_CODE(E_NOT_SUPPORTED);
    }

    return E_OK;
}

/* Returns an AD (action descriptor) buffer for a node/tree modification:
 * the pre-allocated CC shadow when available (tree, or static-mode node),
 * otherwise a freshly allocated MURAM entry. Returns NULL on failure. */
static __inline__ t_Handle GetNewAd(t_Handle h_FmPcdCcNodeOrTree, bool isTree)
{
    t_FmPcd *p_FmPcd;
    t_Handle h_Ad;

    if (isTree)
        p_FmPcd = (t_FmPcd *)(((t_FmPcdCcTree *)h_FmPcdCcNodeOrTree)->h_FmPcd);
    else
        p_FmPcd = (t_FmPcd *)(((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_FmPcd);

    if ((isTree && p_FmPcd->p_CcShadow)
            || (!isTree && ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->maxNumOfKeys))
    {
        /* The allocated shadow is divided as follows:
         0 . . . 16 . . .
         ---------------------------------------------------
         | Shadow | Shadow Keys | Shadow Next |
         | Ad | Match Table | Engine Table |
         | (16 bytes) | (maximal size) | (maximal size) |
         ---------------------------------------------------
         */
        if (!p_FmPcd->p_CcShadow)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));
            return NULL;
        }

        h_Ad = p_FmPcd->p_CcShadow;
    }
    else
    {
        h_Ad = (t_Handle)FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd),
                                           FM_PCD_CC_AD_ENTRY_SIZE,
                                           FM_PCD_CC_AD_TABLE_ALIGN);
        if (!h_Ad)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY,
                         ("MURAM allocation for CC node action descriptor"));
            return NULL;
        }
    }

    return h_Ad;
}

/* Computes the per-entry keys-match size (doubled when a local mask is in
 * use) and obtains the new AD table and keys-match table: from MURAM in
 * dynamic mode (maxNumOfKeys == 0), from the pre-allocated CC shadow in
 * static mode. Saves the old tables for later release. (Definition
 * continues on the following source lines.) */
static t_Error BuildNewNodeCommonPart(
        t_FmPcdCcNode *p_CcNode, int *size,
        t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    if (p_CcNode->lclMask)
        *size = 2 * p_CcNode->ccKeySizeAccExtraction;
    else
        *size = p_CcNode->ccKeySizeAccExtraction;

    if (p_CcNode->maxNumOfKeys == 0)
    {
        p_AdditionalInfo->p_AdTableNew = (t_Handle)FM_MURAM_AllocMem(
                FmPcdGetMuramHandle(p_FmPcd),
(uint32_t)((p_AdditionalInfo->numOfKeys + 1)
                        * FM_PCD_CC_AD_ENTRY_SIZE),
                FM_PCD_CC_AD_TABLE_ALIGN);
        if (!p_AdditionalInfo->p_AdTableNew)
            RETURN_ERROR(
                    MAJOR,
                    E_NO_MEMORY,
                    ("MURAM allocation for CC node action descriptors table"));

        p_AdditionalInfo->p_KeysMatchTableNew = (t_Handle)FM_MURAM_AllocMem(
                FmPcdGetMuramHandle(p_FmPcd),
                (uint32_t)(*size * sizeof(uint8_t)
                        * (p_AdditionalInfo->numOfKeys + 1)),
                FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN);
        if (!p_AdditionalInfo->p_KeysMatchTableNew)
        {
            /* Roll back the AD-table allocation so no MURAM is leaked. */
            FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_CcNode->h_FmPcd),
                             p_AdditionalInfo->p_AdTableNew);
            p_AdditionalInfo->p_AdTableNew = NULL;
            RETURN_ERROR(MAJOR, E_NO_MEMORY,
                         ("MURAM allocation for CC node key match table"));
        }

        /* Zero both freshly allocated tables (numOfKeys entries plus the
         miss entry). */
        MemSet8(
                (uint8_t*)p_AdditionalInfo->p_AdTableNew,
                0,
                (uint32_t)((p_AdditionalInfo->numOfKeys + 1)
                        * FM_PCD_CC_AD_ENTRY_SIZE));
        MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
                *size * sizeof(uint8_t) * (p_AdditionalInfo->numOfKeys + 1));
    }
    else
    {
        /* The allocated shadow is divided as follows:
         0 . . . 16 . . .
---------------------------------------------------
         | Shadow | Shadow Keys | Shadow Next |
         | Ad | Match Table | Engine Table |
         | (16 bytes) | (maximal size) | (maximal size) |
         ---------------------------------------------------
         */
        if (!p_FmPcd->p_CcShadow)
            RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC Shadow not allocated"));

        /* Static mode: carve the new tables out of the shared shadow area. */
        p_AdditionalInfo->p_KeysMatchTableNew =
                PTR_MOVE(p_FmPcd->p_CcShadow, FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdditionalInfo->p_AdTableNew =
                PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                         p_CcNode->keysMatchTableMaxSize);

        MemSet8(
                (uint8_t*)p_AdditionalInfo->p_AdTableNew,
                0,
                (uint32_t)((p_CcNode->maxNumOfKeys + 1)
                        * FM_PCD_CC_AD_ENTRY_SIZE));
        MemSet8((uint8_t*)p_AdditionalInfo->p_KeysMatchTableNew, 0,
                (*size) * sizeof(uint8_t) * (p_CcNode->maxNumOfKeys));
    }

    /* Remember the currently active tables so they can be released after
     the switch-over. */
    p_AdditionalInfo->p_AdTableOld = p_CcNode->h_AdTable;
    p_AdditionalInfo->p_KeysMatchTableOld = p_CcNode->h_KeysMatchTable;

    return E_OK;
}

/* Builds new AD and keys-match tables for adding a new key, or modifying an
 * existing key together with its next-engine action, at 'keyIndex'.
 * Existing entries are copied over; the modified/added entry is rebuilt from
 * p_KeyParams. (Definition continues across the following source lines.) */
static t_Error BuildNewNodeAddOrMdfyKeyAndNextEngine(
        t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
        t_FmPcdCcKeyParams *p_KeyParams,
        t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo, bool add)
{
    t_Error err = E_OK;
    t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    int i = 0, j = 0;
    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
    uint32_t requiredAction = 0;
    bool prvLclMask;
    t_CcNodeInformation *p_CcNodeInformation;
    t_FmPcdCcStatsParams statsParams = { 0 };
    t_List *p_Pos;
    t_FmPcdStatsObj *p_StatsObj;

    /* Check that new NIA is legal */
    err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams,
                                   p_CcNode->statisticsMode);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    prvLclMask = p_CcNode->lclMask;

    /* Check whether the new key requires an update of the local mask */
    err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction,
                        p_KeyParams->p_Mask);
    if (err)
        RETURN_ERROR(MAJOR, err, (NO_MSG));

    /* Update internal data structure with new next engine for the given index */
memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
           &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));

    memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key,
           p_KeyParams->p_Key, p_CcNode->userSizeOfExtraction);

    /* A next-engine CC node combined with a manip needs a dedicated
     continue-lookup AD. */
    if ((p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
            == e_FM_PCD_CC)
            && p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
    {
        err =
                AllocAndFillAdForContLookupManip(
                        p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    /* A missing mask is recorded as all ones. */
    if (p_KeyParams->p_Mask)
        memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask,
               p_KeyParams->p_Mask, p_CcNode->userSizeOfExtraction);
    else
        memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
               p_CcNode->userSizeOfExtraction);

    /* Update numOfKeys */
    if (add)
        p_AdditionalInfo->numOfKeys = (uint8_t)(p_CcNode->numOfKeys + 1);
    else
        p_AdditionalInfo->numOfKeys = (uint8_t)p_CcNode->numOfKeys;

    /* Allocate new tables in MURAM: keys match table and action descriptors table */
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* Check that manip is legal and what requiredAction is necessary for this manip */
    if (p_KeyParams->ccNextEngineParams.h_Manip)
    {
        err = FmPcdManipCheckParamsForCcNextEngine(
                &p_KeyParams->ccNextEngineParams, &requiredAction);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
            requiredAction;
    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
            UPDATE_CC_WITH_TREE;

    /* Update new Ad and new Key Table according to new requirement;
     'j' walks the new tables, 'i' walks the old ones. */
    i = 0;
    for (j = 0; j < p_AdditionalInfo->numOfKeys; j++)
    {
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                                   j*FM_PCD_CC_AD_ENTRY_SIZE);

        if (j == keyIndex)
        {
            if (p_KeyParams->ccNextEngineParams.statisticsEn)
            {
                /* Allocate a statistics object
that holds statistics AD and counters.
                 - For added key - New statistics AD and counters pointer
                 need to be allocated new statistics object. If statistics
                 were enabled, we need to replace the existing descriptor
                 with a new descriptor with nullified counters.
                 */
                p_StatsObj = GetStatsObj(p_CcNode);
                ASSERT_COND(p_StatsObj);

                /* Store allocated statistics object */
                ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
                p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
                        p_StatsObj;

                statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
                statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;
#if (DPAA_VERSION >= 11)
                statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs;

#endif /* (DPAA_VERSION >= 11) */

                /* Building action descriptor for the received new key */
                NextStepAd(p_AdTableNewTmp, &statsParams,
                           &p_KeyParams->ccNextEngineParams, p_FmPcd);
            }
            else
            {
                /* Building action descriptor for the received new key */
                NextStepAd(p_AdTableNewTmp, NULL,
                           &p_KeyParams->ccNextEngineParams, p_FmPcd);
            }

            /* Copy the received new key into keys match table */
            p_KeysMatchTableNewTmp =
                    PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                             j*size*sizeof(uint8_t));

            MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeyParams->p_Key,
                    p_CcNode->userSizeOfExtraction);

            /* Update mask for the received new key: explicit mask, all ones
             for wide keys, or the shared global mask for short keys. */
            if (p_CcNode->lclMask)
            {
                if (p_KeyParams->p_Mask)
                {
                    MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            p_KeyParams->p_Mask,
                            p_CcNode->userSizeOfExtraction);
                }
                else if (p_CcNode->ccKeySizeAccExtraction > 4)
                {
                    MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            0xff, p_CcNode->userSizeOfExtraction);
                }
                else
                {
                    MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            p_CcNode->p_GlblMask,
                            p_CcNode->userSizeOfExtraction);
                }
            }

            /* If key modification requested, the old entry is omitted and
             replaced by the new parameters */
            if (!add)
                i++;
        }
        else
        {
            /* Copy existing action descriptors to the newly allocated Ad table */
            p_AdTableOldTmp =
PTR_MOVE(p_AdditionalInfo->p_AdTableOld, i*FM_PCD_CC_AD_ENTRY_SIZE);
            MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

            /* Copy existing keys and their masks to the newly allocated keys
             match table */
            p_KeysMatchTableNewTmp =
                    PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                             j * size * sizeof(uint8_t));
            p_KeysMatchTableOldTmp =
                    PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld,
                             i * size * sizeof(uint8_t));

            if (p_CcNode->lclMask)
            {
                if (prvLclMask)
                {
                    /* Old table already held per-key masks - copy them. */
                    MemCpy8(
                            PTR_MOVE(p_KeysMatchTableNewTmp,
                                    p_CcNode->ccKeySizeAccExtraction),
                            PTR_MOVE(p_KeysMatchTableOldTmp,
                                    p_CcNode->ccKeySizeAccExtraction),
                            p_CcNode->ccKeySizeAccExtraction);
                }
                else
                {
                    /* Old table had no per-key masks; re-point to the
                     un-doubled old layout and synthesize the mask. */
                    p_KeysMatchTableOldTmp =
                            PTR_MOVE(p_CcNode->h_KeysMatchTable,
                                    i * (int)p_CcNode->ccKeySizeAccExtraction
                                            * sizeof(uint8_t));

                    if (p_CcNode->ccKeySizeAccExtraction > 4)
                    {
                        MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                0xff, p_CcNode->userSizeOfExtraction);
                    }
                    else
                    {
                        MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                p_CcNode->p_GlblMask,
                                p_CcNode->userSizeOfExtraction);
                    }
                }
            }

            MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
                    p_CcNode->ccKeySizeAccExtraction);

            i++;
        }
    }

    /* Miss action descriptor */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                               j * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld,
                               i * FM_PCD_CC_AD_ENTRY_SIZE);
    MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    if (!NCSW_LIST_IsEmpty(&p_CcNode->ccTreesLst))
    {
        NCSW_LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
        {
            p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
            ASSERT_COND(p_CcNodeInformation->h_CcNode);
            /* Update the manipulation which has to be updated from
             parameters of the port; it has to be updated with restrictions
             defined in the function */

            err = SetRequiredAction(
                    p_CcNode->h_FmPcd,
                    p_CcNode->shadowAction
                            | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
                    &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
                    1, p_CcNodeInformation->h_CcNode);
            if (err)
                RETURN_ERROR(MAJOR, err, (NO_MSG));

            err = CcUpdateParam(
                    p_CcNode->h_FmPcd, NULL, NULL,
                    &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], 1,
                    PTR_MOVE(p_AdditionalInfo->p_AdTableNew, keyIndex*FM_PCD_CC_AD_ENTRY_SIZE),
                    TRUE, p_CcNodeInformation->index,
                    p_CcNodeInformation->h_CcNode, TRUE);
            if (err)
                RETURN_ERROR(MAJOR, err, (NO_MSG));
        }
    }

    /* Once local masks are in use, the global mask degenerates to all ones. */
    if (p_CcNode->lclMask)
        memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t));

    /* Record objects whose ownership is gained by the new entry. */
    if (p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_CC)
        p_AdditionalInfo->h_NodeForAdd =
                p_KeyParams->ccNextEngineParams.params.ccParams.h_CcNode;
    if (p_KeyParams->ccNextEngineParams.h_Manip)
        p_AdditionalInfo->h_ManipForAdd =
                p_KeyParams->ccNextEngineParams.h_Manip;

#if (DPAA_VERSION >= 11)
    if ((p_KeyParams->ccNextEngineParams.nextEngine == e_FM_PCD_FR)
            && (p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic))
        p_AdditionalInfo->h_FrmReplicForAdd =
                p_KeyParams->ccNextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */

    if (!add)
    {
        /* Modification: record objects released by the replaced entry. */
        if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_CC)
            p_AdditionalInfo->h_NodeForRmv =
                    p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

        if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
            p_AdditionalInfo->h_ManipForRmv =
                    p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

        /* If statistics were previously enabled, store the old statistics
         object to be released */
        if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
        {
            p_AdditionalInfo->p_StatsObjForRmv =
                    p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
        }

#if (DPAA_VERSION >= 11)
        if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_FR)
                && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
p_AdditionalInfo->h_FrmReplicForRmv =
                    p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */
    }

    return E_OK;
}

/* Builds new AD and keys-match tables with the entry at 'keyIndex' removed,
 * and records the node/manip/statistics objects that become releasable. */
static t_Error BuildNewNodeRemoveKey(
        t_FmPcdCcNode *p_CcNode, uint16_t keyIndex,
        t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    int i = 0, j = 0;
    t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    t_Error err = E_OK;

    /*save new numOfKeys*/
    p_AdditionalInfo->numOfKeys = (uint16_t)(p_CcNode->numOfKeys - 1);

    /*function which allocates in the memory new KeyTbl, AdTbl*/
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /*update new Ad and new Key Table according to new requirement;
     'i' indexes the new tables, 'j' the old ones, skipping keyIndex*/
    for (i = 0, j = 0; j < p_CcNode->numOfKeys; i++, j++)
    {
        if (j == keyIndex)
            j++;

        if (j == p_CcNode->numOfKeys)
            break;
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                                   i * FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld,
                                   j * FM_PCD_CC_AD_ENTRY_SIZE);
        MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

        p_KeysMatchTableOldTmp =
                PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableOld,
                         j * size * sizeof(uint8_t));
        p_KeysMatchTableNewTmp =
                PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                         i * size * sizeof(uint8_t));
        MemCpy8(p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
                size * sizeof(uint8_t));
    }

    /* Copy the miss action descriptor to the end of the new AD table. */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                               i * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld,
                               j * FM_PCD_CC_AD_ENTRY_SIZE);
    MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    /* Record objects released by the removed entry. */
    if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
            == e_FM_PCD_CC)
        p_AdditionalInfo->h_NodeForRmv =
                p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

    if (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
        p_AdditionalInfo->h_ManipForRmv
= p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

    /* If statistics were previously enabled, store the old statistics
     object to be released */
    if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
    {
        p_AdditionalInfo->p_StatsObjForRmv =
                p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
    }

#if (DPAA_VERSION >= 11)
    if ((p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
            == e_FM_PCD_FR)
            && (p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
        p_AdditionalInfo->h_FrmReplicForRmv =
                p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */

    return E_OK;
}

/* Builds new AD and keys-match tables in which only the key/mask at
 * 'keyIndex' is replaced; the entry's next-engine action is preserved.
 * (Definition continues across the following source lines.) */
static t_Error BuildNewNodeModifyKey(
        t_FmPcdCcNode *p_CcNode, uint16_t keyIndex, uint8_t *p_Key,
        uint8_t *p_Mask, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    t_Error err = E_OK;
    t_Handle p_AdTableNewTmp, p_KeysMatchTableNewTmp;
    t_Handle p_KeysMatchTableOldTmp, p_AdTableOldTmp;
    int size;
    int i = 0, j = 0;
    bool prvLclMask;
    t_FmPcdStatsObj *p_StatsObj, tmpStatsObj;

    p_AdditionalInfo->numOfKeys = p_CcNode->numOfKeys;

    prvLclMask = p_CcNode->lclMask;

    /* Check whether the new key requires an update of the local mask */
    err = UpdateGblMask(p_CcNode, p_CcNode->ccKeySizeAccExtraction, p_Mask);
    if (err)
        RETURN_ERROR(MAJOR, err, (NO_MSG));

    /* Update internal data structure with new next engine for the given index */
    memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].key, p_Key,
           p_CcNode->userSizeOfExtraction);

    /* A missing mask is recorded as all ones. */
    if (p_Mask)
        memcpy(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, p_Mask,
               p_CcNode->userSizeOfExtraction);
    else
        memset(p_AdditionalInfo->keyAndNextEngineParams[keyIndex].mask, 0xFF,
               p_CcNode->userSizeOfExtraction);

    /*function which build in the memory new KeyTbl, AdTbl*/
    err = BuildNewNodeCommonPart(p_CcNode, &size, p_AdditionalInfo);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /*fill the New AdTable and New
KeyTable*/
    for (j = 0, i = 0; j < p_AdditionalInfo->numOfKeys; j++, i++)
    {
        p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                                   j*FM_PCD_CC_AD_ENTRY_SIZE);
        p_AdTableOldTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableOld,
                                   i*FM_PCD_CC_AD_ENTRY_SIZE);
        MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

        if (j == keyIndex)
        {
            ASSERT_COND(keyIndex < CC_MAX_NUM_OF_KEYS);
            if (p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
            {
                /* As statistics were enabled, we need to update the existing
                 statistics descriptor with a new nullified counters. */
                p_StatsObj = GetStatsObj(p_CcNode);
                ASSERT_COND(p_StatsObj);

                SetStatsCounters(
                        p_AdTableNewTmp,
                        (uint32_t)((XX_VirtToPhys(p_StatsObj->h_StatsCounters)
                                - p_FmPcd->physicalMuramBase)));

                tmpStatsObj.h_StatsAd = p_StatsObj->h_StatsAd;
                tmpStatsObj.h_StatsCounters = p_StatsObj->h_StatsCounters;

                /* As we need to replace only the counters, we build a new
                 statistics object that holds the old AD and the new counters
                 - this will be the currently used statistics object.
                 The newly allocated AD is not required and may be released
                 back to the available objects with the previous counters
                 pointer.
*/
                p_StatsObj->h_StatsAd =
                        p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;
                p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd =
                        tmpStatsObj.h_StatsAd;

                /* Store allocated statistics object */
                p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
                        p_StatsObj;

                /* As statistics were previously enabled, store the old
                 statistics object to be released */
                p_AdditionalInfo->p_StatsObjForRmv =
                        p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj;
            }

            /* Write the replacement key (and its mask) into the new table. */
            p_KeysMatchTableNewTmp =
                    PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                             j * size * sizeof(uint8_t));
            MemCpy8(p_KeysMatchTableNewTmp, p_Key,
                    p_CcNode->userSizeOfExtraction);

            if (p_CcNode->lclMask)
            {
                if (p_Mask)
                    MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            p_Mask, p_CcNode->userSizeOfExtraction);
                else if (p_CcNode->ccKeySizeAccExtraction > 4)
                    MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            0xff, p_CcNode->userSizeOfExtraction);
                else
                    MemCpy8(PTR_MOVE(p_KeysMatchTableNewTmp,
                            p_CcNode->ccKeySizeAccExtraction),
                            p_CcNode->p_GlblMask,
                            p_CcNode->userSizeOfExtraction);
            }
        }
        else
        {
            /* Untouched entry: copy key and mask from the old table. */
            p_KeysMatchTableNewTmp =
                    PTR_MOVE(p_AdditionalInfo->p_KeysMatchTableNew,
                             j * size * sizeof(uint8_t));
            p_KeysMatchTableOldTmp =
                    PTR_MOVE(p_CcNode->h_KeysMatchTable,
                             i * size * sizeof(uint8_t));

            if (p_CcNode->lclMask)
            {
                if (prvLclMask)
                    MemCpy8(
                            PTR_MOVE(p_KeysMatchTableNewTmp,
                                    p_CcNode->ccKeySizeAccExtraction),
                            PTR_MOVE(p_KeysMatchTableOldTmp,
                                    p_CcNode->ccKeySizeAccExtraction),
                            p_CcNode->userSizeOfExtraction);
                else
                {
                    /* Old table had no per-key masks; re-point to the
                     un-doubled old layout and synthesize the mask. */
                    p_KeysMatchTableOldTmp =
                            PTR_MOVE(p_CcNode->h_KeysMatchTable,
                                    i * (int)p_CcNode->ccKeySizeAccExtraction
                                            * sizeof(uint8_t));

                    if (p_CcNode->ccKeySizeAccExtraction > 4)
                        MemSet8(PTR_MOVE(p_KeysMatchTableNewTmp,
                                p_CcNode->ccKeySizeAccExtraction),
                                0xff, p_CcNode->userSizeOfExtraction);
                    else
                        MemCpy8(
                                PTR_MOVE(p_KeysMatchTableNewTmp,
                                        p_CcNode->ccKeySizeAccExtraction),
                                p_CcNode->p_GlblMask,
                                p_CcNode->userSizeOfExtraction);
                }
            }
MemCpy8((void*)p_KeysMatchTableNewTmp, p_KeysMatchTableOldTmp,
                    p_CcNode->ccKeySizeAccExtraction);
        }
    }

    /* Copy the miss action descriptor into the new table's last slot. */
    p_AdTableNewTmp = PTR_MOVE(p_AdditionalInfo->p_AdTableNew,
                               j * FM_PCD_CC_AD_ENTRY_SIZE);
    p_AdTableOldTmp = PTR_MOVE(p_CcNode->h_AdTable,
                               i * FM_PCD_CC_AD_ENTRY_SIZE);
    MemCpy8(p_AdTableNewTmp, p_AdTableOldTmp, FM_PCD_CC_AD_ENTRY_SIZE);

    return E_OK;
}

/* Replaces the next-engine action of entry 'keyIndex' in a CC node or tree:
 * validates the new engine, builds a replacement AD, queues the old and new
 * ADs on the supplied lists, and records objects to add/remove.
 * (Definition continues across the following source lines.) */
static t_Error BuildNewNodeModifyNextEngine(
        t_Handle h_FmPcd, t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
        t_FmPcdCcNextEngineParams *p_CcNextEngineParams, t_List *h_OldLst,
        t_List *h_NewLst, t_FmPcdModifyCcKeyAdditionalParams *p_AdditionalInfo)
{
    t_Error err = E_OK;
    uint32_t requiredAction = 0;
    t_List *p_Pos;
    t_CcNodeInformation *p_CcNodeInformation, ccNodeInfo;
    t_Handle p_Ad;
    t_FmPcdCcNode *p_FmPcdCcNode1 = NULL;
    t_FmPcdCcTree *p_FmPcdCcTree = NULL;
    t_FmPcdStatsObj *p_StatsObj;
    t_FmPcdCcStatsParams statsParams = { 0 };

    ASSERT_COND(p_CcNextEngineParams);

    /* check that new NIA is legal */
    if (!p_AdditionalInfo->tree)
        err = ValidateNextEngineParams(
                h_FmPcd, p_CcNextEngineParams,
                ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->statisticsMode);
    else
        /* Statistics are not supported for CC root */
        err = ValidateNextEngineParams(h_FmPcd, p_CcNextEngineParams,
                                       e_FM_PCD_CC_STATS_MODE_NONE);
    if (err)
        RETURN_ERROR(MAJOR, err, NO_MSG);

    /* Update internal data structure for next engine per index (index - key) */
    memcpy(&p_AdditionalInfo->keyAndNextEngineParams[keyIndex].nextEngineParams,
           p_CcNextEngineParams, sizeof(t_FmPcdCcNextEngineParams));

    /* Check that manip is legal and what requiredAction is necessary for this manip */
    if (p_CcNextEngineParams->h_Manip)
    {
        err = FmPcdManipCheckParamsForCcNextEngine(p_CcNextEngineParams,
                                                   &requiredAction);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    if (!p_AdditionalInfo->tree)
    {
        p_FmPcdCcNode1 = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
        p_AdditionalInfo->numOfKeys = p_FmPcdCcNode1->numOfKeys;
        p_Ad = p_FmPcdCcNode1->h_AdTable;

        if
(p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_CC)
            p_AdditionalInfo->h_NodeForRmv =
                    p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

        if (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
            p_AdditionalInfo->h_ManipForRmv =
                    p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

#if (DPAA_VERSION >= 11)
        if ((p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_FR)
                && (p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
            p_AdditionalInfo->h_FrmReplicForRmv =
                    p_FmPcdCcNode1->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */
    }
    else
    {
        /* Tree (CC root): the AD table lives at the tree base address. */
        p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree;
        p_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);

        if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_CC)
            p_AdditionalInfo->h_NodeForRmv =
                    p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.ccParams.h_CcNode;

        if (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip)
            p_AdditionalInfo->h_ManipForRmv =
                    p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.h_Manip;

#if (DPAA_VERSION >= 11)
        if ((p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.nextEngine
                == e_FM_PCD_FR)
                && (p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic))
            p_AdditionalInfo->h_FrmReplicForRmv =
                    p_FmPcdCcTree->keyAndNextEngineParams[keyIndex].nextEngineParams.params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */
    }

    /* A next-engine CC node combined with a manip needs a dedicated
     continue-lookup AD. */
    if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
            && p_CcNextEngineParams->h_Manip)
    {
        err = AllocAndFillAdForContLookupManip(
                p_CcNextEngineParams->params.ccParams.h_CcNode);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    ASSERT_COND(p_Ad);
    memset(&ccNodeInfo, 0,
sizeof(t_CcNodeInformation));
    ccNodeInfo.h_CcNode = PTR_MOVE(p_Ad, keyIndex * FM_PCD_CC_AD_ENTRY_SIZE);

    /* If statistics were enabled, this Ad is the statistics Ad. Need to
     follow its nextAction to retrieve the actual Nia-Ad. If statistics
     should remain enabled, only the actual Nia-Ad should be modified. */
    if ((!p_AdditionalInfo->tree)
            && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
            && (p_CcNextEngineParams->statisticsEn))
        ccNodeInfo.h_CcNode =
                ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsAd;

    /* Queue the currently used AD for later release. */
    EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);

    memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
    p_Ad = GetNewAd(h_FmPcdCcNodeOrTree, p_AdditionalInfo->tree);
    if (!p_Ad)
        RETURN_ERROR(MAJOR, E_NO_MEMORY,
                     ("MURAM allocation for CC node action descriptor"));

    MemSet8((uint8_t *)p_Ad, 0, FM_PCD_CC_AD_ENTRY_SIZE);

    /* If statistics were not enabled before, but requested now -
     Allocate a statistics object that holds statistics AD and counters.
*/
    if ((!p_AdditionalInfo->tree)
            && (!((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
            && (p_CcNextEngineParams->statisticsEn))
    {
        p_StatsObj = GetStatsObj((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree);
        ASSERT_COND(p_StatsObj);

        /* Store allocated statistics object */
        p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj =
                p_StatsObj;

        statsParams.h_StatsAd = p_StatsObj->h_StatsAd;
        statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters;

#if (DPAA_VERSION >= 11)
        statsParams.h_StatsFLRs =
                ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->h_StatsFLRs;

#endif /* (DPAA_VERSION >= 11) */

        NextStepAd(p_Ad, &statsParams, p_CcNextEngineParams, h_FmPcd);
    }
    else
        NextStepAd(p_Ad, NULL, p_CcNextEngineParams, h_FmPcd);

    /* Queue the replacement AD for commit. */
    ccNodeInfo.h_CcNode = p_Ad;
    EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL);

    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction =
            requiredAction;
    p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction |=
            UPDATE_CC_WITH_TREE;

    if (!p_AdditionalInfo->tree)
    {
        ASSERT_COND(p_FmPcdCcNode1);
        if (!NCSW_LIST_IsEmpty(&p_FmPcdCcNode1->ccTreesLst))
        {
            NCSW_LIST_FOR_EACH(p_Pos, &p_FmPcdCcNode1->ccTreesLst)
            {
                p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
                ASSERT_COND(p_CcNodeInformation->h_CcNode);
                /* Update the manipulation which has to be updated from
                 parameters of the port; it has to be updated with
                 restrictions defined in the function */

                err = SetRequiredAction(
                        p_FmPcdCcNode1->h_FmPcd,
                        p_FmPcdCcNode1->shadowAction
                                | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
                        &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
                        p_Ad, 1, p_CcNodeInformation->h_CcNode);
                if (err)
                    RETURN_ERROR(MAJOR, err, (NO_MSG));

                err = CcUpdateParam(
                        p_FmPcdCcNode1->h_FmPcd, NULL, NULL,
                        &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], 1,
                        p_Ad, TRUE, p_CcNodeInformation->index,
                        p_CcNodeInformation->h_CcNode, TRUE);
                if (err)
                    RETURN_ERROR(MAJOR, err, (NO_MSG));
            }
        }
    }
    else
    {
        ASSERT_COND(p_FmPcdCcTree);
        err =
SetRequiredAction(
                h_FmPcd,
                p_FmPcdCcTree->requiredAction
                        | p_AdditionalInfo->keyAndNextEngineParams[keyIndex].requiredAction,
                &p_AdditionalInfo->keyAndNextEngineParams[keyIndex], p_Ad, 1,
                (t_Handle)p_FmPcdCcTree);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));

        err = CcUpdateParam(h_FmPcd, NULL, NULL,
                            &p_AdditionalInfo->keyAndNextEngineParams[keyIndex],
                            1, p_Ad, TRUE, 0, (t_Handle)p_FmPcdCcTree, TRUE);
        if (err)
            RETURN_ERROR(MAJOR, err, (NO_MSG));
    }

    /* Record objects whose ownership is gained by the new action. */
    if (p_CcNextEngineParams->nextEngine == e_FM_PCD_CC)
        p_AdditionalInfo->h_NodeForAdd =
                p_CcNextEngineParams->params.ccParams.h_CcNode;
    if (p_CcNextEngineParams->h_Manip)
        p_AdditionalInfo->h_ManipForAdd = p_CcNextEngineParams->h_Manip;

    /* If statistics were previously enabled, but now are disabled,
     store the old statistics object to be released */
    if ((!p_AdditionalInfo->tree)
            && (((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj)
            && (!p_CcNextEngineParams->statisticsEn))
    {
        p_AdditionalInfo->p_StatsObjForRmv =
                ((t_FmPcdCcNode *)h_FmPcdCcNodeOrTree)->keyAndNextEngineParams[keyIndex].p_StatsObj;

        p_AdditionalInfo->keyAndNextEngineParams[keyIndex].p_StatsObj = NULL;
    }

#if (DPAA_VERSION >= 11)
    if ((p_CcNextEngineParams->nextEngine == e_FM_PCD_FR)
            && (p_CcNextEngineParams->params.frParams.h_FrmReplic))
        p_AdditionalInfo->h_FrmReplicForAdd =
                p_CcNextEngineParams->params.frParams.h_FrmReplic;
#endif /* (DPAA_VERSION >= 11) */

    return E_OK;
}

/* For every node that points at the node being modified, queues that node's
 * pointing AD on 'h_OldLst' and remembers the first matching next-engine
 * params so a replacement AD can be built by the caller.
 * (Definition continues on the next source line.) */
static void UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(
        t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
        t_FmPcdCcNextEngineParams **p_NextEngineParams)
{
    t_CcNodeInformation *p_CcNodeInformation;
    t_FmPcdCcNode *p_NodePtrOnCurrentMdfNode = NULL;
    t_List *p_Pos;
    int i = 0;
    t_Handle p_AdTablePtOnCrntCurrentMdfNode/*, p_AdTableNewModified*/;
    t_CcNodeInformation ccNodeInfo;

    NCSW_LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccPrevNodesLst)
    {
        p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
        p_NodePtrOnCurrentMdfNode =
                (t_FmPcdCcNode *)p_CcNodeInformation->h_CcNode;
ASSERT_COND(p_NodePtrOnCurrentMdfNode);

        /* Search in the previous node which exact index points on this
         current modified node for getting AD */
        for (i = 0; i < p_NodePtrOnCurrentMdfNode->numOfKeys + 1; i++)
        {
            if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
                    == e_FM_PCD_CC)
            {
                if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
                        == (t_Handle)p_CrntMdfNode)
                {
                    /* The pointing AD is either the manip AD, the stats AD,
                     or the plain per-entry AD, in that priority. */
                    if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
                        p_AdTablePtOnCrntCurrentMdfNode = p_CrntMdfNode->h_Ad;
                    else if (p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj)
                        p_AdTablePtOnCrntCurrentMdfNode =
                                p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].p_StatsObj->h_StatsAd;
                    else
                        p_AdTablePtOnCrntCurrentMdfNode =
                                PTR_MOVE(p_NodePtrOnCurrentMdfNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE);

                    memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
                    ccNodeInfo.h_CcNode = p_AdTablePtOnCrntCurrentMdfNode;
                    EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);

                    if (!(*p_NextEngineParams))
                        *p_NextEngineParams =
                                &p_NodePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
                }
            }
        }

        ASSERT_COND(i != p_NodePtrOnCurrentMdfNode->numOfKeys);
    }
}

/* For every tree (CC root) that points at the node being modified, queues
 * the tree's pointing AD on 'h_OldLst' and remembers the first matching
 * next-engine params. (Definition continues on the next source line.) */
static void UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(
        t_FmPcdCcNode *p_CrntMdfNode, t_List *h_OldLst,
        t_FmPcdCcNextEngineParams **p_NextEngineParams)
{
    t_CcNodeInformation *p_CcNodeInformation;
    t_FmPcdCcTree *p_TreePtrOnCurrentMdfNode = NULL;
    t_List *p_Pos;
    int i = 0;
    t_Handle p_AdTableTmp;
    t_CcNodeInformation ccNodeInfo;

    NCSW_LIST_FOR_EACH(p_Pos, &p_CrntMdfNode->ccTreeIdLst)
    {
        p_CcNodeInformation = CC_NODE_F_OBJECT(p_Pos);
        p_TreePtrOnCurrentMdfNode =
                (t_FmPcdCcTree *)p_CcNodeInformation->h_CcNode;

        ASSERT_COND(p_TreePtrOnCurrentMdfNode);

        /*search in the trees which exact index points on this current
         modified node for getting AD */
        for (i = 0; i < p_TreePtrOnCurrentMdfNode->numOfEntries; i++)
        {
            if
(p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
                    == e_FM_PCD_CC)
            {
                if (p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode
                        == (t_Handle)p_CrntMdfNode)
                {
                    /* Tree ADs live at fixed offsets from the tree base. */
                    p_AdTableTmp =
                            UINT_TO_PTR(p_TreePtrOnCurrentMdfNode->ccTreeBaseAddr
                                    + i*FM_PCD_CC_AD_ENTRY_SIZE);
                    memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation));
                    ccNodeInfo.h_CcNode = p_AdTableTmp;
                    EnqueueNodeInfoToRelevantLst(h_OldLst, &ccNodeInfo, NULL);

                    if (!(*p_NextEngineParams))
                        *p_NextEngineParams =
                                &p_TreePtrOnCurrentMdfNode->keyAndNextEngineParams[i].nextEngineParams;
                }
            }
        }

        ASSERT_COND(i == p_TreePtrOnCurrentMdfNode->numOfEntries);
    }
}

/* Common pre-processing for a node/tree modification: snapshots the current
 * key & next-engine table into a freshly allocated
 * t_FmPcdModifyCcKeyAdditionalParams, inserting a gap (ADD) or dropping the
 * entry (REMOVE) at 'keyIndex'. Optional checks reject TTL/hop-limit and
 * hash-indexed nodes. Returns NULL on any error.
 * (Definition continues on the next source line.) */
static t_FmPcdModifyCcKeyAdditionalParams * ModifyNodeCommonPart(
        t_Handle h_FmPcdCcNodeOrTree, uint16_t keyIndex,
        e_ModifyState modifyState, bool ttlCheck, bool hashCheck, bool tree)
{
    t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams;
    int i = 0, j = 0;
    bool wasUpdate = FALSE;
    t_FmPcdCcNode *p_CcNode = NULL;
    t_FmPcdCcTree *p_FmPcdCcTree;
    uint16_t numOfKeys;
    t_FmPcdCcKeyAndNextEngineParams *p_KeyAndNextEngineParams;

    SANITY_CHECK_RETURN_VALUE(h_FmPcdCcNodeOrTree, E_INVALID_HANDLE, NULL);

    if (!tree)
    {
        p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNodeOrTree;
        numOfKeys = p_CcNode->numOfKeys;

        /* node has to be pointed by another node or tree */

        p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc(
                sizeof(t_FmPcdCcKeyAndNextEngineParams) * (numOfKeys + 1));
        if (!p_KeyAndNextEngineParams)
        {
            REPORT_ERROR(MAJOR, E_NO_MEMORY,
                         ("Next engine and required action structure"));
            return NULL;
        }

        memcpy(p_KeyAndNextEngineParams, p_CcNode->keyAndNextEngineParams,
               (numOfKeys + 1) * sizeof(t_FmPcdCcKeyAndNextEngineParams));

        if (ttlCheck)
        {
            if ((p_CcNode->parseCode == CC_PC_FF_IPV4TTL)
                    || (p_CcNode->parseCode == CC_PC_FF_IPV6HOP_LIMIT))
            {
                XX_Free(p_KeyAndNextEngineParams);
                REPORT_ERROR(MAJOR, E_INVALID_VALUE,
                             ("nodeId of CC_PC_FF_IPV4TTL or CC_PC_FF_IPV6HOP_LIMIT can not be used for this operation"));
return NULL; } } if (hashCheck) { if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) { XX_Free(p_KeyAndNextEngineParams); REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("nodeId of CC_PC_GENERIC_IC_HASH_INDEXED can not be used for this operation")); return NULL; } } } else { p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcNodeOrTree; numOfKeys = p_FmPcdCcTree->numOfEntries; p_KeyAndNextEngineParams = (t_FmPcdCcKeyAndNextEngineParams *)XX_Malloc( sizeof(t_FmPcdCcKeyAndNextEngineParams) * FM_PCD_MAX_NUM_OF_CC_GROUPS); if (!p_KeyAndNextEngineParams) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Next engine and required action structure")); return NULL; } memcpy(p_KeyAndNextEngineParams, p_FmPcdCcTree->keyAndNextEngineParams, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); } p_FmPcdModifyCcKeyAdditionalParams = (t_FmPcdModifyCcKeyAdditionalParams *)XX_Malloc( sizeof(t_FmPcdModifyCcKeyAdditionalParams)); if (!p_FmPcdModifyCcKeyAdditionalParams) { XX_Free(p_KeyAndNextEngineParams); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Allocation of internal data structure FAILED")); return NULL; } memset(p_FmPcdModifyCcKeyAdditionalParams, 0, sizeof(t_FmPcdModifyCcKeyAdditionalParams)); p_FmPcdModifyCcKeyAdditionalParams->h_CurrentNode = h_FmPcdCcNodeOrTree; p_FmPcdModifyCcKeyAdditionalParams->savedKeyIndex = keyIndex; while (i < numOfKeys) { if ((j == keyIndex) && !wasUpdate) { if (modifyState == e_MODIFY_STATE_ADD) j++; else if (modifyState == e_MODIFY_STATE_REMOVE) i++; wasUpdate = TRUE; } else { memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j], p_KeyAndNextEngineParams + i, sizeof(t_FmPcdCcKeyAndNextEngineParams)); i++; j++; } } if (keyIndex == numOfKeys) { if (modifyState == e_MODIFY_STATE_ADD) j++; } memcpy(&p_FmPcdModifyCcKeyAdditionalParams->keyAndNextEngineParams[j], p_KeyAndNextEngineParams + numOfKeys, sizeof(t_FmPcdCcKeyAndNextEngineParams)); XX_Free(p_KeyAndNextEngineParams); return p_FmPcdModifyCcKeyAdditionalParams; } static t_Error 
UpdatePtrWhichPointOnCrntMdfNode( t_FmPcdCcNode *p_CcNode, t_FmPcdModifyCcKeyAdditionalParams *p_FmPcdModifyCcKeyAdditionalParams, t_List *h_OldLst, t_List *h_NewLst) { t_FmPcdCcNextEngineParams *p_NextEngineParams = NULL; t_CcNodeInformation ccNodeInfo = { 0 }; t_Handle h_NewAd; t_Handle h_OrigAd = NULL; /* Building a list of all action descriptors that point to the previous node */ if (!NCSW_LIST_IsEmpty(&p_CcNode->ccPrevNodesLst)) UpdateAdPtrOfNodesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst, &p_NextEngineParams); if (!NCSW_LIST_IsEmpty(&p_CcNode->ccTreeIdLst)) UpdateAdPtrOfTreesWhichPointsOnCrntMdfNode(p_CcNode, h_OldLst, &p_NextEngineParams); /* This node must be found as next engine of one of its previous nodes or trees*/ if (p_NextEngineParams) { /* Building a new action descriptor that points to the modified node */ h_NewAd = GetNewAd(p_CcNode, FALSE); if (!h_NewAd) RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); MemSet8(h_NewAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); h_OrigAd = p_CcNode->h_Ad; BuildNewAd(h_NewAd, p_FmPcdModifyCcKeyAdditionalParams, p_CcNode, p_NextEngineParams); ccNodeInfo.h_CcNode = h_NewAd; EnqueueNodeInfoToRelevantLst(h_NewLst, &ccNodeInfo, NULL); if (p_NextEngineParams->h_Manip && !h_OrigAd) FmPcdManipUpdateOwner(p_NextEngineParams->h_Manip, FALSE); } return E_OK; } static void UpdateCcRootOwner(t_FmPcdCcTree *p_FmPcdCcTree, bool add) { ASSERT_COND(p_FmPcdCcTree); /* this routine must be protected by the calling routine! 
*/ if (add) p_FmPcdCcTree->owners++; else { ASSERT_COND(p_FmPcdCcTree->owners); p_FmPcdCcTree->owners--; } } static t_Error CheckAndSetManipParamsWithCcNodeParams(t_FmPcdCcNode *p_CcNode) { t_Error err = E_OK; int i = 0; for (i = 0; i < p_CcNode->numOfKeys; i++) { if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip) { err = FmPcdManipCheckParamsWithCcNodeParams( p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip, (t_Handle)p_CcNode); if (err) return err; } } return err; } static t_Error ValidateAndCalcStatsParams(t_FmPcdCcNode *p_CcNode, t_FmPcdCcNodeParams *p_CcNodeParam, uint32_t *p_NumOfRanges, uint32_t *p_CountersArraySize) { e_FmPcdCcStatsMode statisticsMode = p_CcNode->statisticsMode; uint32_t i; UNUSED(p_CcNodeParam); switch (statisticsMode) { case e_FM_PCD_CC_STATS_MODE_NONE: for (i = 0; i < p_CcNode->numOfKeys; i++) if (p_CcNodeParam->keysParams.keyParams[i].ccNextEngineParams.statisticsEn) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Statistics cannot be enabled for key %d when statistics mode was set to 'NONE'", i)); return E_OK; case e_FM_PCD_CC_STATS_MODE_FRAME: case e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME: *p_NumOfRanges = 1; *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE; return E_OK; #if (DPAA_VERSION >= 11) case e_FM_PCD_CC_STATS_MODE_RMON: { uint16_t *p_FrameLengthRanges = p_CcNodeParam->keysParams.frameLengthRanges; uint32_t i; if (p_FrameLengthRanges[0] <= 0) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode")); if (p_FrameLengthRanges[0] == 0xFFFF) { *p_NumOfRanges = 1; *p_CountersArraySize = 2 * FM_PCD_CC_STATS_COUNTER_SIZE; return E_OK; } for (i = 1; i < FM_PCD_CC_STATS_MAX_NUM_OF_FLR; i++) { if (p_FrameLengthRanges[i - 1] >= p_FrameLengthRanges[i]) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Frame length range must be larger at least by 1 from preceding range")); /* Stop when last range is reached */ if (p_FrameLengthRanges[i] == 0xFFFF) break; } if ((i >= FM_PCD_CC_STATS_MAX_NUM_OF_FLR) || (p_FrameLengthRanges[i] 
!= 0xFFFF)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Last Frame length range must be 0xFFFF")); *p_NumOfRanges = i + 1; /* Allocate an extra counter for byte count, as counters array always begins with byte count */ *p_CountersArraySize = (*p_NumOfRanges + 1) * FM_PCD_CC_STATS_COUNTER_SIZE; } return E_OK; #endif /* (DPAA_VERSION >= 11) */ default: RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Statistics mode")); } } static t_Error CheckParams(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam, t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc) { int tmp = 0; t_FmPcdCcKeyParams *p_KeyParams; t_Error err; uint32_t requiredAction = 0; /* Validate statistics parameters */ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam, &(p_CcNode->numOfStatsFLRs), &(p_CcNode->countersArraySize)); if (err) RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); /* Validate next engine parameters on Miss */ err = ValidateNextEngineParams( h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_CcNode->statisticsMode); if (err) RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, sizeof(t_FmPcdCcNextEngineParams)); p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction = requiredAction; if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine == e_FM_PCD_CC) && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } for (tmp = 0; tmp < 
p_CcNode->numOfKeys; tmp++) { p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; if (!p_KeyParams->p_Key) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("p_Key is not initialized")); err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams, p_CcNode->statisticsMode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); err = UpdateGblMask(p_CcNode, p_CcNodeParam->keysParams.keySize, p_KeyParams->p_Mask); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); if (p_KeyParams->ccNextEngineParams.h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_KeyParams->ccNextEngineParams, &requiredAction); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } /* Store 'key' parameters - key, mask (if passed by the user) */ memcpy(p_CcNode->keyAndNextEngineParams[tmp].key, p_KeyParams->p_Key, p_CcNodeParam->keysParams.keySize); if (p_KeyParams->p_Mask) memcpy(p_CcNode->keyAndNextEngineParams[tmp].mask, p_KeyParams->p_Mask, p_CcNodeParam->keysParams.keySize); else memset((void *)(p_CcNode->keyAndNextEngineParams[tmp].mask), 0xFF, p_CcNodeParam->keysParams.keySize); /* Store next engine parameters */ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } } if (p_CcNode->maxNumOfKeys) { if (p_CcNode->maxNumOfKeys < p_CcNode->numOfKeys) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("Number of keys exceed the provided maximal number of keys")); } *isKeyTblAlloc = TRUE; return E_OK; } static t_Error Ipv4TtlOrIpv6HopLimitCheckParams( t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam, t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc) { 
int tmp = 0; t_FmPcdCcKeyParams *p_KeyParams; t_Error err; uint8_t key = 0x01; uint32_t requiredAction = 0; if (p_CcNode->numOfKeys != 1) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'numOfKeys' is 1")); if ((p_CcNodeParam->keysParams.maxNumOfKeys) && (p_CcNodeParam->keysParams.maxNumOfKeys != 1)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT the maximal supported 'maxNumOfKeys' is 1")); /* Validate statistics parameters */ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam, &(p_CcNode->numOfStatsFLRs), &(p_CcNode->countersArraySize)); if (err) RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); err = ValidateNextEngineParams( h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_CcNodeParam->keysParams.statisticsMode); if (err) RETURN_ERROR(MAJOR, err, ("For this node MissNextEngineParams are not valid")); if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, &requiredAction); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } memcpy(&p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, sizeof(t_FmPcdCcNextEngineParams)); p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].requiredAction = requiredAction; if ((p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.nextEngine == e_FM_PCD_CC) && p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_CcNode->keyAndNextEngineParams[p_CcNode->numOfKeys].nextEngineParams.params.ccParams.h_CcNode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) { p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; if (p_KeyParams->p_Mask) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For node of the 
type IPV4_TTL or IPV6_HOP_LIMIT p_Mask can not be initialized")); if (memcmp(p_KeyParams->p_Key, &key, 1) != 0) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For node of the type IPV4_TTL or IPV6_HOP_LIMIT p_Key has to be 1")); err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams, p_CcNode->statisticsMode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); if (p_KeyParams->ccNextEngineParams.h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_KeyParams->ccNextEngineParams, &requiredAction); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } /* Store 'key' parameters - key (fixed to 0x01), key size of 1 byte and full mask */ p_CcNode->keyAndNextEngineParams[tmp].key[0] = key; p_CcNode->keyAndNextEngineParams[tmp].mask[0] = 0xFF; /* Store NextEngine parameters */ memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) && p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; } *isKeyTblAlloc = FALSE; return E_OK; } static t_Error IcHashIndexedCheckParams(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam, t_FmPcdCcNode *p_CcNode, bool *isKeyTblAlloc) { int tmp = 0, countOnes = 0; t_FmPcdCcKeyParams *p_KeyParams; t_Error err; uint16_t glblMask = p_CcNodeParam->extractCcParams.extractNonHdr.icIndxMask; uint16_t countMask = (uint16_t)(glblMask >> 4); uint32_t requiredAction = 0; if (glblMask & 0x000f) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("icIndxMask has to be with last nibble 0")); while (countMask) { countOnes++; countMask = (uint16_t)(countMask >> 1); } if (!POWER_OF_2(p_CcNode->numOfKeys)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For Node of the 
type INDEXED numOfKeys has to be powerOfTwo")); if (p_CcNode->numOfKeys != ((uint32_t)1 << countOnes)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED numOfKeys has to be powerOfTwo")); if (p_CcNodeParam->keysParams.maxNumOfKeys && (p_CcNodeParam->keysParams.maxNumOfKeys != p_CcNode->numOfKeys)) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For Node of the type INDEXED 'maxNumOfKeys' should be 0 or equal 'numOfKeys'")); /* Validate statistics parameters */ err = ValidateAndCalcStatsParams(p_CcNode, p_CcNodeParam, &(p_CcNode->numOfStatsFLRs), &(p_CcNode->countersArraySize)); if (err) RETURN_ERROR(MAJOR, err, ("Invalid statistics parameters")); err = ValidateNextEngineParams( h_FmPcd, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_CcNode->statisticsMode); if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED) RETURN_ERROR( MAJOR, err, ("MissNextEngineParams for the node of the type IC_INDEX_HASH has to be UnInitialized")); for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) { p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; if (p_KeyParams->p_Mask || p_KeyParams->p_Key) RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("For Node of the type IC_HASH_INDEXED p_Key or p_Mask has to be NULL")); if ((glblMask & (tmp * 16)) == (tmp * 16)) { err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams, p_CcNode->statisticsMode); if (err) RETURN_ERROR( MAJOR, err, ("This index has to be initialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask ")); if (p_KeyParams->ccNextEngineParams.h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_KeyParams->ccNextEngineParams, &requiredAction); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); p_CcNode->keyAndNextEngineParams[tmp].requiredAction = requiredAction; } memcpy(&p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams, &p_KeyParams->ccNextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); if ((p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) 
&& p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode); if (err) RETURN_ERROR(MAJOR, err, (NO_MSG)); } } else { err = ValidateNextEngineParams(h_FmPcd, &p_KeyParams->ccNextEngineParams, p_CcNode->statisticsMode); if (GET_ERROR_TYPE(err) != E_NOT_SUPPORTED) RETURN_ERROR( MAJOR, err, ("This index has to be UnInitialized for the node of the type IC_INDEX_HASH according to settings of GlobalMask")); } } *isKeyTblAlloc = FALSE; glblMask = htobe16(glblMask); memcpy(PTR_MOVE(p_CcNode->p_GlblMask, 2), &glblMask, 2); return E_OK; } static t_Error ModifyNextEngineParamNode( t_Handle h_FmPcd, t_Handle h_FmPcdCcNode, uint16_t keyIndex, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode; t_FmPcd *p_FmPcd; t_List h_OldPointersLst, h_NewPointersLst; t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); if (keyIndex >= p_CcNode->numOfKeys) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("keyIndex > previously cleared last index + 1")); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; INIT_LIST(&h_OldPointersLst); INIT_LIST(&h_NewPointersLst); p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex, e_MODIFY_STATE_CHANGE, FALSE, FALSE, FALSE); if (!p_ModifyKeyParams) RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); if (p_CcNode->maxNumOfKeys && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock)) { XX_Free(p_ModifyKeyParams); return ERROR_CODE(E_BUSY); } err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex, p_FmPcdCcNextEngineParams, &h_OldPointersLst, &h_NewPointersLst, p_ModifyKeyParams); if (err) { XX_Free(p_ModifyKeyParams); if (p_CcNode->maxNumOfKeys) RELEASE_LOCK(p_FmPcd->shadowLock); RETURN_ERROR(MAJOR, err, NO_MSG); } err = DoDynamicChange(p_FmPcd, 
&h_OldPointersLst, &h_NewPointersLst, p_ModifyKeyParams, FALSE); if (p_CcNode->maxNumOfKeys) RELEASE_LOCK(p_FmPcd->shadowLock); return err; } static t_Error FindKeyIndex(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, uint16_t *p_KeyIndex) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint8_t tmpMask[FM_PCD_MAX_SIZE_OF_KEY]; uint16_t i; ASSERT_COND(p_Key); ASSERT_COND(p_KeyIndex); ASSERT_COND(keySize < FM_PCD_MAX_SIZE_OF_KEY); if (keySize != p_CcNode->userSizeOfExtraction) RETURN_ERROR( MINOR, E_INVALID_VALUE, ("Key size doesn't match the extraction size of the node")); /* If user didn't pass a mask for this key, we'll look for full extraction mask */ if (!p_Mask) memset(tmpMask, 0xFF, keySize); for (i = 0; i < p_CcNode->numOfKeys; i++) { /* Comparing received key */ if (memcmp(p_Key, p_CcNode->keyAndNextEngineParams[i].key, keySize) == 0) { if (p_Mask) { /* If a user passed a mask for this key, it must match to the existing key's mask for a correct match */ if (memcmp(p_Mask, p_CcNode->keyAndNextEngineParams[i].mask, keySize) == 0) { *p_KeyIndex = i; return E_OK; } } else { /* If user didn't pass a mask for this key, check if the existing key mask is full extraction */ if (memcmp(tmpMask, p_CcNode->keyAndNextEngineParams[i].mask, keySize) == 0) { *p_KeyIndex = i; return E_OK; } } } } return ERROR_CODE(E_NOT_FOUND); } static t_Error CalcAndUpdateCcShadow(t_FmPcdCcNode *p_CcNode, bool isKeyTblAlloc, uint32_t *p_MatchTableSize, uint32_t *p_AdTableSize) { uint32_t shadowSize; t_Error err; /* Calculate keys table maximal size - each entry consists of a key and a mask, (if local mask support is requested) */ *p_MatchTableSize = p_CcNode->ccKeySizeAccExtraction * sizeof(uint8_t) * p_CcNode->maxNumOfKeys; if (p_CcNode->maskSupport) *p_MatchTableSize *= 2; /* Calculate next action descriptors table, including one more entry for miss */ *p_AdTableSize = (uint32_t)((p_CcNode->maxNumOfKeys + 1) * FM_PCD_CC_AD_ENTRY_SIZE); /* Calculate maximal 
shadow size of this node. All shadow structures will be used for runtime modifications host command. If keys table was allocated for this node, the keys table and next engines table may be modified in run time (entries added or removed), so shadow tables are requires. Otherwise, the only supported runtime modification is a specific next engine update and this requires shadow memory of a single AD */ /* Shadow size should be enough to hold the following 3 structures: * 1 - an action descriptor */ shadowSize = FM_PCD_CC_AD_ENTRY_SIZE; /* 2 - keys match table, if was allocated for the current node */ if (isKeyTblAlloc) shadowSize += *p_MatchTableSize; /* 3 - next action descriptors table */ shadowSize += *p_AdTableSize; /* Update shadow to the calculated size */ err = FmPcdUpdateCcShadow(p_CcNode->h_FmPcd, (uint32_t)shadowSize, FM_PCD_CC_AD_TABLE_ALIGN); if (err != E_OK) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node shadow")); } return E_OK; } static t_Error AllocStatsObjs(t_FmPcdCcNode *p_CcNode) { t_FmPcdStatsObj *p_StatsObj; t_Handle h_FmMuram, h_StatsAd, h_StatsCounters; uint32_t i; h_FmMuram = FmPcdGetMuramHandle(p_CcNode->h_FmPcd); if (!h_FmMuram) RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); /* Allocate statistics ADs and statistics counter. 
An extra pair (AD + counters) will be allocated to support runtime modifications */ for (i = 0; i < p_CcNode->maxNumOfKeys + 2; i++) { /* Allocate list object structure */ p_StatsObj = XX_Malloc(sizeof(t_FmPcdStatsObj)); if (!p_StatsObj) { FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("Statistics object")); } memset(p_StatsObj, 0, sizeof(t_FmPcdStatsObj)); /* Allocate statistics AD from MURAM */ h_StatsAd = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); if (!h_StatsAd) { FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); XX_Free(p_StatsObj); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics ADs")); } MemSet8(h_StatsAd, 0, FM_PCD_CC_AD_ENTRY_SIZE); /* Allocate statistics counters from MURAM */ h_StatsCounters = (t_Handle)FM_MURAM_AllocMem( h_FmMuram, p_CcNode->countersArraySize, FM_PCD_CC_AD_TABLE_ALIGN); if (!h_StatsCounters) { FreeStatObjects(&p_CcNode->availableStatsLst, h_FmMuram); FM_MURAM_FreeMem(h_FmMuram, h_StatsAd); XX_Free(p_StatsObj); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics counters")); } MemSet8(h_StatsCounters, 0, p_CcNode->countersArraySize); p_StatsObj->h_StatsAd = h_StatsAd; p_StatsObj->h_StatsCounters = h_StatsCounters; EnqueueStatsObj(&p_CcNode->availableStatsLst, p_StatsObj); } return E_OK; } static t_Error MatchTableGetKeyStatistics( t_FmPcdCcNode *p_CcNode, uint16_t keyIndex, t_FmPcdCcKeyStatistics *p_KeyStatistics) { uint32_t *p_StatsCounters, i; if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this match table")); if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for this key")); memset(p_KeyStatistics, 0, sizeof(t_FmPcdCcKeyStatistics)); p_StatsCounters = p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters; 
ASSERT_COND(p_StatsCounters); p_KeyStatistics->byteCount = GET_UINT32(*p_StatsCounters); for (i = 1; i <= p_CcNode->numOfStatsFLRs; i++) { p_StatsCounters = PTR_MOVE(p_StatsCounters, FM_PCD_CC_STATS_COUNTER_SIZE); p_KeyStatistics->frameCount += GET_UINT32(*p_StatsCounters); #if (DPAA_VERSION >= 11) p_KeyStatistics->frameLengthRangeCount[i - 1] = GET_UINT32(*p_StatsCounters); #endif /* (DPAA_VERSION >= 11) */ } return E_OK; } static t_Error MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNode *p_CcNode, t_FmPcdCcNodeParams *p_CcNodeParam) { t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; t_FmPcdCcNode *p_FmPcdCcNextNode; t_Error err = E_OK; uint32_t tmp, keySize; bool glblMask = FALSE; t_FmPcdCcKeyParams *p_KeyParams; t_Handle h_FmMuram, p_KeysMatchTblTmp, p_AdTableTmp; #if (DPAA_VERSION >= 11) t_Handle h_StatsFLRs; #endif /* (DPAA_VERSION >= 11) */ bool fullField = FALSE; ccPrivateInfo_t icCode = CC_PRIVATE_INFO_NONE; bool isKeyTblAlloc, fromIc = FALSE; uint32_t matchTableSize, adTableSize; t_CcNodeInformation ccNodeInfo, *p_CcInformation; t_FmPcdStatsObj *p_StatsObj; t_FmPcdCcStatsParams statsParams = { 0 }; t_Handle h_Manip; ASSERT_COND(h_FmPcd); ASSERT_COND(p_CcNode); ASSERT_COND(p_CcNodeParam); p_CcNode->p_GlblMask = (t_Handle)XX_Malloc( CC_GLBL_MASK_SIZE * sizeof(uint8_t)); memset(p_CcNode->p_GlblMask, 0, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); p_CcNode->h_FmPcd = h_FmPcd; p_CcNode->numOfKeys = p_CcNodeParam->keysParams.numOfKeys; p_CcNode->maxNumOfKeys = p_CcNodeParam->keysParams.maxNumOfKeys; p_CcNode->maskSupport = p_CcNodeParam->keysParams.maskSupport; p_CcNode->statisticsMode = p_CcNodeParam->keysParams.statisticsMode; /* For backward compatibility - even if statistics mode is nullified, we'll fix it to frame mode so we can support per-key request for statistics using 'statisticsEn' in next engine parameters */ if (!p_CcNode->maxNumOfKeys && (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)) p_CcNode->statisticsMode = e_FM_PCD_CC_STATS_MODE_FRAME; h_FmMuram = 
FmPcdGetMuramHandle(h_FmPcd); if (!h_FmMuram) RETURN_ERROR(MAJOR, E_INVALID_HANDLE, ("FM MURAM")); INIT_LIST(&p_CcNode->ccPrevNodesLst); INIT_LIST(&p_CcNode->ccTreeIdLst); INIT_LIST(&p_CcNode->ccTreesLst); INIT_LIST(&p_CcNode->availableStatsLst); p_CcNode->h_Spinlock = XX_InitSpinlock(); if (!p_CcNode->h_Spinlock) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("CC node spinlock")); } if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_BY_HDR) && ((p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv4) || (p_CcNodeParam->extractCcParams.extractByHdr.hdr == HEADER_TYPE_IPv6)) && (p_CcNodeParam->extractCcParams.extractByHdr.type == e_FM_PCD_EXTRACT_FULL_FIELD) && ((p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv6 == NET_HEADER_FIELD_IPv6_HOP_LIMIT) || (p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField.ipv4 == NET_HEADER_FIELD_IPv4_TTL))) { err = Ipv4TtlOrIpv6HopLimitCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); glblMask = FALSE; } else if ((p_CcNodeParam->extractCcParams.type == e_FM_PCD_EXTRACT_NON_HDR) && ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_KEY) || (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_HASH) || (p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID))) { if ((p_CcNodeParam->extractCcParams.extractNonHdr.src == e_FM_PCD_EXTRACT_FROM_FLOW_ID) && (p_CcNodeParam->extractCcParams.extractNonHdr.offset != 0)) { DeleteNode(p_CcNode); RETURN_ERROR( MAJOR, E_INVALID_VALUE, ("In the case of the extraction from e_FM_PCD_EXTRACT_FROM_FLOW_ID offset has to be 0")); } icCode = IcDefineCode(p_CcNodeParam); fromIc = TRUE; if (icCode == CC_PRIVATE_INFO_NONE) { DeleteNode(p_CcNode); RETURN_ERROR( MAJOR, E_INVALID_STATE, ("user asked extraction from IC and field in internal context or action wasn't initialized in the right way")); } if ((icCode == 
CC_PRIVATE_INFO_IC_DEQ_FQID_INDEX_LOOKUP) || (icCode == CC_PRIVATE_INFO_IC_HASH_INDEX_LOOKUP)) { err = IcHashIndexedCheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); glblMask = TRUE; } else { err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); if (p_CcNode->glblMaskSize) glblMask = TRUE; } } else { err = CheckParams(h_FmPcd, p_CcNodeParam, p_CcNode, &isKeyTblAlloc); if (p_CcNode->glblMaskSize) glblMask = TRUE; } if (err) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, err, NO_MSG); } switch (p_CcNodeParam->extractCcParams.type) { case (e_FM_PCD_EXTRACT_BY_HDR): switch (p_CcNodeParam->extractCcParams.extractByHdr.type) { case (e_FM_PCD_EXTRACT_FULL_FIELD): p_CcNode->parseCode = GetFullFieldParseCode( p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField); GetSizeHeaderField( p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fullField, &p_CcNode->sizeOfExtraction); fullField = TRUE; if ((p_CcNode->parseCode != CC_PC_FF_TCI1) && (p_CcNode->parseCode != CC_PC_FF_TCI2) && (p_CcNode->parseCode != CC_PC_FF_MPLS1) && (p_CcNode->parseCode != CC_PC_FF_MPLS_LAST) && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC1) && (p_CcNode->parseCode != CC_PC_FF_IPV4IPTOS_TC2) && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC1_IPV6FLOW1) && (p_CcNode->parseCode != CC_PC_FF_IPDSCP) && (p_CcNode->parseCode != CC_PC_FF_IPTOS_IPV6TC2_IPV6FLOW2) && glblMask) { glblMask = FALSE; p_CcNode->glblMaskSize = 4; p_CcNode->lclMask = TRUE; } break; case (e_FM_PCD_EXTRACT_FROM_HDR): p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.size; p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromHdr.offset; p_CcNode->parseCode = 
GetPrParseCode( p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex, p_CcNode->offset, glblMask, &p_CcNode->prsArrayOffset); break; case (e_FM_PCD_EXTRACT_FROM_FIELD): p_CcNode->offset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.offset; p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.size; p_CcNode->parseCode = GetFieldParseCode( p_CcNodeParam->extractCcParams.extractByHdr.hdr, p_CcNodeParam->extractCcParams.extractByHdr.extractByHdrType.fromField.field, p_CcNode->offset, &p_CcNode->prsArrayOffset, p_CcNodeParam->extractCcParams.extractByHdr.hdrIndex); break; default: DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); } break; case (e_FM_PCD_EXTRACT_NON_HDR): /* get the field code for the generic extract */ p_CcNode->sizeOfExtraction = p_CcNodeParam->extractCcParams.extractNonHdr.size; p_CcNode->offset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; p_CcNode->userOffset = p_CcNodeParam->extractCcParams.extractNonHdr.offset; p_CcNode->parseCode = GetGenParseCode( p_CcNodeParam->extractCcParams.extractNonHdr.src, p_CcNode->offset, glblMask, &p_CcNode->prsArrayOffset, fromIc, icCode); if (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED) { if ((p_CcNode->offset + p_CcNode->sizeOfExtraction) > 8) { DeleteNode(p_CcNode); RETURN_ERROR( MAJOR, E_INVALID_SELECTION, ("when node of the type CC_PC_GENERIC_IC_HASH_INDEXED offset + size can not be bigger then size of HASH 64 bits (8 bytes)")); } } if ((p_CcNode->parseCode == CC_PC_GENERIC_IC_GMASK) || (p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED)) { p_CcNode->offset += p_CcNode->prsArrayOffset; p_CcNode->prsArrayOffset = 0; } break; default: DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_SELECTION, NO_MSG); } if (p_CcNode->parseCode == CC_PC_ILLEGAL) { 
DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("illegal extraction type")); } if ((p_CcNode->sizeOfExtraction > FM_PCD_MAX_SIZE_OF_KEY) || !p_CcNode->sizeOfExtraction) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("sizeOfExatrction can not be greater than 56 and not 0")); } if (p_CcNodeParam->keysParams.keySize != p_CcNode->sizeOfExtraction) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); } p_CcNode->userSizeOfExtraction = p_CcNode->sizeOfExtraction; if (!glblMask) memset(p_CcNode->p_GlblMask, 0xff, CC_GLBL_MASK_SIZE * sizeof(uint8_t)); err = CheckAndSetManipParamsWithCcNodeParams(p_CcNode); if (err != E_OK) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("keySize has to be equal to sizeOfExtraction")); } /* Calculating matching table entry size by rounding up the user-defined size of extraction to valid entry size */ GetCcExtractKeySize(p_CcNode->sizeOfExtraction, &p_CcNode->ccKeySizeAccExtraction); /* If local mask is used, it is stored next to each key in the keys match table */ if (p_CcNode->lclMask) keySize = (uint32_t)(2 * p_CcNode->ccKeySizeAccExtraction); else keySize = p_CcNode->ccKeySizeAccExtraction; /* Update CC shadow with maximal size required by this node */ if (p_CcNode->maxNumOfKeys) { err = CalcAndUpdateCcShadow(p_CcNode, isKeyTblAlloc, &matchTableSize, &adTableSize); if (err != E_OK) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, err, NO_MSG); } p_CcNode->keysMatchTableMaxSize = matchTableSize; if (p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_NONE) { err = AllocStatsObjs(p_CcNode); if (err != E_OK) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, err, NO_MSG); } } /* If manipulation will be initialized before this node, it will use the table descriptor in the AD table of previous node and this node will need an extra AD as his table descriptor. 
*/ p_CcNode->h_TmpAd = (t_Handle)FM_MURAM_AllocMem( h_FmMuram, FM_PCD_CC_AD_ENTRY_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); if (!p_CcNode->h_TmpAd) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC action descriptor")); } } else { matchTableSize = (uint32_t)(keySize * sizeof(uint8_t) * (p_CcNode->numOfKeys + 1)); adTableSize = (uint32_t)(FM_PCD_CC_AD_ENTRY_SIZE * (p_CcNode->numOfKeys + 1)); } #if (DPAA_VERSION >= 11) switch (p_CcNode->statisticsMode) { case e_FM_PCD_CC_STATS_MODE_RMON: /* If RMON statistics or RMON conditional statistics modes are requested, allocate frame length ranges array */ p_CcNode->h_StatsFLRs = FM_MURAM_AllocMem( h_FmMuram, (uint32_t)(p_CcNode->numOfStatsFLRs) * FM_PCD_CC_STATS_FLR_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); if (!p_CcNode->h_StatsFLRs) { DeleteNode(p_CcNode); RETURN_ERROR( MAJOR, E_NO_MEMORY, ("MURAM allocation for CC frame length ranges array")); } /* Initialize using value received from the user */ for (tmp = 0; tmp < p_CcNode->numOfStatsFLRs; tmp++) { uint16_t flr = cpu_to_be16(p_CcNodeParam->keysParams.frameLengthRanges[tmp]); h_StatsFLRs = PTR_MOVE(p_CcNode->h_StatsFLRs, tmp * FM_PCD_CC_STATS_FLR_SIZE); MemCpy8(h_StatsFLRs, &flr, FM_PCD_CC_STATS_FLR_SIZE); } break; default: break; } #endif /* (DPAA_VERSION >= 11) */ /* Allocate keys match table. Not required for some CC nodes, for example for IPv4 TTL identification, IPv6 hop count identification, etc. 
*/ if (isKeyTblAlloc) { p_CcNode->h_KeysMatchTable = (t_Handle)FM_MURAM_AllocMem( h_FmMuram, matchTableSize, FM_PCD_CC_KEYS_MATCH_TABLE_ALIGN); if (!p_CcNode->h_KeysMatchTable) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node key match table")); } MemSet8((uint8_t *)p_CcNode->h_KeysMatchTable, 0, matchTableSize); } /* Allocate action descriptors table */ p_CcNode->h_AdTable = (t_Handle)FM_MURAM_AllocMem(h_FmMuram, adTableSize, FM_PCD_CC_AD_TABLE_ALIGN); if (!p_CcNode->h_AdTable) { DeleteNode(p_CcNode); RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC node action descriptors table")); } MemSet8((uint8_t *)p_CcNode->h_AdTable, 0, adTableSize); p_KeysMatchTblTmp = p_CcNode->h_KeysMatchTable; p_AdTableTmp = p_CcNode->h_AdTable; /* For each key, create the key and the next step AD */ for (tmp = 0; tmp < p_CcNode->numOfKeys; tmp++) { p_KeyParams = &p_CcNodeParam->keysParams.keyParams[tmp]; if (p_KeysMatchTblTmp) { /* Copy the key */ MemCpy8((void*)p_KeysMatchTblTmp, p_KeyParams->p_Key, p_CcNode->sizeOfExtraction); /* Copy the key mask or initialize it to 0xFF..F */ if (p_CcNode->lclMask && p_KeyParams->p_Mask) { MemCpy8(PTR_MOVE(p_KeysMatchTblTmp, p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ p_KeyParams->p_Mask, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ } else if (p_CcNode->lclMask) { MemSet8(PTR_MOVE(p_KeysMatchTblTmp, p_CcNode->ccKeySizeAccExtraction), /* User's size of extraction rounded up to a valid matching table entry size */ 0xff, p_CcNode->sizeOfExtraction); /* Exact size of extraction as received from the user */ } p_KeysMatchTblTmp = PTR_MOVE(p_KeysMatchTblTmp, keySize * sizeof(uint8_t)); } /* Create the next action descriptor in the match table */ if (p_KeyParams->ccNextEngineParams.statisticsEn) { p_StatsObj = GetStatsObj(p_CcNode); ASSERT_COND(p_StatsObj); statsParams.h_StatsAd = 
p_StatsObj->h_StatsAd; statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; #if (DPAA_VERSION >= 11) statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; #endif /* (DPAA_VERSION >= 11) */ NextStepAd(p_AdTableTmp, &statsParams, &p_KeyParams->ccNextEngineParams, p_FmPcd); p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; } else { NextStepAd(p_AdTableTmp, NULL, &p_KeyParams->ccNextEngineParams, p_FmPcd); p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; } p_AdTableTmp = PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE); } /* Update next engine for the 'miss' entry */ if (p_CcNodeParam->keysParams.ccNextEngineParamsForMiss.statisticsEn) { p_StatsObj = GetStatsObj(p_CcNode); ASSERT_COND(p_StatsObj); /* All 'bucket' nodes of a hash table should share the same statistics counters, allocated by the hash table. So, if this node is a bucket of a hash table, we'll replace the locally allocated counters with the shared counters. */ if (p_CcNode->isHashBucket) { ASSERT_COND(p_CcNode->h_MissStatsCounters); /* Store original counters pointer and replace it with mutual preallocated pointer */ p_CcNode->h_PrivMissStatsCounters = p_StatsObj->h_StatsCounters; p_StatsObj->h_StatsCounters = p_CcNode->h_MissStatsCounters; } statsParams.h_StatsAd = p_StatsObj->h_StatsAd; statsParams.h_StatsCounters = p_StatsObj->h_StatsCounters; #if (DPAA_VERSION >= 11) statsParams.h_StatsFLRs = p_CcNode->h_StatsFLRs; #endif /* (DPAA_VERSION >= 11) */ NextStepAd(p_AdTableTmp, &statsParams, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_FmPcd); p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = p_StatsObj; } else { NextStepAd(p_AdTableTmp, NULL, &p_CcNodeParam->keysParams.ccNextEngineParamsForMiss, p_FmPcd); p_CcNode->keyAndNextEngineParams[tmp].p_StatsObj = NULL; } /* This parameter will be used to initialize the "key length" field in the action descriptor that points to this node and it should be 0 for full field extraction */ if (fullField == TRUE) 
p_CcNode->sizeOfExtraction = 0; for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) { if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.nextEngine == e_FM_PCD_CC) { p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.params.ccParams.h_CcNode; p_CcInformation = FindNodeInfoInReleventLst( &p_FmPcdCcNextNode->ccPrevNodesLst, (t_Handle)p_CcNode, p_FmPcdCcNextNode->h_Spinlock); if (!p_CcInformation) { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; ccNodeInfo.index = 1; EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccPrevNodesLst, &ccNodeInfo, p_FmPcdCcNextNode->h_Spinlock); } else p_CcInformation->index++; if (p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip) { h_Manip = p_CcNode->keyAndNextEngineParams[tmp].nextEngineParams.h_Manip; p_CcInformation = FindNodeInfoInReleventLst( FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), (t_Handle)p_CcNode, FmPcdManipGetSpinlock(h_Manip)); if (!p_CcInformation) { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = (t_Handle)p_CcNode; ccNodeInfo.index = 1; EnqueueNodeInfoToRelevantLst( FmPcdManipGetNodeLstPointedOnThisManip(h_Manip), &ccNodeInfo, FmPcdManipGetSpinlock(h_Manip)); } else p_CcInformation->index++; } } } p_AdTableTmp = p_CcNode->h_AdTable; if (!FmPcdLockTryLockAll(h_FmPcd)) { FM_PCD_MatchTableDelete((t_Handle)p_CcNode); DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } /* Required action for each next engine */ for (tmp = 0; tmp < MIN(p_CcNode->numOfKeys + 1, CC_MAX_NUM_OF_KEYS); tmp++) { if (p_CcNode->keyAndNextEngineParams[tmp].requiredAction) { err = SetRequiredAction( h_FmPcd, p_CcNode->keyAndNextEngineParams[tmp].requiredAction, &p_CcNode->keyAndNextEngineParams[tmp], p_AdTableTmp, 1, NULL); if (err) { FmPcdLockUnlockAll(h_FmPcd); FM_PCD_MatchTableDelete((t_Handle)p_CcNode); RETURN_ERROR(MAJOR, err, NO_MSG); } p_AdTableTmp = 
                                PTR_MOVE(p_AdTableTmp, FM_PCD_CC_AD_ENTRY_SIZE);
        }
    }

    FmPcdLockUnlockAll(h_FmPcd);

    return E_OK;
}

/************************** End of static functions **************************/

/*****************************************************************************/
/*                        Inter-module API routines                          */
/*****************************************************************************/

/* Scan p_List for the t_CcNodeInformation entry whose h_CcNode equals h_Info.
 * The whole scan runs under h_Spinlock. Returns the matching entry, or NULL
 * when no entry matches. Note: unlike the enqueue/dequeue helpers below,
 * h_Spinlock is taken unconditionally here (callers always pass one). */
t_CcNodeInformation* FindNodeInfoInReleventLst(t_List *p_List, t_Handle h_Info,
                                               t_Handle h_Spinlock)
{
    t_CcNodeInformation *p_CcInformation;
    t_List *p_Pos;
    uint32_t intFlags;

    intFlags = XX_LockIntrSpinlock(h_Spinlock);

    for (p_Pos = NCSW_LIST_FIRST(p_List); p_Pos != (p_List);
         p_Pos = NCSW_LIST_NEXT(p_Pos))
    {
        p_CcInformation = CC_NODE_F_OBJECT(p_Pos);

        ASSERT_COND(p_CcInformation->h_CcNode);

        if (p_CcInformation->h_CcNode == h_Info)
        {
            /* Found - drop the lock before handing the entry back. */
            XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
            return p_CcInformation;
        }
    }

    XX_UnlockIntrSpinlock(h_Spinlock, intFlags);

    return NULL;
}

/* Allocate a private copy of *p_CcInfo and append it to p_List. Only the
 * list insertion itself is done under h_Spinlock; a NULL h_Spinlock means
 * the caller already serializes access to the list. On allocation failure
 * the error is only reported, not returned. */
void EnqueueNodeInfoToRelevantLst(t_List *p_List, t_CcNodeInformation *p_CcInfo,
                                  t_Handle h_Spinlock)
{
    t_CcNodeInformation *p_CcInformation;
    uint32_t intFlags = 0;

    p_CcInformation = (t_CcNodeInformation *)XX_Malloc(
            sizeof(t_CcNodeInformation));
    if (p_CcInformation)
    {
        /* memset followed by a full memcpy: the copy overwrites the whole
         * struct, then the node link is (re)initialized. */
        memset(p_CcInformation, 0, sizeof(t_CcNodeInformation));
        memcpy(p_CcInformation, p_CcInfo, sizeof(t_CcNodeInformation));
        INIT_LIST(&p_CcInformation->node);

        if (h_Spinlock)
            intFlags = XX_LockIntrSpinlock(h_Spinlock);

        NCSW_LIST_AddToTail(&p_CcInformation->node, p_List);

        if (h_Spinlock)
            XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
    }
    else
        REPORT_ERROR(MAJOR, E_NO_MEMORY, ("CC Node Information"));
}

/* Unlink and free the entry of p_List whose h_CcNode equals h_Info,
 * optionally under h_Spinlock (NULL means caller serializes). */
void DequeueNodeInfoFromRelevantLst(t_List *p_List, t_Handle h_Info,
                                    t_Handle h_Spinlock)
{
    t_CcNodeInformation *p_CcInformation = NULL;
    uint32_t intFlags = 0;
    t_List *p_Pos;

    if (h_Spinlock)
        intFlags = XX_LockIntrSpinlock(h_Spinlock);

    if (NCSW_LIST_IsEmpty(p_List))
    {
        /* NOTE(review): the empty-list early-out restores interrupts via
         * XX_RestoreAllIntr() even when h_Spinlock is NULL (intFlags then 0),
         * and is asymmetric with the XX_UnlockIntrSpinlock() used at the end
         * of this function - confirm this is intentional. */
        XX_RestoreAllIntr(intFlags);
        return;
    }

    /* NOTE(review): if no entry matches h_Info, the loop below leaves
     * p_CcInformation pointing at the LAST list entry, which the code after
     * the loop (next source line) then unlinks and frees - verify callers
     * only call this with h_Info known to be present. */
    for (p_Pos = NCSW_LIST_FIRST(p_List); p_Pos != (p_List);
         p_Pos = NCSW_LIST_NEXT(p_Pos))
    {
        p_CcInformation = CC_NODE_F_OBJECT(p_Pos);
        ASSERT_COND(p_CcInformation);
        ASSERT_COND(p_CcInformation->h_CcNode);
        if (p_CcInformation->h_CcNode == h_Info)
            break;
    }

    if (p_CcInformation)
    {
        NCSW_LIST_DelAndInit(&p_CcInformation->node);
        XX_Free(p_CcInformation);
    }

    if (h_Spinlock)
        XX_UnlockIntrSpinlock(h_Spinlock, intFlags);
}

/* Build the next-engine action descriptor (AD) at h_Ad according to the
 * requested next engine. p_FmPcdCcStatsParams may be NULL when no statistics
 * are attached to this entry. */
void NextStepAd(t_Handle h_Ad, t_FmPcdCcStatsParams *p_FmPcdCcStatsParams,
                t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams,
                t_FmPcd *p_FmPcd)
{
    switch (p_FmPcdCcNextEngineParams->nextEngine)
    {
        case (e_FM_PCD_KG):
        case (e_FM_PCD_PLCR):
        case (e_FM_PCD_DONE):
            /* if NIA is not CC, create a "result" type AD */
            FillAdOfTypeResult(h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
                               p_FmPcdCcNextEngineParams);
            break;

#if (DPAA_VERSION >= 11)
        case (e_FM_PCD_FR):
            /* Frame-replicator next engine: continuation lookup AD plus an
             * owner reference on the replicator group. NOTE(review): this
             * reads params.ccParams.h_CcNode while the engine is FR
             * (frParams) - presumably a union overlay; confirm. */
            if (p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic)
            {
                FillAdOfTypeContLookup(
                        h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
                        p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
                        p_FmPcdCcNextEngineParams->h_Manip,
                        p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic);
                FrmReplicGroupUpdateOwner(
                        p_FmPcdCcNextEngineParams->params.frParams.h_FrmReplic,
                        TRUE/* add */);
            }
            break;
#endif /* (DPAA_VERSION >= 11) */

        case (e_FM_PCD_CC):
            /* NIA is CC: create a TD (continuation lookup) to the next CC
             * node and take an owner reference on it. */
            FillAdOfTypeContLookup(
                    h_Ad, p_FmPcdCcStatsParams, p_FmPcd,
                    p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
                    p_FmPcdCcNextEngineParams->h_Manip, NULL);
            UpdateNodeOwner(p_FmPcdCcNextEngineParams->params.ccParams.h_CcNode,
                            TRUE);
            break;

        default:
            /* Unknown engine: leave the AD untouched. */
            return;
    }
}

/* Attach IP reassembly (IPR) to a CC tree: reserves the last one or two
 * group entries of the tree (IPv4, and IPv6 when the manip handles IPv6),
 * optionally builds the reassembly KeyGen schemes, and writes "done" ADs
 * for the reserved entries. Idempotent for the same manip handle. */
t_Error FmPcdCcTreeAddIPR(t_Handle h_FmPcd, t_Handle h_FmTree,
                          t_Handle h_NetEnv, t_Handle h_IpReassemblyManip,
                          bool createSchemes)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
    t_FmPcdCcNextEngineParams nextEngineParams;
    t_NetEnvParams netEnvParams;
    t_Handle h_Ad;
    bool isIpv6Present;
    uint8_t ipv4GroupId, ipv6GroupId;
    t_Error err;

    ASSERT_COND(p_FmPcdCcTree);

    /* this routine must be protected by the calling routine!
     */

    memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
    memset(&netEnvParams, 0, sizeof(t_NetEnvParams));

    h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);

    isIpv6Present = FmPcdManipIpReassmIsIpv6Hdr(h_IpReassemblyManip);

    /* IPv6 support needs the last two group entries free; IPv4-only needs
     * one. NOTE(review): the second message says "two" although this check
     * only requires one free entry - likely a copy/paste in the message. */
    if (isIpv6Present
            && (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2)))
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));

    if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need two free entries for IPR"));

    nextEngineParams.nextEngine = e_FM_PCD_DONE;
    nextEngineParams.h_Manip = h_IpReassemblyManip;

    /* Lock tree */
    err = CcRootTryLock(p_FmPcdCcTree);
    if (err)
        return ERROR_CODE(E_BUSY);

    /* Same manip already attached: nothing to do. */
    if (p_FmPcdCcTree->h_IpReassemblyManip == h_IpReassemblyManip)
    {
        CcRootReleaseLock(p_FmPcdCcTree);
        return E_OK;
    }

    /* A different IPR manip is already attached: refuse. */
    if ((p_FmPcdCcTree->h_IpReassemblyManip)
            && (p_FmPcdCcTree->h_IpReassemblyManip != h_IpReassemblyManip))
    {
        CcRootReleaseLock(p_FmPcdCcTree);
        RETURN_ERROR(MAJOR, E_INVALID_STATE,
                     ("This tree was previously updated with different IPR"));
    }

    /* Initialize IPR for the first time for this tree */
    if (isIpv6Present)
    {
        /* IPv6 takes the next-to-last group entry. */
        ipv6GroupId = p_FmPcdCcTree->numOfGrps++;
        p_FmPcdCcTree->fmPcdGroupParam[ipv6GroupId].baseGroupEntry =
                (FM_PCD_MAX_NUM_OF_CC_GROUPS - 2);

        if (createSchemes)
        {
            err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv,
                                                p_FmPcdCcTree,
                                                h_IpReassemblyManip, FALSE,
                                                ipv6GroupId);
            if (err)
            {
                /* Roll back the group reservation on failure. */
                p_FmPcdCcTree->numOfGrps--;
                CcRootReleaseLock(p_FmPcdCcTree);
                RETURN_ERROR(MAJOR, err, NO_MSG);
            }
        }

        NextStepAd(
                PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-2) * FM_PCD_CC_AD_ENTRY_SIZE),
                NULL, &nextEngineParams, h_FmPcd);
    }

    /* IPv4 takes the last group entry. */
    ipv4GroupId = p_FmPcdCcTree->numOfGrps++;
    p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].totalBitsMask = 0;
    p_FmPcdCcTree->fmPcdGroupParam[ipv4GroupId].baseGroupEntry =
            (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);

    if (createSchemes)
    {
        err = FmPcdManipBuildIpReassmScheme(h_FmPcd, h_NetEnv, p_FmPcdCcTree,
                                            h_IpReassemblyManip, TRUE,
                                            ipv4GroupId);
        if (err)
        {
            /* Roll back IPv4 (and, if taken, IPv6) reservations and any
             * schemes already built for IPv6. */
            p_FmPcdCcTree->numOfGrps--;
            if (isIpv6Present)
            {
                p_FmPcdCcTree->numOfGrps--;
                FmPcdManipDeleteIpReassmSchemes(h_IpReassemblyManip);
            }
            CcRootReleaseLock(p_FmPcdCcTree);
            RETURN_ERROR(MAJOR, err, NO_MSG);
        }
    }

    NextStepAd(
            PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
            NULL, &nextEngineParams, h_FmPcd);

    p_FmPcdCcTree->h_IpReassemblyManip = h_IpReassemblyManip;

    CcRootReleaseLock(p_FmPcdCcTree);

    return E_OK;
}

/* Attach CAPWAP reassembly (CPR) to a CC tree: reserves the last group
 * entry, optionally builds the reassembly scheme, and writes a "done" AD
 * for the reserved entry. Idempotent for the same manip handle. */
t_Error FmPcdCcTreeAddCPR(t_Handle h_FmPcd, t_Handle h_FmTree,
                          t_Handle h_NetEnv, t_Handle h_ReassemblyManip,
                          bool createSchemes)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;
    t_FmPcdCcNextEngineParams nextEngineParams;
    t_NetEnvParams netEnvParams;
    t_Handle h_Ad;
    uint8_t groupId;
    t_Error err;

    ASSERT_COND(p_FmPcdCcTree);

    /* this routine must be protected by the calling routine! */

    memset(&nextEngineParams, 0, sizeof(t_FmPcdCcNextEngineParams));
    memset(&netEnvParams, 0, sizeof(t_NetEnvParams));

    h_Ad = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr);

    /* CPR needs one free group entry at the end of the tree. */
    if (p_FmPcdCcTree->numOfEntries > (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1))
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("need one free entries for CPR"));

    nextEngineParams.nextEngine = e_FM_PCD_DONE;
    nextEngineParams.h_Manip = h_ReassemblyManip;

    /* Lock tree */
    err = CcRootTryLock(p_FmPcdCcTree);
    if (err)
        return ERROR_CODE(E_BUSY);

    /* Same manip already attached: nothing to do. */
    if (p_FmPcdCcTree->h_CapwapReassemblyManip == h_ReassemblyManip)
    {
        CcRootReleaseLock(p_FmPcdCcTree);
        return E_OK;
    }

    /* A different CPR manip is already attached: refuse. */
    if ((p_FmPcdCcTree->h_CapwapReassemblyManip)
            && (p_FmPcdCcTree->h_CapwapReassemblyManip != h_ReassemblyManip))
    {
        CcRootReleaseLock(p_FmPcdCcTree);
        RETURN_ERROR(MAJOR, E_INVALID_STATE,
                     ("This tree was previously updated with different CPR"));
    }

    /* Reserve the last group entry for CPR. */
    groupId = p_FmPcdCcTree->numOfGrps++;
    p_FmPcdCcTree->fmPcdGroupParam[groupId].baseGroupEntry =
            (FM_PCD_MAX_NUM_OF_CC_GROUPS - 1);

    if (createSchemes)
    {
        err = FmPcdManipBuildCapwapReassmScheme(h_FmPcd, h_NetEnv,
                                                p_FmPcdCcTree,
                                                h_ReassemblyManip, groupId);
        if (err)
        {
            /* Roll back the group reservation on failure. */
            p_FmPcdCcTree->numOfGrps--;
            CcRootReleaseLock(p_FmPcdCcTree);
            RETURN_ERROR(MAJOR, err, NO_MSG);
        }
    }

    NextStepAd(
            PTR_MOVE(h_Ad, (FM_PCD_MAX_NUM_OF_CC_GROUPS-1) * FM_PCD_CC_AD_ENTRY_SIZE),
            NULL, &nextEngineParams, h_FmPcd);

    p_FmPcdCcTree->h_CapwapReassemblyManip = h_ReassemblyManip;

    CcRootReleaseLock(p_FmPcdCcTree);

    return E_OK;
}

/* Accessor: saved manip params handle stored on the tree. */
t_Handle FmPcdCcTreeGetSavedManipParams(t_Handle h_FmTree)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;

    ASSERT_COND(p_FmPcdCcTree);

    return p_FmPcdCcTree->h_FmPcdCcSavedManipParams;
}

/* Mutator: store a manip params handle on the tree. */
void FmPcdCcTreeSetSavedManipParams(t_Handle h_FmTree,
                                    t_Handle h_SavedManipParams)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmTree;

    ASSERT_COND(p_FmPcdCcTree);

    p_FmPcdCcTree->h_FmPcdCcSavedManipParams = h_SavedManipParams;
}

/* Accessor: the node's parse code (CC_PC_* value chosen at node setup). */
uint8_t FmPcdCcGetParseCode(t_Handle h_CcNode)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;

    ASSERT_COND(p_CcNode);

    return p_CcNode->parseCode;
}

/* Accessor: the node's extraction offset. */
uint8_t FmPcdCcGetOffset(t_Handle h_CcNode)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;

    ASSERT_COND(p_CcNode);

    return p_CcNode->offset;
}

/* Accessor: number of keys currently held by the node. */
uint16_t FmPcdCcGetNumOfKeys(t_Handle h_CcNode)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;

    ASSERT_COND(p_CcNode);

    return p_CcNode->numOfKeys;
}

/* Replace the next-engine parameters of one tree entry (group grpId, entry
 * index within the group) and apply the change dynamically. Takes the CC
 * shadow lock when a shadow buffer exists. */
t_Error FmPcdCcModifyNextEngineParamTree(
        t_Handle h_FmPcd, t_Handle h_FmPcdCcTree, uint8_t grpId, uint8_t index,
        t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
    t_FmPcd *p_FmPcd;
    t_List h_OldPointersLst, h_NewPointersLst;
    uint16_t keyIndex;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((grpId <= 7), E_INVALID_VALUE);

    if (grpId >= p_FmPcdCcTree->numOfGrps)
        RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
                     ("grpId you asked > numOfGroup of relevant tree"));

    if (index >= p_FmPcdCcTree->fmPcdGroupParam[grpId].numOfEntriesInGroup)
        RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
                     ("index > numOfEntriesInGroup"));

    p_FmPcd = (t_FmPcd *)h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    /* Absolute entry index = group base + index within the group. */
    keyIndex = (uint16_t)(p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry
            + index);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_FmPcdCcTree, keyIndex,
                                             e_MODIFY_STATE_CHANGE, FALSE,
                                             FALSE, TRUE);
    if (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    p_ModifyKeyParams->tree = TRUE;

    if (p_FmPcd->p_CcShadow
            && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
    {
        XX_Free(p_ModifyKeyParams);
        return ERROR_CODE(E_BUSY);
    }

    err = BuildNewNodeModifyNextEngine(p_FmPcd, p_FmPcdCcTree, keyIndex,
                                       p_FmPcdCcNextEngineParams,
                                       &h_OldPointersLst, &h_NewPointersLst,
                                       p_ModifyKeyParams);
    if (err)
    {
        /* NOTE(review): unlike the success path below, this error path does
         * not RELEASE_LOCK(p_FmPcd->shadowLock) after a successful TRY_LOCK
         * above - confirm whether the shadow lock leaks here. */
        XX_Free(p_ModifyKeyParams);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, FALSE);

    if (p_FmPcd->p_CcShadow)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Remove the key at keyIndex from a match-table node and apply the change
 * dynamically. When the node was created with maxNumOfKeys, the shared
 * shadow structures are used under the shadow lock. */
t_Error FmPcdCcRemoveKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
                         uint16_t keyIndex)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_FmPcd *p_FmPcd;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    t_List h_OldPointersLst, h_NewPointersLst;
    bool useShadowStructs = FALSE;
    t_Error err = E_OK;

    if (keyIndex >= p_CcNode->numOfKeys)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE,
                     ("impossible to remove key when numOfKeys <= keyIndex"));

    if (p_CcNode->h_FmPcd != h_FmPcd)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("handler to FmPcd is different from the handle provided at node initialization time"));

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
                                             e_MODIFY_STATE_REMOVE, TRUE, TRUE,
                                             FALSE);
    if (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    if (p_CcNode->maxNumOfKeys)
    {
        if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
        {
            XX_Free(p_ModifyKeyParams);
            return ERROR_CODE(E_BUSY);
        }
        useShadowStructs = TRUE;
    }

    err = BuildNewNodeRemoveKey(p_CcNode, keyIndex, p_ModifyKeyParams);
    if (err)
    {
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    /* Re-point every AD that references the node being modified. */
    err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
                                           &h_OldPointersLst,
                                           &h_NewPointersLst);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, useShadowStructs);

    if (p_CcNode->maxNumOfKeys)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Replace the key (and optional mask) stored at keyIndex of a match-table
 * node, keeping its next-engine parameters, and apply the change
 * dynamically. Rejects a key/mask pair that already exists in the table. */
t_Error FmPcdCcModifyKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
                         uint16_t keyIndex, uint8_t keySize, uint8_t *p_Key,
                         uint8_t *p_Mask)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_FmPcd *p_FmPcd;
    t_List h_OldPointersLst, h_NewPointersLst;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    uint16_t tmpKeyIndex;
    bool useShadowStructs = FALSE;
    t_Error err = E_OK;

    if (keyIndex >= p_CcNode->numOfKeys)
        RETURN_ERROR(MAJOR, E_INVALID_STATE,
                     ("keyIndex > previously cleared last index + 1"));

    if (keySize != p_CcNode->userSizeOfExtraction)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("size for ModifyKey has to be the same as defined in SetNode"));

    if (p_CcNode->h_FmPcd != h_FmPcd)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("handler to FmPcd is different from the handle provided at node initialization time"));

    /* A successful lookup (or any error other than E_NOT_FOUND) means the
     * pair is already present - refuse the modification. */
    err = FindKeyIndex(h_FmPcdCcNode, keySize, p_Key, p_Mask, &tmpKeyIndex);
    if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
        RETURN_ERROR(
                MINOR,
                E_ALREADY_EXISTS,
                ("The received key and mask pair was already found in the match table of the provided node"));

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
                                             e_MODIFY_STATE_CHANGE, TRUE, TRUE,
                                             FALSE);
    if
    (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    if (p_CcNode->maxNumOfKeys)
    {
        if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
        {
            XX_Free(p_ModifyKeyParams);
            return ERROR_CODE(E_BUSY);
        }

        useShadowStructs = TRUE;
    }

    err = BuildNewNodeModifyKey(p_CcNode, keyIndex, p_Key, p_Mask,
                                p_ModifyKeyParams);
    if (err)
    {
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    /* Re-point every AD that references the node being modified. */
    err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
                                           &h_OldPointersLst,
                                           &h_NewPointersLst);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, useShadowStructs);

    if (p_CcNode->maxNumOfKeys)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Replace the next-engine parameters of the node's "miss" entry (logical
 * index numOfKeys) and apply the change dynamically. */
t_Error FmPcdCcModifyMissNextEngineParamNode(
        t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
        t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_FmPcd *p_FmPcd;
    t_List h_OldPointersLst, h_NewPointersLst;
    uint16_t keyIndex;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_VALUE);

    /* The miss entry lives one past the last real key. */
    keyIndex = p_CcNode->numOfKeys;

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
                                             e_MODIFY_STATE_CHANGE, FALSE,
                                             TRUE, FALSE);
    if (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    if (p_CcNode->maxNumOfKeys
            && !TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
    {
        XX_Free(p_ModifyKeyParams);
        return ERROR_CODE(E_BUSY);
    }

    err = BuildNewNodeModifyNextEngine(h_FmPcd, p_CcNode, keyIndex,
                                       p_FmPcdCcNextEngineParams,
                                       &h_OldPointersLst, &h_NewPointersLst,
                                       p_ModifyKeyParams);
    if (err)
    {
        XX_Free(p_ModifyKeyParams);
        if
        (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, FALSE);

    if (p_CcNode->maxNumOfKeys)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Insert a new key (with its next-engine parameters) at keyIndex of a
 * match-table node and apply the change dynamically. Enforces the node's
 * capacity (maxNumOfKeys when preallocated, FM_PCD_MAX_NUM_OF_KEYS
 * otherwise) and rejects duplicate key/mask pairs. */
t_Error FmPcdCcAddKey(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
                      uint16_t keyIndex, uint8_t keySize,
                      t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_FmPcd *p_FmPcd;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    t_List h_OldPointersLst, h_NewPointersLst;
    bool useShadowStructs = FALSE;
    uint16_t tmpKeyIndex;
    t_Error err = E_OK;

    if (keyIndex > p_CcNode->numOfKeys)
        RETURN_ERROR(MAJOR, E_NOT_IN_RANGE,
                     ("keyIndex > previously cleared last index + 1"));

    if (keySize != p_CcNode->userSizeOfExtraction)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("keySize has to be defined as it was defined in initialization step"));

    if (p_CcNode->h_FmPcd != h_FmPcd)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("handler to FmPcd is different from the handle provided at node initialization time"));

    if (p_CcNode->maxNumOfKeys)
    {
        if (p_CcNode->numOfKeys == p_CcNode->maxNumOfKeys)
            RETURN_ERROR(
                    MAJOR,
                    E_FULL,
                    ("number of keys exceeds the maximal number of keys provided at node initialization time"));
    }
    else if (p_CcNode->numOfKeys == FM_PCD_MAX_NUM_OF_KEYS)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("number of keys can not be larger than %d", FM_PCD_MAX_NUM_OF_KEYS));

    /* Reject a key/mask pair that is already present in the table. */
    err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
                       p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
    if (GET_ERROR_TYPE(err) != E_NOT_FOUND)
        RETURN_ERROR(
                MAJOR,
                E_ALREADY_EXISTS,
                ("The received key and mask pair was already found in the match table of the provided node"));

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
                                             e_MODIFY_STATE_ADD, TRUE, TRUE,
                                             FALSE);
    if (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    if (p_CcNode->maxNumOfKeys)
    {
        if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
        {
            XX_Free(p_ModifyKeyParams);
            return ERROR_CODE(E_BUSY);
        }

        useShadowStructs = TRUE;
    }

    /* TRUE: add a new key (as opposed to modifying an existing one). */
    err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
                                                p_FmPcdCcKeyParams,
                                                p_ModifyKeyParams, TRUE);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    /* Re-point every AD that references the node being modified. */
    err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
                                           &h_OldPointersLst,
                                           &h_NewPointersLst);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, useShadowStructs);

    if (p_CcNode->maxNumOfKeys)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Replace both the key and the next-engine parameters at keyIndex of a
 * match-table node and apply the change dynamically. Rejects duplicate
 * key/mask pairs. */
t_Error FmPcdCcModifyKeyAndNextEngine(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
                                      uint16_t keyIndex, uint8_t keySize,
                                      t_FmPcdCcKeyParams *p_FmPcdCcKeyParams)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_FmPcd *p_FmPcd;
    t_List h_OldPointersLst, h_NewPointersLst;
    t_FmPcdModifyCcKeyAdditionalParams *p_ModifyKeyParams;
    uint16_t tmpKeyIndex;
    bool useShadowStructs = FALSE;
    t_Error err = E_OK;

    if (keyIndex > p_CcNode->numOfKeys)
        RETURN_ERROR(MAJOR, E_INVALID_STATE,
                     ("keyIndex > previously cleared last index + 1"));

    if (keySize != p_CcNode->userSizeOfExtraction)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("keySize has to be defined as it was defined in initialization step"));

    if (p_CcNode->h_FmPcd != h_FmPcd)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("handler to FmPcd is different from the handle provided at node initialization time"));

    /* Reject a key/mask pair that is already present in the table. */
    err = FindKeyIndex(h_FmPcdCcNode, keySize, p_FmPcdCcKeyParams->p_Key,
                       p_FmPcdCcKeyParams->p_Mask, &tmpKeyIndex);
    if (GET_ERROR_TYPE(err) !=
    E_NOT_FOUND)
        RETURN_ERROR(
                MINOR,
                E_ALREADY_EXISTS,
                ("The received key and mask pair was already found in the match table of the provided node"));

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;

    INIT_LIST(&h_OldPointersLst);
    INIT_LIST(&h_NewPointersLst);

    p_ModifyKeyParams = ModifyNodeCommonPart(p_CcNode, keyIndex,
                                             e_MODIFY_STATE_CHANGE, TRUE, TRUE,
                                             FALSE);
    if (!p_ModifyKeyParams)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, NO_MSG);

    if (p_CcNode->maxNumOfKeys)
    {
        if (!TRY_LOCK(p_FmPcd->h_ShadowSpinlock, &p_FmPcd->shadowLock))
        {
            XX_Free(p_ModifyKeyParams);
            return ERROR_CODE(E_BUSY);
        }

        useShadowStructs = TRUE;
    }

    /* FALSE: modify an existing key (as opposed to adding a new one). */
    err = BuildNewNodeAddOrMdfyKeyAndNextEngine(h_FmPcd, p_CcNode, keyIndex,
                                                p_FmPcdCcKeyParams,
                                                p_ModifyKeyParams, FALSE);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    /* Re-point every AD that references the node being modified. */
    err = UpdatePtrWhichPointOnCrntMdfNode(p_CcNode, p_ModifyKeyParams,
                                           &h_OldPointersLst,
                                           &h_NewPointersLst);
    if (err)
    {
        ReleaseNewNodeCommonPart(p_ModifyKeyParams);
        XX_Free(p_ModifyKeyParams);
        if (p_CcNode->maxNumOfKeys)
            RELEASE_LOCK(p_FmPcd->shadowLock);
        RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    err = DoDynamicChange(p_FmPcd, &h_OldPointersLst, &h_NewPointersLst,
                          p_ModifyKeyParams, useShadowStructs);

    if (p_CcNode->maxNumOfKeys)
        RELEASE_LOCK(p_FmPcd->shadowLock);

    return err;
}

/* Translate the CC-node handle stored in a node-info list entry into its
 * offset from the MURAM physical base. */
uint32_t FmPcdCcGetNodeAddrOffsetFromNodeInfo(t_Handle h_FmPcd,
                                              t_Handle h_Pointer)
{
    t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd;
    t_CcNodeInformation *p_CcNodeInfo;

    SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE,
                              (uint32_t)ILLEGAL_BASE);

    p_CcNodeInfo = CC_NODE_F_OBJECT(h_Pointer);

    return (uint32_t)(XX_VirtToPhys(p_CcNodeInfo->h_CcNode)
            - p_FmPcd->physicalMuramBase);
}

/* Report a tree group's bit mask and base entry through the out params. */
t_Error FmPcdCcGetGrpParams(t_Handle h_FmPcdCcTree, uint8_t grpId,
                            uint32_t *p_GrpBits, uint8_t *p_GrpBase)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;

    SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);

    if (grpId >=
    p_FmPcdCcTree->numOfGrps)
        RETURN_ERROR(MAJOR, E_INVALID_HANDLE,
                     ("grpId you asked > numOfGroup of relevant tree"));

    *p_GrpBits = p_FmPcdCcTree->fmPcdGroupParam[grpId].totalBitsMask;
    *p_GrpBase = p_FmPcdCcTree->fmPcdGroupParam[grpId].baseGroupEntry;

    return E_OK;
}

/* Bind a port to a CC tree: propagate the port's PCD parameters through the
 * tree, bump the tree's owner count on success, and always report the
 * tree's MURAM offset through *p_Offset. */
t_Error FmPcdCcBindTree(t_Handle h_FmPcd, t_Handle h_PcdParams,
                        t_Handle h_FmPcdCcTree, uint32_t *p_Offset,
                        t_Handle h_FmPort)
{
    t_FmPcd *p_FmPcd = (t_FmPcd*)h_FmPcd;
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(h_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);

    /* this routine must be protected by the calling routine by locking all
     * PCD modules! */

    err = CcUpdateParams(h_FmPcd, h_PcdParams, h_FmPort, h_FmPcdCcTree, TRUE);

    if (err == E_OK)
        UpdateCcRootOwner(p_FmPcdCcTree, TRUE);

    /* Offset is reported even when CcUpdateParams failed; err is returned. */
    *p_Offset = (uint32_t)(XX_VirtToPhys(
            UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr))
            - p_FmPcd->physicalMuramBase);

    return err;
}

/* Unbind a port from a CC tree: drop the tree's owner count. */
t_Error FmPcdCcUnbindTree(t_Handle h_FmPcd, t_Handle h_FmPcdCcTree)
{
    t_FmPcdCcTree *p_FmPcdCcTree = (t_FmPcdCcTree *)h_FmPcdCcTree;

    /* this routine must be protected by the calling routine by locking all
       PCD modules!
     */

    UNUSED(h_FmPcd);

    SANITY_CHECK_RETURN_ERROR(h_FmPcdCcTree, E_INVALID_HANDLE);

    UpdateCcRootOwner(p_FmPcdCcTree, FALSE);

    return E_OK;
}

/* Try to lock every tree that references p_CcNode; on success each locked
 * tree is recorded in p_List (so the caller can later release them). On a
 * lock failure, all trees locked so far are released and the error is
 * returned. Runs under the global PCD lock. */
t_Error FmPcdCcNodeTreeTryLock(t_Handle h_FmPcd, t_Handle h_FmPcdCcNode,
                               t_List *p_List)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_FmPcdCcNode;
    t_List *p_Pos, *p_Tmp;
    t_CcNodeInformation *p_CcNodeInfo, nodeInfo;
    uint32_t intFlags;
    t_Error err = E_OK;

    intFlags = FmPcdLock(h_FmPcd);

    NCSW_LIST_FOR_EACH(p_Pos, &p_CcNode->ccTreesLst)
    {
        p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
        ASSERT_COND(p_CcNodeInfo->h_CcNode);

        err = CcRootTryLock(p_CcNodeInfo->h_CcNode);

        if (err)
        {
            /* Roll back: release the trees locked before p_Pos.
             * NOTE(review): the rollback releases p_CcNodeInfo->h_CcNode
             * (the tree that FAILED to lock) on every iteration instead of
             * CC_NODE_F_OBJECT(p_Tmp)->h_CcNode - verify this unlock loop
             * releases the intended trees. */
            NCSW_LIST_FOR_EACH(p_Tmp, &p_CcNode->ccTreesLst)
            {
                if (p_Tmp == p_Pos)
                    break;

                CcRootReleaseLock(p_CcNodeInfo->h_CcNode);
            }
            break;
        }

        memset(&nodeInfo, 0, sizeof(t_CcNodeInformation));
        nodeInfo.h_CcNode = p_CcNodeInfo->h_CcNode;
        EnqueueNodeInfoToRelevantLst(p_List, &nodeInfo, NULL);
    }

    FmPcdUnlock(h_FmPcd, intFlags);
    CORE_MemoryBarrier();

    return err;
}

/* Release every tree lock recorded in p_List (the list built by
 * FmPcdCcNodeTreeTryLock) and free the list entries. */
void FmPcdCcNodeTreeReleaseLock(t_Handle h_FmPcd, t_List *p_List)
{
    t_List *p_Pos;
    t_CcNodeInformation *p_CcNodeInfo;
    t_Handle h_FmPcdCcTree;
    uint32_t intFlags;

    intFlags = FmPcdLock(h_FmPcd);

    NCSW_LIST_FOR_EACH(p_Pos, p_List)
    {
        p_CcNodeInfo = CC_NODE_F_OBJECT(p_Pos);
        h_FmPcdCcTree = p_CcNodeInfo->h_CcNode;
        CcRootReleaseLock(h_FmPcdCcTree);
    }

    ReleaseLst(p_List);

    FmPcdUnlock(h_FmPcd, intFlags);
    CORE_MemoryBarrier();
}

/* Grow the PCD-wide CC shadow buffer (in MURAM) so it can hold at least
 * `size` bytes at alignment `align`. The shadow is shared by all CC nodes;
 * it is only re-allocated when a larger size or stricter alignment is
 * needed. */
t_Error FmPcdUpdateCcShadow(t_FmPcd *p_FmPcd, uint32_t size, uint32_t align)
{
    uint32_t intFlags;
    uint32_t newSize = 0, newAlign = 0;
    bool allocFail = FALSE;

    ASSERT_COND(p_FmPcd);

    if (!size)
        RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("size must be larger then 0"));

    if (!POWER_OF_2(align))
        RETURN_ERROR(MAJOR, E_INVALID_VALUE,
                     ("alignment must be power of 2"));

    newSize = p_FmPcd->ccShadowSize;
    newAlign = p_FmPcd->ccShadowAlign;

    /* Check if current shadow is large enough to hold the requested size */
    if (size > p_FmPcd->ccShadowSize)
        newSize = size;

    /* Check if current shadow matches the requested alignment */
    if (align
> p_FmPcd->ccShadowAlign) newAlign = align; /* If a bigger shadow size or bigger shadow alignment are required, a new shadow will be allocated */ if ((newSize != p_FmPcd->ccShadowSize) || (newAlign != p_FmPcd->ccShadowAlign)) { intFlags = FmPcdLock(p_FmPcd); if (p_FmPcd->p_CcShadow) { FM_MURAM_FreeMem(FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->p_CcShadow); p_FmPcd->ccShadowSize = 0; p_FmPcd->ccShadowAlign = 0; } p_FmPcd->p_CcShadow = FM_MURAM_AllocMem(FmPcdGetMuramHandle(p_FmPcd), newSize, newAlign); if (!p_FmPcd->p_CcShadow) { allocFail = TRUE; /* If new shadow size allocation failed, re-allocate with previous parameters */ p_FmPcd->p_CcShadow = FM_MURAM_AllocMem( FmPcdGetMuramHandle(p_FmPcd), p_FmPcd->ccShadowSize, p_FmPcd->ccShadowAlign); } FmPcdUnlock(p_FmPcd, intFlags); if (allocFail) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Shadow memory")); p_FmPcd->ccShadowSize = newSize; p_FmPcd->ccShadowAlign = newAlign; } return E_OK; } #if (DPAA_VERSION >= 11) void FmPcdCcGetAdTablesThatPointOnReplicGroup(t_Handle h_Node, t_Handle h_ReplicGroup, t_List *p_AdTables, uint32_t *p_NumOfAdTables) { t_FmPcdCcNode *p_CurrentNode = (t_FmPcdCcNode *)h_Node; int i = 0; void * p_AdTable; t_CcNodeInformation ccNodeInfo; ASSERT_COND(h_Node); *p_NumOfAdTables = 0; /* search in the current node which exact index points on this current replicator group for getting AD */ for (i = 0; i < p_CurrentNode->numOfKeys + 1; i++) { if ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && ((p_CurrentNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic == (t_Handle)h_ReplicGroup))) { /* save the current ad table in the list */ /* this entry uses the input replicator group */ p_AdTable = PTR_MOVE(p_CurrentNode->h_AdTable, i*FM_PCD_CC_AD_ENTRY_SIZE); memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = p_AdTable; EnqueueNodeInfoToRelevantLst(p_AdTables, &ccNodeInfo, NULL); (*p_NumOfAdTables)++; } } 
ASSERT_COND(i != p_CurrentNode->numOfKeys); } #endif /* (DPAA_VERSION >= 11) */ /*********************** End of inter-module routines ************************/ /****************************************/ /* API Init unit functions */ /****************************************/ t_Handle FM_PCD_CcRootBuild(t_Handle h_FmPcd, t_FmPcdCcTreeParams *p_PcdGroupsParam) { t_FmPcd *p_FmPcd = (t_FmPcd *)h_FmPcd; t_Error err = E_OK; int i = 0, j = 0, k = 0; t_FmPcdCcTree *p_FmPcdCcTree; uint8_t numOfEntries; t_Handle p_CcTreeTmp; t_FmPcdCcGrpParams *p_FmPcdCcGroupParams; t_FmPcdCcKeyAndNextEngineParams *p_Params, *p_KeyAndNextEngineParams; t_NetEnvParams netEnvParams; uint8_t lastOne = 0; uint32_t requiredAction = 0; t_FmPcdCcNode *p_FmPcdCcNextNode; t_CcNodeInformation ccNodeInfo, *p_CcInformation; SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_PcdGroupsParam, E_INVALID_HANDLE, NULL); if (p_PcdGroupsParam->numOfGrps > FM_PCD_MAX_NUM_OF_CC_GROUPS) { REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfGrps should not exceed %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); return NULL; } p_FmPcdCcTree = (t_FmPcdCcTree*)XX_Malloc(sizeof(t_FmPcdCcTree)); if (!p_FmPcdCcTree) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("PCD tree structure")); return NULL; } memset(p_FmPcdCcTree, 0, sizeof(t_FmPcdCcTree)); p_FmPcdCcTree->h_FmPcd = h_FmPcd; p_Params = (t_FmPcdCcKeyAndNextEngineParams*)XX_Malloc( FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); memset(p_Params, 0, FM_PCD_MAX_NUM_OF_CC_GROUPS * sizeof(t_FmPcdCcKeyAndNextEngineParams)); INIT_LIST(&p_FmPcdCcTree->fmPortsLst); #ifdef FM_CAPWAP_SUPPORT if ((p_PcdGroupsParam->numOfGrps == 1) && (p_PcdGroupsParam->ccGrpParams[0].numOfDistinctionUnits == 0) && (p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].nextEngine == e_FM_PCD_CC) && p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode && 
IsCapwapApplSpecific(p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].params.ccParams.h_CcNode)) { p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip = FmPcdManipApplSpecificBuild(); if (!p_PcdGroupsParam->ccGrpParams[0].nextEnginePerEntriesInGrp[0].h_Manip) { DeleteTree(p_FmPcdCcTree,p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); return NULL; } } #endif /* FM_CAPWAP_SUPPORT */ numOfEntries = 0; p_FmPcdCcTree->netEnvId = FmPcdGetNetEnvId(p_PcdGroupsParam->h_NetEnv); for (i = 0; i < p_PcdGroupsParam->numOfGrps; i++) { p_FmPcdCcGroupParams = &p_PcdGroupsParam->ccGrpParams[i]; if (p_FmPcdCcGroupParams->numOfDistinctionUnits > FM_PCD_MAX_NUM_OF_CC_UNITS) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfDistinctionUnits (group %d) should not exceed %d", i, FM_PCD_MAX_NUM_OF_CC_UNITS)); return NULL; } p_FmPcdCcTree->fmPcdGroupParam[i].baseGroupEntry = numOfEntries; p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup = (uint8_t)(0x01 << p_FmPcdCcGroupParams->numOfDistinctionUnits); numOfEntries += p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; if (numOfEntries > FM_PCD_MAX_NUM_OF_CC_GROUPS) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("numOfEntries can not be larger than %d", FM_PCD_MAX_NUM_OF_CC_GROUPS)); return NULL; } if (lastOne) { if (p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup > lastOne) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_CONFLICT, ("numOfEntries per group must be set in descending order")); return NULL; } } lastOne = p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; netEnvParams.netEnvId = p_FmPcdCcTree->netEnvId; netEnvParams.numOfDistinctionUnits = p_FmPcdCcGroupParams->numOfDistinctionUnits; memcpy(netEnvParams.unitIds, &p_FmPcdCcGroupParams->unitIds, (sizeof(uint8_t)) * p_FmPcdCcGroupParams->numOfDistinctionUnits); err = 
PcdGetUnitsVector(p_FmPcd, &netEnvParams); if (err) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_FmPcdCcTree->fmPcdGroupParam[i].totalBitsMask = netEnvParams.vector; for (j = 0; j < p_FmPcdCcTree->fmPcdGroupParam[i].numOfEntriesInGroup; j++) { err = ValidateNextEngineParams( h_FmPcd, &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], e_FM_PCD_CC_STATS_MODE_NONE); if (err) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, err, (NO_MSG)); return NULL; } if (p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j].h_Manip) { err = FmPcdManipCheckParamsForCcNextEngine( &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], &requiredAction); if (err) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_INVALID_STATE, NO_MSG); return NULL; } } p_KeyAndNextEngineParams = p_Params + k; memcpy(&p_KeyAndNextEngineParams->nextEngineParams, &p_FmPcdCcGroupParams->nextEnginePerEntriesInGrp[j], sizeof(t_FmPcdCcNextEngineParams)); if ((p_KeyAndNextEngineParams->nextEngineParams.nextEngine == e_FM_PCD_CC) && p_KeyAndNextEngineParams->nextEngineParams.h_Manip) { err = AllocAndFillAdForContLookupManip( p_KeyAndNextEngineParams->nextEngineParams.params.ccParams.h_CcNode); if (err) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); return NULL; } } requiredAction |= UPDATE_CC_WITH_TREE; p_KeyAndNextEngineParams->requiredAction = requiredAction; k++; } } p_FmPcdCcTree->numOfEntries = (uint8_t)k; p_FmPcdCcTree->numOfGrps = p_PcdGroupsParam->numOfGrps; p_FmPcdCcTree->ccTreeBaseAddr = PTR_TO_UINT(FM_MURAM_AllocMem(FmPcdGetMuramHandle(h_FmPcd), (uint32_t)( FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE), FM_PCD_CC_TREE_ADDR_ALIGN)); if (!p_FmPcdCcTree->ccTreeBaseAddr) { DeleteTree(p_FmPcdCcTree, p_FmPcd); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for CC Tree")); return 
NULL; } MemSet8( UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr), 0, (uint32_t)(FM_PCD_MAX_NUM_OF_CC_GROUPS * FM_PCD_CC_AD_ENTRY_SIZE)); p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); for (i = 0; i < numOfEntries; i++) { p_KeyAndNextEngineParams = p_Params + i; NextStepAd(p_CcTreeTmp, NULL, &p_KeyAndNextEngineParams->nextEngineParams, p_FmPcd); p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); memcpy(&p_FmPcdCcTree->keyAndNextEngineParams[i], p_KeyAndNextEngineParams, sizeof(t_FmPcdCcKeyAndNextEngineParams)); if (p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) { p_FmPcdCcNextNode = (t_FmPcdCcNode*)p_FmPcdCcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; p_CcInformation = FindNodeInfoInReleventLst( &p_FmPcdCcNextNode->ccTreeIdLst, (t_Handle)p_FmPcdCcTree, p_FmPcdCcNextNode->h_Spinlock); if (!p_CcInformation) { memset(&ccNodeInfo, 0, sizeof(t_CcNodeInformation)); ccNodeInfo.h_CcNode = (t_Handle)p_FmPcdCcTree; ccNodeInfo.index = 1; EnqueueNodeInfoToRelevantLst(&p_FmPcdCcNextNode->ccTreeIdLst, &ccNodeInfo, p_FmPcdCcNextNode->h_Spinlock); } else p_CcInformation->index++; } } FmPcdIncNetEnvOwners(h_FmPcd, p_FmPcdCcTree->netEnvId); p_CcTreeTmp = UINT_TO_PTR(p_FmPcdCcTree->ccTreeBaseAddr); if (!FmPcdLockTryLockAll(p_FmPcd)) { FM_PCD_CcRootDelete(p_FmPcdCcTree); XX_Free(p_Params); DBG(TRACE, ("FmPcdLockTryLockAll failed")); return NULL; } for (i = 0; i < numOfEntries; i++) { if (p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction) { err = SetRequiredAction( h_FmPcd, p_FmPcdCcTree->keyAndNextEngineParams[i].requiredAction, &p_FmPcdCcTree->keyAndNextEngineParams[i], p_CcTreeTmp, 1, p_FmPcdCcTree); if (err) { FmPcdLockUnlockAll(p_FmPcd); FM_PCD_CcRootDelete(p_FmPcdCcTree); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); return NULL; } p_CcTreeTmp = PTR_MOVE(p_CcTreeTmp, FM_PCD_CC_AD_ENTRY_SIZE); } } FmPcdLockUnlockAll(p_FmPcd); p_FmPcdCcTree->p_Lock = 
FmPcdAcquireLock(p_FmPcd); if (!p_FmPcdCcTree->p_Lock) { FM_PCD_CcRootDelete(p_FmPcdCcTree); XX_Free(p_Params); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FM CC lock")); return NULL; } XX_Free(p_Params); return p_FmPcdCcTree; } t_Error FM_PCD_CcRootDelete(t_Handle h_CcTree) { t_FmPcd *p_FmPcd; t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; int i = 0; SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE); p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); FmPcdDecNetEnvOwners(p_FmPcd, p_CcTree->netEnvId); if (p_CcTree->owners) RETURN_ERROR( MAJOR, E_INVALID_SELECTION, ("the tree with this ID can not be removed because this tree is occupied, first - unbind this tree")); /* Delete ip-reassembly schemes if exist */ if (p_CcTree->h_IpReassemblyManip) { FmPcdManipDeleteIpReassmSchemes(p_CcTree->h_IpReassemblyManip); FmPcdManipUpdateOwner(p_CcTree->h_IpReassemblyManip, FALSE); } /* Delete capwap-reassembly schemes if exist */ if (p_CcTree->h_CapwapReassemblyManip) { FmPcdManipDeleteCapwapReassmSchemes(p_CcTree->h_CapwapReassemblyManip); FmPcdManipUpdateOwner(p_CcTree->h_CapwapReassemblyManip, FALSE); } for (i = 0; i < p_CcTree->numOfEntries; i++) { if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_CC) UpdateNodeOwner( p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode, FALSE); if (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip) FmPcdManipUpdateOwner( p_CcTree->keyAndNextEngineParams[i].nextEngineParams.h_Manip, FALSE); #ifdef FM_CAPWAP_SUPPORT if ((p_CcTree->numOfGrps == 1) && (p_CcTree->fmPcdGroupParam[0].numOfEntriesInGroup == 1) && (p_CcTree->keyAndNextEngineParams[0].nextEngineParams.nextEngine == e_FM_PCD_CC) && p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode && IsCapwapApplSpecific(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode)) { if 
(FM_PCD_ManipNodeDelete(p_CcTree->keyAndNextEngineParams[0].nextEngineParams.h_Manip) != E_OK) return E_INVALID_STATE; } #endif /* FM_CAPWAP_SUPPORT */ #if (DPAA_VERSION >= 11) if ((p_CcTree->keyAndNextEngineParams[i].nextEngineParams.nextEngine == e_FM_PCD_FR) && (p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic)) FrmReplicGroupUpdateOwner( p_CcTree->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic, FALSE); #endif /* (DPAA_VERSION >= 11) */ } if (p_CcTree->p_Lock) FmPcdReleaseLock(p_CcTree->h_FmPcd, p_CcTree->p_Lock); DeleteTree(p_CcTree, p_FmPcd); return E_OK; } t_Error FM_PCD_CcRootModifyNextEngine( t_Handle h_CcTree, uint8_t grpId, uint8_t index, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcd *p_FmPcd; t_FmPcdCcTree *p_CcTree = (t_FmPcdCcTree *)h_CcTree; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcTree, E_INVALID_STATE); p_FmPcd = (t_FmPcd *)p_CcTree->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); if (!FmPcdLockTryLockAll(p_FmPcd)) { DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FmPcdCcModifyNextEngineParamTree(p_FmPcd, p_CcTree, grpId, index, p_FmPcdCcNextEngineParams); FmPcdLockUnlockAll(p_FmPcd); if (err) { RETURN_ERROR(MAJOR, err, NO_MSG); } return E_OK; } t_Handle FM_PCD_MatchTableSet(t_Handle h_FmPcd, t_FmPcdCcNodeParams *p_CcNodeParam) { t_FmPcdCcNode *p_CcNode; t_Error err; SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_CcNodeParam, E_NULL_POINTER, NULL); p_CcNode = (t_FmPcdCcNode*)XX_Malloc(sizeof(t_FmPcdCcNode)); if (!p_CcNode) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("No memory")); return NULL; } memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); err = MatchTableSet(h_FmPcd, p_CcNode, p_CcNodeParam); switch(GET_ERROR_TYPE(err) ) { case E_OK: break; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return NULL; default: 
REPORT_ERROR(MAJOR, err, NO_MSG);
            return NULL;
    }

    return p_CcNode;
}

/* Destroy a match-table node created by FM_PCD_MatchTableSet().
 * Fails with E_INVALID_STATE while the node still has owners (it must be
 * unbound first). Before freeing, drops the ownership this node holds on
 * every resource its entries point at: child CC nodes, manip nodes and
 * (DPAA >= 11) frame-replicator groups. */
t_Error FM_PCD_MatchTableDelete(t_Handle h_CcNode)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    int i = 0;

    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_CcNode->h_FmPcd, E_INVALID_HANDLE);

    if (p_CcNode->owners)
        RETURN_ERROR(
                MAJOR,
                E_INVALID_STATE,
                ("This node cannot be removed because it is occupied; first unbind this node"));

    /* Release ownership of child CC nodes for all key entries... */
    for (i = 0; i < p_CcNode->numOfKeys; i++)
        if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
                == e_FM_PCD_CC)
            UpdateNodeOwner(
                    p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
                    FALSE);

    /* ...and for the miss entry (after the loop i == numOfKeys, which indexes
     * the miss slot of keyAndNextEngineParams). */
    if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
            == e_FM_PCD_CC)
        UpdateNodeOwner(
                p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode,
                FALSE);

    /* Handle also Miss entry */
    for (i = 0; i < p_CcNode->numOfKeys + 1; i++)
    {
        if (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip)
            FmPcdManipUpdateOwner(
                    p_CcNode->keyAndNextEngineParams[i].nextEngineParams.h_Manip,
                    FALSE);

#if (DPAA_VERSION >= 11)
        if ((p_CcNode->keyAndNextEngineParams[i].nextEngineParams.nextEngine
                == e_FM_PCD_FR)
                && (p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic))
        {
            FrmReplicGroupUpdateOwner(
                    p_CcNode->keyAndNextEngineParams[i].nextEngineParams.params.frParams.h_FrmReplic,
                    FALSE);
        }
#endif /* (DPAA_VERSION >= 11) */
    }

    DeleteNode(p_CcNode);

    return E_OK;
}

/* Insert a new key at `keyIndex` (or append, if FM_PCD_LAST_KEY_INDEX) into a
 * match-table node. Serialized via the global PCD try-lock; returns E_BUSY
 * (without blocking) when the lock cannot be taken. */
t_Error FM_PCD_MatchTableAddKey(t_Handle h_CcNode, uint16_t keyIndex,
                                uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams)
{
    t_FmPcd *p_FmPcd;
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);

    if (keyIndex ==
FM_PCD_LAST_KEY_INDEX)
        /* FM_PCD_LAST_KEY_INDEX means "append after the existing keys". */
        keyIndex = p_CcNode->numOfKeys;

    if (!FmPcdLockTryLockAll(p_FmPcd))
    {
        DBG(TRACE, ("FmPcdLockTryLockAll failed"));
        return ERROR_CODE(E_BUSY);
    }

    err = FmPcdCcAddKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_KeyParams);

    FmPcdLockUnlockAll(p_FmPcd);

    /* Map the internal error onto the API contract: E_BUSY is reported
     * quietly (trace only), anything else goes through RETURN_ERROR. */
    switch(GET_ERROR_TYPE(err) )
    {
        case E_OK:
            return E_OK;

        case E_BUSY:
            DBG(TRACE, ("E_BUSY error"));
            return ERROR_CODE(E_BUSY);

        default:
            RETURN_ERROR(MAJOR, err, NO_MSG);
    }
}

/* Remove the key at `keyIndex` from a match-table node. Serialized via the
 * global PCD try-lock; returns E_BUSY (without blocking) when the lock cannot
 * be taken. */
t_Error FM_PCD_MatchTableRemoveKey(t_Handle h_CcNode, uint16_t keyIndex)
{
    t_FmPcd *p_FmPcd;
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);

    if (!FmPcdLockTryLockAll(p_FmPcd))
    {
        DBG(TRACE, ("FmPcdLockTryLockAll failed"));
        return ERROR_CODE(E_BUSY);
    }

    err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex);

    FmPcdLockUnlockAll(p_FmPcd);

    switch(GET_ERROR_TYPE(err) )
    {
        case E_OK:
            return E_OK;

        case E_BUSY:
            DBG(TRACE, ("E_BUSY error"));
            return ERROR_CODE(E_BUSY);

        default:
            RETURN_ERROR(MAJOR, err, NO_MSG);
    }

    /* NOTE(review): unreachable — every case of the switch above returns
     * (RETURN_ERROR expands to a return). Kept byte-identical here. */
    return E_OK;
}

/* Overwrite the key (and optional mask) at `keyIndex` of a match-table node
 * without touching its next-engine parameters. Serialized via the global PCD
 * try-lock; returns E_BUSY (without blocking) when the lock cannot be taken. */
t_Error FM_PCD_MatchTableModifyKey(t_Handle h_CcNode, uint16_t keyIndex,
                                   uint8_t keySize, uint8_t *p_Key,
                                   uint8_t *p_Mask)
{
    t_FmPcd *p_FmPcd;
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER);

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);

    if (!FmPcdLockTryLockAll(p_FmPcd))
    {
        DBG(TRACE, ("FmPcdLockTryLockAll failed"));
        return ERROR_CODE(E_BUSY);
    }

    err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_Key, p_Mask);

    FmPcdLockUnlockAll(p_FmPcd);

    switch(GET_ERROR_TYPE(err) )
    {
        case E_OK:
            return E_OK;

        case E_BUSY:
            DBG(TRACE, ("E_BUSY error"));
            return
ERROR_CODE(E_BUSY);

        default:
            RETURN_ERROR(MAJOR, err, NO_MSG);
    }
}

/* Replace the next-engine parameters of the key entry at `keyIndex` without
 * changing the key itself. Serialized via the global PCD try-lock; returns
 * E_BUSY (without blocking) when the lock cannot be taken. */
t_Error FM_PCD_MatchTableModifyNextEngine(
        t_Handle h_CcNode, uint16_t keyIndex,
        t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
{
    t_FmPcd *p_FmPcd;
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);

    if (!FmPcdLockTryLockAll(p_FmPcd))
    {
        DBG(TRACE, ("FmPcdLockTryLockAll failed"));
        return ERROR_CODE(E_BUSY);
    }

    err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex,
                                    p_FmPcdCcNextEngineParams);

    FmPcdLockUnlockAll(p_FmPcd);

    /* E_BUSY is reported quietly (trace only); other errors are escalated. */
    switch(GET_ERROR_TYPE(err) )
    {
        case E_OK:
            return E_OK;

        case E_BUSY:
            DBG(TRACE, ("E_BUSY error"));
            return ERROR_CODE(E_BUSY);

        default:
            RETURN_ERROR(MAJOR, err, NO_MSG);
    }
}

/* Replace the next-engine parameters of the table's miss entry. Serialized
 * via the global PCD try-lock; returns E_BUSY (without blocking) when the
 * lock cannot be taken. */
t_Error FM_PCD_MatchTableModifyMissNextEngine(
        t_Handle h_CcNode,
        t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams)
{
    t_FmPcd *p_FmPcd;
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    t_Error err = E_OK;

    SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE);

    p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd;
    SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE);

    if (!FmPcdLockTryLockAll(p_FmPcd))
    {
        DBG(TRACE, ("FmPcdLockTryLockAll failed"));
        return ERROR_CODE(E_BUSY);
    }

    err = FmPcdCcModifyMissNextEngineParamNode(p_FmPcd, p_CcNode,
                                               p_FmPcdCcNextEngineParams);

    FmPcdLockUnlockAll(p_FmPcd);

    switch(GET_ERROR_TYPE(err) )
    {
        case E_OK:
            return E_OK;

        case E_BUSY:
            DBG(TRACE, ("E_BUSY error"));
            return ERROR_CODE(E_BUSY);

        default:
            RETURN_ERROR(MAJOR, err, NO_MSG);
    }
}

/* Replace both the key and the next-engine parameters of the entry at
 * `keyIndex` in one operation (continues on the next source line). */
t_Error FM_PCD_MatchTableModifyKeyAndNextEngine(t_Handle h_CcNode,
                                                uint16_t keyIndex,
                                                uint8_t keySize,
                                                t_FmPcdCcKeyParams
*p_KeyParams) { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); if (!FmPcdLockTryLockAll(p_FmPcd)) { DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, p_CcNode, keyIndex, keySize, p_KeyParams); FmPcdLockUnlockAll(p_FmPcd); switch(GET_ERROR_TYPE(err) ) { case E_OK: return E_OK; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return ERROR_CODE(E_BUSY); default: RETURN_ERROR(MAJOR, err, NO_MSG); } } t_Error FM_PCD_MatchTableFindNRemoveKey(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask) { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint16_t keyIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); if (!FmPcdLockTryLockAll(p_FmPcd)) { DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); if (GET_ERROR_TYPE(err) != E_OK) { FmPcdLockUnlockAll(p_FmPcd); RETURN_ERROR( MAJOR, err, ("The received key and mask pair was not found in the match table of the provided node")); } err = FmPcdCcRemoveKey(p_FmPcd, p_CcNode, keyIndex); FmPcdLockUnlockAll(p_FmPcd); switch(GET_ERROR_TYPE(err) ) { case E_OK: return E_OK; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return ERROR_CODE(E_BUSY); default: RETURN_ERROR(MAJOR, err, NO_MSG); } } t_Error FM_PCD_MatchTableFindNModifyNextEngine( t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, t_FmPcdCcNextEngineParams 
*p_FmPcdCcNextEngineParams) { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint16_t keyIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); if (!FmPcdLockTryLockAll(p_FmPcd)) { DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); if (GET_ERROR_TYPE(err) != E_OK) { FmPcdLockUnlockAll(p_FmPcd); RETURN_ERROR( MAJOR, err, ("The received key and mask pair was not found in the match table of the provided node")); } err = ModifyNextEngineParamNode(p_FmPcd, p_CcNode, keyIndex, p_FmPcdCcNextEngineParams); FmPcdLockUnlockAll(p_FmPcd); switch(GET_ERROR_TYPE(err) ) { case E_OK: return E_OK; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return ERROR_CODE(E_BUSY); default: RETURN_ERROR(MAJOR, err, NO_MSG); } } t_Error FM_PCD_MatchTableFindNModifyKeyAndNextEngine( t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, t_FmPcdCcKeyParams *p_KeyParams) { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint16_t keyIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); if (!FmPcdLockTryLockAll(p_FmPcd)) { DBG(TRACE, ("FmPcdLockTryLockAll failed")); return ERROR_CODE(E_BUSY); } err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); if (GET_ERROR_TYPE(err) != E_OK) { FmPcdLockUnlockAll(p_FmPcd); RETURN_ERROR( MAJOR, err, ("The received key and mask pair was not found 
in the match table of the provided node")); } err = FmPcdCcModifyKeyAndNextEngine(p_FmPcd, h_CcNode, keyIndex, keySize, p_KeyParams); FmPcdLockUnlockAll(p_FmPcd); switch(GET_ERROR_TYPE(err) ) { case E_OK: return E_OK; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return ERROR_CODE(E_BUSY); default: RETURN_ERROR(MAJOR, err, NO_MSG); } } t_Error FM_PCD_MatchTableFindNModifyKey(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, uint8_t *p_NewKey, uint8_t *p_NewMask) { t_FmPcd *p_FmPcd; t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; t_List h_List; uint16_t keyIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_NewKey, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); p_FmPcd = (t_FmPcd *)p_CcNode->h_FmPcd; SANITY_CHECK_RETURN_ERROR(p_FmPcd, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcd->h_Hc, E_INVALID_HANDLE); INIT_LIST(&h_List); err = FmPcdCcNodeTreeTryLock(p_FmPcd, p_CcNode, &h_List); if (err) { DBG(TRACE, ("Node's trees lock failed")); return ERROR_CODE(E_BUSY); } err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); if (GET_ERROR_TYPE(err) != E_OK) { FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the " "match table of the provided node")); } err = FmPcdCcModifyKey(p_FmPcd, p_CcNode, keyIndex, keySize, p_NewKey, p_NewMask); FmPcdCcNodeTreeReleaseLock(p_FmPcd, &h_List); switch(GET_ERROR_TYPE(err) ) { case E_OK: return E_OK; case E_BUSY: DBG(TRACE, ("E_BUSY error")); return ERROR_CODE(E_BUSY); default: RETURN_ERROR(MAJOR, err, NO_MSG); } } t_Error FM_PCD_MatchTableGetNextEngine( t_Handle h_CcNode, uint16_t keyIndex, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; SANITY_CHECK_RETURN_ERROR(p_CcNode, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); if (keyIndex >= p_CcNode->numOfKeys) 
RETURN_ERROR(MAJOR, E_INVALID_STATE,
                     ("keyIndex exceeds current number of keys"));

    if (keyIndex > (FM_PCD_MAX_NUM_OF_KEYS - 1))
        RETURN_ERROR(
                MAJOR,
                E_INVALID_VALUE,
                ("keyIndex can not be larger than %d", (FM_PCD_MAX_NUM_OF_KEYS - 1)));

    memcpy(p_FmPcdCcNextEngineParams,
           &p_CcNode->keyAndNextEngineParams[keyIndex].nextEngineParams,
           sizeof(t_FmPcdCcNextEngineParams));

    return E_OK;
}

/* Read the frame counter of the key at `keyIndex`. Returns 0 (with a
 * reported error) when statistics are disabled, the mode does not count
 * frames, the index is out of range, or the key has no statistics object.
 * The read is done under the node's interrupt spinlock. */
uint32_t FM_PCD_MatchTableGetKeyCounter(t_Handle h_CcNode, uint16_t keyIndex)
{
    t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode;
    uint32_t *p_StatsCounters, frameCount;
    uint32_t intFlags;

    SANITY_CHECK_RETURN_VALUE(p_CcNode, E_INVALID_HANDLE, 0);

    if (p_CcNode->statisticsMode == e_FM_PCD_CC_STATS_MODE_NONE)
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE,
                     ("Statistics were not enabled for this match table"));
        return 0;
    }

    /* Frame counts exist only in FRAME and BYTE_AND_FRAME modes. */
    if ((p_CcNode->statisticsMode != e_FM_PCD_CC_STATS_MODE_FRAME)
            && (p_CcNode->statisticsMode
                    != e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME))
    {
        REPORT_ERROR(MAJOR, E_INVALID_STATE,
                     ("Frame count is not supported in the statistics mode of this match table"));
        return 0;
    }

    intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock);

    if (keyIndex >= p_CcNode->numOfKeys)
    {
        XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
        REPORT_ERROR(MAJOR, E_INVALID_STATE,
                     ("The provided keyIndex exceeds the number of keys in this match table"));
        return 0;
    }

    if (!p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj)
    {
        XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);
        REPORT_ERROR(MAJOR, E_INVALID_STATE,
                     ("Statistics were not enabled for this key"));
        return 0;
    }

    p_StatsCounters =
            p_CcNode->keyAndNextEngineParams[keyIndex].p_StatsObj->h_StatsCounters;
    ASSERT_COND(p_StatsCounters);

    /* The first counter is byte counter, so we need to advance to the
       next counter */
    frameCount = GET_UINT32(*(uint32_t *)(PTR_MOVE(p_StatsCounters,
                                                   FM_PCD_CC_STATS_COUNTER_SIZE)));

    XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags);

    return frameCount;
}

/* Copy the statistics of the key at `keyIndex` into the caller's structure
 * (continues on the next source line). */
t_Error FM_PCD_MatchTableGetKeyStatistics( t_Handle h_CcNode, uint16_t
keyIndex, t_FmPcdCcKeyStatistics *p_KeyStatistics) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint32_t intFlags; t_Error err; SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); if (keyIndex >= p_CcNode->numOfKeys) RETURN_ERROR( MAJOR, E_INVALID_STATE, ("The provided keyIndex exceeds the number of keys in this match table")); err = MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics); XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } t_Error FM_PCD_MatchTableGetMissStatistics( t_Handle h_CcNode, t_FmPcdCcKeyStatistics *p_MissStatistics) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint32_t intFlags; t_Error err; SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER); intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); err = MatchTableGetKeyStatistics(p_CcNode, p_CcNode->numOfKeys, p_MissStatistics); XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } t_Error FM_PCD_MatchTableFindNGetKeyStatistics( t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t *p_Mask, t_FmPcdCcKeyStatistics *p_KeyStatistics) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint16_t keyIndex; uint32_t intFlags; t_Error err; SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); intFlags = XX_LockIntrSpinlock(p_CcNode->h_Spinlock); err = FindKeyIndex(p_CcNode, keySize, p_Key, p_Mask, &keyIndex); if (GET_ERROR_TYPE(err) != E_OK) { XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); RETURN_ERROR(MAJOR, err, ("The received key and mask pair was not found in the " "match table of the provided node")); } ASSERT_COND(keyIndex < p_CcNode->numOfKeys); err = 
MatchTableGetKeyStatistics(p_CcNode, keyIndex, p_KeyStatistics); XX_UnlockIntrSpinlock(p_CcNode->h_Spinlock, intFlags); if (err != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } t_Error FM_PCD_MatchTableGetIndexedHashBucket(t_Handle h_CcNode, uint8_t keySize, uint8_t *p_Key, uint8_t hashShift, t_Handle *p_CcNodeBucketHandle, uint8_t *p_BucketIndex, uint16_t *p_LastIndex) { t_FmPcdCcNode *p_CcNode = (t_FmPcdCcNode *)h_CcNode; uint16_t glblMask; uint64_t crc64 = 0; SANITY_CHECK_RETURN_ERROR(h_CcNode, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR( p_CcNode->parseCode == CC_PC_GENERIC_IC_HASH_INDEXED, E_INVALID_STATE); SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_CcNodeBucketHandle, E_NULL_POINTER); memcpy(&glblMask, PTR_MOVE(p_CcNode->p_GlblMask, 2), 2); glblMask = be16toh(glblMask); crc64 = crc64_init(); crc64 = crc64_compute(p_Key, keySize, crc64); crc64 >>= hashShift; *p_BucketIndex = (uint8_t)(((crc64 >> (8 * (6 - p_CcNode->userOffset))) & glblMask) >> 4); if (*p_BucketIndex >= p_CcNode->numOfKeys) RETURN_ERROR(MINOR, E_NOT_IN_RANGE, ("bucket index!")); *p_CcNodeBucketHandle = p_CcNode->keyAndNextEngineParams[*p_BucketIndex].nextEngineParams.params.ccParams.h_CcNode; if (!*p_CcNodeBucketHandle) RETURN_ERROR(MINOR, E_NOT_FOUND, ("bucket!")); *p_LastIndex = ((t_FmPcdCcNode *)*p_CcNodeBucketHandle)->numOfKeys; return E_OK; } t_Handle FM_PCD_HashTableSet(t_Handle h_FmPcd, t_FmPcdHashTableParams *p_Param) { t_FmPcdCcNode *p_CcNodeHashTbl; t_FmPcdCcNodeParams *p_IndxHashCcNodeParam, *p_ExactMatchCcNodeParam; t_FmPcdCcNode *p_CcNode; t_Handle h_MissStatsCounters = NULL; t_FmPcdCcKeyParams *p_HashKeyParams; int i; uint16_t numOfSets, numOfWays, countMask, onesCount = 0; bool statsEnForMiss = FALSE; t_Error err; SANITY_CHECK_RETURN_VALUE(h_FmPcd, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_Param, E_NULL_POINTER, NULL); if (p_Param->maxNumOfKeys == 0) { REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Max number of keys must be higher 
then 0")); return NULL; } if (p_Param->hashResMask == 0) { REPORT_ERROR(MINOR, E_INVALID_VALUE, ("Hash result mask must differ from 0")); return NULL; } /*Fix: QorIQ SDK / QSDK-2131*/ if (p_Param->ccNextEngineParamsForMiss.nextEngine == e_FM_PCD_INVALID) { REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("Next PCD Engine for on-miss entry is invalid. On-miss entry is always required. You can use e_FM_PCD_DONE.")); return NULL; } #if (DPAA_VERSION >= 11) if (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_RMON) { REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("RMON statistics mode is not supported for hash table")); return NULL; } #endif /* (DPAA_VERSION >= 11) */ p_ExactMatchCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc( sizeof(t_FmPcdCcNodeParams)); if (!p_ExactMatchCcNodeParam) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_ExactMatchCcNodeParam")); return NULL; } memset(p_ExactMatchCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams)); p_IndxHashCcNodeParam = (t_FmPcdCcNodeParams*)XX_Malloc( sizeof(t_FmPcdCcNodeParams)); if (!p_IndxHashCcNodeParam) { XX_Free(p_ExactMatchCcNodeParam); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("p_IndxHashCcNodeParam")); return NULL; } memset(p_IndxHashCcNodeParam, 0, sizeof(t_FmPcdCcNodeParams)); /* Calculate number of sets and number of ways of the hash table */ countMask = (uint16_t)(p_Param->hashResMask >> 4); while (countMask) { onesCount++; countMask = (uint16_t)(countMask >> 1); } numOfSets = (uint16_t)(1 << onesCount); numOfWays = (uint16_t)DIV_CEIL(p_Param->maxNumOfKeys, numOfSets); if (p_Param->maxNumOfKeys % numOfSets) DBG(INFO, ("'maxNumOfKeys' is not a multiple of hash number of ways, so number of ways will be rounded up")); if ((p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_FRAME) || (p_Param->statisticsMode == e_FM_PCD_CC_STATS_MODE_BYTE_AND_FRAME)) { /* Allocating a statistics counters table that will be used by all 'miss' entries of the hash table */ h_MissStatsCounters = (t_Handle)FM_MURAM_AllocMem( FmPcdGetMuramHandle(h_FmPcd), 2 * 
FM_PCD_CC_STATS_COUNTER_SIZE, FM_PCD_CC_AD_TABLE_ALIGN); if (!h_MissStatsCounters) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("MURAM allocation for statistics table for hash miss")); XX_Free(p_IndxHashCcNodeParam); XX_Free(p_ExactMatchCcNodeParam); return NULL; } memset(h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); /* Always enable statistics for 'miss', so that a statistics AD will be initialized from the start. We'll store the requested 'statistics enable' value and it will be used when statistics are read by the user. */ statsEnForMiss = p_Param->ccNextEngineParamsForMiss.statisticsEn; p_Param->ccNextEngineParamsForMiss.statisticsEn = TRUE; } /* Building exact-match node params, will be used to create the hash buckets */ p_ExactMatchCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR; p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.src = e_FM_PCD_EXTRACT_FROM_KEY; p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.action = e_FM_PCD_ACTION_EXACT_MATCH; p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.offset = 0; p_ExactMatchCcNodeParam->extractCcParams.extractNonHdr.size = p_Param->matchKeySize; p_ExactMatchCcNodeParam->keysParams.maxNumOfKeys = numOfWays; p_ExactMatchCcNodeParam->keysParams.maskSupport = FALSE; p_ExactMatchCcNodeParam->keysParams.statisticsMode = p_Param->statisticsMode; p_ExactMatchCcNodeParam->keysParams.numOfKeys = 0; p_ExactMatchCcNodeParam->keysParams.keySize = p_Param->matchKeySize; p_ExactMatchCcNodeParam->keysParams.ccNextEngineParamsForMiss = p_Param->ccNextEngineParamsForMiss; p_HashKeyParams = p_IndxHashCcNodeParam->keysParams.keyParams; for (i = 0; i < numOfSets; i++) { /* Each exact-match node will be marked as a 'bucket' and provided with a pointer to statistics counters, to be used for 'miss' entry statistics */ p_CcNode = (t_FmPcdCcNode *)XX_Malloc(sizeof(t_FmPcdCcNode)); if (!p_CcNode) break; memset(p_CcNode, 0, sizeof(t_FmPcdCcNode)); p_CcNode->isHashBucket = TRUE; p_CcNode->h_MissStatsCounters 
= h_MissStatsCounters; err = MatchTableSet(h_FmPcd, p_CcNode, p_ExactMatchCcNodeParam); if (err) break; p_HashKeyParams[i].ccNextEngineParams.nextEngine = e_FM_PCD_CC; p_HashKeyParams[i].ccNextEngineParams.statisticsEn = FALSE; p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode = p_CcNode; } if (i < numOfSets) { for (i = i - 1; i >= 0; i--) FM_PCD_MatchTableDelete( p_HashKeyParams[i].ccNextEngineParams.params.ccParams.h_CcNode); FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters); REPORT_ERROR(MAJOR, E_NULL_POINTER, NO_MSG); XX_Free(p_IndxHashCcNodeParam); XX_Free(p_ExactMatchCcNodeParam); return NULL; } /* Creating indexed-hash CC node */ p_IndxHashCcNodeParam->extractCcParams.type = e_FM_PCD_EXTRACT_NON_HDR; p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.src = e_FM_PCD_EXTRACT_FROM_HASH; p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.action = e_FM_PCD_ACTION_INDEXED_LOOKUP; p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.icIndxMask = p_Param->hashResMask; p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.offset = p_Param->hashShift; p_IndxHashCcNodeParam->extractCcParams.extractNonHdr.size = 2; p_IndxHashCcNodeParam->keysParams.maxNumOfKeys = numOfSets; p_IndxHashCcNodeParam->keysParams.maskSupport = FALSE; p_IndxHashCcNodeParam->keysParams.statisticsMode = e_FM_PCD_CC_STATS_MODE_NONE; /* Number of keys of this node is number of sets of the hash */ p_IndxHashCcNodeParam->keysParams.numOfKeys = numOfSets; p_IndxHashCcNodeParam->keysParams.keySize = 2; p_CcNodeHashTbl = FM_PCD_MatchTableSet(h_FmPcd, p_IndxHashCcNodeParam); if (p_CcNodeHashTbl) { p_CcNodeHashTbl->kgHashShift = p_Param->kgHashShift; /* Storing the allocated counters for buckets 'miss' in the hash table and if statistics for miss were enabled. 
*/ p_CcNodeHashTbl->h_MissStatsCounters = h_MissStatsCounters; p_CcNodeHashTbl->statsEnForMiss = statsEnForMiss; } XX_Free(p_IndxHashCcNodeParam); XX_Free(p_ExactMatchCcNodeParam); return p_CcNodeHashTbl; } t_Error FM_PCD_HashTableDelete(t_Handle h_HashTbl) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_FmPcd; t_Handle *p_HashBuckets, h_MissStatsCounters; uint16_t i, numOfBuckets; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); /* Store all hash buckets before the hash is freed */ numOfBuckets = p_HashTbl->numOfKeys; p_HashBuckets = (t_Handle *)XX_Malloc(numOfBuckets * sizeof(t_Handle)); if (!p_HashBuckets) RETURN_ERROR(MAJOR, E_NO_MEMORY, NO_MSG); for (i = 0; i < numOfBuckets; i++) p_HashBuckets[i] = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; h_FmPcd = p_HashTbl->h_FmPcd; h_MissStatsCounters = p_HashTbl->h_MissStatsCounters; /* Free the hash */ err = FM_PCD_MatchTableDelete(p_HashTbl); /* Free each hash bucket */ for (i = 0; i < numOfBuckets; i++) err |= FM_PCD_MatchTableDelete(p_HashBuckets[i]); XX_Free(p_HashBuckets); /* Free statistics counters for 'miss', if these were allocated */ if (h_MissStatsCounters) FM_MURAM_FreeMem(FmPcdGetMuramHandle(h_FmPcd), h_MissStatsCounters); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return E_OK; } t_Error FM_PCD_HashTableAddKey(t_Handle h_HashTbl, uint8_t keySize, t_FmPcdCcKeyParams *p_KeyParams) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t bucketIndex; uint16_t lastIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_KeyParams, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_KeyParams->p_Key, E_NULL_POINTER); if (p_KeyParams->p_Mask) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Keys masks not supported for hash table")); err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_KeyParams->p_Key, p_HashTbl->kgHashShift, &h_HashBucket, &bucketIndex, 
&lastIndex); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return FM_PCD_MatchTableAddKey(h_HashBucket, FM_PCD_LAST_KEY_INDEX, keySize, p_KeyParams); } t_Error FM_PCD_HashTableRemoveKey(t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t bucketIndex; uint16_t lastIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key, p_HashTbl->kgHashShift, &h_HashBucket, &bucketIndex, &lastIndex); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return FM_PCD_MatchTableFindNRemoveKey(h_HashBucket, keySize, p_Key, NULL); } t_Error FM_PCD_HashTableModifyNextEngine( t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t bucketIndex; uint16_t lastIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); err = FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key, p_HashTbl->kgHashShift, &h_HashBucket, &bucketIndex, &lastIndex); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return FM_PCD_MatchTableFindNModifyNextEngine(h_HashBucket, keySize, p_Key, NULL, p_FmPcdCcNextEngineParams); } t_Error FM_PCD_HashTableModifyMissNextEngine( t_Handle h_HashTbl, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t i; bool nullifyMissStats = FALSE; t_Error err; SANITY_CHECK_RETURN_ERROR(h_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_FmPcdCcNextEngineParams, E_NULL_POINTER); if ((!p_HashTbl->h_MissStatsCounters) && (p_FmPcdCcNextEngineParams->statisticsEn)) RETURN_ERROR( MAJOR, E_CONFLICT, ("Statistics are 
requested for a key, but statistics mode was set" "to 'NONE' upon initialization")); if (p_HashTbl->h_MissStatsCounters) { if ((!p_HashTbl->statsEnForMiss) && (p_FmPcdCcNextEngineParams->statisticsEn)) nullifyMissStats = TRUE; if ((p_HashTbl->statsEnForMiss) && (!p_FmPcdCcNextEngineParams->statisticsEn)) { p_HashTbl->statsEnForMiss = FALSE; p_FmPcdCcNextEngineParams->statisticsEn = TRUE; } } for (i = 0; i < p_HashTbl->numOfKeys; i++) { h_HashBucket = p_HashTbl->keyAndNextEngineParams[i].nextEngineParams.params.ccParams.h_CcNode; err = FM_PCD_MatchTableModifyMissNextEngine(h_HashBucket, p_FmPcdCcNextEngineParams); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); } if (nullifyMissStats) { memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); memset(p_HashTbl->h_MissStatsCounters, 0, (2 * FM_PCD_CC_STATS_COUNTER_SIZE)); p_HashTbl->statsEnForMiss = TRUE; } return E_OK; } t_Error FM_PCD_HashTableGetMissNextEngine( t_Handle h_HashTbl, t_FmPcdCcNextEngineParams *p_FmPcdCcNextEngineParams) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_FmPcdCcNode *p_HashBucket; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); /* Miss next engine of each bucket was initialized with the next engine of the hash table */ p_HashBucket = p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode; memcpy(p_FmPcdCcNextEngineParams, &p_HashBucket->keyAndNextEngineParams[p_HashBucket->numOfKeys].nextEngineParams, sizeof(t_FmPcdCcNextEngineParams)); return E_OK; } t_Error FM_PCD_HashTableFindNGetKeyStatistics( t_Handle h_HashTbl, uint8_t keySize, uint8_t *p_Key, t_FmPcdCcKeyStatistics *p_KeyStatistics) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; uint8_t bucketIndex; uint16_t lastIndex; t_Error err; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_Key, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR(p_KeyStatistics, E_NULL_POINTER); err = 
FM_PCD_MatchTableGetIndexedHashBucket(p_HashTbl, keySize, p_Key, p_HashTbl->kgHashShift, &h_HashBucket, &bucketIndex, &lastIndex); if (err) RETURN_ERROR(MAJOR, err, NO_MSG); return FM_PCD_MatchTableFindNGetKeyStatistics(h_HashBucket, keySize, p_Key, NULL, p_KeyStatistics); } t_Error FM_PCD_HashTableGetMissStatistics( t_Handle h_HashTbl, t_FmPcdCcKeyStatistics *p_MissStatistics) { t_FmPcdCcNode *p_HashTbl = (t_FmPcdCcNode *)h_HashTbl; t_Handle h_HashBucket; SANITY_CHECK_RETURN_ERROR(p_HashTbl, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_MissStatistics, E_NULL_POINTER); if (!p_HashTbl->statsEnForMiss) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("Statistics were not enabled for miss")); h_HashBucket = p_HashTbl->keyAndNextEngineParams[0].nextEngineParams.params.ccParams.h_CcNode; return FM_PCD_MatchTableGetMissStatistics(h_HashBucket, p_MissStatistics); } Index: head/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c =================================================================== --- head/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c (revision 351321) +++ head/sys/contrib/ncsw/Peripherals/QM/qm_portal_fqr.c (revision 351322) @@ -1,2735 +1,2734 @@ /****************************************************************************** © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc. All rights reserved. This is proprietary source code of Freescale Semiconductor Inc., and its use is subject to the NetComm Device Drivers EULA. The copyright notice above does not evidence any actual or intended publication of such source code. ALTERNATIVELY, redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Freescale Semiconductor nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************/ /****************************************************************************** @File qm.c @Description QM & Portal implementation *//***************************************************************************/ #include #include #include #include "error_ext.h" #include "std_ext.h" #include "string_ext.h" #include "mm_ext.h" #include "qm.h" #include "qman_low.h" #include /****************************************/ /* static functions */ /****************************************/ #define SLOW_POLL_IDLE 1000 #define SLOW_POLL_BUSY 10 /* * Context entries are 32-bit. The qman driver uses the pointer to the queue as * its context, and the pointer is 64-byte aligned, per the XX_MallocSmart() * call. 
Take advantage of this fact to shove a 64-bit kernel pointer into a * 32-bit context integer, and back. * * XXX: This depends on the fact that VM_MAX_KERNEL_ADDRESS is less than 38-bit * count from VM_MIN_KERNEL_ADDRESS. If this ever changes, this needs to be * updated. */ CTASSERT((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) < (1ULL << 35)); static inline uint32_t aligned_int_from_ptr(const void *p) { uintptr_t ctx; ctx = (uintptr_t)p; KASSERT(ctx >= VM_MIN_KERNEL_ADDRESS, ("%p is too low!\n", p)); ctx -= VM_MIN_KERNEL_ADDRESS; KASSERT((ctx & 0x07) == 0, ("Pointer %p is not 8-byte aligned!\n", p)); return (ctx >> 3); } static inline void * ptr_from_aligned_int(uint32_t ctx) { uintptr_t p; p = ctx; p = VM_MIN_KERNEL_ADDRESS + (p << 3); return ((void *)p); } static t_Error qman_volatile_dequeue(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, uint32_t vdqcr) { ASSERT_COND((p_Fq->state == qman_fq_state_parked) || (p_Fq->state == qman_fq_state_retired)); ASSERT_COND(!(vdqcr & QM_VDQCR_FQID_MASK)); ASSERT_COND(!(p_Fq->flags & QMAN_FQ_STATE_VDQCR)); vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | p_Fq->fqid; NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); p_Fq->flags |= QMAN_FQ_STATE_VDQCR; qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, vdqcr); FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } static const char *mcr_result_str(uint8_t result) { switch (result) { case QM_MCR_RESULT_NULL: return "QM_MCR_RESULT_NULL"; case QM_MCR_RESULT_OK: return "QM_MCR_RESULT_OK"; case QM_MCR_RESULT_ERR_FQID: return "QM_MCR_RESULT_ERR_FQID"; case QM_MCR_RESULT_ERR_FQSTATE: return "QM_MCR_RESULT_ERR_FQSTATE"; case QM_MCR_RESULT_ERR_NOTEMPTY: return "QM_MCR_RESULT_ERR_NOTEMPTY"; case QM_MCR_RESULT_PENDING: return "QM_MCR_RESULT_PENDING"; } return ""; } static t_Error qman_create_fq(t_QmPortal *p_QmPortal, uint32_t fqid, uint32_t flags, struct qman_fq *p_Fq) { struct qm_fqd fqd; struct qm_mcr_queryfq_np np; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; p_Fq->fqid = fqid; p_Fq->flags = flags; 
p_Fq->state = qman_fq_state_oos; p_Fq->cgr_groupid = 0; if (!(flags & QMAN_FQ_FLAG_RECOVER) || (flags & QMAN_FQ_FLAG_NO_MODIFY)) return E_OK; /* Everything else is RECOVER support */ NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->queryfq.fqid = fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QUERYFQ failed: %s", mcr_result_str(p_Mcr->result))); } fqd = p_Mcr->queryfq.fqd; p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->queryfq_np.fqid = fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MAJOR, E_INVALID_STATE, ("UERYFQ_NP failed: %s", mcr_result_str(p_Mcr->result))); } np = p_Mcr->queryfq_np; /* Phew, have queryfq and queryfq_np results, stitch together * the FQ object from those. 
*/ p_Fq->cgr_groupid = fqd.cgid; switch (np.state & QM_MCR_NP_STATE_MASK) { case QM_MCR_NP_STATE_OOS: break; case QM_MCR_NP_STATE_RETIRED: p_Fq->state = qman_fq_state_retired; if (np.frm_cnt) p_Fq->flags |= QMAN_FQ_STATE_NE; break; case QM_MCR_NP_STATE_TEN_SCHED: case QM_MCR_NP_STATE_TRU_SCHED: case QM_MCR_NP_STATE_ACTIVE: p_Fq->state = qman_fq_state_sched; if (np.state & QM_MCR_NP_STATE_R) p_Fq->flags |= QMAN_FQ_STATE_CHANGING; break; case QM_MCR_NP_STATE_PARKED: p_Fq->state = qman_fq_state_parked; break; default: ASSERT_COND(FALSE); } if (fqd.fq_ctrl & QM_FQCTRL_CGE) p_Fq->state |= QMAN_FQ_STATE_CGR_EN; PUNLOCK(p_QmPortal); return E_OK; } static void qman_destroy_fq(struct qman_fq *p_Fq, uint32_t flags) { /* We don't need to lock the FQ as it is a pre-condition that the FQ be * quiesced. Instead, run some checks. */ UNUSED(flags); switch (p_Fq->state) { case qman_fq_state_parked: ASSERT_COND(flags & QMAN_FQ_DESTROY_PARKED); case qman_fq_state_oos: return; default: break; } ASSERT_COND(FALSE); } static t_Error qman_init_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, uint32_t flags, struct qm_mcc_initfq *p_Opts) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res, myverb = (uint8_t)((flags & QMAN_INITFQ_FLAG_SCHED) ? 
QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED); SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_oos) || (p_Fq->state == qman_fq_state_parked), E_INVALID_STATE); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); /* Issue an INITFQ_[PARKED|SCHED] management command */ NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || ((p_Fq->state != qman_fq_state_oos) && (p_Fq->state != qman_fq_state_parked))) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); Mem2IOCpy32((void*)&p_Mcc->initfq, p_Opts, sizeof(struct qm_mcc_initfq)); qm_mc_commit(p_QmPortal->p_LowQmPortal, myverb); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == myverb); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE,("INITFQ failed: %s", mcr_result_str(res))); } if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_FQCTRL) { if (p_Mcc->initfq.fqd.fq_ctrl & QM_FQCTRL_CGE) p_Fq->flags |= QMAN_FQ_STATE_CGR_EN; else p_Fq->flags &= ~QMAN_FQ_STATE_CGR_EN; } if (p_Mcc->initfq.we_mask & QM_INITFQ_WE_CGID) p_Fq->cgr_groupid = p_Mcc->initfq.fqd.cgid; p_Fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ? 
qman_fq_state_sched : qman_fq_state_parked; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } static t_Error qman_retire_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq, uint32_t *p_Flags, bool drain) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_Error err = E_OK; uint8_t res; SANITY_CHECK_RETURN_ERROR((p_Fq->state == qman_fq_state_parked) || (p_Fq->state == qman_fq_state_sched), E_INVALID_STATE); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return E_INVALID_VALUE; NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || (p_Fq->state == qman_fq_state_retired) || (p_Fq->state == qman_fq_state_oos)) { err = E_BUSY; goto out; } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; if (drain) p_Mcc->alterfq.context_b = aligned_int_from_ptr(p_Fq); qm_mc_commit(p_QmPortal->p_LowQmPortal, (uint8_t)((drain)?QM_MCC_VERB_ALTER_RETIRE_CTXB:QM_MCC_VERB_ALTER_RETIRE)); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == (drain)?QM_MCR_VERB_ALTER_RETIRE_CTXB:QM_MCR_VERB_ALTER_RETIRE); res = p_Mcr->result; if (res == QM_MCR_RESULT_OK) { /* Process 'fq' right away, we'll ignore FQRNI */ if (p_Mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) p_Fq->flags |= QMAN_FQ_STATE_NE; if (p_Mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT) p_Fq->flags |= QMAN_FQ_STATE_ORL; p_Fq->state = qman_fq_state_retired; } else if (res == QM_MCR_RESULT_PENDING) p_Fq->flags |= QMAN_FQ_STATE_CHANGING; else { XX_Print("ALTER_RETIRE failed: %s\n", mcr_result_str(res)); err = E_INVALID_STATE; } if (p_Flags) *p_Flags = p_Fq->flags; out: FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return err; } static t_Error qman_oos_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res; ASSERT_COND(p_Fq->state == qman_fq_state_retired); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if 
((p_Fq->flags & QMAN_FQ_STATE_BLOCKOOS) || (p_Fq->state != qman_fq_state_retired)) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_OOS); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_OOS failed: %s\n", mcr_result_str(res))); } p_Fq->state = qman_fq_state_oos; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } static t_Error qman_schedule_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq) { struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; uint8_t res; ASSERT_COND(p_Fq->state == qman_fq_state_parked); if (p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY) return ERROR_CODE(E_INVALID_VALUE); /* Issue a ALTERFQ_SCHED management command */ NCSW_PLOCK(p_QmPortal); FQLOCK(p_Fq); if ((p_Fq->flags & QMAN_FQ_STATE_CHANGING) || (p_Fq->state != qman_fq_state_parked)) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->alterfq.fqid = p_Fq->fqid; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_ALTER_SCHED); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED); res = p_Mcr->result; if (res != QM_MCR_RESULT_OK) { FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("ALTER_SCHED failed: %s\n", mcr_result_str(res))); } p_Fq->state = qman_fq_state_sched; FQUNLOCK(p_Fq); PUNLOCK(p_QmPortal); return E_OK; } /* Inline helper to reduce nesting in LoopMessageRing() */ static __inline__ void fq_state_change(struct qman_fq *p_Fq, struct qm_mr_entry *p_Msg, uint8_t verb) { FQLOCK(p_Fq); switch(verb) { case QM_MR_VERB_FQRL: ASSERT_COND(p_Fq->flags & 
QMAN_FQ_STATE_ORL); p_Fq->flags &= ~QMAN_FQ_STATE_ORL; break; case QM_MR_VERB_FQRN: ASSERT_COND((p_Fq->state == qman_fq_state_parked) || (p_Fq->state == qman_fq_state_sched)); ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING); p_Fq->flags &= ~QMAN_FQ_STATE_CHANGING; if (p_Msg->fq.fqs & QM_MR_FQS_NOTEMPTY) p_Fq->flags |= QMAN_FQ_STATE_NE; if (p_Msg->fq.fqs & QM_MR_FQS_ORLPRESENT) p_Fq->flags |= QMAN_FQ_STATE_ORL; p_Fq->state = qman_fq_state_retired; break; case QM_MR_VERB_FQPN: ASSERT_COND(p_Fq->state == qman_fq_state_sched); ASSERT_COND(p_Fq->flags & QMAN_FQ_STATE_CHANGING); p_Fq->state = qman_fq_state_parked; } FQUNLOCK(p_Fq); } static t_Error freeDrainedFq(struct qman_fq *p_Fq) { t_QmFqr *p_QmFqr; uint32_t i; ASSERT_COND(p_Fq); p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr; ASSERT_COND(p_QmFqr); ASSERT_COND(!p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset]); p_QmFqr->p_DrainedFqs[p_Fq->fqidOffset] = TRUE; p_QmFqr->numOfDrainedFqids++; if (p_QmFqr->numOfDrainedFqids == p_QmFqr->numOfFqids) { for (i=0;inumOfFqids;i++) { if ((p_QmFqr->p_Fqs[i]->state == qman_fq_state_retired) && (qman_oos_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]) != E_OK)) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!")); qman_destroy_fq(p_QmFqr->p_Fqs[i], 0); XX_FreeSmart(p_QmFqr->p_Fqs[i]); } XX_Free(p_QmFqr->p_DrainedFqs); p_QmFqr->p_DrainedFqs = NULL; if (p_QmFqr->f_CompletionCB) { p_QmFqr->f_CompletionCB(p_QmFqr->h_App, p_QmFqr); XX_Free(p_QmFqr->p_Fqs); if (p_QmFqr->fqidBase) QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase); XX_Free(p_QmFqr); } } return E_OK; } static t_Error drainRetiredFq(struct qman_fq *p_Fq) { t_QmFqr *p_QmFqr; ASSERT_COND(p_Fq); p_QmFqr = (t_QmFqr *)p_Fq->h_QmFqr; ASSERT_COND(p_QmFqr); if (p_Fq->flags & QMAN_FQ_STATE_NE) { if (qman_volatile_dequeue(p_QmFqr->h_QmPortal, p_Fq, (QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY)) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("drain with volatile failed")); return E_OK; } else return freeDrainedFq(p_Fq); } static 
e_RxStoreResponse drainCB(t_Handle h_App, t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame) { UNUSED(h_App); UNUSED(h_QmFqr); UNUSED(h_QmPortal); UNUSED(fqidOffset); UNUSED(p_Frame); DBG(TRACE,("got fd for fqid %d", ((t_QmFqr *)h_QmFqr)->fqidBase + fqidOffset)); return e_RX_STORE_RESPONSE_CONTINUE; } static void cb_ern_dcErn(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { static int cnt = 0; UNUSED(p_Fq); UNUSED(p_Msg); UNUSED(h_App); UNUSED(h_QmPortal); XX_Print("cb_ern_dcErn_fqs() unimplemented %d\n", ++cnt); } static void cb_fqs(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { UNUSED(p_Msg); UNUSED(h_App); UNUSED(h_QmPortal); if (p_Fq->state == qman_fq_state_retired && !(p_Fq->flags & QMAN_FQ_STATE_ORL)) drainRetiredFq(p_Fq); } static void null_cb_mr(t_Handle h_App, t_Handle h_QmPortal, struct qman_fq *p_Fq, const struct qm_mr_entry *p_Msg) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; UNUSED(p_Fq);UNUSED(h_App); if ((p_Msg->verb & QM_MR_VERB_DC_ERN) == QM_MR_VERB_DC_ERN) XX_Print("Ignoring unowned MR frame on cpu %d, dc-portal 0x%02x.\n", p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->dcern.portal); else XX_Print("Ignoring unowned MR frame on cpu %d, verb 0x%02x.\n", p_QmPortal->p_LowQmPortal->config.cpu,p_Msg->verb); } static uint32_t LoopMessageRing(t_QmPortal *p_QmPortal, uint32_t is) { struct qm_mr_entry *p_Msg; if (is & QM_PIRQ_CSCI) { struct qm_mc_result *p_Mcr; struct qman_cgrs tmp; uint32_t mask; unsigned int i, j; NCSW_PLOCK(p_QmPortal); qm_mc_start(p_QmPortal->p_LowQmPortal); qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCONGESTION); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; /* cgrs[0] is the portal mask for its cg's, cgrs[1] is the previous state of cg's */ for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++) { /* get curent state */ tmp.q.__state[i] = p_Mcr->querycongestion.state.__state[i]; /* keep only 
cg's that are registered for this portal */ tmp.q.__state[i] &= p_QmPortal->cgrs[0].q.__state[i]; /* handle only cg's that changed their state from previous exception */ tmp.q.__state[i] ^= p_QmPortal->cgrs[1].q.__state[i]; /* update previous */ p_QmPortal->cgrs[1].q.__state[i] = p_Mcr->querycongestion.state.__state[i]; } PUNLOCK(p_QmPortal); /* if in interrupt */ /* call the callback routines for any CG with a changed state */ for (i = 0; i < QM_MAX_NUM_OF_CGS/32; i++) for(j=0, mask = 0x80000000; j<32 ; j++, mask>>=1) { if(tmp.q.__state[i] & mask) { t_QmCg *p_QmCg = (t_QmCg *)(p_QmPortal->cgsHandles[i*32 + j]); if(p_QmCg->f_Exception) p_QmCg->f_Exception(p_QmCg->h_App, e_QM_EX_CG_STATE_CHANGE); } } } if (is & QM_PIRQ_EQRI) { NCSW_PLOCK(p_QmPortal); qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal); qm_eqcr_set_ithresh(p_QmPortal->p_LowQmPortal, 0); PUNLOCK(p_QmPortal); } if (is & QM_PIRQ_MRI) { mr_loop: qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal); if (p_Msg) { struct qman_fq *p_FqFqs = ptr_from_aligned_int(p_Msg->fq.contextB); struct qman_fq *p_FqErn = ptr_from_aligned_int(p_Msg->ern.tag); uint8_t verb =(uint8_t)(p_Msg->verb & QM_MR_VERB_TYPE_MASK); t_QmRejectedFrameInfo rejectedFrameInfo; memset(&rejectedFrameInfo, 0, sizeof(t_QmRejectedFrameInfo)); if (!(verb & QM_MR_VERB_DC_ERN)) { switch(p_Msg->ern.rc) { case(QM_MR_RC_CGR_TAILDROP): rejectedFrameInfo.rejectionCode = e_QM_RC_CG_TAILDROP; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_WRED): rejectedFrameInfo.rejectionCode = e_QM_RC_CG_WRED; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_FQ_TAILDROP): rejectedFrameInfo.rejectionCode = e_QM_RC_FQ_TAILDROP; rejectedFrameInfo.cg.cgId = (uint8_t)p_FqErn->cgr_groupid; break; case(QM_MR_RC_ERROR): break; default: REPORT_ERROR(MINOR, E_NOT_SUPPORTED, ("Unknown rejection code")); } if (!p_FqErn) p_QmPortal->p_NullCB->ern(p_QmPortal->h_App, NULL, 
p_QmPortal, 0, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo); else p_FqErn->cb.ern(p_FqErn->h_App, p_FqErn->h_QmFqr, p_QmPortal, p_FqErn->fqidOffset, (t_DpaaFD*)&p_Msg->ern.fd, &rejectedFrameInfo); } else if (verb == QM_MR_VERB_DC_ERN) { if (!p_FqErn) p_QmPortal->p_NullCB->dc_ern(NULL, p_QmPortal, NULL, p_Msg); else p_FqErn->cb.dc_ern(p_FqErn->h_App, p_QmPortal, p_FqErn, p_Msg); } else { if (verb == QM_MR_VERB_FQRNI) ; /* we drop FQRNIs on the floor */ else if (!p_FqFqs) p_QmPortal->p_NullCB->fqs(NULL, p_QmPortal, NULL, p_Msg); else if ((verb == QM_MR_VERB_FQRN) || (verb == QM_MR_VERB_FQRL) || (verb == QM_MR_VERB_FQPN)) { fq_state_change(p_FqFqs, p_Msg, verb); p_FqFqs->cb.fqs(p_FqFqs->h_App, p_QmPortal, p_FqFqs, p_Msg); } } qm_mr_next(p_QmPortal->p_LowQmPortal); qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1); goto mr_loop; } } return is & (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI); } static void LoopDequeueRing(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; int prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH); while (res != qman_cb_dqrr_pause) { if (prefetch) qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal); qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). 
*/ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. */ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. 
*/ ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || (res != qman_cb_dqrr_park)); if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) { /* Defer just means "skip it, I'll consume it myself later on" */ if (res != qman_cb_dqrr_defer) qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, (res == qman_cb_dqrr_park)); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } else { if (res == qman_cb_dqrr_park) /* The only thing to do for non-DCA is the park-request */ qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal); qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } } } static void LoopDequeueRingDcaOptimized(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; while (res != qman_cb_dqrr_pause) { qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). */ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. 
*/ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. */ ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) || (res != qman_cb_dqrr_park)); /* Defer just means "skip it, I'll consume it myself later on" */ if (res != qman_cb_dqrr_defer) qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, (res == qman_cb_dqrr_park)); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } } static void LoopDequeueRingOptimized(t_Handle h_QmPortal) { struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; enum qman_cb_dqrr_result res = qman_cb_dqrr_consume; e_RxStoreResponse tmpRes; t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; while (res != qman_cb_dqrr_pause) { qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) break; p_Fq = ptr_from_aligned_int(p_Dq->contextB); if (p_Dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* We only set QMAN_FQ_STATE_NE when retiring, so we only need * to check for clearing it when doing volatile dequeues. It's * one less thing to check in the critical path (SDQCR). 
*/ tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; /* Check for VDQCR completion */ if (p_Dq->stat & QM_DQRR_STAT_DQCR_EXPIRED) p_Fq->flags &= ~QMAN_FQ_STATE_VDQCR; if (p_Dq->stat & QM_DQRR_STAT_FQ_EMPTY) { p_Fq->flags &= ~QMAN_FQ_STATE_NE; freeDrainedFq(p_Fq); } } else { /* Interpret 'dq' from the owner's perspective. */ /* use portal default handlers */ ASSERT_COND(p_Dq->fqid); if (p_Fq) { tmpRes = p_Fq->cb.dqrr(p_Fq->h_App, p_Fq->h_QmFqr, p_QmPortal, p_Fq->fqidOffset, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; else if (p_Fq->state == qman_fq_state_waiting_parked) res = qman_cb_dqrr_park; } else { tmpRes = p_QmPortal->p_NullCB->dqrr(p_QmPortal->h_App, NULL, p_QmPortal, p_Dq->fqid, (t_DpaaFD*)&p_Dq->fd); if (tmpRes == e_RX_STORE_RESPONSE_PAUSE) res = qman_cb_dqrr_pause; } } /* Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both. 
*/
        ASSERT_COND((p_Dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
                    (res != qman_cb_dqrr_park));
        if (res == qman_cb_dqrr_park)
            /* The only thing to do for non-DCA is the park-request */
            qm_dqrr_park_ci(p_QmPortal->p_LowQmPortal);
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
}

/* Portal interrupt handler: reads the asserted-and-enabled interrupt
 * status, acknowledges it, then runs the fast path (the installed DQRR
 * dequeue-loop callback) and/or the slow path (message ring) according to
 * the portal's IRQ_FAST/IRQ_SLOW option flags. */
static void portal_isr(void *ptr)
{
    t_QmPortal *p_QmPortal = ptr;
    uint32_t event = 0;
    uint32_t enableEvents = qm_isr_enable_read(p_QmPortal->p_LowQmPortal);

    DBG(TRACE, ("software-portal %d got interrupt", p_QmPortal->p_LowQmPortal->config.cpu));

    /* Handle only sources that are both asserted and enabled. */
    event |= (qm_isr_status_read(p_QmPortal->p_LowQmPortal) & enableEvents);

    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, event);
    /* Only do fast-path handling if it's required */
    if (/*(event & QM_PIRQ_DQRI) &&*/ (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_FAST))
        p_QmPortal->f_LoopDequeueRingCB(p_QmPortal);
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ_SLOW)
        LoopMessageRing(p_QmPortal, event);
}

/* Issues a QUERYFQ_NP management command for p_Fq and, on success, copies
 * the non-programmable FQ state into *p_Np.
 * NOTE(review): busy-waits for the MC result while holding the portal
 * lock (NCSW_PLOCK). */
static t_Error qman_query_fq_np(t_QmPortal *p_QmPortal,
                                struct qman_fq *p_Fq,
                                struct qm_mcr_queryfq_np *p_Np)
{
    struct qm_mc_command *p_Mcc;
    struct qm_mc_result *p_Mcr;
    uint8_t res;

    NCSW_PLOCK(p_QmPortal);
    p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal);
    p_Mcc->queryfq_np.fqid = p_Fq->fqid;
    qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYFQ_NP);
    /* Spin until the management command completes. */
    while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ;
    ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
    res = p_Mcr->result;
    if (res == QM_MCR_RESULT_OK)
        *p_Np = p_Mcr->queryfq_np;
    PUNLOCK(p_QmPortal);
    if (res != QM_MCR_RESULT_OK)
        RETURN_ERROR(MINOR, E_INVALID_STATE, ("QUERYFQ_NP failed: %s\n", mcr_result_str(res)));
    return E_OK;
}

/* Returns the congestion-group id stored in the CG object. */
static uint8_t QmCgGetCgId(t_Handle h_QmCg)
{
    t_QmCg *p_QmCg = (t_QmCg *)h_QmCg;
    return p_QmCg->id;
}

/* Allocates and initializes 'count' frame queues starting at 'fqid'
 * (function body continues on the following source lines). */
static t_Error qm_new_fq(t_QmPortal *p_QmPortal,
                         uint32_t fqid,
                         uint32_t fqidOffset,
                         uint32_t channel,
                         uint32_t wqid,
                         uint16_t count,
                         uint32_t flags,
t_QmFqrCongestionAvoidanceParams *p_CgParams, t_QmContextA *p_ContextA, t_QmContextB *p_ContextB, bool initParked, t_Handle h_QmFqr, struct qman_fq **p_Fqs) { struct qman_fq *p_Fq = NULL; struct qm_mcc_initfq fq_opts; uint32_t i; t_Error err = E_OK; int gap, tmp; uint32_t tmpA, tmpN, ta=0, tn=0, initFqFlag; ASSERT_COND(p_QmPortal); ASSERT_COND(count); for(i=0;icb.dqrr = p_QmPortal->f_DfltFrame; p_Fq->cb.ern = p_QmPortal->f_RejectedFrame; p_Fq->cb.dc_ern = cb_ern_dcErn; p_Fq->cb.fqs = cb_fqs; p_Fq->h_App = p_QmPortal->h_App; p_Fq->h_QmFqr = h_QmFqr; p_Fq->fqidOffset = fqidOffset; p_Fqs[i] = p_Fq; if ((err = qman_create_fq(p_QmPortal,(uint32_t)(fqid + i), 0, p_Fqs[i])) != E_OK) break; } if (err != E_OK) { for(i=0;ih_QmCg); /* CG OAC and FQ TD may not be configured at the same time. if both are required, than we configure CG first, and the FQ TD later - see below. */ fq_opts.fqd.cgid = QmCgGetCgId(p_CgParams->h_QmCg); fq_opts.we_mask |= QM_INITFQ_WE_CGID; if(p_CgParams->overheadAccountingLength) { fq_opts.we_mask |= QM_INITFQ_WE_OAC; fq_opts.we_mask &= ~QM_INITFQ_WE_TDTHRESH; fq_opts.fqd.td_thresh = (uint16_t)(QM_FQD_TD_THRESH_OAC_EN | p_CgParams->overheadAccountingLength); } } if((flags & QM_FQCTRL_TDE) && (!p_CgParams->overheadAccountingLength)) { ASSERT_COND(p_CgParams->fqTailDropThreshold); fq_opts.we_mask |= QM_INITFQ_WE_TDTHRESH; /* express thresh as ta*2^tn */ gap = (int)p_CgParams->fqTailDropThreshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<overheadAccountingLength)) initFqFlag = 0; else initFqFlag = (uint32_t)(initParked?0:QMAN_INITFQ_FLAG_SCHED); if ((err = qman_init_fq(p_QmPortal, p_Fqs[0], initFqFlag, &fq_opts)) != E_OK) { for(i=0;ioverheadAccountingLength)) { ASSERT_COND(p_CgParams->fqTailDropThreshold); fq_opts.we_mask = QM_INITFQ_WE_TDTHRESH; /* express thresh as ta*2^tn */ gap = (int)p_CgParams->fqTailDropThreshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; 
tmpN<32; tmpN++ ) { tmp = ABS((int)(p_CgParams->fqTailDropThreshold - tmpA*(1<fqid += i; } return err; }
/* NOTE(review): the qm_new_fq tail above is garbled in this extraction
 * (tokens are missing around the "1<<tmpN" shift expressions); it has been
 * left byte-identical rather than guessed at. */

/* Retires a frame queue, verifies it drained, takes it out-of-service and
 * destroys the software FQ object.  Fails (without destroying the object)
 * if the retirement is still in progress (STATE_CHANGING) or the FQ still
 * holds frames (STATE_NE). */
static t_Error qm_free_fq(t_QmPortal *p_QmPortal, struct qman_fq *p_Fq)
{
    uint32_t flags=0;

    if (qman_retire_fq(p_QmPortal, p_Fq, &flags, false) != E_OK)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!"));

    if (flags & QMAN_FQ_STATE_CHANGING)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("fq %d currently in use, will be retired", p_Fq->fqid));

    if (flags & QMAN_FQ_STATE_NE)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed;" "Frame Queue Not Empty, Need to dequeue"));

    if (qman_oos_fq(p_QmPortal, p_Fq) != E_OK)
        RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_oos_fq() failed!"));

    qman_destroy_fq(p_Fq,0);

    return E_OK;
}

/* Reference-counted portal disable: only the first caller actually sets
 * the DQRR max-fill to 0 so no further dequeues are delivered. */
static void qman_disable_portal(t_QmPortal *p_QmPortal)
{
    NCSW_PLOCK(p_QmPortal);
    if (!(p_QmPortal->disable_count++))
        qm_dqrr_set_maxfill(p_QmPortal->p_LowQmPortal, 0);
    PUNLOCK(p_QmPortal);
}

/* quiesce SDQCR/VDQCR, then drain till h/w wraps up anything it
 * was doing (5ms is more than enough to ensure it's done).
*/
static void clean_dqrr_mr(t_QmPortal *p_QmPortal)
{
    struct qm_dqrr_entry *p_Dq;
    struct qm_mr_entry *p_Msg;
    int idle = 0;

    /* Stop static- and volatile-dequeue sources first. */
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, 0);
    qm_dqrr_vdqcr_set(p_QmPortal->p_LowQmPortal, 0);
drain_loop:
    qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
    qmPortalMrPvbUpdate(p_QmPortal->p_LowQmPortal);
    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
    p_Msg = qm_mr_current(p_QmPortal->p_LowQmPortal);
    /* Discard (consume without dispatching) whatever is still sitting in
     * the dequeue ring or the message ring. */
    if (p_Dq) {
        qm_dqrr_next(p_QmPortal->p_LowQmPortal);
        qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
    if (p_Msg) {
        qm_mr_next(p_QmPortal->p_LowQmPortal);
        qmPortalMrCciConsume(p_QmPortal->p_LowQmPortal, 1);
    }
    /* Stop only after both rings have been seen empty 5 polls in a row
     * (1ms apart); any new entry resets the idle count. */
    if (!p_Dq && !p_Msg) {
        if (++idle < 5) {
            XX_UDelay(1000);
            goto drain_loop;
        }
    } else {
        idle = 0;
        goto drain_loop;
    }
}

/* Brings up all portal sub-blocks (EQCR, DQRR, MR, MC, ISR), programs the
 * static interrupt-gating thresholds, optionally hooks the portal IRQ,
 * verifies the rings start out clean and finally writes the SDQCR.  On
 * any failure the already-initialized sub-blocks are torn down in reverse
 * order via the fail_* labels. */
static t_Error qman_create_portal(t_QmPortal *p_QmPortal,
                                  uint32_t flags,
                                  uint32_t sdqcrFlags,
                                  uint8_t dqrrSize)
{
    const struct qm_portal_config *p_Config = &(p_QmPortal->p_LowQmPortal->config);
    int ret = 0;
    t_Error err;
    uint32_t isdr;

    if ((err = qm_eqcr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalEqcrCCE)) != E_OK)
        RETURN_ERROR(MINOR, err, ("Qman EQCR initialization failed\n"));

    /* A non-zero SDQCR selects push mode; DCA vs CCI consumption and the
     * two stashing enables come straight from the portal flags. */
    if (qm_dqrr_init(p_QmPortal->p_LowQmPortal,
                     sdqcrFlags ? e_QmPortalDequeuePushMode : e_QmPortalDequeuePullMode,
                     e_QmPortalPVB,
                     (flags & QMAN_PORTAL_FLAG_DCA) ? e_QmPortalDqrrDCA : e_QmPortalDqrrCCI,
                     dqrrSize,
                     (flags & QMAN_PORTAL_FLAG_RSTASH) ? 1 : 0,
                     (flags & QMAN_PORTAL_FLAG_DSTASH) ? 1 : 0)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR initialization failed"));
        goto fail_dqrr;
    }

    if (qm_mr_init(p_QmPortal->p_LowQmPortal, e_QmPortalPVB, e_QmPortalMrCCI)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR initialization failed"));
        goto fail_mr;
    }
    if (qm_mc_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MC initialization failed"));
        goto fail_mc;
    }
    if (qm_isr_init(p_QmPortal->p_LowQmPortal)) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("ISR initialization failed"));
        goto fail_isr;
    }
    /* static interrupt-gating controls */
    qm_dqrr_set_ithresh(p_QmPortal->p_LowQmPortal, 12);
    qm_mr_set_ithresh(p_QmPortal->p_LowQmPortal, 4);
    qm_isr_set_iperiod(p_QmPortal->p_LowQmPortal, 100);
    p_QmPortal->options = flags;
    /* Start with every interrupt source disabled (inhibited), then
     * progressively un-disable sources as each ring is proven clean. */
    isdr = 0xffffffff;
    qm_isr_status_clear(p_QmPortal->p_LowQmPortal, 0xffffffff);
    qm_isr_enable_write(p_QmPortal->p_LowQmPortal, DEFAULT_portalExceptions);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (flags & QMAN_PORTAL_FLAG_IRQ) {
        XX_SetIntr(p_Config->irq, portal_isr, p_QmPortal);
        XX_EnableIntr(p_Config->irq);
        qm_isr_uninhibit(p_QmPortal->p_LowQmPortal);
    } else
        /* without IRQ, we can't block */
        flags &= ~QMAN_PORTAL_FLAG_WAIT;
    /* Need EQCR to be empty before continuing */
    isdr ^= QM_PIRQ_EQCI;
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    ret = qm_eqcr_get_fill(p_QmPortal->p_LowQmPortal);
    if (ret) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("EQCR unclean"));
        goto fail_eqcr_empty;
    }
    isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, isdr);
    if (qm_dqrr_current(p_QmPortal->p_LowQmPortal) != NULL) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("DQRR unclean"));
        goto fail_dqrr_mr_empty;
    }
    if (qm_mr_current(p_QmPortal->p_LowQmPortal) != NULL) {
        REPORT_ERROR(MAJOR, E_INVALID_STATE, ("MR unclean"));
        goto fail_dqrr_mr_empty;
    }
    qm_isr_disable_write(p_QmPortal->p_LowQmPortal, 0);
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);
    return E_OK;

fail_dqrr_mr_empty:
fail_eqcr_empty:
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
fail_isr:
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
fail_mc:
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
fail_mr:
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
fail_dqrr:
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
    return ERROR_CODE(E_INVALID_STATE);
}

/* Tears down a portal in reverse order of qman_create_portal(),
 * detaching the IRQ first if one was hooked. */
static void qman_destroy_portal(t_QmPortal *p_QmPortal)
{
    /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
     * something related to QM_PIRQ_EQCI, this may need fixing. */
    qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
    if (p_QmPortal->options & QMAN_PORTAL_FLAG_IRQ) {
        XX_DisableIntr(p_QmPortal->p_LowQmPortal->config.irq);
        XX_FreeIntr(p_QmPortal->p_LowQmPortal->config.irq);
    }
    qm_isr_finish(p_QmPortal->p_LowQmPortal);
    qm_mc_finish(p_QmPortal->p_LowQmPortal);
    qm_mr_finish(p_QmPortal->p_LowQmPortal);
    qm_dqrr_finish(p_QmPortal->p_LowQmPortal);
    qm_eqcr_finish(p_QmPortal->p_LowQmPortal);
}

/* Returns the next free EQCR entry, or NULL if the ring is full.
 * Refreshes the cached consumer index first: prefetch when exactly at the
 * threshold, full update when below it. */
static inline struct qm_eqcr_entry *try_eq_start(t_QmPortal *p_QmPortal)
{
    struct qm_eqcr_entry *p_Eq;
    uint8_t avail;

    avail = qm_eqcr_get_avail(p_QmPortal->p_LowQmPortal);
    if (avail == EQCR_THRESH)
        qmPortalEqcrCcePrefetch(p_QmPortal->p_LowQmPortal);
    else if (avail < EQCR_THRESH)
        qmPortalEqcrCceUpdate(p_QmPortal->p_LowQmPortal);
    p_Eq = qm_eqcr_start(p_QmPortal->p_LowQmPortal);
    return p_Eq;
}

/* Commits an ORP (order-restoration point) update for sequence number
 * 'orpSeqnum' on ORP 'orpId'; QMAN_ENQUEUE_FLAG_NESN controls the NESN
 * bit in the committed seqnum.  Returns E_BUSY when no EQCR entry is
 * available. */
static t_Error qman_orp_update(t_QmPortal *p_QmPortal,
                               uint32_t orpId,
                               uint16_t orpSeqnum,
                               uint32_t flags)
{
    struct qm_eqcr_entry *p_Eq;

    NCSW_PLOCK(p_QmPortal);
    p_Eq = try_eq_start(p_QmPortal);
    if (!p_Eq) {
        PUNLOCK(p_QmPortal);
        return ERROR_CODE(E_BUSY);
    }

    if (flags & QMAN_ENQUEUE_FLAG_NESN)
        orpSeqnum |= QM_EQCR_SEQNUM_NESN;
    else
        /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
        orpSeqnum &= ~QM_EQCR_SEQNUM_NESN;
    p_Eq->seqnum = orpSeqnum;
    p_Eq->orp = orpId;
    qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)QM_EQCR_VERB_ORP);

    PUNLOCK(p_QmPortal);
    return E_OK;
}

/* Validates FQR stashing parameters: each stash size must fit the
 * hardware limit, and a non-zero FQ-context stash requires a non-NULL,
 * cache-line-aligned, 40-bit context address (body continues on the
 * following source line). */
static __inline__ t_Error CheckStashParams(t_QmFqrParams *p_QmFqrParams)
{
    ASSERT_COND(p_QmFqrParams);

    if
(p_QmFqrParams->stashingParams.frameAnnotationSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Annotation Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.frameDataSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Data Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.fqContextSize > QM_CONTEXTA_MAX_STASH_SIZE) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Frame Context Size Exceeded Max Stash Size(%d)", QM_CONTEXTA_MAX_STASH_SIZE)); if (p_QmFqrParams->stashingParams.fqContextSize) { if (!p_QmFqrParams->stashingParams.fqContextAddr) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be givven")); if (!IS_ALIGNED(p_QmFqrParams->stashingParams.fqContextAddr, CACHELINE_SIZE)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address Must be aligned to %d", CACHELINE_SIZE)); if (p_QmFqrParams->stashingParams.fqContextAddr & 0xffffff0000000000LL) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("FQ Context Address May be up to 40 bit")); } return E_OK; } static t_Error QmPortalRegisterCg(t_Handle h_QmPortal, t_Handle h_QmCg, uint8_t cgId) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; /* cgrs[0] is the mask of registered CG's*/ if(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32))) RETURN_ERROR(MINOR, E_BUSY, ("CG already used")); p_QmPortal->cgrs[0].q.__state[cgId/32] |= 0x80000000 >> (cgId % 32); p_QmPortal->cgsHandles[cgId] = h_QmCg; return E_OK; } static t_Error QmPortalUnregisterCg(t_Handle h_QmPortal, uint8_t cgId) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; /* cgrs[0] is the mask of registered CG's*/ if(!(p_QmPortal->cgrs[0].q.__state[cgId/32] & (0x80000000 >> (cgId % 32)))) RETURN_ERROR(MINOR, E_BUSY, ("CG is not in use")); p_QmPortal->cgrs[0].q.__state[cgId/32] &= ~0x80000000 >> (cgId % 32); p_QmPortal->cgsHandles[cgId] = NULL; return E_OK; } static e_DpaaSwPortal 
QmPortalGetSwPortalId(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; return (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu; } static t_Error CalcWredCurve(t_QmCgWredCurve *p_WredCurve, uint32_t *p_CurveWord) { uint32_t maxP, roundDown, roundUp, tmpA, tmpN; uint32_t ma=0, mn=0, slope, sa=0, sn=0, pn; int pres = 1000; int gap, tmp; /* TODO - change maxTh to uint64_t? if(p_WredCurve->maxTh > (1<<39)) RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh is not in range"));*/ /* express maxTh as ma*2^mn */ gap = (int)p_WredCurve->maxTh; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(p_WredCurve->maxTh - tmpA*(1<maxTh = ma*(1<maxTh <= p_WredCurve->minTh) RETURN_ERROR(MINOR, E_INVALID_VALUE, ("maxTh must be larger than minTh")); if(p_WredCurve->probabilityDenominator > 64) RETURN_ERROR(MINOR, E_INVALID_VALUE, ("probabilityDenominator mustn't be 1-64")); /* first we translate from Cisco probabilityDenominator to 256 fixed denominator, result must be divisible by 4. 
*/ /* we multiply by a fixed value to get better accuracy (without using floating point) */ maxP = (uint32_t)(256*1000/p_WredCurve->probabilityDenominator); if (maxP % 4*pres) { roundDown = maxP + (maxP % (4*pres)); roundUp = roundDown + 4*pres; if((roundUp - maxP) > (maxP - roundDown)) maxP = roundDown; else maxP = roundUp; } maxP = maxP/pres; ASSERT_COND(maxP <= 256); pn = (uint8_t)(maxP/4 - 1); if(maxP >= (p_WredCurve->maxTh - p_WredCurve->minTh)) RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Due to probabilityDenominator selected, maxTh-minTh must be larger than %d", maxP)); pres = 1000000; slope = maxP*pres/(p_WredCurve->maxTh - p_WredCurve->minTh); /* express slope as sa/2^sn */ gap = (int)slope; for (tmpA=(uint32_t)(64*pres) ; tmpA<128*pres; tmpA += pres ) for (tmpN=7 ; tmpN<64; tmpN++ ) { tmp = ABS((int)(slope - tmpA/(1<=64); - sn = sn; ASSERT_COND(sn<64 && sn>=7); *p_CurveWord = ((ma << 24) | (mn << 19) | (sa << 12) | (sn << 6) | pn); return E_OK; } static t_Error QmPortalPullFrame(t_Handle h_QmPortal, uint32_t pdqcr, t_DpaaFD *p_Frame) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; struct qm_dqrr_entry *p_Dq; struct qman_fq *p_Fq; int prefetch; uint32_t *p_Dst, *p_Src; ASSERT_COND(p_QmPortal); ASSERT_COND(p_Frame); SANITY_CHECK_RETURN_ERROR(p_QmPortal->pullMode, E_INVALID_STATE); NCSW_PLOCK(p_QmPortal); qm_dqrr_pdqcr_set(p_QmPortal->p_LowQmPortal, pdqcr); mb(); while (qm_dqrr_pdqcr_get(p_QmPortal->p_LowQmPortal)) ; prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH); while(TRUE) { if (prefetch) qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal); qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal); p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal); if (!p_Dq) continue; p_Fq = ptr_from_aligned_int(p_Dq->contextB); ASSERT_COND(p_Dq->fqid); p_Dst = (uint32_t *)p_Frame; p_Src = (uint32_t *)&p_Dq->fd; p_Dst[0] = p_Src[0]; p_Dst[1] = p_Src[1]; p_Dst[2] = p_Src[2]; p_Dst[3] = p_Src[3]; if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) { 
qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, false); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } else { qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } break; } PUNLOCK(p_QmPortal); if (!(p_Dq->stat & QM_DQRR_STAT_FD_VALID)) return ERROR_CODE(E_EMPTY); return E_OK; } /****************************************/ /* API Init unit functions */ /****************************************/ t_Handle QM_PORTAL_Config(t_QmPortalParam *p_QmPortalParam) { t_QmPortal *p_QmPortal; uint32_t i; SANITY_CHECK_RETURN_VALUE(p_QmPortalParam, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_QmPortalParam->swPortalId < DPAA_MAX_NUM_OF_SW_PORTALS, E_INVALID_VALUE, 0); p_QmPortal = (t_QmPortal *)XX_Malloc(sizeof(t_QmPortal)); if (!p_QmPortal) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal obj!!!")); return NULL; } memset(p_QmPortal, 0, sizeof(t_QmPortal)); p_QmPortal->p_LowQmPortal = (struct qm_portal *)XX_Malloc(sizeof(struct qm_portal)); if (!p_QmPortal->p_LowQmPortal) { XX_Free(p_QmPortal); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Low qm p_QmPortal obj!!!")); return NULL; } memset(p_QmPortal->p_LowQmPortal, 0, sizeof(struct qm_portal)); p_QmPortal->p_QmPortalDriverParams = (t_QmPortalDriverParams *)XX_Malloc(sizeof(t_QmPortalDriverParams)); if (!p_QmPortal->p_QmPortalDriverParams) { XX_Free(p_QmPortal->p_LowQmPortal); XX_Free(p_QmPortal); REPORT_ERROR(MAJOR, E_NO_MEMORY, ("Qm Portal driver parameters")); return NULL; } memset(p_QmPortal->p_QmPortalDriverParams, 0, sizeof(t_QmPortalDriverParams)); p_QmPortal->p_LowQmPortal->addr.addr_ce = UINT_TO_PTR(p_QmPortalParam->ceBaseAddress); p_QmPortal->p_LowQmPortal->addr.addr_ci = UINT_TO_PTR(p_QmPortalParam->ciBaseAddress); p_QmPortal->p_LowQmPortal->config.irq = p_QmPortalParam->irq; p_QmPortal->p_LowQmPortal->config.bound = 0; p_QmPortal->p_LowQmPortal->config.cpu = (int)p_QmPortalParam->swPortalId; p_QmPortal->p_LowQmPortal->config.channel = (e_QmFQChannel)(e_QM_FQ_CHANNEL_SWPORTAL0 
+ p_QmPortalParam->swPortalId); p_QmPortal->p_LowQmPortal->bind_lock = XX_InitSpinlock(); p_QmPortal->h_Qm = p_QmPortalParam->h_Qm; p_QmPortal->f_DfltFrame = p_QmPortalParam->f_DfltFrame; p_QmPortal->f_RejectedFrame = p_QmPortalParam->f_RejectedFrame; p_QmPortal->h_App = p_QmPortalParam->h_App; p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset = p_QmPortalParam->fdLiodnOffset; p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = DEFAULT_dequeueDcaMode; p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames = DEFAULT_dequeueUpToThreeFrames; p_QmPortal->p_QmPortalDriverParams->commandType = DEFAULT_dequeueCommandType; p_QmPortal->p_QmPortalDriverParams->userToken = DEFAULT_dequeueUserToken; p_QmPortal->p_QmPortalDriverParams->specifiedWq = DEFAULT_dequeueSpecifiedWq; p_QmPortal->p_QmPortalDriverParams->dedicatedChannel = DEFAULT_dequeueDedicatedChannel; p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels = DEFAULT_dequeueDedicatedChannelHasPrecedenceOverPoolChannels; p_QmPortal->p_QmPortalDriverParams->poolChannelId = DEFAULT_dequeuePoolChannelId; p_QmPortal->p_QmPortalDriverParams->wqId = DEFAULT_dequeueWqId; for (i=0;ip_QmPortalDriverParams->poolChannels[i] = FALSE; p_QmPortal->p_QmPortalDriverParams->dqrrSize = DEFAULT_dqrrSize; p_QmPortal->p_QmPortalDriverParams->pullMode = DEFAULT_pullMode; return p_QmPortal; } t_Error QM_PORTAL_Init(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; uint32_t i, flags=0, sdqcrFlags=0; t_Error err; t_QmInterModulePortalInitParams qmParams; SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE); memset(&qmParams, 0, sizeof(qmParams)); qmParams.portalId = (uint8_t)p_QmPortal->p_LowQmPortal->config.cpu; qmParams.liodn = p_QmPortal->p_QmPortalDriverParams->fdLiodnOffset; qmParams.dqrrLiodn = p_QmPortal->p_QmPortalDriverParams->dqrrLiodn; qmParams.fdFqLiodn = 
p_QmPortal->p_QmPortalDriverParams->fdFqLiodn; qmParams.stashDestQueue = p_QmPortal->p_QmPortalDriverParams->stashDestQueue; if ((err = QmGetSetPortalParams(p_QmPortal->h_Qm, &qmParams)) != E_OK) RETURN_ERROR(MAJOR, err, NO_MSG); flags = (uint32_t)(((p_QmPortal->p_LowQmPortal->config.irq == NO_IRQ) ? 0 : (QMAN_PORTAL_FLAG_IRQ | QMAN_PORTAL_FLAG_IRQ_FAST | QMAN_PORTAL_FLAG_IRQ_SLOW))); flags |= ((p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode) ? QMAN_PORTAL_FLAG_DCA : 0); flags |= (p_QmPortal->p_QmPortalDriverParams->dqrr)?QMAN_PORTAL_FLAG_RSTASH:0; flags |= (p_QmPortal->p_QmPortalDriverParams->fdFq)?QMAN_PORTAL_FLAG_DSTASH:0; p_QmPortal->pullMode = p_QmPortal->p_QmPortalDriverParams->pullMode; if (!p_QmPortal->pullMode) { sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dequeueUpToThreeFrames) ? QM_SDQCR_COUNT_UPTO3 : QM_SDQCR_COUNT_EXACT1; sdqcrFlags |= QM_SDQCR_TOKEN_SET(p_QmPortal->p_QmPortalDriverParams->userToken); sdqcrFlags |= QM_SDQCR_TYPE_SET(p_QmPortal->p_QmPortalDriverParams->commandType); if (!p_QmPortal->p_QmPortalDriverParams->specifiedWq) { /* sdqcrFlags |= QM_SDQCR_SOURCE_CHANNELS;*/ /* removed as the macro is '0' */ sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannelHasPrecedenceOverPoolChannels) ? QM_SDQCR_DEDICATED_PRECEDENCE : 0; sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? QM_SDQCR_CHANNELS_DEDICATED : 0; for (i=0;ip_QmPortalDriverParams->poolChannels[i]) ? QM_SDQCR_CHANNELS_POOL(i+1) : 0); } else { sdqcrFlags |= QM_SDQCR_SOURCE_SPECIFICWQ; sdqcrFlags |= (p_QmPortal->p_QmPortalDriverParams->dedicatedChannel) ? 
QM_SDQCR_SPECIFICWQ_DEDICATED : QM_SDQCR_SPECIFICWQ_POOL(p_QmPortal->p_QmPortalDriverParams->poolChannelId); sdqcrFlags |= QM_SDQCR_SPECIFICWQ_WQ(p_QmPortal->p_QmPortalDriverParams->wqId); } } if ((flags & QMAN_PORTAL_FLAG_RSTASH) && (flags & QMAN_PORTAL_FLAG_DCA)) p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingDcaOptimized; else if ((flags & QMAN_PORTAL_FLAG_RSTASH) && !(flags & QMAN_PORTAL_FLAG_DCA)) p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRingOptimized; else p_QmPortal->f_LoopDequeueRingCB = LoopDequeueRing; if ((!p_QmPortal->f_RejectedFrame) || (!p_QmPortal->f_DfltFrame)) RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("f_RejectedFrame or f_DfltFrame callback not provided")); p_QmPortal->p_NullCB = (struct qman_fq_cb *)XX_Malloc(sizeof(struct qman_fq_cb)); if (!p_QmPortal->p_NullCB) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("FQ Null CB obj!!!")); memset(p_QmPortal->p_NullCB, 0, sizeof(struct qman_fq_cb)); p_QmPortal->p_NullCB->dqrr = p_QmPortal->f_DfltFrame; p_QmPortal->p_NullCB->ern = p_QmPortal->f_RejectedFrame; p_QmPortal->p_NullCB->dc_ern = p_QmPortal->p_NullCB->fqs = null_cb_mr; if (qman_create_portal(p_QmPortal, flags, sdqcrFlags, p_QmPortal->p_QmPortalDriverParams->dqrrSize) != E_OK) { RETURN_ERROR(MAJOR, E_NO_MEMORY, ("create portal failed")); } QmSetPortalHandle(p_QmPortal->h_Qm, (t_Handle)p_QmPortal, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu); XX_Free(p_QmPortal->p_QmPortalDriverParams); p_QmPortal->p_QmPortalDriverParams = NULL; DBG(TRACE, ("Qman-Portal %d @ %p:%p", p_QmPortal->p_LowQmPortal->config.cpu, p_QmPortal->p_LowQmPortal->addr.addr_ce, p_QmPortal->p_LowQmPortal->addr.addr_ci )); DBG(TRACE, ("Qman-Portal %d phys @ 0x%016llx:0x%016llx", p_QmPortal->p_LowQmPortal->config.cpu, (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ce), (uint64_t)XX_VirtToPhys(p_QmPortal->p_LowQmPortal->addr.addr_ci) )); return E_OK; } t_Error QM_PORTAL_Free(t_Handle h_QmPortal) { t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal; if (!p_QmPortal) return 
ERROR_CODE(E_INVALID_HANDLE);

    ASSERT_COND(p_QmPortal->p_LowQmPortal);
    /* Detach from the QM first so no new work is routed here, then tear
     * down the low-level portal and free everything this object owns. */
    QmSetPortalHandle(p_QmPortal->h_Qm, NULL, (e_DpaaSwPortal)p_QmPortal->p_LowQmPortal->config.cpu);
    qman_destroy_portal(p_QmPortal);
    if (p_QmPortal->p_NullCB)
        XX_Free(p_QmPortal->p_NullCB);
    if (p_QmPortal->p_LowQmPortal->bind_lock)
        XX_FreeSpinlock(p_QmPortal->p_LowQmPortal->bind_lock);
    if (p_QmPortal->p_QmPortalDriverParams)
        XX_Free(p_QmPortal->p_QmPortalDriverParams);
    XX_Free(p_QmPortal->p_LowQmPortal);
    XX_Free(p_QmPortal);

    return E_OK;
}

/* Config-phase setter: enables/disables discrete consumption (DCA) for
 * dequeued frames.  Only usable before QM_PORTAL_Init() (requires the
 * driver-params object, which Init frees). */
t_Error QM_PORTAL_ConfigDcaMode(t_Handle h_QmPortal, bool enable)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_INVALID_HANDLE);

    p_QmPortal->p_QmPortalDriverParams->dequeueDcaMode = enable;

    return E_OK;
}

/* Config-phase setter: copies the caller's stashing parameters (dest
 * queue, LIODNs, per-resource stash enables) into the driver params.
 * Only usable before QM_PORTAL_Init(). */
t_Error QM_PORTAL_ConfigStash(t_Handle h_QmPortal, t_QmPortalStashParam *p_StashParams)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);
    SANITY_CHECK_RETURN_ERROR(p_StashParams, E_NULL_POINTER);

    p_QmPortal->p_QmPortalDriverParams->stashDestQueue = p_StashParams->stashDestQueue;
    p_QmPortal->p_QmPortalDriverParams->dqrrLiodn = p_StashParams->dqrrLiodn;
    p_QmPortal->p_QmPortalDriverParams->fdFqLiodn = p_StashParams->fdFqLiodn;
    p_QmPortal->p_QmPortalDriverParams->eqcr = p_StashParams->eqcr;
    p_QmPortal->p_QmPortalDriverParams->eqcrHighPri = p_StashParams->eqcrHighPri;
    p_QmPortal->p_QmPortalDriverParams->dqrr = p_StashParams->dqrr;
    p_QmPortal->p_QmPortalDriverParams->dqrrHighPri = p_StashParams->dqrrHighPri;
    p_QmPortal->p_QmPortalDriverParams->fdFq = p_StashParams->fdFq;
    p_QmPortal->p_QmPortalDriverParams->fdFqHighPri = p_StashParams->fdFqHighPri;
    p_QmPortal->p_QmPortalDriverParams->fdFqDrop = p_StashParams->fdFqDrop;

    return E_OK;
}

/* Config-phase setter: selects pull mode (PDQCR) vs push mode for this
 * portal.  Only usable before QM_PORTAL_Init(). */
t_Error QM_PORTAL_ConfigPullMode(t_Handle h_QmPortal, bool pullMode)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_QmPortal->p_QmPortalDriverParams, E_NULL_POINTER);

    p_QmPortal->p_QmPortalDriverParams->pullMode = pullMode;

    return E_OK;
}

/* Adds pool channel 'poolChannelId' to this portal's static dequeue
 * command (read-modify-write of the SDQCR). */
t_Error QM_PORTAL_AddPoolChannel(t_Handle h_QmPortal, uint8_t poolChannelId)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    uint32_t sdqcrFlags;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR((poolChannelId < QM_MAX_NUM_OF_POOL_CHANNELS), E_INVALID_VALUE);

    sdqcrFlags = qm_dqrr_sdqcr_get(p_QmPortal->p_LowQmPortal);
    sdqcrFlags |= QM_SDQCR_CHANNELS_POOL(poolChannelId+1);
    qm_dqrr_sdqcr_set(p_QmPortal->p_LowQmPortal, sdqcrFlags);

    return E_OK;
}

/* Polls the portal: control frames run the message-ring loop (and ack any
 * handled interrupt sources); data frames run the installed dequeue-ring
 * callback.  'source' selects either or both. */
t_Error QM_PORTAL_Poll(t_Handle h_QmPortal, e_QmPortalPollSource source)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);

    NCSW_PLOCK(p_QmPortal);

    if ((source == e_QM_PORTAL_POLL_SOURCE_CONTROL_FRAMES) ||
        (source == e_QM_PORTAL_POLL_SOURCE_BOTH)) {
        uint32_t is = qm_isr_status_read(p_QmPortal->p_LowQmPortal);
        uint32_t active = LoopMessageRing(p_QmPortal, is);
        if (active)
            qm_isr_status_clear(p_QmPortal->p_LowQmPortal, active);
    }
    if ((source == e_QM_PORTAL_POLL_SOURCE_DATA_FRAMES) ||
        (source == e_QM_PORTAL_POLL_SOURCE_BOTH))
        p_QmPortal->f_LoopDequeueRingCB((t_Handle)p_QmPortal);

    PUNLOCK(p_QmPortal);

    return E_OK;
}

/* Polls a single frame from the DQRR and fills *p_frameInfo with its
 * owner's handles and a copy of the frame descriptor (body continues on
 * the following source lines). */
t_Error QM_PORTAL_PollFrame(t_Handle h_QmPortal, t_QmPortalFrameInfo *p_frameInfo)
{
    t_QmPortal *p_QmPortal = (t_QmPortal *)h_QmPortal;
    struct qm_dqrr_entry *p_Dq;
    struct qman_fq *p_Fq;
    int prefetch;

    SANITY_CHECK_RETURN_ERROR(p_QmPortal, E_INVALID_HANDLE);
    SANITY_CHECK_RETURN_ERROR(p_frameInfo, E_NULL_POINTER);

    NCSW_PLOCK(p_QmPortal);

    /* Without ring stashing we must prefetch the DQRR entry ourselves. */
    prefetch = !(p_QmPortal->options & QMAN_PORTAL_FLAG_RSTASH);
    if (prefetch)
        qmPortalDqrrPvbPrefetch(p_QmPortal->p_LowQmPortal);
    qmPortalDqrrPvbUpdate(p_QmPortal->p_LowQmPortal);
    p_Dq = qm_dqrr_current(p_QmPortal->p_LowQmPortal);
    if (!p_Dq)
{ PUNLOCK(p_QmPortal); return ERROR_CODE(E_EMPTY); } p_Fq = ptr_from_aligned_int(p_Dq->contextB); ASSERT_COND(p_Dq->fqid); if (p_Fq) { p_frameInfo->h_App = p_Fq->h_App; p_frameInfo->h_QmFqr = p_Fq->h_QmFqr; p_frameInfo->fqidOffset = p_Fq->fqidOffset; memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD)); } else { p_frameInfo->h_App = p_QmPortal->h_App; p_frameInfo->h_QmFqr = NULL; p_frameInfo->fqidOffset = p_Dq->fqid; memcpy((void*)&p_frameInfo->frame, (void*)&p_Dq->fd, sizeof(t_DpaaFD)); } if (p_QmPortal->options & QMAN_PORTAL_FLAG_DCA) { qmPortalDqrrDcaConsume1ptr(p_QmPortal->p_LowQmPortal, p_Dq, false); qm_dqrr_next(p_QmPortal->p_LowQmPortal); } else { qm_dqrr_next(p_QmPortal->p_LowQmPortal); qmPortalDqrrCciConsume(p_QmPortal->p_LowQmPortal, 1); } PUNLOCK(p_QmPortal); return E_OK; } t_Handle QM_FQR_Create(t_QmFqrParams *p_QmFqrParams) { t_QmFqr *p_QmFqr; uint32_t i, flags = 0; u_QmFqdContextA cnxtA; SANITY_CHECK_RETURN_VALUE(p_QmFqrParams, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_QmFqrParams->h_Qm, E_INVALID_HANDLE, NULL); if (p_QmFqrParams->shadowMode && (!p_QmFqrParams->useForce || p_QmFqrParams->numOfFqids != 1)) { REPORT_ERROR(MAJOR, E_CONFLICT, ("shadowMode must be use with useForce and numOfFqids==1!!!")); return NULL; } p_QmFqr = (t_QmFqr *)XX_MallocSmart(sizeof(t_QmFqr), 0, 64); if (!p_QmFqr) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQR obj!!!")); return NULL; } memset(p_QmFqr, 0, sizeof(t_QmFqr)); p_QmFqr->h_Qm = p_QmFqrParams->h_Qm; p_QmFqr->h_QmPortal = p_QmFqrParams->h_QmPortal; p_QmFqr->shadowMode = p_QmFqrParams->shadowMode; p_QmFqr->numOfFqids = (p_QmFqrParams->useForce && !p_QmFqrParams->numOfFqids) ? 
1 : p_QmFqrParams->numOfFqids; if (!p_QmFqr->h_QmPortal) { p_QmFqr->h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_QmPortal, E_INVALID_HANDLE, NULL); } p_QmFqr->p_Fqs = (struct qman_fq **)XX_Malloc(sizeof(struct qman_fq *) * p_QmFqr->numOfFqids); if (!p_QmFqr->p_Fqs) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM FQs obj!!!")); QM_FQR_Free(p_QmFqr); return NULL; } memset(p_QmFqr->p_Fqs, 0, sizeof(struct qman_fq *) * p_QmFqr->numOfFqids); if (p_QmFqr->shadowMode) { struct qman_fq *p_Fq = NULL; p_QmFqr->fqidBase = p_QmFqrParams->qs.frcQ.fqid; p_Fq = (struct qman_fq *)XX_MallocSmart(sizeof(struct qman_fq), 0, 64); if (!p_Fq) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("FQ obj!!!")); QM_FQR_Free(p_QmFqr); return NULL; } memset(p_Fq, 0, sizeof(struct qman_fq)); p_Fq->cb.dqrr = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_DfltFrame; p_Fq->cb.ern = ((t_QmPortal*)p_QmFqr->h_QmPortal)->f_RejectedFrame; p_Fq->cb.dc_ern = cb_ern_dcErn; p_Fq->cb.fqs = cb_fqs; p_Fq->h_App = ((t_QmPortal*)p_QmFqr->h_QmPortal)->h_App; p_Fq->h_QmFqr = p_QmFqr; p_Fq->state = qman_fq_state_sched; p_Fq->fqid = p_QmFqr->fqidBase; p_QmFqr->p_Fqs[0] = p_Fq; } else { p_QmFqr->channel = p_QmFqrParams->channel; p_QmFqr->workQueue = p_QmFqrParams->wq; p_QmFqr->fqidBase = QmFqidGet(p_QmFqr->h_Qm, p_QmFqr->numOfFqids, p_QmFqrParams->qs.nonFrcQs.align, p_QmFqrParams->useForce, p_QmFqrParams->qs.frcQ.fqid); if (p_QmFqr->fqidBase == (uint32_t)ILLEGAL_BASE) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,("can't allocate a fqid")); QM_FQR_Free(p_QmFqr); return NULL; } if(p_QmFqrParams->congestionAvoidanceEnable && (p_QmFqrParams->congestionAvoidanceParams.h_QmCg == NULL) && (p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold == 0)) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,("NULL congestion group handle and no FQ Threshold")); QM_FQR_Free(p_QmFqr); return NULL; } if(p_QmFqrParams->congestionAvoidanceEnable) { if(p_QmFqrParams->congestionAvoidanceParams.h_QmCg) flags |= QM_FQCTRL_CGE; 
if(p_QmFqrParams->congestionAvoidanceParams.fqTailDropThreshold) flags |= QM_FQCTRL_TDE; } /* flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_ORP : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_CPCSTASH : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_FORCESFDR : 0; flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_AVOIDBLOCK : 0; */ flags |= (p_QmFqrParams->holdActive) ? QM_FQCTRL_HOLDACTIVE : 0; flags |= (p_QmFqrParams->preferInCache) ? QM_FQCTRL_LOCKINCACHE : 0; if (p_QmFqrParams->useContextAForStash) { if (CheckStashParams(p_QmFqrParams) != E_OK) { REPORT_ERROR(CRITICAL,E_INVALID_STATE,NO_MSG); QM_FQR_Free(p_QmFqr); return NULL; } memset(&cnxtA, 0, sizeof(cnxtA)); cnxtA.stashing.annotation_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameAnnotationSize, CACHELINE_SIZE); cnxtA.stashing.data_cl = DIV_CEIL(p_QmFqrParams->stashingParams.frameDataSize, CACHELINE_SIZE); cnxtA.stashing.context_cl = DIV_CEIL(p_QmFqrParams->stashingParams.fqContextSize, CACHELINE_SIZE); cnxtA.context_hi = (uint8_t)((p_QmFqrParams->stashingParams.fqContextAddr >> 32) & 0xff); cnxtA.context_lo = (uint32_t)(p_QmFqrParams->stashingParams.fqContextAddr); flags |= QM_FQCTRL_CTXASTASHING; } for(i=0;inumOfFqids;i++) if (qm_new_fq(p_QmFqr->h_QmPortal, p_QmFqr->fqidBase+i, i, p_QmFqr->channel, p_QmFqr->workQueue, 1/*p_QmFqr->numOfFqids*/, flags, (p_QmFqrParams->congestionAvoidanceEnable ? &p_QmFqrParams->congestionAvoidanceParams : NULL), p_QmFqrParams->useContextAForStash ? 
(t_QmContextA *)&cnxtA : p_QmFqrParams->p_ContextA, p_QmFqrParams->p_ContextB, p_QmFqrParams->initParked, p_QmFqr, &p_QmFqr->p_Fqs[i]) != E_OK) { QM_FQR_Free(p_QmFqr); return NULL; } } return p_QmFqr; } t_Error QM_FQR_Free(t_Handle h_QmFqr) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; uint32_t i; if (!p_QmFqr) return ERROR_CODE(E_INVALID_HANDLE); if (p_QmFqr->p_Fqs) { for (i=0;inumOfFqids;i++) if (p_QmFqr->p_Fqs[i]) { if (!p_QmFqr->shadowMode) qm_free_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i]); XX_FreeSmart(p_QmFqr->p_Fqs[i]); } XX_Free(p_QmFqr->p_Fqs); } if (!p_QmFqr->shadowMode && p_QmFqr->fqidBase) QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase); XX_FreeSmart(p_QmFqr); return E_OK; } t_Error QM_FQR_FreeWDrain(t_Handle h_QmFqr, t_QmFqrDrainedCompletionCB *f_CompletionCB, bool deliverFrame, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; uint32_t i; if (!p_QmFqr) return ERROR_CODE(E_INVALID_HANDLE); if (p_QmFqr->shadowMode) RETURN_ERROR(MAJOR, E_INVALID_OPERATION, ("QM_FQR_FreeWDrain can't be called to shadow FQR!!!. call QM_FQR_Free")); p_QmFqr->p_DrainedFqs = (bool *)XX_Malloc(sizeof(bool) * p_QmFqr->numOfFqids); if (!p_QmFqr->p_DrainedFqs) RETURN_ERROR(MAJOR, E_NO_MEMORY, ("QM Drained-FQs obj!!!. 
Try to Free without draining")); memset(p_QmFqr->p_DrainedFqs, 0, sizeof(bool) * p_QmFqr->numOfFqids); if (f_CompletionCB) { p_QmFqr->f_CompletionCB = f_CompletionCB; p_QmFqr->h_App = h_App; } if (deliverFrame) { if (!f_CallBack) { REPORT_ERROR(MAJOR, E_NULL_POINTER, ("f_CallBack must be given.")); XX_Free(p_QmFqr->p_DrainedFqs); return ERROR_CODE(E_NULL_POINTER); } QM_FQR_RegisterCB(p_QmFqr, f_CallBack, h_App); } else QM_FQR_RegisterCB(p_QmFqr, drainCB, h_App); for (i=0;inumOfFqids;i++) { if (qman_retire_fq(p_QmFqr->h_QmPortal, p_QmFqr->p_Fqs[i], 0, true) != E_OK) RETURN_ERROR(MAJOR, E_INVALID_STATE, ("qman_retire_fq() failed!")); if (p_QmFqr->p_Fqs[i]->flags & QMAN_FQ_STATE_CHANGING) DBG(INFO, ("fq %d currently in use, will be retired", p_QmFqr->p_Fqs[i]->fqid)); else drainRetiredFq(p_QmFqr->p_Fqs[i]); } if (!p_QmFqr->f_CompletionCB) { while(p_QmFqr->p_DrainedFqs) ; DBG(TRACE, ("QM-FQR with base %d completed", p_QmFqr->fqidBase)); XX_FreeSmart(p_QmFqr->p_Fqs); if (p_QmFqr->fqidBase) QmFqidPut(p_QmFqr->h_Qm, p_QmFqr->fqidBase); XX_FreeSmart(p_QmFqr); } return E_OK; } t_Error QM_FQR_RegisterCB(t_Handle h_QmFqr, t_QmReceivedFrameCallback *f_CallBack, t_Handle h_App) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; int i; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); for (i=0;inumOfFqids;i++) { p_QmFqr->p_Fqs[i]->cb.dqrr = f_CallBack; p_QmFqr->p_Fqs[i]->h_App = h_App; } return E_OK; } t_Error QM_FQR_Enqueue(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; t_QmPortal *p_QmPortal; struct qm_eqcr_entry *p_Eq; uint32_t *p_Dst, *p_Src; const struct qman_fq *p_Fq; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); if (!h_QmPortal) { SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE); } p_QmPortal = (t_QmPortal 
*)h_QmPortal; p_Fq = p_QmFqr->p_Fqs[fqidOffset]; #ifdef QM_CHECKING if (p_Fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) RETURN_ERROR(MINOR, E_INVALID_VALUE, NO_MSG); if ((!(p_Fq->flags & QMAN_FQ_FLAG_NO_MODIFY)) && ((p_Fq->state == qman_fq_state_retired) || (p_Fq->state == qman_fq_state_oos))) return ERROR_CODE(E_BUSY); #endif /* QM_CHECKING */ NCSW_PLOCK(p_QmPortal); p_Eq = try_eq_start(p_QmPortal); if (!p_Eq) { PUNLOCK(p_QmPortal); return ERROR_CODE(E_BUSY); } p_Eq->fqid = p_Fq->fqid; p_Eq->tag = aligned_int_from_ptr(p_Fq); /* gcc does a dreadful job of the following; * eq->fd = *fd; * It causes the entire function to save/restore a wider range of * registers, and comes up with instruction-waste galore. This will do * until we can rework the function for better code-generation. */ p_Dst = (uint32_t *)&p_Eq->fd; p_Src = (uint32_t *)p_Frame; p_Dst[0] = p_Src[0]; p_Dst[1] = p_Src[1]; p_Dst[2] = p_Src[2]; p_Dst[3] = p_Src[3]; qmPortalEqcrPvbCommit(p_QmPortal->p_LowQmPortal, (uint8_t)(QM_EQCR_VERB_CMD_ENQUEUE/* | (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT))*/)); PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_FQR_PullFrame(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, t_DpaaFD *p_Frame) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; uint32_t pdqcr = 0; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR(p_Frame, E_NULL_POINTER); SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_oos) || (p_QmFqr->p_Fqs[fqidOffset]->state == qman_fq_state_parked), E_INVALID_STATE); if (!h_QmPortal) { SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE); } pdqcr |= QM_PDQCR_MODE_UNSCHEDULED; pdqcr |= QM_PDQCR_FQID(p_QmFqr->p_Fqs[fqidOffset]->fqid); return QmPortalPullFrame(h_QmPortal, pdqcr, p_Frame); } t_Error 
QM_FQR_Resume(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); if (!h_QmPortal) { SANITY_CHECK_RETURN_ERROR(p_QmFqr->h_Qm, E_INVALID_HANDLE); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_ERROR(h_QmPortal, E_INVALID_HANDLE); } return qman_schedule_fq(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset]); } t_Error QM_FQR_Suspend(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_ERROR(p_QmFqr, E_INVALID_HANDLE); SANITY_CHECK_RETURN_ERROR((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE); SANITY_CHECK_RETURN_ERROR((p_QmFqr->p_Fqs[fqidOffset]->flags & QM_FQCTRL_HOLDACTIVE), E_INVALID_STATE); UNUSED(h_QmPortal); p_QmFqr->p_Fqs[fqidOffset]->state = qman_fq_state_waiting_parked; return E_OK; } uint32_t QM_FQR_GetFqid(t_Handle h_QmFqr) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0); return p_QmFqr->fqidBase; } uint32_t QM_FQR_GetCounter(t_Handle h_QmFqr, t_Handle h_QmPortal, uint32_t fqidOffset, e_QmFqrCounters counter) { t_QmFqr *p_QmFqr = (t_QmFqr *)h_QmFqr; struct qm_mcr_queryfq_np queryfq_np; SANITY_CHECK_RETURN_VALUE(p_QmFqr, E_INVALID_HANDLE, 0); SANITY_CHECK_RETURN_VALUE((fqidOffset < p_QmFqr->numOfFqids), E_INVALID_VALUE, 0); if (!h_QmPortal) { SANITY_CHECK_RETURN_VALUE(p_QmFqr->h_Qm, E_INVALID_HANDLE, 0); h_QmPortal = QmGetPortalHandle(p_QmFqr->h_Qm); SANITY_CHECK_RETURN_VALUE(h_QmPortal, E_INVALID_HANDLE, 0); } if (qman_query_fq_np(h_QmPortal, p_QmFqr->p_Fqs[fqidOffset], &queryfq_np) != E_OK) return 0; switch (counter) { case e_QM_FQR_COUNTERS_FRAME : return queryfq_np.frm_cnt; case e_QM_FQR_COUNTERS_BYTE : return queryfq_np.byte_cnt; default : break; } /* should never get here */ ASSERT_COND(FALSE); return 0; } t_Handle QM_CG_Create(t_QmCgParams 
*p_CgParams) { t_QmCg *p_QmCg; t_QmPortal *p_QmPortal; t_Error err; uint32_t wredParams; uint32_t tmpA, tmpN, ta=0, tn=0; int gap, tmp; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; SANITY_CHECK_RETURN_VALUE(p_CgParams, E_INVALID_HANDLE, NULL); SANITY_CHECK_RETURN_VALUE(p_CgParams->h_Qm, E_INVALID_HANDLE, NULL); if(p_CgParams->notifyDcPortal && ((p_CgParams->dcPortalId == e_DPAA_DCPORTAL2) || (p_CgParams->dcPortalId == e_DPAA_DCPORTAL3))) { REPORT_ERROR(MAJOR, E_INVALID_VALUE, ("notifyDcPortal is invalid for this DC Portal")); return NULL; } if (!p_CgParams->h_QmPortal) { p_QmPortal = QmGetPortalHandle(p_CgParams->h_Qm); SANITY_CHECK_RETURN_VALUE(p_QmPortal, E_INVALID_STATE, NULL); } else p_QmPortal = p_CgParams->h_QmPortal; p_QmCg = (t_QmCg *)XX_Malloc(sizeof(t_QmCg)); if (!p_QmCg) { REPORT_ERROR(MAJOR, E_NO_MEMORY, ("QM CG obj!!!")); return NULL; } memset(p_QmCg, 0, sizeof(t_QmCg)); /* build CG struct */ p_QmCg->h_Qm = p_CgParams->h_Qm; p_QmCg->h_QmPortal = p_QmPortal; p_QmCg->h_App = p_CgParams->h_App; err = QmGetCgId(p_CgParams->h_Qm, &p_QmCg->id); if (err) { XX_Free(p_QmCg); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmGetCgId failed")); return NULL; } NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; err = QmPortalRegisterCg(p_QmPortal, p_QmCg, p_QmCg->id); if (err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalRegisterCg failed")); return NULL; } /* Build CGR command */ { #ifdef QM_CGS_NO_FRAME_MODE t_QmRevisionInfo revInfo; QmGetRevision(p_QmCg->h_Qm, &revInfo); if (!((revInfo.majorRev == 1) && (revInfo.minorRev == 0))) #endif /* QM_CGS_NO_FRAME_MODE */ if (p_CgParams->frameCount) { p_Mcc->initcgr.we_mask |= QM_CGR_WE_MODE; p_Mcc->initcgr.cgr.frame_mode = QM_CGR_EN; } } if (p_CgParams->wredEnable) { if (p_CgParams->wredParams.enableGreen) { err = CalcWredCurve(&p_CgParams->wredParams.greenCurve, &wredParams); if(err) { XX_Free(p_QmCg); 
PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G; p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams; } if (p_CgParams->wredParams.enableYellow) { err = CalcWredCurve(&p_CgParams->wredParams.yellowCurve, &wredParams); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y; p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams; } if (p_CgParams->wredParams.enableRed) { err = CalcWredCurve(&p_CgParams->wredParams.redCurve, &wredParams); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MAJOR, err, NO_MSG); return NULL; } p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R; p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams; } } if (p_CgParams->tailDropEnable) { if (!p_CgParams->threshold) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MINOR, E_INVALID_STATE, ("tailDropThreshold must be configured if tailDropEnable ")); return NULL; } p_Mcc->initcgr.cgr.cstd_en = QM_CGR_EN; p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSTD_EN; } if (p_CgParams->threshold) { p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES; p_QmCg->f_Exception = p_CgParams->f_Exception; if (p_QmCg->f_Exception || p_CgParams->notifyDcPortal) { p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN; p_Mcc->initcgr.we_mask |= QM_CGR_WE_CSCN_EN | QM_CGR_WE_CSCN_TARG; /* if SW - set target, if HW - if FM, set HW target, otherwize, set SW target */ p_Mcc->initcgr.cgr.cscn_targ = 0; if (p_QmCg->f_Exception) p_Mcc->initcgr.cgr.cscn_targ = (uint32_t)QM_CGR_TARGET_SWP(QmPortalGetSwPortalId(p_QmCg->h_QmPortal)); if (p_CgParams->notifyDcPortal) p_Mcc->initcgr.cgr.cscn_targ |= (uint32_t)QM_CGR_TARGET_DCP(p_CgParams->dcPortalId); } /* express thresh as ta*2^tn */ gap = (int)p_CgParams->threshold; for (tmpA=0 ; tmpA<256; 
tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(p_CgParams->threshold - tmpA*(1<initcgr.cgr.cs_thres.TA = ta; p_Mcc->initcgr.cgr.cs_thres.Tn = tn; } else if(p_CgParams->f_Exception) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MINOR, E_INVALID_STATE, ("No threshold configured, but f_Exception defined")); return NULL; } qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_INITCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_INITCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); REPORT_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); return NULL; } PUNLOCK(p_QmPortal); return p_QmCg; } t_Error QM_CG_Free(t_Handle h_QmCg) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; t_Error err; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; p_Mcc->initcgr.we_mask = QM_CGR_WE_MASK; err = QmFreeCgId(p_QmCg->h_Qm, p_QmCg->id); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmFreeCgId failed")); } err = QmPortalUnregisterCg(p_QmCg->h_QmPortal, p_QmCg->id); if(err) { XX_Free(p_QmCg); PUNLOCK(p_QmPortal); RETURN_ERROR(MAJOR, E_INVALID_STATE, ("QmPortalUnregisterCg failed")); } qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); XX_Free(p_QmCg); return E_OK; } t_Error QM_CG_SetException(t_Handle h_QmCg, e_QmExceptions exception, bool enable) { 
t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; if (!p_QmCg->f_Exception) RETURN_ERROR(MINOR, E_INVALID_VALUE, ("Either threshold or exception callback was not configured.")); NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; p_Mcc->initcgr.we_mask = QM_CGR_WE_CSCN_EN; if(exception == e_QM_EX_CG_STATE_CHANGE) { if(enable) p_Mcc->initcgr.cgr.cscn_en = QM_CGR_EN; } else { PUNLOCK(p_QmPortal); RETURN_ERROR(MAJOR, E_INVALID_VALUE, ("Illegal exception")); } qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_CG_ModifyWredCurve(t_Handle h_QmCg, t_QmCgModifyWredParams *p_QmCgModifyParams) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; uint32_t wredParams; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; t_Error err = E_OK; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result))); } switch(p_QmCgModifyParams->color) { case(e_QM_CG_COLOR_GREEN): if(!p_Mcr->querycgr.cgr.wr_en_g) { 
PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for green")); } break; case(e_QM_CG_COLOR_YELLOW): if(!p_Mcr->querycgr.cgr.wr_en_y) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for yellow")); } break; case(e_QM_CG_COLOR_RED): if(!p_Mcr->querycgr.cgr.wr_en_r) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("WRED is not enabled for red")); } break; } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; switch(p_QmCgModifyParams->color) { case(e_QM_CG_COLOR_GREEN): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_G | QM_CGR_WE_WR_PARM_G; p_Mcc->initcgr.cgr.wr_en_g = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_g.word = wredParams; break; case(e_QM_CG_COLOR_YELLOW): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_Y | QM_CGR_WE_WR_PARM_Y; p_Mcc->initcgr.cgr.wr_en_y = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_y.word = wredParams; break; case(e_QM_CG_COLOR_RED): err = CalcWredCurve(&p_QmCgModifyParams->wredParams, &wredParams); p_Mcc->initcgr.we_mask |= QM_CGR_WE_WR_EN_R | QM_CGR_WE_WR_PARM_R; p_Mcc->initcgr.cgr.wr_en_r = QM_CGR_EN; p_Mcc->initcgr.cgr.wr_parm_r.word = wredParams; break; } if (err) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, err, NO_MSG); } qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } t_Error QM_CG_ModifyTailDropThreshold(t_Handle h_QmCg, uint32_t threshold) { t_QmCg *p_QmCg = (t_QmCg *)h_QmCg; struct qm_mc_command *p_Mcc; struct qm_mc_result *p_Mcr; t_QmPortal *p_QmPortal; uint32_t tmpA, tmpN, ta=0, 
tn=0; int gap, tmp; SANITY_CHECK_RETURN_ERROR(p_QmCg, E_INVALID_HANDLE); p_QmPortal = (t_QmPortal *)p_QmCg->h_QmPortal; NCSW_PLOCK(p_QmPortal); p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_QUERYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("QM_MCC_VERB_QUERYCGR failed: %s", mcr_result_str(p_Mcr->result))); } if(!p_Mcr->querycgr.cgr.cstd_en) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("Tail Drop is not enabled!")); } p_Mcc = qm_mc_start(p_QmPortal->p_LowQmPortal); p_Mcc->initcgr.cgid = p_QmCg->id; p_Mcc->initcgr.we_mask |= QM_CGR_WE_CS_THRES; /* express thresh as ta*2^tn */ gap = (int)threshold; for (tmpA=0 ; tmpA<256; tmpA++ ) for (tmpN=0 ; tmpN<32; tmpN++ ) { tmp = ABS((int)(threshold - tmpA*(1<initcgr.cgr.cs_thres.TA = ta; p_Mcc->initcgr.cgr.cs_thres.Tn = tn; qm_mc_commit(p_QmPortal->p_LowQmPortal, QM_MCC_VERB_MODIFYCGR); while (!(p_Mcr = qm_mc_result(p_QmPortal->p_LowQmPortal))) ; ASSERT_COND((p_Mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_MODIFYCGR); if (p_Mcr->result != QM_MCR_RESULT_OK) { PUNLOCK(p_QmPortal); RETURN_ERROR(MINOR, E_INVALID_STATE, ("INITCGR failed: %s", mcr_result_str(p_Mcr->result))); } PUNLOCK(p_QmPortal); return E_OK; } Index: head/sys/contrib/ncsw/inc/Peripherals/dpaa_ext.h =================================================================== --- head/sys/contrib/ncsw/inc/Peripherals/dpaa_ext.h (revision 351321) +++ head/sys/contrib/ncsw/inc/Peripherals/dpaa_ext.h (revision 351322) @@ -1,215 +1,219 @@ /* Copyright (c) 2008-2012 Freescale Semiconductor, Inc * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /**************************************************************************//** @File dpaa_ext.h @Description DPAA Application Programming Interface. 
*//***************************************************************************/ #ifndef __DPAA_EXT_H #define __DPAA_EXT_H #include "std_ext.h" #include "error_ext.h" /**************************************************************************//** @Group DPAA_grp Data Path Acceleration Architecture API @Description DPAA API functions, definitions and enums. @{ *//***************************************************************************/ #if defined(__MWERKS__) && !defined(__GNUC__) #pragma pack(push,1) #endif /* defined(__MWERKS__) && ... */ #include +#ifndef __BYTE_ORDER__ #define __BYTE_ORDER__ BYTE_ORDER +#endif +#ifndef __ORDER_BIG_ENDIAN__ #define __ORDER_BIG_ENDIAN__ BIG_ENDIAN +#endif /**************************************************************************//** @Description Frame descriptor *//***************************************************************************/ typedef _Packed struct t_DpaaFD { #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ volatile uint8_t liodn; volatile uint8_t bpid; volatile uint8_t elion; volatile uint8_t addrh; volatile uint32_t addrl; #else volatile uint32_t addrl; volatile uint8_t addrh; volatile uint8_t elion; volatile uint8_t bpid; volatile uint8_t liodn; #endif volatile uint32_t length; /**< Frame length */ volatile uint32_t status; /**< FD status */ } _PackedType t_DpaaFD; /**************************************************************************//** @Description enum for defining frame format *//***************************************************************************/ typedef enum e_DpaaFDFormatType { e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF = 0x0, /**< Simple frame Single buffer; Offset and small length (9b OFFSET, 20b LENGTH) */ e_DPAA_FD_FORMAT_TYPE_LONG_SBSF = 0x2, /**< Simple frame, single buffer; big length (29b LENGTH ,No OFFSET) */ e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF = 0x4, /**< Simple frame, Scatter Gather table; Offset and small length (9b OFFSET, 20b LENGTH) */ e_DPAA_FD_FORMAT_TYPE_LONG_MBSF = 0x6, /**< Simple frame, 
Scatter Gather table; big length (29b LENGTH ,No OFFSET) */ e_DPAA_FD_FORMAT_TYPE_COMPOUND = 0x1, /**< Compound Frame (29b CONGESTION-WEIGHT No LENGTH or OFFSET) */ e_DPAA_FD_FORMAT_TYPE_DUMMY } e_DpaaFDFormatType; /**************************************************************************//** @Collection Frame descriptor macros *//***************************************************************************/ #define DPAA_FD_DD_MASK 0xc0000000 /**< FD DD field mask */ #define DPAA_FD_PID_MASK 0x3f000000 /**< FD PID field mask */ #define DPAA_FD_ELIODN_MASK 0x0000f000 /**< FD ELIODN field mask */ #define DPAA_FD_BPID_MASK 0x00ff0000 /**< FD BPID field mask */ #define DPAA_FD_ADDRH_MASK 0x000000ff /**< FD ADDRH field mask */ #define DPAA_FD_ADDRL_MASK 0xffffffff /**< FD ADDRL field mask */ #define DPAA_FD_FORMAT_MASK 0xe0000000 /**< FD FORMAT field mask */ #define DPAA_FD_OFFSET_MASK 0x1ff00000 /**< FD OFFSET field mask */ #define DPAA_FD_LENGTH_MASK 0x000fffff /**< FD LENGTH field mask */ #define DPAA_FD_GET_ADDRH(fd) ((t_DpaaFD *)fd)->addrh /**< Macro to get FD ADDRH field */ #define DPAA_FD_GET_ADDRL(fd) ((t_DpaaFD *)fd)->addrl /**< Macro to get FD ADDRL field */ #define DPAA_FD_GET_PHYS_ADDR(fd) ((physAddress_t)(((uint64_t)DPAA_FD_GET_ADDRH(fd) << 32) | (uint64_t)DPAA_FD_GET_ADDRL(fd))) /**< Macro to get FD ADDR field */ #define DPAA_FD_GET_FORMAT(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_FORMAT_MASK) >> (31-2)) /**< Macro to get FD FORMAT field */ #define DPAA_FD_GET_OFFSET(fd) ((((t_DpaaFD *)fd)->length & DPAA_FD_OFFSET_MASK) >> (31-11)) /**< Macro to get FD OFFSET field */ #define DPAA_FD_GET_LENGTH(fd) (((t_DpaaFD *)fd)->length & DPAA_FD_LENGTH_MASK) /**< Macro to get FD LENGTH field */ #define DPAA_FD_GET_STATUS(fd) ((t_DpaaFD *)fd)->status /**< Macro to get FD STATUS field */ #define DPAA_FD_GET_ADDR(fd) XX_PhysToVirt(DPAA_FD_GET_PHYS_ADDR(fd)) /**< Macro to get FD ADDR (virtual) */ #define DPAA_FD_SET_ADDRH(fd,val) ((t_DpaaFD *)fd)->addrh = (val) /**< Macro 
to set FD ADDRH field */ #define DPAA_FD_SET_ADDRL(fd,val) ((t_DpaaFD *)fd)->addrl = (val) /**< Macro to set FD ADDRL field */ #define DPAA_FD_SET_ADDR(fd,val) \ do { \ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \ DPAA_FD_SET_ADDRH(fd, ((uint32_t)(physAddr >> 32))); \ DPAA_FD_SET_ADDRL(fd, (uint32_t)physAddr); \ } while (0) /**< Macro to set FD ADDR field */ #define DPAA_FD_SET_FORMAT(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_FORMAT_MASK) | (((val) << (31-2))& DPAA_FD_FORMAT_MASK))) /**< Macro to set FD FORMAT field */ #define DPAA_FD_SET_OFFSET(fd,val) (((t_DpaaFD *)fd)->length = ((((t_DpaaFD *)fd)->length & ~DPAA_FD_OFFSET_MASK) | (((val) << (31-11))& DPAA_FD_OFFSET_MASK) )) /**< Macro to set FD OFFSET field */ #define DPAA_FD_SET_LENGTH(fd,val) (((t_DpaaFD *)fd)->length = (((t_DpaaFD *)fd)->length & ~DPAA_FD_LENGTH_MASK) | ((val) & DPAA_FD_LENGTH_MASK)) /**< Macro to set FD LENGTH field */ #define DPAA_FD_SET_STATUS(fd,val) ((t_DpaaFD *)fd)->status = (val) /**< Macro to set FD STATUS field */ /* @} */ /**************************************************************************//** @Description Frame Scatter/Gather Table Entry *//***************************************************************************/ typedef _Packed struct t_DpaaSGTE { volatile uint32_t addrh; /**< Buffer Address high */ volatile uint32_t addrl; /**< Buffer Address low */ volatile uint32_t length; /**< Buffer length */ volatile uint32_t offset; /**< SGTE offset */ } _PackedType t_DpaaSGTE; #define DPAA_NUM_OF_SG_TABLE_ENTRY 16 /**************************************************************************//** @Description Frame Scatter/Gather Table *//***************************************************************************/ typedef _Packed struct t_DpaaSGT { t_DpaaSGTE tableEntry[DPAA_NUM_OF_SG_TABLE_ENTRY]; /**< Structure that holds information about a single S/G entry. 
*/ } _PackedType t_DpaaSGT; /**************************************************************************//** @Description Compound Frame Table *//***************************************************************************/ typedef _Packed struct t_DpaaCompTbl { t_DpaaSGTE outputBuffInfo; /**< Structure that holds information about the compound-frame output buffer; NOTE: this may point to a S/G table */ t_DpaaSGTE inputBuffInfo; /**< Structure that holds information about the compound-frame input buffer; NOTE: this may point to a S/G table */ } _PackedType t_DpaaCompTbl; /**************************************************************************//** @Collection Frame Scatter/Gather Table Entry macros *//***************************************************************************/ #define DPAA_SGTE_ADDRH_MASK 0x000000ff /**< SGTE ADDRH field mask */ #define DPAA_SGTE_ADDRL_MASK 0xffffffff /**< SGTE ADDRL field mask */ #define DPAA_SGTE_E_MASK 0x80000000 /**< SGTE Extension field mask */ #define DPAA_SGTE_F_MASK 0x40000000 /**< SGTE Final field mask */ #define DPAA_SGTE_LENGTH_MASK 0x3fffffff /**< SGTE LENGTH field mask */ #define DPAA_SGTE_BPID_MASK 0x00ff0000 /**< SGTE BPID field mask */ #define DPAA_SGTE_OFFSET_MASK 0x00001fff /**< SGTE OFFSET field mask */ #define DPAA_SGTE_GET_ADDRH(sgte) (((t_DpaaSGTE *)sgte)->addrh & DPAA_SGTE_ADDRH_MASK) /**< Macro to get SGTE ADDRH field */ #define DPAA_SGTE_GET_ADDRL(sgte) ((t_DpaaSGTE *)sgte)->addrl /**< Macro to get SGTE ADDRL field */ #define DPAA_SGTE_GET_PHYS_ADDR(sgte) ((physAddress_t)(((uint64_t)DPAA_SGTE_GET_ADDRH(sgte) << 32) | (uint64_t)DPAA_SGTE_GET_ADDRL(sgte))) /**< Macro to get FD ADDR field */ #define DPAA_SGTE_GET_EXTENSION(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_E_MASK) >> (31-0)) /**< Macro to get SGTE EXTENSION field */ #define DPAA_SGTE_GET_FINAL(sgte) ((((t_DpaaSGTE *)sgte)->length & DPAA_SGTE_F_MASK) >> (31-1)) /**< Macro to get SGTE FINAL field */ #define DPAA_SGTE_GET_LENGTH(sgte) (((t_DpaaSGTE 
*)sgte)->length & DPAA_SGTE_LENGTH_MASK) /**< Macro to get SGTE LENGTH field */ #define DPAA_SGTE_GET_BPID(sgte) ((((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_BPID_MASK) >> (31-15)) /**< Macro to get SGTE BPID field */ #define DPAA_SGTE_GET_OFFSET(sgte) (((t_DpaaSGTE *)sgte)->offset & DPAA_SGTE_OFFSET_MASK) /**< Macro to get SGTE OFFSET field */ #define DPAA_SGTE_GET_ADDR(sgte) XX_PhysToVirt(DPAA_SGTE_GET_PHYS_ADDR(sgte)) #define DPAA_SGTE_SET_ADDRH(sgte,val) (((t_DpaaSGTE *)sgte)->addrh = ((((t_DpaaSGTE *)sgte)->addrh & ~DPAA_SGTE_ADDRH_MASK) | ((val) & DPAA_SGTE_ADDRH_MASK))) /**< Macro to set SGTE ADDRH field */ #define DPAA_SGTE_SET_ADDRL(sgte,val) ((t_DpaaSGTE *)sgte)->addrl = (val) /**< Macro to set SGTE ADDRL field */ #define DPAA_SGTE_SET_ADDR(sgte,val) \ do { \ uint64_t physAddr = (uint64_t)(XX_VirtToPhys(val)); \ DPAA_SGTE_SET_ADDRH(sgte, ((uint32_t)(physAddr >> 32))); \ DPAA_SGTE_SET_ADDRL(sgte, (uint32_t)physAddr); \ } while (0) /**< Macro to set SGTE ADDR field */ #define DPAA_SGTE_SET_EXTENSION(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_E_MASK) | (((val) << (31-0))& DPAA_SGTE_E_MASK))) /**< Macro to set SGTE EXTENSION field */ #define DPAA_SGTE_SET_FINAL(sgte,val) (((t_DpaaSGTE *)sgte)->length = ((((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_F_MASK) | (((val) << (31-1))& DPAA_SGTE_F_MASK))) /**< Macro to set SGTE FINAL field */ #define DPAA_SGTE_SET_LENGTH(sgte,val) (((t_DpaaSGTE *)sgte)->length = (((t_DpaaSGTE *)sgte)->length & ~DPAA_SGTE_LENGTH_MASK) | ((val) & DPAA_SGTE_LENGTH_MASK)) /**< Macro to set SGTE LENGTH field */ #define DPAA_SGTE_SET_BPID(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_BPID_MASK) | (((val) << (31-15))& DPAA_SGTE_BPID_MASK))) /**< Macro to set SGTE BPID field */ #define DPAA_SGTE_SET_OFFSET(sgte,val) (((t_DpaaSGTE *)sgte)->offset = ((((t_DpaaSGTE *)sgte)->offset & ~DPAA_SGTE_OFFSET_MASK) | (((val) << (31-31))& DPAA_SGTE_OFFSET_MASK) )) /**< Macro to 
set SGTE OFFSET field */ /* @} */ #if defined(__MWERKS__) && !defined(__GNUC__) #pragma pack(pop) #endif /* defined(__MWERKS__) && ... */ #define DPAA_LIODN_DONT_OVERRIDE (-1) /** @} */ /* end of DPAA_grp group */ #endif /* __DPAA_EXT_H */ Index: head/sys/contrib/ncsw/user/env/xx.c =================================================================== --- head/sys/contrib/ncsw/user/env/xx.c (revision 351321) +++ head/sys/contrib/ncsw/user/env/xx.c (revision 351322) @@ -1,811 +1,811 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error_ext.h" #include "std_ext.h" #include "list_ext.h" #include "mm_ext.h" /* Configuration */ /* Define the number of dTSEC ports active in system */ #define MALLOCSMART_DTSEC_IN_USE 4 /* * Calculate malloc's pool size for dTSEC's buffers. * We reserve 1MB pool for each dTSEC port. */ #define MALLOCSMART_POOL_SIZE \ (MALLOCSMART_DTSEC_IN_USE * 1024 * 1024) #define MALLOCSMART_SLICE_SIZE (PAGE_SIZE / 2) /* 2kB */ /* Defines */ #define MALLOCSMART_SIZE_TO_SLICE(x) \ (((x) + MALLOCSMART_SLICE_SIZE - 1) / MALLOCSMART_SLICE_SIZE) #define MALLOCSMART_SLICES \ MALLOCSMART_SIZE_TO_SLICE(MALLOCSMART_POOL_SIZE) /* Malloc Pool for NetCommSW */ MALLOC_DEFINE(M_NETCOMMSW, "NetCommSW", "NetCommSW software stack"); MALLOC_DEFINE(M_NETCOMMSW_MT, "NetCommSWTrack", "NetCommSW software allocation tracker"); /* MallocSmart data structures */ static void *XX_MallocSmartPool; static int XX_MallocSmartMap[MALLOCSMART_SLICES]; static struct mtx XX_MallocSmartLock; static struct mtx XX_MallocTrackLock; MTX_SYSINIT(XX_MallocSmartLockInit, &XX_MallocSmartLock, "NetCommSW MallocSmart Lock", MTX_DEF); MTX_SYSINIT(XX_MallocTrackLockInit, &XX_MallocTrackLock, "NetCommSW MallocTrack Lock", MTX_DEF); /* Interrupt info */ #define XX_INTR_FLAG_PREALLOCATED (1 << 0) #define XX_INTR_FLAG_BOUND (1 << 1) #define XX_INTR_FLAG_FMAN_FIX (1 << 2) struct XX_IntrInfo { driver_intr_t *handler; void *arg; int cpu; int flags; void *cookie; }; static struct XX_IntrInfo XX_IntrInfo[INTR_VECTORS]; /* Portal type identifiers */ enum XX_PortalIdent{ BM_PORTAL = 0, QM_PORTAL, }; /* Structure to store portals' properties */ struct XX_PortalInfo { vm_paddr_t portal_ce_pa[2][MAXCPU]; vm_paddr_t portal_ci_pa[2][MAXCPU]; uint32_t portal_ce_size[2][MAXCPU]; uint32_t portal_ci_size[2][MAXCPU]; 
vm_offset_t portal_ce_va[2]; vm_offset_t portal_ci_va[2]; uintptr_t portal_intr[2][MAXCPU]; }; static struct XX_PortalInfo XX_PInfo; void XX_Exit(int status) { panic("NetCommSW: Exit called with status %i", status); } void XX_Print(char *str, ...) { va_list ap; va_start(ap, str); vprintf(str, ap); va_end(ap); } void * XX_Malloc(uint32_t size) { void *p = (malloc(size, M_NETCOMMSW, M_NOWAIT)); return (p); } static int XX_MallocSmartMapCheck(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) if (XX_MallocSmartMap[i]) return (FALSE); return (TRUE); } static void XX_MallocSmartMapSet(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) XX_MallocSmartMap[i] = ((i == start) ? slices : -1); } static void XX_MallocSmartMapClear(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) XX_MallocSmartMap[i] = 0; } int XX_MallocSmartInit(void) { int error; error = E_OK; mtx_lock(&XX_MallocSmartLock); if (XX_MallocSmartPool) goto out; /* Allocate MallocSmart pool */ XX_MallocSmartPool = contigmalloc(MALLOCSMART_POOL_SIZE, M_NETCOMMSW, M_NOWAIT, 0, 0xFFFFFFFFFull, MALLOCSMART_POOL_SIZE, 0); if (!XX_MallocSmartPool) { error = E_NO_MEMORY; goto out; } out: mtx_unlock(&XX_MallocSmartLock); return (error); } void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment) { unsigned int i; vm_offset_t addr; addr = 0; /* Convert alignment and size to number of slices */ alignment = MALLOCSMART_SIZE_TO_SLICE(alignment); size = MALLOCSMART_SIZE_TO_SLICE(size); /* Lock resources */ mtx_lock(&XX_MallocSmartLock); /* Allocate region */ for (i = 0; i + size <= MALLOCSMART_SLICES; i += alignment) { if (XX_MallocSmartMapCheck(i, size)) { XX_MallocSmartMapSet(i, size); addr = (vm_offset_t)XX_MallocSmartPool + (i * 
MALLOCSMART_SLICE_SIZE); break; } } /* Unlock resources */ mtx_unlock(&XX_MallocSmartLock); return ((void *)addr); } void XX_FreeSmart(void *p) { unsigned int start, slices; /* Calculate first slice of region */ start = MALLOCSMART_SIZE_TO_SLICE((vm_offset_t)(p) - (vm_offset_t)XX_MallocSmartPool); /* Lock resources */ mtx_lock(&XX_MallocSmartLock); KASSERT(XX_MallocSmartMap[start] > 0, ("XX_FreeSmart: Double or mid-block free!\n")); /* Free region */ slices = XX_MallocSmartMap[start]; XX_MallocSmartMapClear(start, slices); /* Unlock resources */ mtx_unlock(&XX_MallocSmartLock); } void XX_Free(void *p) { free(p, M_NETCOMMSW); } uint32_t XX_DisableAllIntr(void) { return (intr_disable()); } void XX_RestoreAllIntr(uint32_t flags) { intr_restore(flags); } static bool XX_IsPortalIntr(uintptr_t irq) { int cpu, type; /* Check interrupt numbers of all available portals */ for (cpu = 0, type = 0; XX_PInfo.portal_intr[type][cpu] != 0; cpu++) { if (irq == XX_PInfo.portal_intr[type][cpu]) { /* Found it! */ return (1); } if (XX_PInfo.portal_intr[type][cpu + 1] == 0) { type++; cpu = 0; } } return (0); } void XX_FmanFixIntr(int irq) { XX_IntrInfo[irq].flags |= XX_INTR_FLAG_FMAN_FIX; } static bool XX_FmanNeedsIntrFix(int irq) { if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_FMAN_FIX) return (1); return (0); } static void XX_Dispatch(void *arg) { struct XX_IntrInfo *info; info = arg; /* Bind this thread to proper CPU when SMP has been already started. 
*/ if ((info->flags & XX_INTR_FLAG_BOUND) == 0 && smp_started && info->cpu >= 0) { thread_lock(curthread); sched_bind(curthread, info->cpu); thread_unlock(curthread); info->flags |= XX_INTR_FLAG_BOUND; } if (info->handler == NULL) { printf("%s(): IRQ handler is NULL!\n", __func__); return; } info->handler(info->arg); } t_Error XX_PreallocAndBindIntr(uintptr_t irq, unsigned int cpu) { struct resource *r; unsigned int inum; t_Error error; r = (struct resource *)irq; inum = rman_get_start(r); error = XX_SetIntr(irq, XX_Dispatch, &XX_IntrInfo[inum]); if (error != 0) return (error); XX_IntrInfo[inum].flags = XX_INTR_FLAG_PREALLOCATED; XX_IntrInfo[inum].cpu = cpu; return (E_OK); } t_Error XX_DeallocIntr(uintptr_t irq) { struct resource *r; unsigned int inum; r = (struct resource *)irq; inum = rman_get_start(r); if ((XX_IntrInfo[inum].flags & XX_INTR_FLAG_PREALLOCATED) == 0) return (E_INVALID_STATE); XX_IntrInfo[inum].flags = 0; return (XX_FreeIntr(irq)); } t_Error XX_SetIntr(uintptr_t irq, t_Isr *f_Isr, t_Handle handle) { device_t dev; struct resource *r; unsigned int flags; int err; r = (struct resource *)irq; dev = rman_get_device(r); irq = rman_get_start(r); /* Handle preallocated interrupts */ if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) { if (XX_IntrInfo[irq].handler != NULL) return (E_BUSY); XX_IntrInfo[irq].handler = f_Isr; XX_IntrInfo[irq].arg = handle; return (E_OK); } flags = INTR_TYPE_NET | INTR_MPSAFE; /* BMAN/QMAN Portal interrupts must be exlusive */ if (XX_IsPortalIntr(irq)) flags |= INTR_EXCL; err = bus_setup_intr(dev, r, flags, NULL, f_Isr, handle, &XX_IntrInfo[irq].cookie); if (err) goto finish; /* * XXX: Bind FMan IRQ to CPU0. Current interrupt subsystem directs each * interrupt to all CPUs. Race between an interrupt assertion and * masking may occur and interrupt handler may be called multiple times * per one interrupt. FMan doesn't support such a situation. Workaround * is to bind FMan interrupt to one CPU0 only. 
*/ #ifdef SMP if (XX_FmanNeedsIntrFix(irq)) err = powerpc_bind_intr(irq, 0); #endif finish: return (err); } t_Error XX_FreeIntr(uintptr_t irq) { device_t dev; struct resource *r; r = (struct resource *)irq; dev = rman_get_device(r); irq = rman_get_start(r); /* Handle preallocated interrupts */ if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) { if (XX_IntrInfo[irq].handler == NULL) return (E_INVALID_STATE); XX_IntrInfo[irq].handler = NULL; XX_IntrInfo[irq].arg = NULL; return (E_OK); } return (bus_teardown_intr(dev, r, XX_IntrInfo[irq].cookie)); } t_Error XX_EnableIntr(uintptr_t irq) { struct resource *r; r = (struct resource *)irq; irq = rman_get_start(r); powerpc_intr_unmask(irq); return (E_OK); } t_Error XX_DisableIntr(uintptr_t irq) { struct resource *r; r = (struct resource *)irq; irq = rman_get_start(r); powerpc_intr_mask(irq); return (E_OK); } t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (NULL); } void XX_FreeTasklet (t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } void XX_FlushScheduledTasks(void) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (NULL); } t_Handle XX_InitSpinlock(void) 
{ struct mtx *m; m = malloc(sizeof(*m), M_NETCOMMSW, M_NOWAIT | M_ZERO); if (!m) return (0); mtx_init(m, "NetCommSW Lock", NULL, MTX_DEF | MTX_DUPOK); return (m); } void XX_FreeSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_destroy(m); free(m, M_NETCOMMSW); } void XX_LockSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_lock(m); } void XX_UnlockSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_unlock(m); } uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock) { XX_LockSpinlock(h_Spinlock); return (0); } void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags) { XX_UnlockSpinlock(h_Spinlock); } uint32_t XX_Sleep(uint32_t msecs) { XX_UDelay(1000 * msecs); return (0); } void XX_UDelay(uint32_t usecs) { DELAY(usecs); } t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH], t_IpcMsgHandler *f_MsgHandler, t_Handle h_Module, uint32_t replyLength) { /* * This function returns fake E_OK status and does nothing * as NetCommSW IPC is not used by FreeBSD drivers. */ return (E_OK); } t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]) { /* * This function returns fake E_OK status and does nothing * as NetCommSW IPC is not used by FreeBSD drivers. 
*/ return (E_OK); } t_Error XX_IpcSendMessage(t_Handle h_Session, uint8_t *p_Msg, uint32_t msgLength, uint8_t *p_Reply, uint32_t *p_ReplyLength, t_IpcMsgCompletion *f_Completion, t_Handle h_Arg) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH], char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); - return (E_OK); + return (NULL); } t_Error XX_IpcFreeSession(t_Handle h_Session) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } physAddress_t XX_VirtToPhys(void *addr) { vm_paddr_t paddr; int cpu; cpu = PCPU_GET(cpuid); /* Handle NULL address */ if (addr == NULL) return (-1); /* Check CCSR */ if ((vm_offset_t)addr >= ccsrbar_va && (vm_offset_t)addr < ccsrbar_va + ccsrbar_size) return (((vm_offset_t)addr - ccsrbar_va) + ccsrbar_pa); /* Handle BMAN mappings */ if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[BM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ce_va[BM_PORTAL] + XX_PInfo.portal_ce_size[BM_PORTAL][cpu])) return (XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ce_va[BM_PORTAL]); if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[BM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ci_va[BM_PORTAL] + XX_PInfo.portal_ci_size[BM_PORTAL][cpu])) return (XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ci_va[BM_PORTAL]); /* Handle QMAN mappings */ if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[QM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ce_va[QM_PORTAL] + XX_PInfo.portal_ce_size[QM_PORTAL][cpu])) return (XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ce_va[QM_PORTAL]); if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[QM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ci_va[QM_PORTAL] + 
XX_PInfo.portal_ci_size[QM_PORTAL][cpu])) return (XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ci_va[QM_PORTAL]); if (PMAP_HAS_DMAP && (vm_offset_t)addr >= DMAP_BASE_ADDRESS && (vm_offset_t)addr <= DMAP_MAX_ADDRESS) return (DMAP_TO_PHYS((vm_offset_t)addr)); else paddr = pmap_kextract((vm_offset_t)addr); if (paddr == 0) printf("NetCommSW: " "Unable to translate virtual address %p!\n", addr); else pmap_track_page(kernel_pmap, (vm_offset_t)addr); return (paddr); } void * XX_PhysToVirt(physAddress_t addr) { struct pv_entry *pv; vm_page_t page; int cpu; /* Check CCSR */ if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size) return ((void *)((vm_offset_t)(addr - ccsrbar_pa) + ccsrbar_va)); cpu = PCPU_GET(cpuid); /* Handle BMAN mappings */ if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] + XX_PInfo.portal_ce_size[BM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]))); if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] + XX_PInfo.portal_ci_size[BM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]))); /* Handle QMAN mappings */ if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] + XX_PInfo.portal_ce_size[QM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]))); if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] + XX_PInfo.portal_ci_size[QM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]))); page = PHYS_TO_VM_PAGE(addr); pv = TAILQ_FIRST(&page->md.pv_list); if (pv != NULL) return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK))); if 
(PMAP_HAS_DMAP) return ((void *)(uintptr_t)PHYS_TO_DMAP(addr)); printf("NetCommSW: " "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr); return (NULL); } void XX_PortalSetInfo(device_t dev) { char *dev_name; struct dpaa_portals_softc *sc; int i, type, len; dev_name = malloc(sizeof(*dev_name), M_TEMP, M_WAITOK | M_ZERO); len = strlen("bman-portals"); strncpy(dev_name, device_get_name(dev), len); if (strncmp(dev_name, "bman-portals", len) && strncmp(dev_name, "qman-portals", len)) goto end; if (strncmp(dev_name, "bman-portals", len) == 0) type = BM_PORTAL; else type = QM_PORTAL; sc = device_get_softc(dev); for (i = 0; sc->sc_dp[i].dp_ce_pa != 0; i++) { XX_PInfo.portal_ce_pa[type][i] = sc->sc_dp[i].dp_ce_pa; XX_PInfo.portal_ci_pa[type][i] = sc->sc_dp[i].dp_ci_pa; XX_PInfo.portal_ce_size[type][i] = sc->sc_dp[i].dp_ce_size; XX_PInfo.portal_ci_size[type][i] = sc->sc_dp[i].dp_ci_size; XX_PInfo.portal_intr[type][i] = sc->sc_dp[i].dp_intr_num; } XX_PInfo.portal_ce_va[type] = rman_get_bushandle(sc->sc_rres[0]); XX_PInfo.portal_ci_va[type] = rman_get_bushandle(sc->sc_rres[1]); end: free(dev_name, M_TEMP); } Index: head/sys/dev/dpaa/if_dtsec_rm.c =================================================================== --- head/sys/dev/dpaa/if_dtsec_rm.c (revision 351321) +++ head/sys/dev/dpaa/if_dtsec_rm.c (revision 351322) @@ -1,658 +1,658 @@ /*- * Copyright (c) 2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include #include #include #include #include #include "fman.h" #include "bman.h" #include "qman.h" #include "if_dtsec.h" #include "if_dtsec_rm.h" /** * @group dTSEC RM private defines. * @{ */ #define DTSEC_BPOOLS_USED (1) #define DTSEC_MAX_TX_QUEUE_LEN 256 struct dtsec_rm_frame_info { struct mbuf *fi_mbuf; t_DpaaSGTE fi_sgt[DPAA_NUM_OF_SG_TABLE_ENTRY]; }; enum dtsec_rm_pool_params { DTSEC_RM_POOL_RX_LOW_MARK = 16, DTSEC_RM_POOL_RX_HIGH_MARK = 64, DTSEC_RM_POOL_RX_MAX_SIZE = 256, DTSEC_RM_POOL_FI_LOW_MARK = 16, DTSEC_RM_POOL_FI_HIGH_MARK = 64, DTSEC_RM_POOL_FI_MAX_SIZE = 256, }; +#define DTSEC_RM_FQR_RX_CHANNEL e_QM_FQ_CHANNEL_POOL1 +#define DTSEC_RM_FQR_TX_CONF_CHANNEL e_QM_FQ_CHANNEL_SWPORTAL0 enum dtsec_rm_fqr_params { - DTSEC_RM_FQR_RX_CHANNEL = e_QM_FQ_CHANNEL_POOL1, DTSEC_RM_FQR_RX_WQ = 1, - DTSEC_RM_FQR_TX_CONF_CHANNEL = e_QM_FQ_CHANNEL_SWPORTAL0, DTSEC_RM_FQR_TX_WQ = 1, DTSEC_RM_FQR_TX_CONF_WQ = 1 }; /** @} */ /** * @group dTSEC Frame Info routines. 
* @{ */ void dtsec_rm_fi_pool_free(struct dtsec_softc *sc) { if (sc->sc_fi_zone != NULL) uma_zdestroy(sc->sc_fi_zone); } int dtsec_rm_fi_pool_init(struct dtsec_softc *sc) { snprintf(sc->sc_fi_zname, sizeof(sc->sc_fi_zname), "%s: Frame Info", device_get_nameunit(sc->sc_dev)); sc->sc_fi_zone = uma_zcreate(sc->sc_fi_zname, sizeof(struct dtsec_rm_frame_info), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); if (sc->sc_fi_zone == NULL) return (EIO); return (0); } static struct dtsec_rm_frame_info * dtsec_rm_fi_alloc(struct dtsec_softc *sc) { struct dtsec_rm_frame_info *fi; fi = uma_zalloc(sc->sc_fi_zone, M_NOWAIT); return (fi); } static void dtsec_rm_fi_free(struct dtsec_softc *sc, struct dtsec_rm_frame_info *fi) { uma_zfree(sc->sc_fi_zone, fi); } /** @} */ /** * @group dTSEC FMan PORT routines. * @{ */ int dtsec_rm_fm_port_rx_init(struct dtsec_softc *sc, int unit) { t_FmPortParams params; t_FmPortRxParams *rx_params; t_FmExtPools *pool_params; t_Error error; memset(¶ms, 0, sizeof(params)); params.baseAddr = sc->sc_fm_base + sc->sc_port_rx_hw_id; params.h_Fm = sc->sc_fmh; params.portType = dtsec_fm_port_rx_type(sc->sc_eth_dev_type); params.portId = sc->sc_eth_id; params.independentModeEnable = false; params.liodnBase = FM_PORT_LIODN_BASE; params.f_Exception = dtsec_fm_port_rx_exception_callback; params.h_App = sc; rx_params = ¶ms.specificParams.rxParams; rx_params->errFqid = sc->sc_rx_fqid; rx_params->dfltFqid = sc->sc_rx_fqid; rx_params->liodnOffset = 0; pool_params = &rx_params->extBufPools; pool_params->numOfPoolsUsed = DTSEC_BPOOLS_USED; pool_params->extBufPool->id = sc->sc_rx_bpid; pool_params->extBufPool->size = FM_PORT_BUFFER_SIZE; sc->sc_rxph = FM_PORT_Config(¶ms); if (sc->sc_rxph == NULL) { device_printf(sc->sc_dev, "couldn't configure FM Port RX.\n"); return (ENXIO); } error = FM_PORT_Init(sc->sc_rxph); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM Port RX.\n"); FM_PORT_Free(sc->sc_rxph); return (ENXIO); } if (bootverbose) 
device_printf(sc->sc_dev, "RX hw port 0x%02x initialized.\n", sc->sc_port_rx_hw_id); return (0); } int dtsec_rm_fm_port_tx_init(struct dtsec_softc *sc, int unit) { t_FmPortParams params; t_FmPortNonRxParams *tx_params; t_Error error; memset(¶ms, 0, sizeof(params)); params.baseAddr = sc->sc_fm_base + sc->sc_port_tx_hw_id; params.h_Fm = sc->sc_fmh; params.portType = dtsec_fm_port_tx_type(sc->sc_eth_dev_type); params.portId = sc->sc_eth_id; params.independentModeEnable = false; params.liodnBase = FM_PORT_LIODN_BASE; params.f_Exception = dtsec_fm_port_tx_exception_callback; params.h_App = sc; tx_params = ¶ms.specificParams.nonRxParams; tx_params->errFqid = sc->sc_tx_conf_fqid; tx_params->dfltFqid = sc->sc_tx_conf_fqid; tx_params->qmChannel = sc->sc_port_tx_qman_chan; #ifdef FM_OP_PARTITION_ERRATA_FMANx8 tx_params->opLiodnOffset = 0; #endif sc->sc_txph = FM_PORT_Config(¶ms); if (sc->sc_txph == NULL) { device_printf(sc->sc_dev, "couldn't configure FM Port TX.\n"); return (ENXIO); } error = FM_PORT_Init(sc->sc_txph); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM Port TX.\n"); FM_PORT_Free(sc->sc_txph); return (ENXIO); } if (bootverbose) device_printf(sc->sc_dev, "TX hw port 0x%02x initialized.\n", sc->sc_port_tx_hw_id); return (0); } /** @} */ /** * @group dTSEC buffer pools routines. 
* @{ */ static t_Error dtsec_rm_pool_rx_put_buffer(t_Handle h_BufferPool, uint8_t *buffer, t_Handle context) { struct dtsec_softc *sc; sc = h_BufferPool; uma_zfree(sc->sc_rx_zone, buffer); return (E_OK); } static uint8_t * dtsec_rm_pool_rx_get_buffer(t_Handle h_BufferPool, t_Handle *context) { struct dtsec_softc *sc; uint8_t *buffer; sc = h_BufferPool; buffer = uma_zalloc(sc->sc_rx_zone, M_NOWAIT); return (buffer); } static void dtsec_rm_pool_rx_depleted(t_Handle h_App, bool in) { struct dtsec_softc *sc; unsigned int count; sc = h_App; if (!in) return; while (1) { count = bman_count(sc->sc_rx_pool); if (count > DTSEC_RM_POOL_RX_HIGH_MARK) return; bman_pool_fill(sc->sc_rx_pool, DTSEC_RM_POOL_RX_HIGH_MARK); } } void dtsec_rm_pool_rx_free(struct dtsec_softc *sc) { if (sc->sc_rx_pool != NULL) bman_pool_destroy(sc->sc_rx_pool); if (sc->sc_rx_zone != NULL) uma_zdestroy(sc->sc_rx_zone); } int dtsec_rm_pool_rx_init(struct dtsec_softc *sc) { /* FM_PORT_BUFFER_SIZE must be less than PAGE_SIZE */ CTASSERT(FM_PORT_BUFFER_SIZE < PAGE_SIZE); snprintf(sc->sc_rx_zname, sizeof(sc->sc_rx_zname), "%s: RX Buffers", device_get_nameunit(sc->sc_dev)); sc->sc_rx_zone = uma_zcreate(sc->sc_rx_zname, FM_PORT_BUFFER_SIZE, NULL, NULL, NULL, NULL, FM_PORT_BUFFER_SIZE - 1, 0); if (sc->sc_rx_zone == NULL) return (EIO); sc->sc_rx_pool = bman_pool_create(&sc->sc_rx_bpid, FM_PORT_BUFFER_SIZE, 0, 0, DTSEC_RM_POOL_RX_MAX_SIZE, dtsec_rm_pool_rx_get_buffer, dtsec_rm_pool_rx_put_buffer, DTSEC_RM_POOL_RX_LOW_MARK, DTSEC_RM_POOL_RX_HIGH_MARK, 0, 0, dtsec_rm_pool_rx_depleted, sc, NULL, NULL); if (sc->sc_rx_pool == NULL) { device_printf(sc->sc_dev, "NULL rx pool somehow\n"); dtsec_rm_pool_rx_free(sc); return (EIO); } return (0); } /** @} */ /** * @group dTSEC Frame Queue Range routines. 
* @{ */ static void dtsec_rm_fqr_mext_free(struct mbuf *m) { struct dtsec_softc *sc; void *buffer; buffer = m->m_ext.ext_arg1; sc = m->m_ext.ext_arg2; if (bman_count(sc->sc_rx_pool) <= DTSEC_RM_POOL_RX_MAX_SIZE) bman_put_buffer(sc->sc_rx_pool, buffer); else dtsec_rm_pool_rx_put_buffer(sc, buffer, NULL); } static e_RxStoreResponse dtsec_rm_fqr_rx_callback(t_Handle app, t_Handle fqr, t_Handle portal, uint32_t fqid_off, t_DpaaFD *frame) { struct dtsec_softc *sc; struct mbuf *m; void *frame_va; m = NULL; sc = app; frame_va = DPAA_FD_GET_ADDR(frame); KASSERT(DPAA_FD_GET_FORMAT(frame) == e_DPAA_FD_FORMAT_TYPE_SHORT_SBSF, ("%s(): Got unsupported frame format 0x%02X!", __func__, DPAA_FD_GET_FORMAT(frame))); KASSERT(DPAA_FD_GET_OFFSET(frame) == 0, ("%s(): Only offset 0 is supported!", __func__)); if (DPAA_FD_GET_STATUS(frame) != 0) { device_printf(sc->sc_dev, "RX error: 0x%08X\n", DPAA_FD_GET_STATUS(frame)); goto err; } m = m_gethdr(M_NOWAIT, MT_HEADER); if (m == NULL) goto err; m_extadd(m, frame_va, FM_PORT_BUFFER_SIZE, dtsec_rm_fqr_mext_free, frame_va, sc, 0, EXT_NET_DRV); m->m_pkthdr.rcvif = sc->sc_ifnet; m->m_len = DPAA_FD_GET_LENGTH(frame); m_fixhdr(m); (*sc->sc_ifnet->if_input)(sc->sc_ifnet, m); return (e_RX_STORE_RESPONSE_CONTINUE); err: bman_put_buffer(sc->sc_rx_pool, frame_va); if (m != NULL) m_freem(m); return (e_RX_STORE_RESPONSE_CONTINUE); } static e_RxStoreResponse dtsec_rm_fqr_tx_confirm_callback(t_Handle app, t_Handle fqr, t_Handle portal, uint32_t fqid_off, t_DpaaFD *frame) { struct dtsec_rm_frame_info *fi; struct dtsec_softc *sc; unsigned int qlen; t_DpaaSGTE *sgt0; sc = app; if (DPAA_FD_GET_STATUS(frame) != 0) device_printf(sc->sc_dev, "TX error: 0x%08X\n", DPAA_FD_GET_STATUS(frame)); /* * We are storing struct dtsec_rm_frame_info in first entry * of scatter-gather table. 
*/ sgt0 = DPAA_FD_GET_ADDR(frame); fi = DPAA_SGTE_GET_ADDR(sgt0); /* Free transmitted frame */ m_freem(fi->fi_mbuf); dtsec_rm_fi_free(sc, fi); qlen = qman_fqr_get_counter(sc->sc_tx_conf_fqr, 0, e_QM_FQR_COUNTERS_FRAME); if (qlen == 0) { DTSEC_LOCK(sc); if (sc->sc_tx_fqr_full) { sc->sc_tx_fqr_full = 0; dtsec_rm_if_start_locked(sc); } DTSEC_UNLOCK(sc); } return (e_RX_STORE_RESPONSE_CONTINUE); } void dtsec_rm_fqr_rx_free(struct dtsec_softc *sc) { if (sc->sc_rx_fqr) qman_fqr_free(sc->sc_rx_fqr); } int dtsec_rm_fqr_rx_init(struct dtsec_softc *sc) { t_Error error; t_Handle fqr; /* Default Frame Queue */ fqr = qman_fqr_create(1, DTSEC_RM_FQR_RX_CHANNEL, DTSEC_RM_FQR_RX_WQ, false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create default RX queue" "\n"); return (EIO); } sc->sc_rx_fqr = fqr; sc->sc_rx_fqid = qman_fqr_get_base_fqid(fqr); error = qman_fqr_register_cb(fqr, dtsec_rm_fqr_rx_callback, sc); if (error != E_OK) { device_printf(sc->sc_dev, "could not register RX callback\n"); dtsec_rm_fqr_rx_free(sc); return (EIO); } return (0); } void dtsec_rm_fqr_tx_free(struct dtsec_softc *sc) { if (sc->sc_tx_fqr) qman_fqr_free(sc->sc_tx_fqr); if (sc->sc_tx_conf_fqr) qman_fqr_free(sc->sc_tx_conf_fqr); } int dtsec_rm_fqr_tx_init(struct dtsec_softc *sc) { t_Error error; t_Handle fqr; /* TX Frame Queue */ fqr = qman_fqr_create(1, sc->sc_port_tx_qman_chan, DTSEC_RM_FQR_TX_WQ, false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create default TX queue" "\n"); return (EIO); } sc->sc_tx_fqr = fqr; /* TX Confirmation Frame Queue */ fqr = qman_fqr_create(1, DTSEC_RM_FQR_TX_CONF_CHANNEL, DTSEC_RM_FQR_TX_CONF_WQ, false, 0, false, false, true, false, 0, 0, 0); if (fqr == NULL) { device_printf(sc->sc_dev, "could not create TX confirmation " "queue\n"); dtsec_rm_fqr_tx_free(sc); return (EIO); } sc->sc_tx_conf_fqr = fqr; sc->sc_tx_conf_fqid = qman_fqr_get_base_fqid(fqr); error = 
qman_fqr_register_cb(fqr, dtsec_rm_fqr_tx_confirm_callback, sc); if (error != E_OK) { device_printf(sc->sc_dev, "could not register TX confirmation " "callback\n"); dtsec_rm_fqr_tx_free(sc); return (EIO); } return (0); } /** @} */ /** * @group dTSEC IFnet routines. * @{ */ void dtsec_rm_if_start_locked(struct dtsec_softc *sc) { vm_size_t dsize, psize, ssize; struct dtsec_rm_frame_info *fi; unsigned int qlen, i; struct mbuf *m0, *m; vm_offset_t vaddr; t_DpaaFD fd; DTSEC_LOCK_ASSERT(sc); /* TODO: IFF_DRV_OACTIVE */ if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) == 0) return; if ((sc->sc_ifnet->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) return; while (!IFQ_DRV_IS_EMPTY(&sc->sc_ifnet->if_snd)) { /* Check length of the TX queue */ qlen = qman_fqr_get_counter(sc->sc_tx_fqr, 0, e_QM_FQR_COUNTERS_FRAME); if (qlen >= DTSEC_MAX_TX_QUEUE_LEN) { sc->sc_tx_fqr_full = 1; return; } fi = dtsec_rm_fi_alloc(sc); if (fi == NULL) return; IFQ_DRV_DEQUEUE(&sc->sc_ifnet->if_snd, m0); if (m0 == NULL) { dtsec_rm_fi_free(sc, fi); return; } i = 0; m = m0; psize = 0; dsize = 0; fi->fi_mbuf = m0; while (m && i < DPAA_NUM_OF_SG_TABLE_ENTRY) { if (m->m_len == 0) continue; /* * First entry in scatter-gather table is used to keep * pointer to frame info structure. 
*/ DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)fi); DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0); i++; dsize = m->m_len; vaddr = (vm_offset_t)m->m_data; while (dsize > 0 && i < DPAA_NUM_OF_SG_TABLE_ENTRY) { ssize = PAGE_SIZE - (vaddr & PAGE_MASK); if (m->m_len < ssize) ssize = m->m_len; DPAA_SGTE_SET_ADDR(&fi->fi_sgt[i], (void *)vaddr); DPAA_SGTE_SET_LENGTH(&fi->fi_sgt[i], ssize); DPAA_SGTE_SET_EXTENSION(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_BPID(&fi->fi_sgt[i], 0); DPAA_SGTE_SET_OFFSET(&fi->fi_sgt[i], 0); dsize -= ssize; vaddr += ssize; psize += ssize; i++; } if (dsize > 0) break; m = m->m_next; } /* Check if SG table was constructed properly */ if (m != NULL || dsize != 0) { dtsec_rm_fi_free(sc, fi); m_freem(m0); continue; } DPAA_SGTE_SET_FINAL(&fi->fi_sgt[i-1], 1); DPAA_FD_SET_ADDR(&fd, fi->fi_sgt); DPAA_FD_SET_LENGTH(&fd, psize); DPAA_FD_SET_FORMAT(&fd, e_DPAA_FD_FORMAT_TYPE_SHORT_MBSF); fd.liodn = 0; fd.bpid = 0; fd.elion = 0; DPAA_FD_SET_OFFSET(&fd, 0); DPAA_FD_SET_STATUS(&fd, 0); DTSEC_UNLOCK(sc); if (qman_fqr_enqueue(sc->sc_tx_fqr, 0, &fd) != E_OK) { dtsec_rm_fi_free(sc, fi); m_freem(m0); } DTSEC_LOCK(sc); } } /** @} */