Index: projects/netbsd-tests-upstream-01-2017/contrib/byacc/test/yacc/expr.oxout.tab.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/contrib/byacc/test/yacc/expr.oxout.tab.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/contrib/byacc/test/yacc/expr.oxout.tab.c (revision 313399) @@ -1,1958 +1,1958 @@ /* original parser id follows */ /* yysccsid[] = "@(#)yaccpar 1.9 (Berkeley) 02/21/93" */ /* (use YYMAJOR/YYMINOR for ifdefs dependent on parser version) */ #define YYBYACC 1 #define YYMAJOR 1 #define YYMINOR 9 #define YYCHECK "yyyymmdd" #define YYEMPTY (-1) #define yyclearin (yychar = YYEMPTY) #define yyerrok (yyerrflag = 0) #define YYRECOVERING() (yyerrflag != 0) #define YYENOMEM (-2) #define YYEOF 0 #ifndef yyparse #define yyparse expr.oxout_parse #endif /* yyparse */ #ifndef yylex #define yylex expr.oxout_lex #endif /* yylex */ #ifndef yyerror #define yyerror expr.oxout_error #endif /* yyerror */ #ifndef yychar #define yychar expr.oxout_char #endif /* yychar */ #ifndef yyval #define yyval expr.oxout_val #endif /* yyval */ #ifndef yylval #define yylval expr.oxout_lval #endif /* yylval */ #ifndef yydebug #define yydebug expr.oxout_debug #endif /* yydebug */ #ifndef yynerrs #define yynerrs expr.oxout_nerrs #endif /* yynerrs */ #ifndef yyerrflag #define yyerrflag expr.oxout_errflag #endif /* yyerrflag */ #ifndef yylhs #define yylhs expr.oxout_lhs #endif /* yylhs */ #ifndef yylen #define yylen expr.oxout_len #endif /* yylen */ #ifndef yydefred #define yydefred expr.oxout_defred #endif /* yydefred */ #ifndef yydgoto #define yydgoto expr.oxout_dgoto #endif /* yydgoto */ #ifndef yysindex #define yysindex expr.oxout_sindex #endif /* yysindex */ #ifndef yyrindex #define yyrindex expr.oxout_rindex #endif /* yyrindex */ #ifndef yygindex #define yygindex expr.oxout_gindex #endif /* yygindex */ #ifndef yytable #define yytable expr.oxout_table #endif /* yytable */ #ifndef yycheck #define yycheck expr.oxout_check #endif /* yycheck */ #ifndef yyname #define yyname expr.oxout_name #endif /* yyname */ #ifndef yyrule #define yyrule expr.oxout_rule #endif /* yyrule */ #define YYPREFIX "expr.oxout_" #define YYPURE 0 #line 5 "expr.oxout.y" #include <stdio.h> #include <stdlib.h> #line 8 "expr.Y" #include "expr.oxout.h" #include <stdio.h> extern int yylex(void); extern void yyerror(const char *); #line 27 "expr.oxout.y" #include <limits.h> #define yyyR USHRT_MAX #ifdef YYSTYPE #undef YYSTYPE_IS_DECLARED #define YYSTYPE_IS_DECLARED 1 #endif #ifndef YYSTYPE_IS_DECLARED #define YYSTYPE_IS_DECLARED 1 #line 31 "expr.oxout.y" typedef union { struct yyyOxAttrbs { struct yyyStackItem *yyyOxStackItem; } yyyOxAttrbs; } YYSTYPE; #endif /* !YYSTYPE_IS_DECLARED */ #line 38 "expr.oxout.y" #include <stdio.h> #include <stdarg.h> static int yyyYok = 1; extern yyyFT yyyRCIL[]; void yyyExecuteRRsection(yyyGNT *rootNode); void yyyYoxInit(void); void yyyDecorate(void); struct yyyOxAttrbs; /* hack required to compensate for 'msta' behavior */ void yyyGenIntNode(long yyyProdNum, int yyyRHSlength, int yyyNattrbs, struct yyyOxAttrbs *yyval_OxAttrbs, ...); void yyyAdjustINRC(long yyyProdNum, int yyyRHSlength, long startP, long stopP, struct yyyOxAttrbs *yyval_OxAttrbs, ...); void yyyCheckUnsolvedInstTrav(yyyGNT *rootNode,long *nNZrc,long *cycleSum); void yyyUnsolvedInstSearchTrav(yyyGNT *pNode); void yyyUnsolvedInstSearchTravAux(yyyGNT *pNode); void yyyabort(void); #line 146 "expr.oxout.tab.c" /* compatibility with bison */ #ifdef YYPARSE_PARAM /* compatibility with FreeBSD */ # ifdef YYPARSE_PARAM_TYPE # define
YYPARSE_DECL() yyparse(YYPARSE_PARAM_TYPE YYPARSE_PARAM) # else # define YYPARSE_DECL() yyparse(void *YYPARSE_PARAM) # endif #else # define YYPARSE_DECL() yyparse(void) #endif /* Parameters sent to lex. */ #ifdef YYLEX_PARAM # define YYLEX_DECL() yylex(void *YYLEX_PARAM) # define YYLEX yylex(YYLEX_PARAM) #else # define YYLEX_DECL() yylex(void) # define YYLEX yylex() #endif /* Parameters sent to yyerror. */ #ifndef YYERROR_DECL #define YYERROR_DECL() yyerror(const char *s) #endif #ifndef YYERROR_CALL #define YYERROR_CALL(msg) yyerror(msg) #endif extern int YYPARSE_DECL(); #define ID 257 #define CONST 258 #define YYERRCODE 256 -typedef short YYINT; +typedef int YYINT; static const YYINT expr.oxout_lhs[] = { -1, 2, 0, 1, 3, 3, 3, 3, 3, 3, 3, }; static const YYINT expr.oxout_len[] = { 2, 0, 2, 1, 3, 3, 3, 3, 3, 1, 1, }; static const YYINT expr.oxout_defred[] = { 1, 0, 0, 9, 10, 0, 2, 0, 0, 0, 0, 0, 0, 8, 0, 0, 4, 0, }; static const YYINT expr.oxout_dgoto[] = { 1, 6, 2, 7, }; static const YYINT expr.oxout_sindex[] = { 0, 0, -40, 0, 0, -40, 0, -18, -24, -40, -40, -40, -40, 0, -37, -37, 0, -39, }; static const YYINT expr.oxout_rindex[] = { 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 2, 8, 0, 1, }; static const YYINT expr.oxout_gindex[] = { 0, 0, 0, 4, }; #define YYTABLESIZE 218 static const YYINT expr.oxout_table[] = { 5, 6, 5, 11, 0, 11, 3, 0, 7, 8, 12, 0, 0, 14, 15, 16, 17, 13, 11, 9, 0, 10, 0, 12, 11, 9, 0, 10, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 6, 5, 6, 5, 6, 7, 0, 7, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, }; static const YYINT expr.oxout_check[] = { 40, 0, 0, 42, -1, 42, 0, -1, 0, 5, 47, -1, -1, 9, 10, 11, 12, 41, 42, 43, -1, 45, -1, 47, 42, 43, -1, 45, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 41, 41, 43, 43, 45, 45, 47, 41, -1, 43, -1, 45, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 257, 258, }; #define YYFINAL 1 #ifndef YYDEBUG #define YYDEBUG 0 #endif #define YYMAXTOKEN 258 #define YYUNDFTOKEN 264 #define YYTRANSLATE(a) ((a) > YYMAXTOKEN ? 
YYUNDFTOKEN : (a)) #if YYDEBUG static const char *const expr.oxout_name[] = { "end-of-file",0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,"'('","')'","'*'","'+'",0,"'-'",0,"'/'",0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,"ID", "CONST",0,0,0,0,0,"illegal-symbol", }; static const char *const expr.oxout_rule[] = { "$accept : yyyAugNonterm", "$$1 :", "yyyAugNonterm : $$1 s", "s : expr", "expr : expr '*' expr", "expr : expr '+' expr", "expr : expr '/' expr", "expr : expr '-' expr", "expr : '(' expr ')'", "expr : ID", "expr : CONST", }; #endif int yydebug; int yynerrs; int yyerrflag; int yychar; YYSTYPE yyval; YYSTYPE yylval; /* define the initial stack-sizes */ #ifdef YYSTACKSIZE #undef YYMAXDEPTH #define YYMAXDEPTH YYSTACKSIZE #else #ifdef YYMAXDEPTH #define YYSTACKSIZE YYMAXDEPTH #else #define YYSTACKSIZE 10000 #define YYMAXDEPTH 10000 #endif #endif #define YYINITSTACKSIZE 200 typedef struct { unsigned stacksize; YYINT *s_base; YYINT *s_mark; YYINT *s_last; YYSTYPE *l_base; YYSTYPE *l_mark; } YYSTACKDATA; /* variables for the parser stack */ static YYSTACKDATA yystack; #line 53 "expr.Y" int yyparse(void); int main() {yyparse(); } #line 138 "expr.oxout.y" long yyySSALspaceSize = 20000; long yyyRSmaxSize = 1000; long yyyTravStackMaxSize = 2000; struct yyySolvedSAlistCell {yyyWAT attrbNum; long next; }; #define yyyLambdaSSAL 0 long yyySSALCfreeList = yyyLambdaSSAL; long yyyNewSSALC = 1; struct yyySolvedSAlistCell *yyySSALspace; long yyyNbytesStackStg; yyyFT yyyRCIL[1]; short yyyIIIEL[] = {0, 0,2,6,10,14,18,22,24, }; long yyyIIEL[] = { 0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0, 1,1, }; long yyyIEL[] = { 0,0,0, }; yyyFT yyyEntL[1]; void yyyfatal(char *msg) {fputs(msg,stderr);exit(-1);} #define yyySSALof 'S' #define yyyRSof 'q' #define yyyTSof 't' void yyyHandleOverflow(char which) {char *msg1,*msg2; long oldSize,newSize; switch(which) { case yyySSALof : msg1 = "SSAL overflow: "; oldSize = yyySSALspaceSize; break; case yyyRSof : msg1 = "ready set overflow: "; oldSize = yyyRSmaxSize; break; case yyyTSof : msg1 = "traversal stack overflow: "; oldSize = yyyTravStackMaxSize; break; default :; } newSize = (3*oldSize)/2; if (newSize < 100) newSize = 100; fputs(msg1,stderr); fprintf(stderr,"size was %ld.\n",oldSize); msg2 = " Have to modify evaluator: -Y%c%ld.\n"; fprintf(stderr,msg2,which,newSize); exit(-1); } void yyySignalEnts(yyyGNT *node,long startP,long stopP) {yyyGNT *dumNode; while (startP < stopP) { if (!yyyEntL[startP]) dumNode = node; else dumNode = (node->cL)[yyyEntL[startP]-1]; if (!(--((dumNode->refCountList)[yyyEntL[startP+1]] ) ) ) { if (++yyyRSTop == yyyAfterRS) {yyyHandleOverflow(yyyRSof); break; } yyyRSTop->node = dumNode; yyyRSTop->whichSym = yyyEntL[startP]; yyyRSTop->wa = yyyEntL[startP+1]; } startP += 2; } } void yyySolveAndSignal() { long yyyiDum,*yyypL; int yyyws,yyywa; yyyGNT *yyyRSTopN,*yyyRefN; yyyParent yyyRSTopNp; yyyRSTopNp = (yyyRSTopN = yyyRSTop->node)->parent; yyyRefN= (yyyws = (yyyRSTop->whichSym))?yyyRSTopNp.noderef:yyyRSTopN; yyywa = yyyRSTop->wa; yyyRSTop--; switch(yyyRefN->prodNum) { case 1: /***yacc rule 1***/ switch (yyyws) { } break; case 2: /***yacc 
rule 2***/ switch (yyyws) { } break; case 3: /***yacc rule 3***/ switch (yyyws) { } break; case 4: /***yacc rule 4***/ switch (yyyws) { } break; case 5: /***yacc rule 5***/ switch (yyyws) { } break; case 6: /***yacc rule 6***/ switch (yyyws) { } break; case 7: /***yacc rule 7***/ switch (yyyws) { case 1: /**/ switch (yyywa) { } break; } break; case 8: /***yacc rule 8***/ switch (yyyws) { case 1: /**/ switch (yyywa) { } break; } break; } /* switch */ if (yyyws) /* the just-solved instance was inherited. */ {if (yyyRSTopN->prodNum) {yyyiDum = yyyIIEL[yyyIIIEL[yyyRSTopN->prodNum]] + yyywa; yyySignalEnts(yyyRSTopN,yyyIEL[yyyiDum], yyyIEL[yyyiDum+1] ); } } else /* the just-solved instance was synthesized. */ {if (!(yyyRSTopN->parentIsStack)) /* node has a parent. */ {yyyiDum = yyyIIEL[yyyIIIEL[yyyRSTopNp.noderef->prodNum] + yyyRSTopN->whichSym ] + yyywa; yyySignalEnts(yyyRSTopNp.noderef, yyyIEL[yyyiDum], yyyIEL[yyyiDum+1] ); } else /* node is still on the stack--it has no parent yet. */ {yyypL = &(yyyRSTopNp.stackref->solvedSAlist); if (yyySSALCfreeList == yyyLambdaSSAL) {yyySSALspace[yyyNewSSALC].next = *yyypL; if ((*yyypL = yyyNewSSALC++) == yyySSALspaceSize) yyyHandleOverflow(yyySSALof); } else {yyyiDum = yyySSALCfreeList; yyySSALCfreeList = yyySSALspace[yyySSALCfreeList].next; yyySSALspace[yyyiDum].next = *yyypL; *yyypL = yyyiDum; } yyySSALspace[*yyypL].attrbNum = yyywa; } } } /* yyySolveAndSignal */ #define condStg unsigned int conds; #define yyyClearConds {yyyTST->conds = 0;} #define yyySetCond(n) {yyyTST->conds += (1<<(n));} #define yyyCond(n) ((yyyTST->conds & (1<<(n)))?1:0) struct yyyTravStackItem {yyyGNT *node; char isReady; condStg }; void yyyDoTraversals(yyyGNT *rootNode) {struct yyyTravStackItem *yyyTravStack,*yyyTST,*yyyAfterTravStack; yyyGNT *yyyTSTn,**yyyCLptr2; int yyyi,yyyRL,yyyPass; int i; if (!yyyYok) return; if ((yyyTravStack = ((struct yyyTravStackItem *) calloc((size_t)yyyTravStackMaxSize, (size_t)sizeof(struct yyyTravStackItem) ) ) ) == (struct yyyTravStackItem *)NULL ) {fputs("malloc error in traversal stack allocation\n",stderr); exit(-1); } yyyAfterTravStack = yyyTravStack + yyyTravStackMaxSize; yyyTravStack++; for (yyyi=0; yyyi<2; yyyi++) { yyyTST = yyyTravStack; yyyTST->node = rootNode; yyyTST->isReady = 0; yyyClearConds while(yyyTST >= yyyTravStack) {yyyTSTn = yyyTST->node; if (yyyTST->isReady) {yyyPass = 1; goto yyyTravSwitch; yyyTpop: yyyTST--; } else {yyyPass = 0; goto yyyTravSwitch; yyyTpush: yyyTST->isReady = 1; if (yyyTSTn->prodNum) {if (yyyRL) {yyyCLptr2 = yyyTSTn->cL; i = yyyTSTn->cLlen; while (i--) {if (++yyyTST == yyyAfterTravStack) yyyHandleOverflow(yyyTSof); else {yyyTST->node = *yyyCLptr2; yyyTST->isReady = 0; yyyClearConds } yyyCLptr2++; } } /* right to left */ else /* left to right */ {i = yyyTSTn->cLlen; yyyCLptr2 = yyyTSTn->cL + i; while (i--) {yyyCLptr2--; if (++yyyTST == yyyAfterTravStack) yyyHandleOverflow(yyyTSof); else {yyyTST->node = *yyyCLptr2; yyyTST->isReady = 0; yyyClearConds } } } /* left to right */ } } /* else */ continue; yyyTravSwitch: switch(yyyTSTn->prodNum) { case 1: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) if (! 
#line 24 "expr.Y" (1) #line 444 "expr.oxout.y" ) yyySetCond(1) yyySetCond(2) case 1: if (yyyCond(0) != yyyPass) { #line 24 "expr.Y" #line 453 "expr.oxout.y" } if (yyyCond(1) != yyyPass) { #line 24 "expr.Y" printf("\n"); #line 459 "expr.oxout.y" } if (yyyCond(2) != yyyPass) { #line 25 "expr.Y" printf("prefix: "); #line 465 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; if ( #line 23 "expr.Y" (1) #line 477 "expr.oxout.y" ) yyySetCond(2) case 1: if (yyyCond(0) != yyyPass) { #line 22 "expr.Y" printf("\n"); #line 486 "expr.oxout.y" } if (yyyCond(1) != yyyPass) { #line 23 "expr.Y" #line 491 "expr.oxout.y" } if (yyyCond(2) != yyyPass) { #line 23 "expr.Y" printf("postfix: ")/* missing ; */ #line 497 "expr.oxout.y" } break; } break; } break; case 2: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 29 "expr.Y" printf(" * "); #line 518 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 28 "expr.Y" printf(" * "); #line 533 "expr.oxout.y" } break; } break; } break; case 3: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 32 "expr.Y" printf(" + "); #line 554 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 33 "expr.Y" printf(" + "); #line 569 "expr.oxout.y" } break; } break; } break; case 4: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 37 "expr.Y" printf(" / "); #line 590 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 36 "expr.Y" printf(" / "); #line 605 "expr.oxout.y" } break; } break; } break; case 5: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 41 "expr.Y" printf(" - "); #line 626 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 40 "expr.Y" printf(" - "); #line 641 "expr.oxout.y" } break; } break; } break; case 6: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0; case 1: break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: break; } break; } break; case 7: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 46 "expr.Y" printf(" %s ",yyyTSTn->cL[0]->yyyAttrbs.yyyAttrb1.lexeme); #line 685 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 45 "expr.Y" printf(" %s ",yyyTSTn->cL[0]->yyyAttrbs.yyyAttrb1.lexeme); #line 700 "expr.oxout.y" } break; } break; } break; case 8: switch(yyyi) { case 0: switch(yyyPass) { case 0: yyyRL = 0;yyySetCond(0) case 1: if (yyyCond(0) != yyyPass) { #line 50 "expr.Y" printf(" %s ",yyyTSTn->cL[0]->yyyAttrbs.yyyAttrb1.lexeme); #line 721 "expr.oxout.y" } break; } break; case 1: switch(yyyPass) { case 0: yyyRL = 0; case 1: if (yyyCond(0) != yyyPass) { #line 49 "expr.Y" printf(" %s ",yyyTSTn->cL[0]->yyyAttrbs.yyyAttrb1.lexeme); #line 736 "expr.oxout.y" } break; } break; } break; } /* switch */ if (yyyPass) goto yyyTpop; else goto yyyTpush; } /* while */ } /* for */ } /* yyyDoTraversals */ void yyyExecuteRRsection(yyyGNT *rootNode) { int yyyi; long cycleSum = 0; long nNZrc = 0; if (!yyyYok) return; 
yyyCheckUnsolvedInstTrav(rootNode,&nNZrc,&cycleSum); if (nNZrc) { fputs("\n\n\n**********\n",stderr); fputs("cycle detected in completed parse tree",stderr); fputs(" after decoration.\n",stderr); #if CYCLE_VERBOSE fprintf(stderr, "number of unsolved attribute instances == %ld.\n", nNZrc ); fprintf(stderr, "total number of remaining dependencies == %ld.\n", cycleSum ); fputs("average number of remaining dependencies\n",stderr); fprintf(stderr," per unsolved instance == %f.\n", ((float)(cycleSum)/(float)(nNZrc)) ); #endif fprintf(stderr, "searching parse tree for %ld unsolved instances:\n", nNZrc ); yyyUnsolvedInstSearchTravAux(rootNode); } yyyDoTraversals(rootNode); } /* yyyExecuteRRsection */ yyyWAT yyyLRCIL[2] = {0,0, }; void yyyYoxInit(void) { static int yyyInitDone = 0; if (yyyInitDone) return; if ((yyyRS = (struct yyyRSitem *) calloc((size_t)(yyyRSmaxSize+1), (size_t)sizeof(struct yyyRSitem)) ) == ((struct yyyRSitem *) NULL) ) yyyfatal("malloc error in ox ready set space allocation\n"); yyyRS++; yyyAfterRS = yyyRS + yyyRSmaxSize; if ((yyySSALspace = (struct yyySolvedSAlistCell *) calloc((size_t)(yyySSALspaceSize+1), (size_t)sizeof(struct yyySolvedSAlistCell)) ) == ((struct yyySolvedSAlistCell *) NULL) ) yyyfatal("malloc error in stack solved list space allocation\n"); yyyInitDone = 1; yyyRSTop = yyyRS - 1; } /* yyyYoxInit */ void yyyDecorate(void) { while (yyyRSTop >= yyyRS) yyySolveAndSignal(); } void yyyGenIntNode(long yyyProdNum, int yyyRHSlength, int yyyNattrbs, struct yyyOxAttrbs *yyval_OxAttrbs, ...) {yyyWST i; yyySIT **yyyOxStackItem = &yyval_OxAttrbs->yyyOxStackItem; yyyGNT *gnpDum; va_list ap; *yyyOxStackItem = (yyySIT *) malloc((size_t)sizeof(yyySIT)); if (*yyyOxStackItem == (yyySIT *) NULL) yyyfatal("malloc error in ox yacc semantic stack space allocation\n"); (*yyyOxStackItem)->node = (yyyGNT *) malloc((size_t)sizeof(yyyGNT)); if ((*yyyOxStackItem)->node == (yyyGNT *) NULL) yyyfatal("malloc error in ox node space allocation\n"); (*yyyOxStackItem)->solvedSAlist = yyyLambdaSSAL; (*yyyOxStackItem)->node->parent.stackref = *yyyOxStackItem; (*yyyOxStackItem)->node->parentIsStack = 1; (*yyyOxStackItem)->node->cLlen = yyyRHSlength; (*yyyOxStackItem)->node->cL = (yyyGNT **) calloc((size_t)yyyRHSlength, (size_t)sizeof(yyyGNT *)); if ((*yyyOxStackItem)->node->cL == (yyyGNT **) NULL) yyyfatal("malloc error in ox child list space allocation\n"); (*yyyOxStackItem)->node->refCountListLen = yyyNattrbs; (*yyyOxStackItem)->node->refCountList = (yyyRCT *) calloc((size_t)yyyNattrbs, (size_t)sizeof(yyyRCT)); if ((*yyyOxStackItem)->node->refCountList == (yyyRCT *) NULL) yyyfatal("malloc error in ox reference count list space allocation\n"); (*yyyOxStackItem)->node->prodNum = yyyProdNum; va_start(ap, yyval_OxAttrbs); for (i=1;i<=yyyRHSlength;i++) {yyySIT *yaccStDum = va_arg(ap,struct yyyOxAttrbs *)->yyyOxStackItem; gnpDum = (*yyyOxStackItem)->node->cL[i-1] = yaccStDum->node; gnpDum->whichSym = i; gnpDum->parent.noderef = (*yyyOxStackItem)->node; gnpDum->parentIsStack = 0; } va_end(ap); } #define yyyDECORfREQ 50 void yyyAdjustINRC(long yyyProdNum, int yyyRHSlength, long startP, long stopP, struct yyyOxAttrbs *yyval_OxAttrbs, ...) 
{yyyWST i; yyySIT *yyyOxStackItem = yyval_OxAttrbs->yyyOxStackItem; long SSALptr,SSALptrHead,*cPtrPtr; long *pL; yyyGNT *gnpDum; long iTemp; long nextP; static unsigned short intNodeCount = yyyDECORfREQ; va_list ap; nextP = startP; while (nextP < stopP) {if (yyyRCIL[nextP] == yyyR) {(yyyOxStackItem->node->refCountList)[yyyRCIL[nextP+1]] = yyyRCIL[nextP+2]; } else {(((yyyOxStackItem->node->cL)[yyyRCIL[nextP]])->refCountList)[yyyRCIL[nextP+1]] = yyyRCIL[nextP+2]; } nextP += 3; } pL = yyyIIEL + yyyIIIEL[yyyProdNum]; va_start(ap, yyval_OxAttrbs); for (i=1;i<=yyyRHSlength;i++) {yyySIT *yaccStDum = va_arg(ap,struct yyyOxAttrbs *)->yyyOxStackItem; pL++; SSALptrHead = SSALptr = *(cPtrPtr = &(yaccStDum->solvedSAlist)); if (SSALptr != yyyLambdaSSAL) {*cPtrPtr = yyyLambdaSSAL; do { iTemp = (*pL+yyySSALspace[SSALptr].attrbNum); yyySignalEnts(yyyOxStackItem->node, yyyIEL[iTemp], yyyIEL[iTemp+1] ); SSALptr = *(cPtrPtr = &(yyySSALspace[SSALptr].next)); } while (SSALptr != yyyLambdaSSAL); *cPtrPtr = yyySSALCfreeList; yyySSALCfreeList = SSALptrHead; } } va_end(ap); nextP = startP + 2; while (nextP < stopP) {if (!yyyRCIL[nextP]) {if (yyyRCIL[nextP-2] == yyyR) {pL = &(yyyOxStackItem->solvedSAlist); if (yyySSALCfreeList == yyyLambdaSSAL) {yyySSALspace[yyyNewSSALC].next = *pL; if ((*pL = yyyNewSSALC++) == yyySSALspaceSize) yyyHandleOverflow(yyySSALof); } else {iTemp = yyySSALCfreeList; yyySSALCfreeList = yyySSALspace[yyySSALCfreeList].next; yyySSALspace[iTemp].next = *pL; *pL = iTemp; } yyySSALspace[*pL].attrbNum = yyyRCIL[nextP-1]; } else {if ((gnpDum = (yyyOxStackItem->node->cL)[yyyRCIL[nextP-2]])->prodNum != 0) { iTemp = yyyIIEL[yyyIIIEL[gnpDum->prodNum]] + yyyRCIL[nextP-1]; yyySignalEnts(gnpDum, yyyIEL[iTemp], yyyIEL[iTemp+1] ); } } } nextP += 3; } if (!--intNodeCount) {intNodeCount = yyyDECORfREQ; yyyDecorate(); } } void yyyGenLeaf(int nAttrbs,int typeNum,long startP,long stopP,YYSTYPE *yylval) {yyyRCT *rcPdum; yyySIT **yyyOxStackItem = &yylval->yyyOxAttrbs.yyyOxStackItem; (*yyyOxStackItem) = (yyySIT *) malloc((size_t)sizeof(yyySIT)); if ((*yyyOxStackItem) == (yyySIT *) NULL) yyyfatal("malloc error in ox yacc semantic stack space allocation\n"); (*yyyOxStackItem)->node = (yyyGNT *) malloc((size_t)sizeof(yyyGNT)) ; if ((*yyyOxStackItem)->node == (yyyGNT *) NULL) yyyfatal("malloc error in ox node space allocation\n"); (*yyyOxStackItem)->solvedSAlist = yyyLambdaSSAL; (*yyyOxStackItem)->node->parent.stackref = *yyyOxStackItem; (*yyyOxStackItem)->node->parentIsStack = 1; (*yyyOxStackItem)->node->cLlen = 0; (*yyyOxStackItem)->node->cL = (yyyGNT **)NULL; (*yyyOxStackItem)->node->refCountListLen = nAttrbs; rcPdum = (*yyyOxStackItem)->node->refCountList = (yyyRCT *) calloc((size_t)nAttrbs, (size_t)sizeof(yyyRCT)); if (rcPdum == (yyyRCT *) NULL) yyyfatal("malloc error in ox reference count list space allocation\n"); while (startP < stopP) rcPdum[yyyLRCIL[startP++]] = 0; (*yyyOxStackItem)->node->prodNum = 0; (*yyyOxStackItem)->node->whichSym = 0; } void yyyabort(void) {yyyYok = 0; } #define yyyLastProdNum 8 #define yyyNsorts 1 int yyyProdsInd[] = { 0, 0, 2, 6, 10, 14, 18, 22, 24, 26, }; int yyyProds[][2] = { { 116, 0},{ 462, 0},{ 462, 0},{ 462, 0},{ 412, 0}, { 462, 0},{ 462, 0},{ 462, 0},{ 420, 0},{ 462, 0}, { 462, 0},{ 462, 0},{ 452, 0},{ 462, 0},{ 462, 0}, { 462, 0},{ 436, 0},{ 462, 0},{ 462, 0},{ 396, 0}, { 462, 0},{ 404, 0},{ 462, 0},{ 619, 1},{ 462, 0}, { 567, 1}, }; int yyySortsInd[] = { 0, 0, 1, }; int yyySorts[] = { 413, }; char *yyyStringTab[] = { 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 
0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,"s",0,0,0, 0,0,"y",0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,"LRpre",0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,"'('",0,0,0, 0,0,0,0,"')'", 0,0,0,0,0, 0,0,"'*'","lexeme",0, 0,0,0,0,0, "'+'",0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,"'-'",0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,"'/'",0,0, 0,0,0,0,0, 0,0,"expr",0,0, 0,0,0,0,0, 0,0,0,0,0, 0,"printf",0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,"CONST","LRpost",0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,"ID", 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0, 0, }; #define yyySizeofProd(num) (yyyProdsInd[(num)+1] - yyyProdsInd[(num)]) #define yyyGSoccurStr(prodNum,symPos) \ (yyyStringTab[yyyProds[yyyProdsInd[(prodNum)] + (symPos)][0]]) #define yyySizeofSort(num) (yyySortsInd[(num)+1] - yyySortsInd[(num)]) #define yyySortOf(prodNum,symPos) \ (yyyProds[yyyProdsInd[(prodNum)] + (symPos)][1]) #define yyyAttrbStr(prodNum,symPos,attrbNum) \ (yyyStringTab[yyySorts[yyySortsInd[yyySortOf(prodNum,symPos)] + \ (attrbNum) \ ] \ ] \ ) void yyyShowProd(int i) {int j,nSyms; nSyms = yyySizeofProd(i); for (j=0; j\n",stderr); else putc('\n',stderr); } } } void yyyCheckNodeInstancesSolved(yyyGNT *np) {int mysort,sortSize,i,prodNum,symPos,inTerminalNode; int nUnsolvedInsts = 0; if (np->prodNum != 0) {inTerminalNode = 0; prodNum = np->prodNum; symPos = 0; } else {inTerminalNode = 1; prodNum = 
np->parent.noderef->prodNum; symPos = np->whichSym; } mysort = yyySortOf(prodNum,symPos); sortSize = yyySizeofSort(mysort); for (i=0; i<sortSize; i++) if ((np->refCountList)[i] != 0) nUnsolvedInsts += 1; if (nUnsolvedInsts) {fprintf(stderr, "\nFound node that has %d unsolved attribute instance(s).\n", nUnsolvedInsts ); fprintf(stderr,"Node is labeled \"%s\".\n", yyyGSoccurStr(prodNum,symPos)); if (inTerminalNode) {fputs("Node is terminal. Its parent production is:\n ",stderr); yyyShowProd(prodNum); } else {fputs("Node is nonterminal. ",stderr); if (!(np->parentIsStack)) {fprintf(stderr, "Node is %dth child in its parent production:\n ", np->whichSym ); yyyShowProd(np->parent.noderef->prodNum); } fputs("Node is on left hand side of this production:\n ",stderr); yyyShowProd(np->prodNum); } fputs("The following instances are unsolved:\n",stderr); for (i=0; i<sortSize; i++) if ((np->refCountList)[i] != 0) fprintf(stderr," %-16s still has %1d dependencies.\n", yyyAttrbStr(prodNum,symPos,i),(np->refCountList)[i]); } } void yyyCheckUnsolvedInstTrav(yyyGNT *pNode,long *nNZrc,long *cycleSum) {yyyGNT **yyyCLpdum; yyyRCT *rcp; int i; /* visit the refCountList of each node in the tree, and sum the non-zero refCounts */ rcp = pNode->refCountList; i = pNode->refCountListLen; while (i--) if (*rcp++) {*cycleSum += *(rcp - 1); (*nNZrc)++;} yyyCLpdum = pNode->cL; i = pNode->cLlen; while (i--) { yyyCheckUnsolvedInstTrav(*yyyCLpdum,nNZrc,cycleSum); yyyCLpdum++; } } void yyyUnsolvedInstSearchTravAux(yyyGNT *pNode) {yyyGNT **yyyCLpdum; int i; yyyCheckNodeInstancesSolved(pNode); yyyCLpdum = pNode->cL; i = pNode->cLlen; while (i--) { yyyUnsolvedInstSearchTravAux(*yyyCLpdum); yyyCLpdum++; } } void yyyUnsolvedInstSearchTrav(yyyGNT *pNode) {yyyGNT **yyyCLpdum; int i; yyyCLpdum = pNode->cL; i = pNode->cLlen; while (i--) { yyyUnsolvedInstSearchTravAux(*yyyCLpdum); yyyCLpdum++; } } #line 1645 "expr.oxout.tab.c" #if YYDEBUG #include <stdio.h> /* needed for printf */ #endif #include <stdlib.h> /* needed for malloc, etc */ #include <string.h> /* needed for memset */ /* allocate initial stack or double stack size, up to YYMAXDEPTH */ static int yygrowstack(YYSTACKDATA *data) { int i; unsigned newsize; YYINT *newss; YYSTYPE *newvs; if ((newsize = data->stacksize) == 0) newsize = YYINITSTACKSIZE; else if (newsize >= YYMAXDEPTH) return YYENOMEM; else if ((newsize *= 2) > YYMAXDEPTH) newsize = YYMAXDEPTH; i = (int) (data->s_mark - data->s_base); newss = (YYINT *)realloc(data->s_base, newsize * sizeof(*newss)); if (newss == 0) return YYENOMEM; data->s_base = newss; data->s_mark = newss + i; newvs = (YYSTYPE *)realloc(data->l_base, newsize * sizeof(*newvs)); if (newvs == 0) return YYENOMEM; data->l_base = newvs; data->l_mark = newvs + i; data->stacksize = newsize; data->s_last = data->s_base + newsize - 1; return 0; } #if YYPURE || defined(YY_NO_LEAKS) static void yyfreestack(YYSTACKDATA *data) { free(data->s_base); free(data->l_base); memset(data, 0, sizeof(*data)); } #else #define yyfreestack(data) /* nothing */ #endif #define YYABORT goto yyabort #define YYREJECT goto yyabort #define YYACCEPT goto yyaccept #define YYERROR goto yyerrlab int YYPARSE_DECL() { int yym, yyn, yystate; #if YYDEBUG const char *yys; if ((yys = getenv("YYDEBUG")) != 0) { yyn = *yys; if (yyn >= '0' && yyn <= '9') yydebug = yyn - '0'; } #endif yym = 0; yyn = 0; yynerrs = 0; yyerrflag = 0; yychar = YYEMPTY; yystate = 0; #if YYPURE memset(&yystack, 0, sizeof(yystack)); #endif if (yystack.s_base == NULL && yygrowstack(&yystack) == YYENOMEM) goto yyoverflow; yystack.s_mark = yystack.s_base; yystack.l_mark = yystack.l_base; yystate = 0;
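/* Sketch of the table scheme driving the loop below: for state s and lookahead t, yyn = yysindex[s] + t names a shift entry when 0 <= yyn <= YYTABLESIZE and yycheck[yyn] == t, in which case yytable[yyn] is the next state; yyrindex selects reductions the same way, while yydefred[s] short-circuits states whose only possible action is a reduction. */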
*yystack.s_mark = 0; yyloop: if ((yyn = yydefred[yystate]) != 0) goto yyreduce; if (yychar < 0) { yychar = YYLEX; if (yychar < 0) yychar = YYEOF; #if YYDEBUG if (yydebug) { if ((yys = yyname[YYTRANSLATE(yychar)]) == NULL) yys = yyname[YYUNDFTOKEN]; printf("%sdebug: state %d, reading %d (%s)\n", YYPREFIX, yystate, yychar, yys); } #endif } if (((yyn = yysindex[yystate]) != 0) && (yyn += yychar) >= 0 && yyn <= YYTABLESIZE && yycheck[yyn] == (YYINT) yychar) { #if YYDEBUG if (yydebug) printf("%sdebug: state %d, shifting to state %d\n", YYPREFIX, yystate, yytable[yyn]); #endif if (yystack.s_mark >= yystack.s_last && yygrowstack(&yystack) == YYENOMEM) goto yyoverflow; yystate = yytable[yyn]; *++yystack.s_mark = yytable[yyn]; *++yystack.l_mark = yylval; yychar = YYEMPTY; if (yyerrflag > 0) --yyerrflag; goto yyloop; } if (((yyn = yyrindex[yystate]) != 0) && (yyn += yychar) >= 0 && yyn <= YYTABLESIZE && yycheck[yyn] == (YYINT) yychar) { yyn = yytable[yyn]; goto yyreduce; } if (yyerrflag != 0) goto yyinrecovery; YYERROR_CALL("syntax error"); goto yyerrlab; /* redundant goto avoids 'unused label' warning */ yyerrlab: ++yynerrs; yyinrecovery: if (yyerrflag < 3) { yyerrflag = 3; for (;;) { if (((yyn = yysindex[*yystack.s_mark]) != 0) && (yyn += YYERRCODE) >= 0 && yyn <= YYTABLESIZE && yycheck[yyn] == (YYINT) YYERRCODE) { #if YYDEBUG if (yydebug) printf("%sdebug: state %d, error recovery shifting\ to state %d\n", YYPREFIX, *yystack.s_mark, yytable[yyn]); #endif if (yystack.s_mark >= yystack.s_last && yygrowstack(&yystack) == YYENOMEM) goto yyoverflow; yystate = yytable[yyn]; *++yystack.s_mark = yytable[yyn]; *++yystack.l_mark = yylval; goto yyloop; } else { #if YYDEBUG if (yydebug) printf("%sdebug: error recovery discarding state %d\n", YYPREFIX, *yystack.s_mark); #endif if (yystack.s_mark <= yystack.s_base) goto yyabort; --yystack.s_mark; --yystack.l_mark; } } } else { if (yychar == YYEOF) goto yyabort; #if YYDEBUG if (yydebug) { if ((yys = yyname[YYTRANSLATE(yychar)]) == NULL) yys = yyname[YYUNDFTOKEN]; printf("%sdebug: state %d, error recovery discards token %d (%s)\n", YYPREFIX, yystate, yychar, yys); } #endif yychar = YYEMPTY; goto yyloop; } yyreduce: #if YYDEBUG if (yydebug) printf("%sdebug: state %d, reducing by rule %d (%s)\n", YYPREFIX, yystate, yyn, yyrule[yyn]); #endif yym = yylen[yyn]; if (yym > 0) yyval = yystack.l_mark[1-yym]; else memset(&yyval, 0, sizeof yyval); switch (yyn) { case 1: #line 64 "expr.oxout.y" {yyyYoxInit();} break; case 2: #line 66 "expr.oxout.y" { yyyDecorate(); yyyExecuteRRsection(yystack.l_mark[0].yyyOxAttrbs.yyyOxStackItem->node); } break; case 3: #line 73 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(1,1,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(1,1,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 4: #line 80 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(2,3,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(2,3,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 5: #line 87 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(3,3,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(3,3,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 6: #line 94 "expr.oxout.y" {if(yyyYok){ 
yyyGenIntNode(4,3,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(4,3,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 7: #line 101 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(5,3,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(5,3,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 8: #line 108 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(6,3,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(6,3,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[-2].yyyOxAttrbs,&yystack.l_mark[-1].yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 9: #line 114 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(7,1,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(7,1,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; case 10: #line 121 "expr.oxout.y" {if(yyyYok){ yyyGenIntNode(8,1,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs); yyyAdjustINRC(8,1,0,0,&yyval.yyyOxAttrbs,&yystack.l_mark[0].yyyOxAttrbs);}} break; #line 1902 "expr.oxout.tab.c" } yystack.s_mark -= yym; yystate = *yystack.s_mark; yystack.l_mark -= yym; yym = yylhs[yyn]; if (yystate == 0 && yym == 0) { #if YYDEBUG if (yydebug) printf("%sdebug: after reduction, shifting from state 0 to\ state %d\n", YYPREFIX, YYFINAL); #endif yystate = YYFINAL; *++yystack.s_mark = YYFINAL; *++yystack.l_mark = yyval; if (yychar < 0) { yychar = YYLEX; if (yychar < 0) yychar = YYEOF; #if YYDEBUG if (yydebug) { if ((yys = yyname[YYTRANSLATE(yychar)]) == NULL) yys = yyname[YYUNDFTOKEN]; printf("%sdebug: state %d, reading %d (%s)\n", YYPREFIX, YYFINAL, yychar, yys); } #endif } if (yychar == YYEOF) goto yyaccept; goto yyloop; } if (((yyn = yygindex[yym]) != 0) && (yyn += yystate) >= 0 && yyn <= YYTABLESIZE && yycheck[yyn] == (YYINT) yystate) yystate = yytable[yyn]; else yystate = yydgoto[yym]; #if YYDEBUG if (yydebug) printf("%sdebug: after reduction, shifting from state %d \ to state %d\n", YYPREFIX, *yystack.s_mark, yystate); #endif if (yystack.s_mark >= yystack.s_last && yygrowstack(&yystack) == YYENOMEM) goto yyoverflow; *++yystack.s_mark = (YYINT) yystate; *++yystack.l_mark = yyval; goto yyloop; yyoverflow: YYERROR_CALL("yacc stack overflow"); yyabort: yyfreestack(&yystack); return (1); yyaccept: yyfreestack(&yystack); return (0); } Index: projects/netbsd-tests-upstream-01-2017/contrib/byacc =================================================================== --- projects/netbsd-tests-upstream-01-2017/contrib/byacc (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/contrib/byacc (revision 313399) Property changes on: projects/netbsd-tests-upstream-01-2017/contrib/byacc ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/contrib/byacc:r313244-313398 Index: projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_common.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_common.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_common.c (revision 313399) @@ -1,69 +1,45 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All 
rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include -void -cpu_reset(void) -{ - uint32_t paddr; - bus_addr_t vaddr; - phandle_t node; - - if (rstmgr_warmreset() == 0) - goto end; - - node = OF_finddevice("rstmgr"); - if (node == -1) - goto end; - - if ((OF_getencprop(node, "reg", &paddr, sizeof(paddr))) > 0) { - if (bus_space_map(fdtbus_bs_tag, paddr, 0x8, 0, &vaddr) == 0) { - bus_space_write_4(fdtbus_bs_tag, vaddr, - RSTMGR_CTRL, CTRL_SWWARMRSTREQ); - } - } - -end: - while (1); -} Index: projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_machdep.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_machdep.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_machdep.c (revision 313399) @@ -1,101 +1,123 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ -#include "opt_ddb.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include + #include #include +#include #include #include +#include -vm_offset_t -platform_lastaddr(void) -{ +#include +#include - return (devmap_lastaddr()); -} +#include "platform_if.h" -void -platform_probe_and_attach(void) +static int +socfpga_devmap_init(platform_t plat) { -} - -void -platform_gpio_init(void) -{ - -} - -void -platform_late_init(void) -{ - -} - -int -platform_devmap_init(void) -{ - /* UART */ devmap_add_entry(0xffc00000, 0x100000); /* * USB OTG * * We use static device map for USB due to some bug in the Altera * which throws Translation Fault (P) exception on high load. * It might be caused due to some power save options being turned * on or something else. */ devmap_add_entry(0xffb00000, 0x100000); /* dwmmc */ devmap_add_entry(0xff700000, 0x100000); /* scu */ devmap_add_entry(0xfff00000, 0x100000); /* FPGA memory window, 256MB */ devmap_add_entry(0xd0000000, 0x10000000); return (0); } + +static void +socfpga_cpu_reset(platform_t plat) +{ + uint32_t paddr; + bus_addr_t vaddr; + phandle_t node; + + if (rstmgr_warmreset() == 0) + goto end; + + node = OF_finddevice("rstmgr"); + if (node == -1) + goto end; + + if ((OF_getencprop(node, "reg", &paddr, sizeof(paddr))) > 0) { + if (bus_space_map(fdtbus_bs_tag, paddr, 0x8, 0, &vaddr) == 0) { + bus_space_write_4(fdtbus_bs_tag, vaddr, + RSTMGR_CTRL, CTRL_SWWARMRSTREQ); + } + } + +end: + while (1); +} + +static platform_method_t socfpga_methods[] = { + PLATFORMMETHOD(platform_devmap_init, socfpga_devmap_init), + PLATFORMMETHOD(platform_cpu_reset, socfpga_cpu_reset), + +#ifdef SMP + PLATFORMMETHOD(platform_mp_setmaxid, socfpga_mp_setmaxid), + PLATFORMMETHOD(platform_mp_start_ap, socfpga_mp_start_ap), +#endif + + PLATFORMMETHOD_END, +}; + +FDT_PLATFORM_DEF(socfpga, "socfpga", 0, "altr,socfpga", 0); Index: projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.c (revision 313399) @@ -1,160 +1,165 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include "opt_platform.h" + #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include +#include +#include + #define SCU_PHYSBASE 0xFFFEC000 #define SCU_SIZE 0x100 #define SCU_CONTROL_REG 0x00 #define SCU_CONTROL_ENABLE (1 << 0) #define SCU_CONFIG_REG 0x04 #define SCU_CONFIG_REG_NCPU_MASK 0x03 #define SCU_CPUPOWER_REG 0x08 #define SCU_INV_TAGS_REG 0x0c #define SCU_DIAG_CONTROL 0x30 #define SCU_DIAG_DISABLE_MIGBIT (1 << 0) #define SCU_FILTER_START_REG 0x40 #define SCU_FILTER_END_REG 0x44 #define SCU_SECURE_ACCESS_REG 0x50 #define SCU_NONSECURE_ACCESS_REG 0x54 #define RSTMGR_PHYSBASE 0xFFD05000 #define RSTMGR_SIZE 0x100 #define MPUMODRST 0x10 #define MPUMODRST_CPU1 (1 << 1) #define RAM_PHYSBASE 0x0 #define RAM_SIZE 0x1000 extern char *mpentry_addr; static void socfpga_trampoline(void); static void socfpga_trampoline(void) { __asm __volatile( "ldr pc, 1f\n" ".globl mpentry_addr\n" "mpentry_addr:\n" "1: .space 4\n"); } void -platform_mp_setmaxid(void) +socfpga_mp_setmaxid(platform_t plat) { int hwcpu, ncpu; /* If we've already set this don't bother to do it again. */ if (mp_ncpus != 0) return; hwcpu = 2; ncpu = hwcpu; TUNABLE_INT_FETCH("hw.ncpu", &ncpu); if (ncpu < 1 || ncpu > hwcpu) ncpu = hwcpu; mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void -platform_mp_start_ap(void) +socfpga_mp_start_ap(platform_t plat) { bus_space_handle_t scu, rst, ram; int reg; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Couldn't map the SCU\n"); if (bus_space_map(fdtbus_bs_tag, RSTMGR_PHYSBASE, RSTMGR_SIZE, 0, &rst) != 0) panic("Couldn't map the reset manager (RSTMGR)\n"); if (bus_space_map(fdtbus_bs_tag, RAM_PHYSBASE, RAM_SIZE, 0, &ram) != 0) panic("Couldn't map the first physram page\n"); /* Invalidate SCU cache tags */ bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff); /* * Erratum ARM/MP: 764369 (problems with cache maintenance). * Setting the "disable-migratory bit" in the undocumented SCU * Diagnostic Control Register helps work around the problem. 
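 * (Concretely, the read-modify-write just below ORs SCU_DIAG_DISABLE_MIGBIT, bit 0 of the register at offset SCU_DIAG_CONTROL, into the SCU before CPU1 is brought out of reset.)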
*/ reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL); reg |= (SCU_DIAG_DISABLE_MIGBIT); bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, reg); /* Put CPU1 to reset state */ bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, MPUMODRST_CPU1); /* Enable the SCU, then clean the cache on this core */ reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG); reg |= (SCU_CONTROL_ENABLE); bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, reg); /* Set up trampoline code */ mpentry_addr = (char *)pmap_kextract((vm_offset_t)mpentry); bus_space_write_region_4(fdtbus_bs_tag, ram, 0, (uint32_t *)&socfpga_trampoline, 8); dcache_wbinv_poc_all(); /* Put CPU1 out from reset */ bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, 0); dsb(); sev(); bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); bus_space_unmap(fdtbus_bs_tag, rst, RSTMGR_SIZE); bus_space_unmap(fdtbus_bs_tag, ram, RAM_SIZE); } Index: projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.h =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.h (nonexistent) +++ projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.h (revision 313399) @@ -0,0 +1,34 @@ +/*- + * Copyright (c) 2017 Andrew Turner + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SOCFPGA_MP_H_ +#define _SOCFPGA_MP_H_ + +void socfpga_mp_setmaxid(platform_t); +void socfpga_mp_start_ap(platform_t); + +#endif /* _SOCFPGA_MP_H_ */ Property changes on: projects/netbsd-tests-upstream-01-2017/sys/arm/altera/socfpga/socfpga_mp.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: projects/netbsd-tests-upstream-01-2017/sys/arm/conf/SOCKIT.common =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm/conf/SOCKIT.common (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm/conf/SOCKIT.common (revision 313399) @@ -1,94 +1,96 @@ # # Kernel configuration for Terasic SoCKit (Altera Cyclone V SoC). 
# # For more information on this file, please read the config(5) manual page, # and/or the handbook section on Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ include "std.armv6" include "../altera/socfpga/std.socfpga" makeoptions MODULES_OVERRIDE="" makeoptions WERROR="-Werror" options SCHED_ULE # ULE scheduler +options PLATFORM # Platform based SoC +options PLATFORM_SMP options SMP # Enable multiple cores # NFS root from boopt/dhcp #options BOOTP #options BOOTP_NFSROOT #options BOOTP_COMPAT #options BOOTP_NFSV3 #options BOOTP_WIRED_TO=ue0 # Interrupt controller device gic options INTRNG # ARM MPCore timer device mpcore_timer # MMC/SD/SDIO Card slot support device mmc # mmc/sd bus device mmcsd # mmc/sd flash cards device dwmmc # Pseudo devices device loop device random device pty device md device gpio # USB support options USB_HOST_ALIGN=64 # Align usb buffers to cache line size. device usb #device musb device dwcotg device umass device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) device pass # Serial ports device uart device uart_ns8250 # I2C (TWSI) device iic device iicbus # SPI device spibus # Ethernet device ether device mii device smsc device smscphy device dwc device micphy # USB ethernet support, requires miibus device miibus device axe # ASIX Electronics USB Ethernet device bpf # Berkeley packet filter # Flattened Device Tree options FDT # Configure using FDT/DTB data Index: projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/exception.S =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/exception.S (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/exception.S (revision 313399) @@ -1,221 +1,223 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "assym.s" .text .macro save_registers el .if \el == 1 mov x18, sp sub sp, sp, #128 .endif sub sp, sp, #(TF_SIZE + 16) stp x29, x30, [sp, #(TF_SIZE)] stp x28, x29, [sp, #(TF_X + 28 * 8)] stp x26, x27, [sp, #(TF_X + 26 * 8)] stp x24, x25, [sp, #(TF_X + 24 * 8)] stp x22, x23, [sp, #(TF_X + 22 * 8)] stp x20, x21, [sp, #(TF_X + 20 * 8)] stp x18, x19, [sp, #(TF_X + 18 * 8)] stp x16, x17, [sp, #(TF_X + 16 * 8)] stp x14, x15, [sp, #(TF_X + 14 * 8)] stp x12, x13, [sp, #(TF_X + 12 * 8)] stp x10, x11, [sp, #(TF_X + 10 * 8)] stp x8, x9, [sp, #(TF_X + 8 * 8)] stp x6, x7, [sp, #(TF_X + 6 * 8)] stp x4, x5, [sp, #(TF_X + 4 * 8)] stp x2, x3, [sp, #(TF_X + 2 * 8)] stp x0, x1, [sp, #(TF_X + 0 * 8)] mrs x10, elr_el1 mrs x11, spsr_el1 + mrs x12, esr_el1 .if \el == 0 mrs x18, sp_el0 .endif - stp x10, x11, [sp, #(TF_ELR)] + str x10, [sp, #(TF_ELR)] + stp w11, w12, [sp, #(TF_SPSR)] stp x18, lr, [sp, #(TF_SP)] mrs x18, tpidr_el1 add x29, sp, #(TF_SIZE) .endm .macro restore_registers el .if \el == 1 msr daifset, #2 /* * Disable interrupts, x18 may change in the interrupt exception * handler. For EL0 exceptions, do_ast already did this. */ .endif ldp x18, lr, [sp, #(TF_SP)] ldp x10, x11, [sp, #(TF_ELR)] .if \el == 0 msr sp_el0, x18 .endif msr spsr_el1, x11 msr elr_el1, x10 ldp x0, x1, [sp, #(TF_X + 0 * 8)] ldp x2, x3, [sp, #(TF_X + 2 * 8)] ldp x4, x5, [sp, #(TF_X + 4 * 8)] ldp x6, x7, [sp, #(TF_X + 6 * 8)] ldp x8, x9, [sp, #(TF_X + 8 * 8)] ldp x10, x11, [sp, #(TF_X + 10 * 8)] ldp x12, x13, [sp, #(TF_X + 12 * 8)] ldp x14, x15, [sp, #(TF_X + 14 * 8)] ldp x16, x17, [sp, #(TF_X + 16 * 8)] .if \el == 0 /* * We only restore the callee saved registers when returning to * userland as they may have been updated by a system call or signal. 
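 * For returns to EL1 only x29 is reloaded (the .else branch below); the kernel itself preserves the remaining callee-saved registers across the exception.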
*/ ldp x18, x19, [sp, #(TF_X + 18 * 8)] ldp x20, x21, [sp, #(TF_X + 20 * 8)] ldp x22, x23, [sp, #(TF_X + 22 * 8)] ldp x24, x25, [sp, #(TF_X + 24 * 8)] ldp x26, x27, [sp, #(TF_X + 26 * 8)] ldp x28, x29, [sp, #(TF_X + 28 * 8)] .else ldr x29, [sp, #(TF_X + 29 * 8)] .endif .if \el == 0 add sp, sp, #(TF_SIZE + 16) .else mov sp, x18 mrs x18, tpidr_el1 .endif .endm .macro do_ast /* Disable interrupts */ mrs x19, daif 1: msr daifset, #2 /* Read the current thread flags */ ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */ ldr x2, [x1, #TD_FLAGS] /* Check if we have either bits set */ mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8) lsl x3, x3, #8 and x2, x2, x3 cbz x2, 2f /* Restore interrupts */ msr daif, x19 /* handle the ast */ mov x0, sp bl _C_LABEL(ast) /* Re-check for new ast scheduled */ b 1b 2: .endm ENTRY(handle_el1h_sync) save_registers 1 mov x0, sp bl do_el1h_sync restore_registers 1 eret END(handle_el1h_sync) ENTRY(handle_el1h_irq) save_registers 1 mov x0, sp bl intr_irq_handler restore_registers 1 eret END(handle_el1h_irq) ENTRY(handle_el1h_error) brk 0xf13 END(handle_el1h_error) ENTRY(handle_el0_sync) save_registers 0 mov x0, sp bl do_el0_sync do_ast restore_registers 0 eret END(handle_el0_sync) ENTRY(handle_el0_irq) save_registers 0 mov x0, sp bl intr_irq_handler do_ast restore_registers 0 eret END(handle_el0_irq) ENTRY(handle_el0_error) save_registers 0 mov x0, sp bl do_el0_error brk 0xf23 1: b 1b END(handle_el0_error) .macro vempty .align 7 brk 0xfff 1: b 1b .endm .macro vector name .align 7 b handle_\name .endm .align 11 .globl exception_vectors exception_vectors: vempty /* Synchronous EL1t */ vempty /* IRQ EL1t */ vempty /* FIQ EL1t */ vempty /* Error EL1t */ vector el1h_sync /* Synchronous EL1h */ vector el1h_irq /* IRQ EL1h */ vempty /* FIQ EL1h */ vector el1h_error /* Error EL1h */ vector el0_sync /* Synchronous 64-bit EL0 */ vector el0_irq /* IRQ 64-bit EL0 */ vempty /* FIQ 64-bit EL0 */ vector el0_error /* Error 64-bit EL0 */ vempty /* Synchronous 32-bit EL0 */ vempty /* IRQ 32-bit EL0 */ vempty /* FIQ 32-bit EL0 */ vempty /* Error 32-bit EL0 */ Index: projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/genassym.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/genassym.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/genassym.c (revision 313399) @@ -1,65 +1,66 @@ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(PCPU_SIZE, sizeof(struct pcpu)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); /* Size of pcb, rounded to keep stack alignment */ ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1)); ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT); ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_L0ADDR, offsetof(struct pcb, pcb_l0addr)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TF_SIZE, sizeof(struct trapframe)); ASSYM(TF_SP, offsetof(struct trapframe, tf_sp)); ASSYM(TF_ELR, offsetof(struct trapframe, tf_elr)); +ASSYM(TF_SPSR, offsetof(struct trapframe, tf_spsr)); ASSYM(TF_X, offsetof(struct trapframe, tf_x)); Index: projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/trap.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/trap.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm64/arm64/trap.c (revision 313399) @@ -1,434 +1,434 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #ifdef KDB #include #endif #include #include #include #include #include #include #include #include #include #ifdef KDTRACE_HOOKS #include #endif #ifdef VFP #include #endif #ifdef KDB #include #endif #ifdef DDB #include #endif extern register_t fsu_intr_fault; /* Called from exception.S */ void do_el1h_sync(struct trapframe *); void do_el0_sync(struct trapframe *); void do_el0_error(struct trapframe *); static void print_registers(struct trapframe *frame); int (*dtrace_invop_jump_addr)(struct trapframe *); static __inline void call_trapsignal(struct thread *td, int sig, int code, void *addr) { ksiginfo_t ksi; ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = code; ksi.ksi_addr = addr; trapsignal(td, &ksi); } int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct proc *p; register_t *ap; int nap; nap = 8; p = td->td_proc; ap = td->td_frame->tf_x; sa->code = td->td_frame->tf_x[8]; if (sa->code == SYS_syscall || sa->code == SYS___syscall) { sa->code = *ap++; nap--; } if (p->p_sysent->sv_mask) sa->code &= p->p_sysent->sv_mask; if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; memcpy(sa->args, ap, nap * sizeof(register_t)); if (sa->narg > nap) panic("ARM64TODO: Could we have more than 8 args?"); td->td_retval[0] = 0; td->td_retval[1] = 0; return (0); } #include "../../kern/subr_syscall.c" static void svc_handler(struct trapframe *frame) { struct syscall_args sa; struct thread *td; int error; td = curthread; error = syscallenter(td, &sa); syscallret(td, error, &sa); } static void data_abort(struct trapframe *frame, uint64_t esr, uint64_t far, int lower) { struct vm_map *map; struct thread *td; struct proc *p; struct pcb *pcb; vm_prot_t ftype; vm_offset_t va; int error, sig, ucode; /* * According to the ARMv8-A rev. A.g, B2.10.5 "Load-Exclusive * and Store-Exclusive instruction usage restrictions", state * of the exclusive monitors after data abort exception is unknown. */ clrex(); #ifdef KDB if (kdb_active) { kdb_reenter(); return; } #endif td = curthread; pcb = td->td_pcb; /* * Special case for fuswintr and suswintr. These can't sleep so * handle them early on in the trap handler. */ if (__predict_false(pcb->pcb_onfault == (vm_offset_t)&fsu_intr_fault)) { frame->tf_elr = pcb->pcb_onfault; return; } p = td->td_proc; if (lower) map = &p->p_vmspace->vm_map; else { /* The top bit tells us which range to use */ if ((far >> 63) == 1) { map = kernel_map; } else { map = &p->p_vmspace->vm_map; if (map == NULL) map = kernel_map; } } if (pmap_fault(map->pmap, esr, far) == KERN_SUCCESS) return; KASSERT(td->td_md.md_spinlock_count == 0, ("data abort with spinlock held")); if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL, "Kernel page fault") != 0) { print_registers(frame); printf(" far: %16lx\n", far); printf(" esr: %.8lx\n", esr); panic("data abort in critical section or under mutex"); } va = trunc_page(far); ftype = ((esr >> 6) & 1) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ; /* Fault in the page. 
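 * (The ftype above is derived from ESR.WnR, ISS bit 6: it is set when
 * the abort was caused by a write, so the fault asks for read/write
 * access; otherwise read access suffices.)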
*/ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); if (error != KERN_SUCCESS) { if (lower) { sig = SIGSEGV; if (error == KERN_PROTECTION_FAILURE) ucode = SEGV_ACCERR; else ucode = SEGV_MAPERR; call_trapsignal(td, sig, ucode, (void *)far); } else { if (td->td_intr_nesting_level == 0 && pcb->pcb_onfault != 0) { frame->tf_x[0] = error; frame->tf_elr = pcb->pcb_onfault; return; } printf("Fatal data abort:\n"); print_registers(frame); printf(" far: %16lx\n", far); printf(" esr: %.8lx\n", esr); #ifdef KDB if (debugger_on_panic || kdb_active) if (kdb_trap(ESR_ELx_EXCEPTION(esr), 0, frame)) return; #endif panic("vm_fault failed: %lx", frame->tf_elr); } } if (lower) userret(td, frame); } static void print_registers(struct trapframe *frame) { u_int reg; for (reg = 0; reg < nitems(frame->tf_x); reg++) { printf(" %sx%d: %16lx\n", (reg < 10) ? " " : "", reg, frame->tf_x[reg]); } printf(" sp: %16lx\n", frame->tf_sp); printf(" lr: %16lx\n", frame->tf_lr); printf(" elr: %16lx\n", frame->tf_elr); - printf("spsr: %16lx\n", frame->tf_spsr); + printf("spsr: %8x\n", frame->tf_spsr); } void do_el1h_sync(struct trapframe *frame) { uint32_t exception; uint64_t esr, far; /* Read the esr register to get the exception details */ - esr = READ_SPECIALREG(esr_el1); + esr = frame->tf_esr; exception = ESR_ELx_EXCEPTION(esr); #ifdef KDTRACE_HOOKS if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, exception)) return; #endif CTR4(KTR_TRAP, "do_el1_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", curthread, esr, frame->tf_elr, frame); switch(exception) { case EXCP_FP_SIMD: case EXCP_TRAP_FP: #ifdef VFP if ((curthread->td_pcb->pcb_fpflags & PCB_FP_KERN) != 0) { vfp_restore_state(); } else #endif { print_registers(frame); printf(" esr: %.8lx\n", esr); panic("VFP exception in the kernel"); } break; case EXCP_INSN_ABORT: case EXCP_DATA_ABORT: far = READ_SPECIALREG(far_el1); intr_enable(); data_abort(frame, esr, far, 0); break; case EXCP_BRK: #ifdef KDTRACE_HOOKS if ((esr & ESR_ELx_ISS_MASK) == 0x40d && \ dtrace_invop_jump_addr != 0) { dtrace_invop_jump_addr(frame); break; } #endif /* FALLTHROUGH */ case EXCP_WATCHPT_EL1: case EXCP_SOFTSTP_EL1: #ifdef KDB kdb_trap(exception, 0, frame); #else panic("No debugger in kernel.\n"); #endif break; default: print_registers(frame); panic("Unknown kernel exception %x esr_el1 %lx\n", exception, esr); } } /* * The attempted execution of an instruction bit pattern that has no allocated * instruction results in an exception with an unknown reason. 
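 * (Before the handler, a hedged sketch of the status packing that lets
 * do_el1h_sync() above read the syndrome out of the frame.)
 */

/*
 * Sketch only: with the new frame.h layout, the single "stp w11, w12"
 * in exception.S fills tf_spsr and tf_esr in one store.  The helper
 * name is hypothetical and assumes little-endian arm64, where the
 * first register of the pair lands in the low word.
 */
static inline void
example_unpack_status(uint64_t packed, uint32_t *spsr, uint32_t *esr)
{

	*spsr = (uint32_t)packed;		/* low word: w11 (spsr) */
	*esr = (uint32_t)(packed >> 32);	/* high word: w12 (esr) */
}

/*
 * Delivery of the unknown-instruction exception to userland: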
*/ static void el0_excp_unknown(struct trapframe *frame, uint64_t far) { struct thread *td; td = curthread; call_trapsignal(td, SIGILL, ILL_ILLTRP, (void *)far); userret(td, frame); } void do_el0_sync(struct trapframe *frame) { struct thread *td; uint32_t exception; uint64_t esr, far; /* Check we have a sane environment when entering from userland */ KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS, ("Invalid pcpu address from userland: %p (tpidr %lx)", get_pcpu(), READ_SPECIALREG(tpidr_el1))); td = curthread; td->td_frame = frame; - esr = READ_SPECIALREG(esr_el1); + esr = frame->tf_esr; exception = ESR_ELx_EXCEPTION(esr); switch (exception) { case EXCP_UNKNOWN: case EXCP_INSN_ABORT_L: case EXCP_DATA_ABORT_L: case EXCP_DATA_ABORT: far = READ_SPECIALREG(far_el1); } intr_enable(); CTR4(KTR_TRAP, "do_el0_sync: curthread: %p, esr %lx, elr: %lx, frame: %p", curthread, esr, frame->tf_elr, frame); switch(exception) { case EXCP_FP_SIMD: case EXCP_TRAP_FP: #ifdef VFP vfp_restore_state(); #else panic("VFP exception in userland"); #endif break; case EXCP_SVC: svc_handler(frame); break; case EXCP_INSN_ABORT_L: case EXCP_DATA_ABORT_L: case EXCP_DATA_ABORT: data_abort(frame, esr, far, 1); break; case EXCP_UNKNOWN: el0_excp_unknown(frame, far); break; case EXCP_SP_ALIGN: call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp); userret(td, frame); break; case EXCP_PC_ALIGN: call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr); userret(td, frame); break; case EXCP_BRK: call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr); userret(td, frame); break; case EXCP_MSR: call_trapsignal(td, SIGILL, ILL_PRVOPC, (void *)frame->tf_elr); userret(td, frame); break; case EXCP_SOFTSTP_EL0: td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; WRITE_SPECIALREG(MDSCR_EL1, READ_SPECIALREG(MDSCR_EL1) & ~DBG_MDSCR_SS); call_trapsignal(td, SIGTRAP, TRAP_TRACE, (void *)frame->tf_elr); userret(td, frame); break; default: call_trapsignal(td, SIGBUS, BUS_OBJERR, (void *)frame->tf_elr); userret(td, frame); break; } KASSERT((curthread->td_pcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Kernel VFP flags set while entering userspace")); KASSERT( curthread->td_pcb->pcb_fpusaved == &curthread->td_pcb->pcb_fpustate, ("Kernel VFP state in use when entering userspace")); } void do_el0_error(struct trapframe *frame) { panic("ARM64TODO: do_el0_error"); } Index: projects/netbsd-tests-upstream-01-2017/sys/arm64/include/frame.h =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/arm64/include/frame.h (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/arm64/include/frame.h (revision 313399) @@ -1,74 +1,75 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_FRAME_H_
#define _MACHINE_FRAME_H_

#ifndef LOCORE

#include
#include

/*
 * NOTE: keep this structure in sync with struct reg and struct mcontext.
 */
struct trapframe {
	uint64_t tf_sp;
	uint64_t tf_lr;
	uint64_t tf_elr;
-	uint64_t tf_spsr;
+	uint32_t tf_spsr;
+	uint32_t tf_esr;
	uint64_t tf_x[30];
};

struct arm64_frame {
	struct arm64_frame *f_frame;
	u_long f_retaddr;
};

/*
 * Signal frame, pushed onto the user stack
 */
struct sigframe {
	siginfo_t sf_si;	/* actual saved siginfo */
	ucontext_t sf_uc;	/* actual saved ucontext */
};

/*
 * There is no fixed frame layout, other than to be 16-byte aligned
 */
struct frame {
	int dummy;
};

#endif /* !LOCORE */

#endif /* !_MACHINE_FRAME_H_ */

Index: projects/netbsd-tests-upstream-01-2017/sys/boot/efi/libefi/Makefile
===================================================================
--- projects/netbsd-tests-upstream-01-2017/sys/boot/efi/libefi/Makefile	(revision 313398)
+++ projects/netbsd-tests-upstream-01-2017/sys/boot/efi/libefi/Makefile	(revision 313399)
@@ -1,52 +1,53 @@
# $FreeBSD$

.include

.if ${MK_FORTH} != "no"
CFLAGS+=	-DBOOT_FORTH
.include "${.CURDIR}/../../Makefile.ficl"
.endif

LIB=	efi
INTERNALLIB=
WARNS?=	2

SRCS=	delay.c devpath.c efi_console.c efinet.c efipart.c env.c errno.c \
	handles.c wchar.c libefi.c

.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
SRCS+=	time.c
.elif ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "arm"
SRCS+=	time_event.c
.endif

# We implement a slightly non-standard %S in that it always takes a
# CHAR16 that's common in UEFI-land instead of a wchar_t. This only
# seems to matter on arm64 where wchar_t defaults to an int instead
# of a short. There's no good cast to use here so just ignore the
# warnings for now.
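# (Illustration only, not part of the build: the clash is a call like
#	printf("%S", (CHAR16 *)str);
# where -Wformat expects a wchar_t string; wchar_t is an int on arm64
# while CHAR16 is 16 bits, hence the per-file overrides below.)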
CWARNFLAGS.efinet.c+= -Wno-format +CWARNFLAGS.efipart.c+= -Wno-format CWARNFLAGS.env.c+= -Wno-format .if ${MACHINE_CPUARCH} == "aarch64" CFLAGS+= -msoft-float -mgeneral-regs-only .endif .if ${MACHINE_ARCH} == "amd64" CFLAGS+= -fPIC -mno-red-zone .endif CFLAGS+= -I${.CURDIR}/../include CFLAGS+= -I${.CURDIR}/../include/${MACHINE} CFLAGS+= -I${.CURDIR}/../../../../lib/libstand # Pick up the bootstrap header for some interface items CFLAGS+= -I${.CURDIR}/../../common # Handle FreeBSD specific %b and %D printf format specifiers CFLAGS+= ${FORMAT_EXTENSIONS} # Do not use TERM_EMU on arm and arm64 as it doesn't behave well with serial console .if ${MACHINE_CPUARCH} != "arm" && ${MACHINE_CPUARCH} != "aarch64" CFLAGS+= -DTERM_EMU .endif .include Index: projects/netbsd-tests-upstream-01-2017/sys/kern/init_main.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/init_main.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/init_main.c (revision 313399) @@ -1,858 +1,858 @@ /*- * Copyright (c) 1995 Terrence R. Lambert * All rights reserved. * * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)init_main.c 8.9 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_init_path.h" #include "opt_verbose_sysinit.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void mi_startup(void); /* Should be elsewhere */ /* Components of the first process -- never freed. */ static struct session session0; static struct pgrp pgrp0; struct proc proc0; -struct thread0_storage thread0_st __aligned(16); +struct thread0_storage thread0_st __aligned(32); struct vmspace vmspace0; struct proc *initproc; #ifndef BOOTHOWTO #define BOOTHOWTO 0 #endif int boothowto = BOOTHOWTO; /* initialized so that it can be patched */ SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0, "Boot control flags, passed from loader"); #ifndef BOOTVERBOSE #define BOOTVERBOSE 0 #endif int bootverbose = BOOTVERBOSE; SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0, "Control the output of verbose kernel messages"); #ifdef INVARIANTS FEATURE(invariants, "Kernel compiled with INVARIANTS, may affect performance"); #endif /* * This ensures that there is at least one entry so that the sysinit_set * symbol is not undefined. A sybsystem ID of SI_SUB_DUMMY is never * executed. */ SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL); /* * The sysinit table itself. Items are checked off as the are run. * If we want to register new sysinit types, add them to newsysinit. */ SET_DECLARE(sysinit_set, struct sysinit); struct sysinit **sysinit, **sysinit_end; struct sysinit **newsysinit, **newsysinit_end; /* * Merge a new sysinit set into the current set, reallocating it if * necessary. This can only be called after malloc is running. */ void sysinit_add(struct sysinit **set, struct sysinit **set_end) { struct sysinit **newset; struct sysinit **sipp; struct sysinit **xipp; int count; count = set_end - set; if (newsysinit) count += newsysinit_end - newsysinit; else count += sysinit_end - sysinit; newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT); if (newset == NULL) panic("cannot malloc for sysinit"); xipp = newset; if (newsysinit) for (sipp = newsysinit; sipp < newsysinit_end; sipp++) *xipp++ = *sipp; else for (sipp = sysinit; sipp < sysinit_end; sipp++) *xipp++ = *sipp; for (sipp = set; sipp < set_end; sipp++) *xipp++ = *sipp; if (newsysinit) free(newsysinit, M_TEMP); newsysinit = newset; newsysinit_end = newset + count; } #if defined (DDB) && defined(VERBOSE_SYSINIT) static const char * symbol_name(vm_offset_t va, db_strategy_t strategy) { const char *name; c_db_sym_t sym; db_expr_t offset; if (va == 0) return (NULL); sym = db_search_symbol(va, strategy, &offset); if (offset != 0) return (NULL); db_symbol_values(sym, &name, NULL); return (name); } #endif /* * System startup; initialize the world, create process 0, mount root * filesystem, and fork to create init and pagedaemon. Most of the * hard work is done in the lower-level initialization routines including * startup(), which does memory initialization and autoconfiguration. * * This allows simple addition of new kernel subsystems that require * boot time initialization. It also allows substitution of subsystem * (for instance, a scheduler, kernel profiler, or VM system) by object * module. 
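 * (A hedged SYSINIT() registration sketch follows this comment; the
 * narrative resumes after it.)
 */

/*
 * Sketch only: the shape of an entry as mi_startup() consumes it.  The
 * names and the subsystem/order pair below are illustrative; entries
 * are sorted by (subsystem, order) before being run.
 */
static void
example_announce(void *udata)
{

	printf("example: %s\n", (const char *)udata);
}
SYSINIT(example, SI_SUB_LOCK, SI_ORDER_MIDDLE, example_announce,
    "sorted and run by mi_startup");

/*
 *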
Finally, it allows for optional "kernel threads". */ void mi_startup(void) { struct sysinit **sipp; /* system initialization*/ struct sysinit **xipp; /* interior loop of sort*/ struct sysinit *save; /* bubble*/ #if defined(VERBOSE_SYSINIT) int last; int verbose; #endif if (boothowto & RB_VERBOSE) bootverbose++; if (sysinit == NULL) { sysinit = SET_BEGIN(sysinit_set); sysinit_end = SET_LIMIT(sysinit_set); } restart: /* * Perform a bubble sort of the system initialization objects by * their subsystem (primary key) and order (secondary key). */ for (sipp = sysinit; sipp < sysinit_end; sipp++) { for (xipp = sipp + 1; xipp < sysinit_end; xipp++) { if ((*sipp)->subsystem < (*xipp)->subsystem || ((*sipp)->subsystem == (*xipp)->subsystem && (*sipp)->order <= (*xipp)->order)) continue; /* skip*/ save = *sipp; *sipp = *xipp; *xipp = save; } } #if defined(VERBOSE_SYSINIT) last = SI_SUB_COPYRIGHT; verbose = 0; #if !defined(DDB) printf("VERBOSE_SYSINIT: DDB not enabled, symbol lookups disabled.\n"); #endif #endif /* * Traverse the (now) ordered list of system initialization tasks. * Perform each task, and continue on to the next task. */ for (sipp = sysinit; sipp < sysinit_end; sipp++) { if ((*sipp)->subsystem == SI_SUB_DUMMY) continue; /* skip dummy task(s)*/ if ((*sipp)->subsystem == SI_SUB_DONE) continue; #if defined(VERBOSE_SYSINIT) if ((*sipp)->subsystem > last) { verbose = 1; last = (*sipp)->subsystem; printf("subsystem %x\n", last); } if (verbose) { #if defined(DDB) const char *func, *data; func = symbol_name((vm_offset_t)(*sipp)->func, DB_STGY_PROC); data = symbol_name((vm_offset_t)(*sipp)->udata, DB_STGY_ANY); if (func != NULL && data != NULL) printf(" %s(&%s)... ", func, data); else if (func != NULL) printf(" %s(%p)... ", func, (*sipp)->udata); else #endif printf(" %p(%p)... ", (*sipp)->func, (*sipp)->udata); } #endif /* Call function */ (*((*sipp)->func))((*sipp)->udata); #if defined(VERBOSE_SYSINIT) if (verbose) printf("done.\n"); #endif /* Check off the one we're just done */ (*sipp)->subsystem = SI_SUB_DONE; /* Check if we've installed more sysinit items via KLD */ if (newsysinit != NULL) { if (sysinit != SET_BEGIN(sysinit_set)) free(sysinit, M_TEMP); sysinit = newsysinit; sysinit_end = newsysinit_end; newsysinit = NULL; newsysinit_end = NULL; goto restart; } } mtx_assert(&Giant, MA_OWNED | MA_NOTRECURSED); mtx_unlock(&Giant); /* * Now hand over this thread to swapper. */ swapper(); /* NOTREACHED*/ } static void print_caddr_t(void *data) { printf("%s", (char *)data); } static void print_version(void *data __unused) { int len; /* Strip a trailing newline from version. 
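 * (version comes from newvers.sh and normally ends in '\n'; stripping
 * it lets machine be printed on the same line.)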
*/ len = strlen(version); while (len > 0 && version[len - 1] == '\n') len--; printf("%.*s %s\n", len, version, machine); printf("%s\n", compiler_version); } SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t, copyright); SYSINIT(trademark, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t, trademark); SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_THIRD, print_version, NULL); #ifdef WITNESS static char wit_warn[] = "WARNING: WITNESS option enabled, expect reduced performance.\n"; SYSINIT(witwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 1, print_caddr_t, wit_warn); SYSINIT(witwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 1, print_caddr_t, wit_warn); #endif #ifdef DIAGNOSTIC static char diag_warn[] = "WARNING: DIAGNOSTIC option enabled, expect reduced performance.\n"; SYSINIT(diagwarn, SI_SUB_COPYRIGHT, SI_ORDER_THIRD + 2, print_caddr_t, diag_warn); SYSINIT(diagwarn2, SI_SUB_LAST, SI_ORDER_THIRD + 2, print_caddr_t, diag_warn); #endif static int null_fetch_syscall_args(struct thread *td __unused, struct syscall_args *sa __unused) { panic("null_fetch_syscall_args"); } static void null_set_syscall_retval(struct thread *td __unused, int error __unused) { panic("null_set_syscall_retval"); } struct sysentvec null_sysvec = { .sv_size = 0, .sv_table = NULL, .sv_mask = 0, .sv_errsize = 0, .sv_errtbl = NULL, .sv_transtrap = NULL, .sv_fixup = NULL, .sv_sendsig = NULL, .sv_sigcode = NULL, .sv_szsigcode = NULL, .sv_name = "null", .sv_coredump = NULL, .sv_imgact_try = NULL, .sv_minsigstksz = 0, .sv_pagesize = PAGE_SIZE, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_psstrings = PS_STRINGS, .sv_stackprot = VM_PROT_ALL, .sv_copyout_strings = NULL, .sv_setregs = NULL, .sv_fixlimit = NULL, .sv_maxssiz = NULL, .sv_flags = 0, .sv_set_syscall_retval = null_set_syscall_retval, .sv_fetch_syscall_args = null_fetch_syscall_args, .sv_syscallnames = NULL, .sv_schedtail = NULL, .sv_thread_detach = NULL, .sv_trap = NULL, }; /* * The two following SYSINIT's are proc0 specific glue code. I am not * convinced that they can not be safely combined, but their order of * operation has been maintained as the same as the original init_main.c * for right now. */ /* ARGSUSED*/ static void proc0_init(void *dummy __unused) { struct proc *p; struct thread *td; struct ucred *newcred; vm_paddr_t pageablemem; int i; GIANT_REQUIRED; p = &proc0; td = &thread0; /* * Initialize magic number and osrel. */ p->p_magic = P_MAGIC; p->p_osrel = osreldate; /* * Initialize thread and process structures. */ procinit(); /* set up proc zone */ threadinit(); /* set up UMA zones */ /* * Initialise scheduler resources. * Add scheduler specific parts to proc, thread as needed. */ schedinit(); /* scheduler gets its house in order */ /* * Create process 0 (the swapper). 
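 * (The allproc list and the pid/pgrp hash chains are populated by
 * hand, since fork1() is not usable this early in boot.)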
*/ LIST_INSERT_HEAD(&allproc, p, p_list); LIST_INSERT_HEAD(PIDHASH(0), p, p_hash); mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK); p->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist); pgrp0.pg_session = &session0; mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF); refcount_init(&session0.s_count, 1); session0.s_leader = p; p->p_sysent = &null_sysvec; p->p_flag = P_SYSTEM | P_INMEM | P_KPROC; p->p_flag2 = 0; p->p_state = PRS_NORMAL; p->p_klist = knlist_alloc(&p->p_mtx); STAILQ_INIT(&p->p_ktr); p->p_nice = NZERO; /* pid_max cannot be greater than PID_MAX */ td->td_tid = PID_MAX + 1; LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); td->td_state = TDS_RUNNING; td->td_pri_class = PRI_TIMESHARE; td->td_user_pri = PUSER; td->td_base_user_pri = PUSER; td->td_lend_user_pri = PRI_MAX; td->td_priority = PVM; td->td_base_pri = PVM; td->td_oncpu = curcpu; td->td_flags = TDF_INMEM; td->td_pflags = TDP_KTHREAD; td->td_cpuset = cpuset_thread0(); vm_domain_policy_init(&td->td_vm_dom_policy); vm_domain_policy_set(&td->td_vm_dom_policy, VM_POLICY_NONE, -1); vm_domain_policy_init(&p->p_vm_dom_policy); vm_domain_policy_set(&p->p_vm_dom_policy, VM_POLICY_NONE, -1); prison0_init(); p->p_peers = 0; p->p_leader = p; p->p_reaper = p; LIST_INIT(&p->p_reaplist); strncpy(p->p_comm, "kernel", sizeof (p->p_comm)); strncpy(td->td_name, "swapper", sizeof (td->td_name)); callout_init_mtx(&p->p_itcallout, &p->p_mtx, 0); callout_init_mtx(&p->p_limco, &p->p_mtx, 0); callout_init(&td->td_slpcallout, 1); /* Create credentials. */ newcred = crget(); newcred->cr_ngroups = 1; /* group 0 */ newcred->cr_uidinfo = uifind(0); newcred->cr_ruidinfo = uifind(0); newcred->cr_prison = &prison0; newcred->cr_loginclass = loginclass_find("default"); proc_set_cred_init(p, newcred); #ifdef AUDIT audit_cred_kproc0(newcred); #endif #ifdef MAC mac_cred_create_swapper(newcred); #endif /* Create sigacts. */ p->p_sigacts = sigacts_alloc(); /* Initialize signal state for process 0. */ siginit(&proc0); /* Create the file descriptor table. */ p->p_fd = fdinit(NULL, false); p->p_fdtol = NULL; /* Create the limits structures. */ p->p_limit = lim_alloc(); for (i = 0; i < RLIM_NLIMITS; i++) p->p_limit->pl_rlimit[i].rlim_cur = p->p_limit->pl_rlimit[i].rlim_max = RLIM_INFINITY; p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles; p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz; p->p_limit->pl_rlimit[RLIMIT_DATA].rlim_max = maxdsiz; p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz; /* Cast to avoid overflow on i386/PAE. */ pageablemem = ptoa((vm_paddr_t)vm_cnt.v_free_count); p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem; p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3; p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = pageablemem; p->p_cpulimit = RLIM_INFINITY; PROC_LOCK(p); thread_cow_get_proc(td, p); PROC_UNLOCK(p); /* Initialize resource accounting structures. */ racct_create(&p->p_racct); p->p_stats = pstats_alloc(); /* Allocate a prototype map so we have something to fork. 
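 * (vmspace0 is statically allocated; pmap_pinit0() below sets up its
 * pmap, giving fork a valid source map even though proc0 itself never
 * enters userspace.)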
*/ p->p_vmspace = &vmspace0; vmspace0.vm_refcnt = 1; pmap_pinit0(vmspace_pmap(&vmspace0)); /* * proc0 is not expected to enter usermode, so there is no special * handling for sv_minuser here, like is done for exec_new_vmspace(). */ vm_map_init(&vmspace0.vm_map, vmspace_pmap(&vmspace0), p->p_sysent->sv_minuser, p->p_sysent->sv_maxuser); /* * Call the init and ctor for the new thread and proc. We wait * to do this until all other structures are fairly sane. */ EVENTHANDLER_INVOKE(process_init, p); EVENTHANDLER_INVOKE(thread_init, td); EVENTHANDLER_INVOKE(process_ctor, p); EVENTHANDLER_INVOKE(thread_ctor, td); /* * Charge root for one process. */ (void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0); PROC_LOCK(p); racct_add_force(p, RACCT_NPROC, 1); PROC_UNLOCK(p); } SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL); /* ARGSUSED*/ static void proc0_post(void *dummy __unused) { struct timespec ts; struct proc *p; struct rusage ru; struct thread *td; /* * Now we can look at the time, having had a chance to verify the * time from the filesystem. Pretend that proc0 started now. */ sx_slock(&allproc_lock); FOREACH_PROC_IN_SYSTEM(p) { microuptime(&p->p_stats->p_start); PROC_STATLOCK(p); rufetch(p, &ru); /* Clears thread stats */ PROC_STATUNLOCK(p); p->p_rux.rux_runtime = 0; p->p_rux.rux_uticks = 0; p->p_rux.rux_sticks = 0; p->p_rux.rux_iticks = 0; FOREACH_THREAD_IN_PROC(p, td) { td->td_runtime = 0; } } sx_sunlock(&allproc_lock); PCPU_SET(switchtime, cpu_ticks()); PCPU_SET(switchticks, ticks); /* * Give the ``random'' number generator a thump. */ nanotime(&ts); srandom(ts.tv_sec ^ ts.tv_nsec); } SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL); static void random_init(void *dummy __unused) { /* * After CPU has been started we have some randomness on most * platforms via get_cyclecount(). For platforms that don't * we will reseed random(9) in proc0_post() as well. */ srandom(get_cyclecount()); } SYSINIT(random, SI_SUB_RANDOM, SI_ORDER_FIRST, random_init, NULL); /* *************************************************************************** **** **** The following SYSINIT's and glue code should be moved to the **** respective files on a per subsystem basis. **** *************************************************************************** */ /* * List of paths to try when searching for "init". */ static char init_path[MAXPATHLEN] = #ifdef INIT_PATH __XSTRING(INIT_PATH); #else "/sbin/init:/sbin/oinit:/sbin/init.bak:/rescue/init"; #endif SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0, "Path used to search the init process"); /* * Shutdown timeout of init(8). * Unused within kernel, but used to control init(8), hence do not remove. */ #ifndef INIT_SHUTDOWN_TIMEOUT #define INIT_SHUTDOWN_TIMEOUT 120 #endif static int init_shutdown_timeout = INIT_SHUTDOWN_TIMEOUT; SYSCTL_INT(_kern, OID_AUTO, init_shutdown_timeout, CTLFLAG_RW, &init_shutdown_timeout, 0, "Shutdown timeout of init(8). " "Unused within kernel, but used to control init(8)"); /* * Start the initial user process; try exec'ing each pathname in init_path. * The program is invoked with one argument containing the boot flags. */ static void start_init(void *dummy) { vm_offset_t addr; struct execve_args args; int options, error; char *var, *path, *next, *s; char *ucp, **uap, *arg0, *arg1; struct thread *td; struct proc *p; mtx_lock(&Giant); GIANT_REQUIRED; td = curthread; p = td->td_proc; vfs_mountroot(); /* Wipe GELI passphrase from the environment. 
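 * (so the boot-time secret cannot be read back out of the kernel
 * environment later, e.g. with kenv(1))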
*/ kern_unsetenv("kern.geom.eli.passphrase"); /* * Need just enough stack to hold the faked-up "execve()" arguments. */ addr = p->p_sysent->sv_usrstack - PAGE_SIZE; if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE, 0, VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0) panic("init: couldn't allocate argument space"); p->p_vmspace->vm_maxsaddr = (caddr_t)addr; p->p_vmspace->vm_ssize = 1; if ((var = kern_getenv("init_path")) != NULL) { strlcpy(init_path, var, sizeof(init_path)); freeenv(var); } for (path = init_path; *path != '\0'; path = next) { while (*path == ':') path++; if (*path == '\0') break; for (next = path; *next != '\0' && *next != ':'; next++) /* nothing */ ; if (bootverbose) printf("start_init: trying %.*s\n", (int)(next - path), path); /* * Move out the boot flag argument. */ options = 0; ucp = (char *)p->p_sysent->sv_usrstack; (void)subyte(--ucp, 0); /* trailing zero */ if (boothowto & RB_SINGLE) { (void)subyte(--ucp, 's'); options = 1; } #ifdef notyet if (boothowto & RB_FASTBOOT) { (void)subyte(--ucp, 'f'); options = 1; } #endif #ifdef BOOTCDROM (void)subyte(--ucp, 'C'); options = 1; #endif if (options == 0) (void)subyte(--ucp, '-'); (void)subyte(--ucp, '-'); /* leading hyphen */ arg1 = ucp; /* * Move out the file name (also arg 0). */ (void)subyte(--ucp, 0); for (s = next - 1; s >= path; s--) (void)subyte(--ucp, *s); arg0 = ucp; /* * Move out the arg pointers. */ uap = (char **)rounddown2((intptr_t)ucp, sizeof(intptr_t)); (void)suword((caddr_t)--uap, (long)0); /* terminator */ (void)suword((caddr_t)--uap, (long)(intptr_t)arg1); (void)suword((caddr_t)--uap, (long)(intptr_t)arg0); /* * Point at the arguments. */ args.fname = arg0; args.argv = uap; args.envv = NULL; /* * Now try to exec the program. If can't for any reason * other than it doesn't exist, complain. * * Otherwise, return via fork_trampoline() all the way * to user mode as init! */ if ((error = sys_execve(td, &args)) == 0) { mtx_unlock(&Giant); return; } if (error != ENOENT) printf("exec %.*s: error %d\n", (int)(next - path), path, error); } printf("init: not found in path %s\n", init_path); panic("no init"); } /* * Like kproc_create(), but runs in its own address space. * We do this early to reserve pid 1. * * Note special case - do not make it runnable yet. Other work * in progress will change this more. */ static void create_init(const void *udata __unused) { struct fork_req fr; struct ucred *newcred, *oldcred; struct thread *td; int error; bzero(&fr, sizeof(fr)); fr.fr_flags = RFFDG | RFPROC | RFSTOPPED; fr.fr_procp = &initproc; error = fork1(&thread0, &fr); if (error) panic("cannot fork init: %d\n", error); KASSERT(initproc->p_pid == 1, ("create_init: initproc->p_pid != 1")); /* divorce init's credentials from the kernel's */ newcred = crget(); sx_xlock(&proctree_lock); PROC_LOCK(initproc); initproc->p_flag |= P_SYSTEM | P_INMEM; initproc->p_treeflag |= P_TREE_REAPER; LIST_INSERT_HEAD(&initproc->p_reaplist, &proc0, p_reapsibling); oldcred = initproc->p_ucred; crcopy(newcred, oldcred); #ifdef MAC mac_cred_create_init(newcred); #endif #ifdef AUDIT audit_cred_proc1(newcred); #endif proc_set_cred(initproc, newcred); td = FIRST_THREAD_IN_PROC(initproc); crfree(td->td_ucred); td->td_ucred = crhold(initproc->p_ucred); PROC_UNLOCK(initproc); sx_xunlock(&proctree_lock); crfree(oldcred); cpu_fork_kthread_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL); } SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL); /* * Make it runnable now. 
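 * (kick_init() below does that.  First, an aside: a hedged userland
 * illustration of the argv block start_init() above builds on the new
 * process's stack with subyte()/suword(); all names here are made up.)
 */

static char **
example_build_argv(char *top, const char *path, const char *flags)
{
	char *ucp, **uap, *arg0, *arg1;

	ucp = top;
	ucp -= strlen(flags) + 1;	/* boot flag string, e.g. "-s" */
	memcpy(ucp, flags, strlen(flags) + 1);
	arg1 = ucp;
	ucp -= strlen(path) + 1;	/* file name, also argv[0] */
	memcpy(ucp, path, strlen(path) + 1);
	arg0 = ucp;
	/* Pointer array below the strings, aligned and NULL-terminated. */
	uap = (char **)rounddown2((uintptr_t)ucp, sizeof(char *));
	*--uap = NULL;
	*--uap = arg1;
	*--uap = arg0;
	return (uap);
}

/*
 * Make the init thread runnable: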
*/ static void kick_init(const void *udata __unused) { struct thread *td; td = FIRST_THREAD_IN_PROC(initproc); thread_lock(td); TD_SET_CAN_RUN(td); sched_add(td, SRQ_BORING); thread_unlock(td); } SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_MIDDLE, kick_init, NULL); Index: projects/netbsd-tests-upstream-01-2017/sys/kern/kern_mutex.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/kern_mutex.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/kern_mutex.c (revision 313399) @@ -1,1144 +1,1108 @@ /*- * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $ */ /* * Machine independent bits of mutex implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_adaptive_mutexes.h" #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES) #define ADAPTIVE_MUTEXES #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DEFINE( , , lock, failed); #endif /* * Return the mutex address when the lock cookie address is provided. * This functionality assumes that struct mtx* have a member named mtx_lock. */ #define mtxlock2mtx(c) (__containerof(c, struct mtx, mtx_lock)) /* * Internal utility macros. 
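 * (First, a hedged illustration of the __containerof() idiom behind
 * mtxlock2mtx() above; the structure names below are invented.)
 */

struct example_cookie { uintptr_t word; };
struct example_mtx {
	const char *name;
	struct example_cookie cookie;
};

/*
 * Recover the enclosing object from a pointer to one of its members by
 * subtracting the member's offset; __containerof() expands to the same
 * pointer arithmetic.
 */
#define example_cookie2mtx(c) \
	((struct example_mtx *)((char *)(c) - \
	    offsetof(struct example_mtx, cookie)))

/*
 * The utility macros themselves: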
*/ #define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED) static void assert_mtx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_mtx(const struct lock_object *lock); #endif static void lock_mtx(struct lock_object *lock, uintptr_t how); static void lock_spin(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_mtx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_mtx(struct lock_object *lock); static uintptr_t unlock_spin(struct lock_object *lock); /* * Lock classes for sleep and spin mutexes. */ struct lock_class lock_class_mtx_sleep = { .lc_name = "sleep mutex", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_mtx, .lc_unlock = unlock_mtx, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; struct lock_class lock_class_mtx_spin = { .lc_name = "spin mutex", .lc_flags = LC_SPINLOCK | LC_RECURSABLE, .lc_assert = assert_mtx, #ifdef DDB .lc_ddb_show = db_show_mtx, #endif .lc_lock = lock_spin, .lc_unlock = unlock_spin, #ifdef KDTRACE_HOOKS .lc_owner = owner_mtx, #endif }; #ifdef ADAPTIVE_MUTEXES static SYSCTL_NODE(_debug, OID_AUTO, mtx, CTLFLAG_RD, NULL, "mtx debugging"); -static struct lock_delay_config __read_mostly mtx_delay = { - .initial = 1000, - .step = 500, - .min = 100, - .max = 5000, -}; +static struct lock_delay_config __read_mostly mtx_delay; -SYSCTL_INT(_debug_mtx, OID_AUTO, delay_initial, CTLFLAG_RW, &mtx_delay.initial, +SYSCTL_INT(_debug_mtx, OID_AUTO, delay_base, CTLFLAG_RW, &mtx_delay.base, 0, ""); -SYSCTL_INT(_debug_mtx, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_delay.step, - 0, ""); -SYSCTL_INT(_debug_mtx, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_delay.min, - 0, ""); SYSCTL_INT(_debug_mtx, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_delay.max, 0, ""); -static void -mtx_delay_sysinit(void *dummy) -{ - - mtx_delay.initial = mp_ncpus * 25; - mtx_delay.step = (mp_ncpus * 25) / 2; - mtx_delay.min = mp_ncpus * 5; - mtx_delay.max = mp_ncpus * 25 * 10; -} -LOCK_DELAY_SYSINIT(mtx_delay_sysinit); +LOCK_DELAY_SYSINIT_DEFAULT(mtx_delay); #endif static SYSCTL_NODE(_debug, OID_AUTO, mtx_spin, CTLFLAG_RD, NULL, "mtx spin debugging"); -static struct lock_delay_config __read_mostly mtx_spin_delay = { - .initial = 1000, - .step = 500, - .min = 100, - .max = 5000, -}; +static struct lock_delay_config __read_mostly mtx_spin_delay; -SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_initial, CTLFLAG_RW, - &mtx_spin_delay.initial, 0, ""); -SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_step, CTLFLAG_RW, &mtx_spin_delay.step, - 0, ""); -SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_min, CTLFLAG_RW, &mtx_spin_delay.min, - 0, ""); -SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, &mtx_spin_delay.max, - 0, ""); +SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_base, CTLFLAG_RW, + &mtx_spin_delay.base, 0, ""); +SYSCTL_INT(_debug_mtx_spin, OID_AUTO, delay_max, CTLFLAG_RW, + &mtx_spin_delay.max, 0, ""); -static void -mtx_spin_delay_sysinit(void *dummy) -{ - - mtx_spin_delay.initial = mp_ncpus * 25; - mtx_spin_delay.step = (mp_ncpus * 25) / 2; - mtx_spin_delay.min = mp_ncpus * 5; - mtx_spin_delay.max = mp_ncpus * 25 * 10; -} -LOCK_DELAY_SYSINIT(mtx_spin_delay_sysinit); +LOCK_DELAY_SYSINIT_DEFAULT(mtx_spin_delay); /* * System-wide mutexes */ struct mtx blocked_lock; struct mtx Giant; void assert_mtx(const struct lock_object *lock, int what) { mtx_assert((const struct mtx *)lock, what); } void 
lock_mtx(struct lock_object *lock, uintptr_t how) { mtx_lock((struct mtx *)lock); } void lock_spin(struct lock_object *lock, uintptr_t how) { panic("spin locks can only use msleep_spin"); } uintptr_t unlock_mtx(struct lock_object *lock) { struct mtx *m; m = (struct mtx *)lock; mtx_assert(m, MA_OWNED | MA_NOTRECURSED); mtx_unlock(m); return (0); } uintptr_t unlock_spin(struct lock_object *lock) { panic("spin locks can only use msleep_spin"); } #ifdef KDTRACE_HOOKS int owner_mtx(const struct lock_object *lock, struct thread **owner) { const struct mtx *m; uintptr_t x; m = (const struct mtx *)lock; x = m->mtx_lock; *owner = (struct thread *)(x & ~MTX_FLAGMASK); return (x != MTX_UNOWNED); } #endif /* * Function versions of the inlined __mtx_* macros. These are used by * modules and can also be called from assembly language if needed. */ void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; v = MTX_UNOWNED; if (!_mtx_obtain_lock_fetch(m, &v, tid)) _mtx_lock_sleep(m, v, tid, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, 0, 0, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_sleep(c, opts, file, line); TD_LOCKS_DEC(curthread); } void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_lock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); __mtx_lock_spin(m, curthread, opts, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); } int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { 
struct mtx *m; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_trylock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); KASSERT((opts & MTX_RECURSE) == 0, ("mtx_trylock_spin: unsupp. opt MTX_RECURSE on mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); if (__mtx_trylock_spin(m, curthread, opts, file, line)) { LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); return (1); } LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line); return (0); } void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("mtx_unlock_spin() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file, line); mtx_assert(m, MA_OWNED); __mtx_unlock_spin(m); } /* * The important part of mtx_trylock{,_flags}() * Tries to acquire lock `m.' If this function is called on a mutex that * is already owned, it will recursively acquire the lock. */ int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int rval; if (SCHEDULER_STOPPED()) return (1); m = mtxlock2mtx(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_trylock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0)) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); rval = 1; } else rval = _mtx_obtain_lock(m, (uintptr_t)curthread); opts &= ~MTX_RECURSE; LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line); if (rval) { WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); TD_LOCKS_INC(curthread); if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); } return (rval); } /* * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
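 * (A hedged sketch of the lock_delay() backoff used while spinning
 * follows; the real code lives in kern/subr_lock.c and differs in
 * detail.)
 */

/*
 * Sketch only: bounded exponential backoff driven by the delay_base/
 * delay_max tunables introduced above.  Names are illustrative.
 */
struct example_delay {
	u_int delay;		/* current backoff, in spin iterations */
	u_int spin_cnt;		/* total iterations, for lockstat */
};

static void
example_lock_delay(struct example_delay *la, u_int base, u_int max)
{
	u_int i;

	la->delay <<= 1;		/* double the wait on each failure */
	if (la->delay > max)
		la->delay = max;
	if (la->delay < base)
		la->delay = base;
	for (i = 0; i < la->delay; i++)
		cpu_spinwait();		/* CPU pause/yield hint */
	la->spin_cnt += la->delay;
}

/*
 * The contested-acquire path itself: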
*/ void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line) { struct mtx *m; struct turnstile *ts; #ifdef ADAPTIVE_MUTEXES volatile struct thread *owner; #endif #ifdef KTR int cont_logged = 0; #endif #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #if defined(ADAPTIVE_MUTEXES) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_MUTEXES) lock_delay_arg_init(&lda, &mtx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif m = mtxlock2mtx(c); if (__predict_false(v == MTX_UNOWNED)) v = MTX_READ_VALUE(m); if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 || (opts & MTX_RECURSE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); opts &= ~MTX_RECURSE; m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); return; } opts &= ~MTX_RECURSE; #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef ADAPTIVE_MUTEXES /* * If the owner is running on another CPU, spin until the * owner stops running or the state of the lock changes. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&m->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, m, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); do { lock_delay(&lda); v = MTX_READ_VALUE(m); owner = lv_mtx_owner(v); } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); continue; } #endif ts = turnstile_trywait(&m->lock_object); v = MTX_READ_VALUE(m); /* * Check if the lock has been released while spinning for * the turnstile chain lock. */ if (v == MTX_UNOWNED) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_MUTEXES /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ owner = lv_mtx_owner(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } #endif /* * If the mutex isn't already contested and a failure occurs * setting the contested bit, the mutex was either released * or the state of the MTX_RECURSED bit changed. */ if ((v & MTX_CONTESTED) == 0 && !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) { turnstile_cancel(ts); v = MTX_READ_VALUE(m); continue; } /* * We definitely must sleep for this lock. 
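 * (turnstile_wait() below parks the thread on the lock's turnstile;
 * __mtx_unlock_sleep() wakes it when the owner releases.)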
*/ mtx_assert(m, MA_NOTOWNED); #ifdef KTR if (!cont_logged) { CTR6(KTR_CONTENTION, "contention: %p at %s:%d wants %s, taken by %s:%d", (void *)tid, file, line, m->lock_object.lo_name, WITNESS_FILE(&m->lock_object), WITNESS_LINE(&m->lock_object)); cont_logged = 1; } #endif /* * Block on the turnstile. */ #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&m->lock_object); #endif turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&m->lock_object); sleep_cnt++; #endif v = MTX_READ_VALUE(m); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&m->lock_object); #endif #ifdef KTR if (cont_logged) { CTR4(KTR_CONTENTION, "contention end: %s acquired by %p at %s:%d", m->lock_object.lo_name, (void *)tid, file, line); } #endif LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested, waittime, file, line); #ifdef KDTRACE_HOOKS if (sleep_time) LOCKSTAT_RECORD1(adaptive__block, m, sleep_time); /* * Only record the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time); #endif } static void _mtx_lock_spin_failed(struct mtx *m) { struct thread *td; td = mtx_owner(m); /* If the mutex is unlocked, try again. */ if (td == NULL) return; printf( "spin lock %p (%s) held by %p (tid %d) too long\n", m, m->lock_object.lo_name, td, td->td_tid); #ifdef WITNESS witness_display_spinlock(&m->lock_object, td, printf); #endif panic("spin lock held too long"); } #ifdef SMP /* * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. */ void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts, const char *file, int line) { struct mtx *m; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif if (SCHEDULER_STOPPED()) return; lock_delay_arg_init(&lda, &mtx_spin_delay); m = mtxlock2mtx(c); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "spinning", "lockname:\"%s\"", m->lock_object.lo_name); #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); #ifdef KDTRACE_HOOKS spin_time -= lockstat_nsecs(&m->lock_object); #endif for (;;) { if (v == MTX_UNOWNED) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; continue; } /* Give interrupts a chance while we spin. 
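 * (spinlock_exit() below leaves the spinlock section, which re-enables
 * interrupts once the outermost section is exited, so clock ticks and
 * IPIs are not held off for the duration of a long busy-wait.)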
*/ spinlock_exit(); do { if (lda.spin_cnt < 10000000) { lock_delay(&lda); } else { lda.spin_cnt++; if (lda.spin_cnt < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); } v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid), "running"); #ifdef KDTRACE_HOOKS LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); if (spin_time != 0) LOCKSTAT_RECORD1(spin__spin, m, spin_time); #endif } #endif /* SMP */ void thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid, v; struct lock_delay_arg lda; #ifdef LOCK_PROFILING int contested = 0; uint64_t waittime = 0; #endif #ifdef KDTRACE_HOOKS int64_t spin_time = 0; #endif tid = (uintptr_t)curthread; if (SCHEDULER_STOPPED()) { /* * Ensure that spinlock sections are balanced even when the * scheduler is stopped, since we may otherwise inadvertently * re-enable interrupts while dumping core. */ spinlock_enter(); return; } lock_delay_arg_init(&lda, &mtx_spin_delay); #ifdef KDTRACE_HOOKS spin_time -= lockstat_nsecs(&td->td_lock->lock_object); #endif for (;;) { retry: v = MTX_UNOWNED; spinlock_enter(); m = td->td_lock; KASSERT(m->mtx_lock != MTX_DESTROYED, ("thread_lock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, ("thread_lock() of sleep mutex %s @ %s:%d", m->lock_object.lo_name, file, line)); if (mtx_owned(m)) KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); for (;;) { if (_mtx_obtain_lock_fetch(m, &v, tid)) break; if (v == MTX_UNOWNED) continue; if (v == tid) { m->mtx_recurse++; break; } #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); /* Give interrupts a chance while we spin. 
*/ spinlock_exit(); do { if (lda.spin_cnt < 10000000) { lock_delay(&lda); } else { lda.spin_cnt++; if (lda.spin_cnt < 60000000 || kdb_active || panicstr != NULL) DELAY(1); else _mtx_lock_spin_failed(m); cpu_spinwait(); } if (m != td->td_lock) goto retry; v = MTX_READ_VALUE(m); } while (v != MTX_UNOWNED); spinlock_enter(); } if (m == td->td_lock) break; __mtx_unlock_spin(m); /* does spinlock_exit() */ } #ifdef KDTRACE_HOOKS spin_time += lockstat_nsecs(&m->lock_object); #endif if (m->mtx_recurse == 0) LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, m, contested, waittime, file, line); LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file, line); WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line); #ifdef KDTRACE_HOOKS if (spin_time != 0) LOCKSTAT_RECORD1(thread__spin, m, spin_time); #endif } struct mtx * thread_lock_block(struct thread *td) { struct mtx *lock; THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = &blocked_lock; mtx_unlock_spin(lock); return (lock); } void thread_lock_unblock(struct thread *td, struct mtx *new) { mtx_assert(new, MA_OWNED); MPASS(td->td_lock == &blocked_lock); atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new); } void thread_lock_set(struct thread *td, struct mtx *new) { struct mtx *lock; mtx_assert(new, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); lock = td->td_lock; td->td_lock = new; mtx_unlock_spin(lock); } /* * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed or contested (i.e. we * need to wake up a blocked thread). */ void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line) { struct mtx *m; struct turnstile *ts; if (SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); if (!mtx_recursed(m)) { LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m); if (_mtx_release_lock(m, (uintptr_t)curthread)) return; } else { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); return; } /* * We have to lock the chain before the turnstile so this turnstile * can be removed from the hash list if it is empty. */ turnstile_chain_lock(&m->lock_object); ts = turnstile_lookup(&m->lock_object); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); MPASS(ts != NULL); turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); _mtx_release_lock_quick(m); /* * This turnstile is no longer associated with the mutex. We can * unlock the chain lock so a new turnstile may take its place. */ turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); turnstile_chain_unlock(&m->lock_object); } /* * All the unlocking of MTX_SPIN locks is done inline. * See the __mtx_unlock_spin() macro for the details.
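 * (Editor's note, a sketch under assumptions rather than a quote of the macro: in outline, the inline spin unlock is a release store of MTX_UNOWNED to mtx_lock followed by spinlock_exit(), with recursion handled by just decrementing mtx_recurse.)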
*/ /* * The backing function for the INVARIANTS-enabled mtx_assert() */ #ifdef INVARIANT_SUPPORT void __mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct mtx *m; if (panicstr != NULL || dumping || SCHEDULER_STOPPED()) return; m = mtxlock2mtx(c); switch (what) { case MA_OWNED: case MA_OWNED | MA_RECURSED: case MA_OWNED | MA_NOTRECURSED: if (!mtx_owned(m)) panic("mutex %s not owned at %s:%d", m->lock_object.lo_name, file, line); if (mtx_recursed(m)) { if ((what & MA_NOTRECURSED) != 0) panic("mutex %s recursed at %s:%d", m->lock_object.lo_name, file, line); } else if ((what & MA_RECURSED) != 0) { panic("mutex %s unrecursed at %s:%d", m->lock_object.lo_name, file, line); } break; case MA_NOTOWNED: if (mtx_owned(m)) panic("mutex %s owned at %s:%d", m->lock_object.lo_name, file, line); break; default: panic("unknown mtx_assert at %s:%d", file, line); } } #endif /* * General init routine used by the MTX_SYSINIT() macro. */ void mtx_sysinit(void *arg) { struct mtx_args *margs = arg; mtx_init((struct mtx *)margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); } /* * Mutex initialization routine; initialize lock `m' of type contained in * `opts' with options contained in `opts' and name `name.' The optional * lock type `type' is used as a general lock category name for use with * witness. */ void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts) { struct mtx *m; struct lock_class *class; int flags; m = mtxlock2mtx(c); MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p", __func__, name, &m->mtx_lock)); /* Determine lock class and lock flags. */ if (opts & MTX_SPIN) class = &lock_class_mtx_spin; else class = &lock_class_mtx_sleep; flags = 0; if (opts & MTX_QUIET) flags |= LO_QUIET; if (opts & MTX_RECURSE) flags |= LO_RECURSABLE; if ((opts & MTX_NOWITNESS) == 0) flags |= LO_WITNESS; if (opts & MTX_DUPOK) flags |= LO_DUPOK; if (opts & MTX_NOPROFILE) flags |= LO_NOPROFILE; if (opts & MTX_NEW) flags |= LO_NEW; /* Initialize mutex. */ lock_init(&m->lock_object, class, name, type, flags); m->mtx_lock = MTX_UNOWNED; m->mtx_recurse = 0; } /* * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be * passed in as a flag here because if the corresponding mtx_init() was * called with MTX_QUIET set, then it will already be set in the mutex's * flags. */ void _mtx_destroy(volatile uintptr_t *c) { struct mtx *m; m = mtxlock2mtx(c); if (!mtx_owned(m)) MPASS(mtx_unowned(m)); else { MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); /* Perform the non-mtx related part of mtx_unlock_spin(). */ if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) spinlock_exit(); else TD_LOCKS_DEC(curthread); lock_profile_release_lock(&m->lock_object); /* Tell witness this isn't locked to make it happy. */ WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__, __LINE__); } m->mtx_lock = MTX_DESTROYED; lock_destroy(&m->lock_object); } /* * Initialize the mutex code and system mutexes. This is called from the MD * startup code prior to mi_startup(). The per-CPU data space needs to be * set up before this is called. */ void mutex_init(void) { /* Setup turnstiles so that sleep mutexes work. */ init_turnstiles(); /* * Initialize mutexes. */ mtx_init(&Giant, "Giant", NULL, MTX_DEF | MTX_RECURSE); mtx_init(&blocked_lock, "blocked lock", NULL, MTX_SPIN); blocked_lock.mtx_lock = 0xdeadc0de; /* Always blocked.
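 * (Editor's note: 0xdeadc0de acts as a sentinel owner that can never match a real thread pointer, so blocked_lock is never acquirable; a thread that finds it in td_lock spins in thread_lock_flags_() until thread_lock_unblock() installs the real lock pointer.)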
*/ mtx_init(&proc0.p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK); mtx_init(&proc0.p_slock, "process slock", NULL, MTX_SPIN); mtx_init(&proc0.p_statmtx, "pstatl", NULL, MTX_SPIN); mtx_init(&proc0.p_itimmtx, "pitiml", NULL, MTX_SPIN); mtx_init(&proc0.p_profmtx, "pprofl", NULL, MTX_SPIN); mtx_init(&devmtx, "cdev", NULL, MTX_DEF); mtx_lock(&Giant); } #ifdef DDB void db_show_mtx(const struct lock_object *lock) { struct thread *td; const struct mtx *m; m = (const struct mtx *)lock; db_printf(" flags: {"); if (LOCK_CLASS(lock) == &lock_class_mtx_spin) db_printf("SPIN"); else db_printf("DEF"); if (m->lock_object.lo_flags & LO_RECURSABLE) db_printf(", RECURSE"); if (m->lock_object.lo_flags & LO_DUPOK) db_printf(", DUPOK"); db_printf("}\n"); db_printf(" state: {"); if (mtx_unowned(m)) db_printf("UNOWNED"); else if (mtx_destroyed(m)) db_printf("DESTROYED"); else { db_printf("OWNED"); if (m->mtx_lock & MTX_CONTESTED) db_printf(", CONTESTED"); if (m->mtx_lock & MTX_RECURSED) db_printf(", RECURSED"); } db_printf("}\n"); if (!mtx_unowned(m) && !mtx_destroyed(m)) { td = mtx_owner(m); db_printf(" owner: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (mtx_recursed(m)) db_printf(" recursed: %d\n", m->mtx_recurse); } } #endif Index: projects/netbsd-tests-upstream-01-2017/sys/kern/kern_rwlock.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/kern_rwlock.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/kern_rwlock.c (revision 313399) @@ -1,1343 +1,1330 @@ /*- * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_no_adaptive_rwlocks.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS) #define ADAPTIVE_RWLOCKS #endif #ifdef HWPMC_HOOKS #include PMC_SOFT_DECLARE( , , lock, failed); #endif /* * Return the rwlock address when the lock cookie address is provided. * This functionality assumes that struct rwlock* have a member named rw_lock. 
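 * (Editor's note: __containerof() here is plain pointer arithmetic, equivalent to (struct rwlock *)((char *)(c) - offsetof(struct rwlock, rw_lock)), which is why the named member must exist.)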
*/ #define rwlock2rw(c) (__containerof(c, struct rwlock, rw_lock)) #ifdef DDB #include static void db_show_rwlock(const struct lock_object *lock); #endif static void assert_rw(const struct lock_object *lock, int what); static void lock_rw(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_rw(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_rw(struct lock_object *lock); struct lock_class lock_class_rw = { .lc_name = "rw", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE, .lc_assert = assert_rw, #ifdef DDB .lc_ddb_show = db_show_rwlock, #endif .lc_lock = lock_rw, .lc_unlock = unlock_rw, #ifdef KDTRACE_HOOKS .lc_owner = owner_rw, #endif }; #ifdef ADAPTIVE_RWLOCKS static int rowner_retries = 10; static int rowner_loops = 10000; static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL, "rwlock debugging"); SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, ""); SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, ""); -static struct lock_delay_config __read_mostly rw_delay = { - .initial = 1000, - .step = 500, - .min = 100, - .max = 5000, -}; +static struct lock_delay_config __read_mostly rw_delay; -SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial, +SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base, 0, ""); -SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step, - 0, ""); -SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min, - 0, ""); SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max, 0, ""); -static void -rw_delay_sysinit(void *dummy) -{ - - rw_delay.initial = mp_ncpus * 25; - rw_delay.step = (mp_ncpus * 25) / 2; - rw_delay.min = mp_ncpus * 5; - rw_delay.max = mp_ncpus * 25 * 10; -} -LOCK_DELAY_SYSINIT(rw_delay_sysinit); +LOCK_DELAY_SYSINIT_DEFAULT(rw_delay); #endif /* * Return a pointer to the owning thread if the lock is write-locked or * NULL if the lock is unlocked or read-locked. */ #define lv_rw_wowner(v) \ ((v) & RW_LOCK_READ ? NULL : \ (struct thread *)RW_OWNER((v))) #define rw_wowner(rw) lv_rw_wowner(RW_READ_VALUE(rw)) /* * Returns whether a write owner is recursed. Write ownership is not assured * here and should be previously checked. */ #define rw_recursed(rw) ((rw)->rw_recurse != 0) /* * Return true if curthread holds the lock. */ #define rw_wlocked(rw) (rw_wowner((rw)) == curthread) /* * Return a pointer to the owning thread for this lock who should receive * any priority lent by threads that block on this lock. Currently this * is identical to rw_wowner(). */ #define rw_owner(rw) rw_wowner(rw) #ifndef INVARIANTS #define __rw_assert(c, what, file, line) #endif void assert_rw(const struct lock_object *lock, int what) { rw_assert((const struct rwlock *)lock, what); } void lock_rw(struct lock_object *lock, uintptr_t how) { struct rwlock *rw; rw = (struct rwlock *)lock; if (how) rw_rlock(rw); else rw_wlock(rw); } uintptr_t unlock_rw(struct lock_object *lock) { struct rwlock *rw; rw = (struct rwlock *)lock; rw_assert(rw, RA_LOCKED | LA_NOTRECURSED); if (rw->rw_lock & RW_LOCK_READ) { rw_runlock(rw); return (1); } else { rw_wunlock(rw); return (0); } } #ifdef KDTRACE_HOOKS int owner_rw(const struct lock_object *lock, struct thread **owner) { const struct rwlock *rw = (const struct rwlock *)lock; uintptr_t x = rw->rw_lock; *owner = rw_wowner(rw); return ((x & RW_LOCK_READ) != 0 ?
(RW_READERS(x) != 0) : (*owner != NULL)); } #endif void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts) { struct rwlock *rw; int flags; rw = rwlock2rw(c); MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET | RW_RECURSE | RW_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p", __func__, name, &rw->rw_lock)); flags = LO_UPGRADABLE; if (opts & RW_DUPOK) flags |= LO_DUPOK; if (opts & RW_NOPROFILE) flags |= LO_NOPROFILE; if (!(opts & RW_NOWITNESS)) flags |= LO_WITNESS; if (opts & RW_RECURSE) flags |= LO_RECURSABLE; if (opts & RW_QUIET) flags |= LO_QUIET; if (opts & RW_NEW) flags |= LO_NEW; lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags); rw->rw_lock = RW_UNLOCKED; rw->rw_recurse = 0; } void _rw_destroy(volatile uintptr_t *c) { struct rwlock *rw; rw = rwlock2rw(c); KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw)); KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw)); rw->rw_lock = RW_DESTROYED; lock_destroy(&rw->lock_object); } void rw_sysinit(void *arg) { struct rw_args *args = arg; rw_init((struct rwlock *)args->ra_rw, args->ra_desc); } void rw_sysinit_flags(void *arg) { struct rw_args_flags *args = arg; rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc, args->ra_flags); } int _rw_wowned(const volatile uintptr_t *c) { return (rw_wowner(rwlock2rw(c)) == curthread); } void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; uintptr_t tid, v; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_wlock() of destroyed rwlock @ %s:%d", file, line)); WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; v = RW_UNLOCKED; if (!_rw_write_lock_fetch(rw, &v, tid)) _rw_wlock_hard(rw, v, tid, file, line); else LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_WRITER); LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; int rval; if (SCHEDULER_STOPPED()) return (1); rw = rwlock2rw(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line)); if (rw_wlocked(rw) && (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) { rw->rw_recurse++; + atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); rval = 1; } else rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED, (uintptr_t)curthread); LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); if (!rw_recursed(rw)) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); } return (rval); } void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(c, RA_WLOCKED, file, line); 
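/*
 * Editor's note -- illustrative sketch only, not part of this change.
 * The recursion path above now pairs the rw_recurse bump with setting
 * a RW_LOCK_WRITER_RECURSED bit in the lock word, letting release
 * paths detect recursion from the word alone.  A minimal userland
 * analogue using C11 atomics (every name below is hypothetical, and
 * the flag bit is assumed to be a free low bit) might read:
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define LK_RECURSED	((uintptr_t)0x2)	/* assumed free flag bit */

static _Atomic uintptr_t lk;	/* lock word: owner pointer | flag bits */
static unsigned lk_recurse;	/* recursion depth; touched by owner only */

static void
lk_recurse_enter(void)
{
	/* Caller already owns the lock; publish the recursion flag. */
	lk_recurse++;
	atomic_fetch_or_explicit(&lk, LK_RECURSED, memory_order_relaxed);
}

static void
lk_recurse_exit(void)
{
	/* Drop one level; clear the flag on the last unrecursion. */
	if (--lk_recurse == 0)
		atomic_fetch_and_explicit(&lk, ~LK_RECURSED,
		    memory_order_relaxed);
}
#endif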
WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); - if (rw->rw_recurse) - rw->rw_recurse--; - else - _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); + _rw_wunlock_hard(rw, (uintptr_t)curthread, file, line); + TD_LOCKS_DEC(curthread); } /* * Determines whether a new reader can acquire a lock. Succeeds if the * reader already owns a read lock and the lock is locked for read to * prevent deadlock from reader recursion. Also succeeds if the lock * is unlocked and has no writer waiters or spinners. Failing otherwise * prioritizes writers before readers. */ #define RW_CAN_READ(_rw) \ ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) & \ (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) == \ RW_LOCK_READ) void __rw_rlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; struct turnstile *ts; #ifdef ADAPTIVE_RWLOCKS volatile struct thread *owner; int spintries = 0; int i; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif uintptr_t v; #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS uintptr_t state; u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_RWLOCKS) lock_delay_arg_init(&lda, &rw_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif rw = rwlock2rw(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_rlock() of destroyed rwlock @ %s:%d", file, line)); KASSERT(rw_wowner(rw) != curthread, ("rw_rlock: wlock already held for %s @ %s:%d", rw->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&rw->lock_object); #endif v = RW_READ_VALUE(rw); #ifdef KDTRACE_HOOKS state = v; #endif for (;;) { /* * Handle the easy case. If no other thread has a write * lock, then try to bump up the count of read locks. Note * that we have to preserve the current state of the * RW_LOCK_WRITE_WAITERS flag. If we fail to acquire a * read lock, then rw_lock must have changed, so restart * the loop. Note that this handles the case of a * completely unlocked rwlock since such a lock is encoded * as a read lock with no waiters. */ if (RW_CAN_READ(v)) { /* * The RW_LOCK_READ_WAITERS flag should only be set * if the lock has been unlocked and write waiters * were present. */ if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, v + RW_ONE_READER)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", __func__, rw, (void *)v, (void *)(v + RW_ONE_READER)); break; } continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&rw->lock_object, &contested, &waittime); #ifdef ADAPTIVE_RWLOCKS /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. 
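 * (Editor's note: the heuristic is that an owner still running on another CPU is likely to release shortly, so spinning is cheaper than a context switch; once the owner is observed descheduled, the code falls through to block on the turnstile instead.)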
*/ if ((v & RW_LOCK_READ) == 0) { owner = (struct thread *)RW_OWNER(v); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, rw, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); do { lock_delay(&lda); v = RW_READ_VALUE(rw); owner = lv_rw_wowner(v); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } } else if (spintries < rowner_retries) { spintries++; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); for (i = 0; i < rowner_loops; i++) { v = RW_READ_VALUE(rw); if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v)) break; cpu_spinwait(); } v = RW_READ_VALUE(rw); #ifdef KDTRACE_HOOKS lda.spin_cnt += rowner_loops - i; #endif KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); if (i != rowner_loops) continue; } #endif /* * Okay, now it's the hard case. Some other thread already * has a write lock or there are write waiters present, * acquire the turnstile lock so we can begin the process * of blocking. */ ts = turnstile_trywait(&rw->lock_object); /* * The lock might have been released while we spun, so * recheck its state and restart the loop if needed. */ v = RW_READ_VALUE(rw); if (RW_CAN_READ(v)) { turnstile_cancel(ts); continue; } #ifdef ADAPTIVE_RWLOCKS /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ if ((v & RW_LOCK_READ) == 0) { owner = (struct thread *)RW_OWNER(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } } #endif /* * The lock is held in write mode or it already has waiters. */ MPASS(!RW_CAN_READ(v)); /* * If the RW_LOCK_READ_WAITERS flag is already set, then * we can go ahead and block. If it is not set then try * to set it. If we fail to set it drop the turnstile * lock and restart the loop. */ if (!(v & RW_LOCK_READ_WAITERS)) { if (!atomic_cmpset_ptr(&rw->rw_lock, v, v | RW_LOCK_READ_WAITERS)) { turnstile_cancel(ts); v = RW_READ_VALUE(rw); continue; } if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set read waiters flag", __func__, rw); } /* * We were unable to acquire the lock and the read waiters * flag is set, so we must block on the turnstile. */ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, rw); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&rw->lock_object); #endif turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&rw->lock_object); sleep_cnt++; #endif if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from turnstile", __func__, rw); v = RW_READ_VALUE(rw); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&rw->lock_object); if (sleep_time) LOCKSTAT_RECORD4(rw__block, rw, sleep_time, LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); /* Record only the loops spinning and not sleeping. */ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); #endif /* * TODO: acquire "owner of record" here. Here be turnstile dragons * however. 
turnstiles don't like owners changing between calls to * turnstile_wait() currently. */ LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, waittime, file, line, LOCKSTAT_READER); LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); WITNESS_LOCK(&rw->lock_object, 0, file, line); TD_LOCKS_INC(curthread); curthread->td_rw_rlocks++; } int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; uintptr_t x; if (SCHEDULER_STOPPED()) return (1); rw = rwlock2rw(c); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d", curthread, rw->lock_object.lo_name, file, line)); for (;;) { x = rw->rw_lock; KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line)); if (!(x & RW_LOCK_READ)) break; if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) { LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, line); WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, 0, 0, file, line, LOCKSTAT_READER); TD_LOCKS_INC(curthread); curthread->td_rw_rlocks++; return (1); } } LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); return (0); } void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; struct turnstile *ts; uintptr_t x, v, queue; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_runlock() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(c, RA_RLOCKED, file, line); WITNESS_UNLOCK(&rw->lock_object, 0, file, line); LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); /* TODO: drop "owner of record" here. */ x = RW_READ_VALUE(rw); for (;;) { /* * See if there is more than one read lock held. If so, * just drop one and return. */ if (RW_READERS(x) > 1) { if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x, x - RW_ONE_READER)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p", __func__, rw, (void *)x, (void *)(x - RW_ONE_READER)); break; } continue; } /* * If there aren't any waiters for a write lock, then try * to drop it quickly. */ if (!(x & RW_LOCK_WAITERS)) { MPASS((x & ~RW_LOCK_WRITE_SPINNER) == RW_READERS_LOCK(1)); if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x, RW_UNLOCKED)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p last succeeded", __func__, rw); break; } continue; } /* * Ok, we know we have waiters and we think we are the * last reader, so grab the turnstile lock. */ turnstile_chain_lock(&rw->lock_object); v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); MPASS(v & RW_LOCK_WAITERS); /* * Try to drop our lock leaving the lock in an unlocked * state. * * If you wanted to do explicit lock handoff you'd have to * do it here. You'd also want to use turnstile_signal() * and you'd have to handle the race where a higher * priority thread blocks on the write lock before the * thread you wake up actually runs and have the new thread * "steal" the lock. For now it's a lot simpler to just * wake up all of the waiters. * * As above, if we fail, then another thread might have * acquired a read lock, so drop the turnstile lock and * restart.
*/ x = RW_UNLOCKED; if (v & RW_LOCK_WRITE_WAITERS) { queue = TS_EXCLUSIVE_QUEUE; x |= (v & RW_LOCK_READ_WAITERS); } else queue = TS_SHARED_QUEUE; if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v, x)) { turnstile_chain_unlock(&rw->lock_object); x = RW_READ_VALUE(rw); continue; } if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p last succeeded with waiters", __func__, rw); /* * Ok. The lock is released and all that's left is to * wake up the waiters. Note that the lock might not be * free anymore, but in that case the writers will just * block again if they run before the new lock holder(s) * release the lock. */ ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); turnstile_broadcast(ts, queue); turnstile_unpend(ts, TS_SHARED_LOCK); turnstile_chain_unlock(&rw->lock_object); break; } LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER); TD_LOCKS_DEC(curthread); curthread->td_rw_rlocks--; } /* * This function is called when we are unable to obtain a write lock on the * first try. This means that at least one other thread holds either a * read or write lock. */ void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, const char *file, int line) { struct rwlock *rw; struct turnstile *ts; #ifdef ADAPTIVE_RWLOCKS volatile struct thread *owner; int spintries = 0; int i; #endif uintptr_t x; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif #if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS uintptr_t state; u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return; #if defined(ADAPTIVE_RWLOCKS) lock_delay_arg_init(&lda, &rw_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif rw = rwlock2rw(c); if (__predict_false(v == RW_UNLOCKED)) v = RW_READ_VALUE(rw); if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) { KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE, ("%s: recursing but non-recursive rw %s @ %s:%d\n", __func__, rw->lock_object.lo_name, file, line)); rw->rw_recurse++; + atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw); return; } if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, rw->lock_object.lo_name, (void *)rw->rw_lock, file, line); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&rw->lock_object); state = v; #endif for (;;) { if (v == RW_UNLOCKED) { if (_rw_write_lock_fetch(rw, &v, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&rw->lock_object, &contested, &waittime); #ifdef ADAPTIVE_RWLOCKS /* * If the lock is write locked and the owner is * running on another CPU, spin until the owner stops * running or the state of the lock changes. 
*/ owner = lv_rw_wowner(v); if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, rw, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); do { lock_delay(&lda); v = RW_READ_VALUE(rw); owner = lv_rw_wowner(v); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } if ((v & RW_LOCK_READ) && RW_READERS(v) && spintries < rowner_retries) { if (!(v & RW_LOCK_WRITE_SPINNER)) { if (!atomic_cmpset_ptr(&rw->rw_lock, v, v | RW_LOCK_WRITE_SPINNER)) { v = RW_READ_VALUE(rw); continue; } } spintries++; KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", rw->lock_object.lo_name); for (i = 0; i < rowner_loops; i++) { if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0) break; cpu_spinwait(); } KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); v = RW_READ_VALUE(rw); #ifdef KDTRACE_HOOKS lda.spin_cnt += rowner_loops - i; #endif if (i != rowner_loops) continue; } #endif ts = turnstile_trywait(&rw->lock_object); v = RW_READ_VALUE(rw); #ifdef ADAPTIVE_RWLOCKS /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the turnstile * chain lock. If so, drop the turnstile lock and try * again. */ if (!(v & RW_LOCK_READ)) { owner = (struct thread *)RW_OWNER(v); if (TD_IS_RUNNING(owner)) { turnstile_cancel(ts); continue; } } #endif /* * Check for the waiters flags about this rwlock. * If the lock was released, without maintain any pending * waiters queue, simply try to acquire it. * If a pending waiters queue is present, claim the lock * ownership and maintain the pending queue. */ x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER); if ((v & ~x) == RW_UNLOCKED) { x &= ~RW_LOCK_WRITE_SPINNER; if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) { if (x) turnstile_claim(ts); else turnstile_cancel(ts); break; } turnstile_cancel(ts); v = RW_READ_VALUE(rw); continue; } /* * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to * set it. If we fail to set it, then loop back and try * again. */ if (!(v & RW_LOCK_WRITE_WAITERS)) { if (!atomic_cmpset_ptr(&rw->rw_lock, v, v | RW_LOCK_WRITE_WAITERS)) { turnstile_cancel(ts); v = RW_READ_VALUE(rw); continue; } if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set write waiters flag", __func__, rw); } /* * We were unable to acquire the lock and the write waiters * flag is set, so we must block on the turnstile. */ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__, rw); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&rw->lock_object); #endif turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&rw->lock_object); sleep_cnt++; #endif if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from turnstile", __func__, rw); #ifdef ADAPTIVE_RWLOCKS spintries = 0; #endif v = RW_READ_VALUE(rw); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&rw->lock_object); if (sleep_time) LOCKSTAT_RECORD4(rw__block, rw, sleep_time, LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); /* Record only the loops spinning and not sleeping. 
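 * (Editor's note: all_time spans the whole hard path while sleep_time covers only the turnstile waits, so all_time - sleep_time below approximates the pure spin component; the spin_cnt > sleep_cnt guard skips the record when every iteration ended in a sleep.)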
*/ if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); #endif LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested, waittime, file, line, LOCKSTAT_WRITER); } /* * This function is called if the first try at releasing a write lock failed. * This means that one of the 2 waiter bits must be set indicating that at * least one thread is waiting on this lock. */ void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, int line) { struct rwlock *rw; struct turnstile *ts; uintptr_t v; int queue; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); - MPASS(!rw_recursed(rw)); - LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, - LOCKSTAT_WRITER); - if (_rw_write_unlock(rw, tid)) + if (!rw_recursed(rw)) { + LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, + LOCKSTAT_WRITER); + if (_rw_write_unlock(rw, tid)) + return; + } else { + if (--(rw->rw_recurse) == 0) + atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); return; + } KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), ("%s: neither of the waiter flags are set", __func__)); if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p contested", __func__, rw); turnstile_chain_lock(&rw->lock_object); ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); /* * Use the same algo as sx locks for now. Prefer waking up shared * waiters if we have any over writers. This is probably not ideal. * * 'v' is the value we are going to write back to rw_lock. If we * have waiters on both queues, we need to preserve the state of * the waiter flag for the queue we don't wake up. For now this is * hardcoded for the algorithm mentioned above. * * In the case of both readers and writers waiting we wakeup the * readers but leave the RW_LOCK_WRITE_WAITERS flag set. If a * new writer comes in before a reader it will claim the lock up * above. There is probably a potential priority inversion in * there that could be worked around either by waking both queues * of waiters or doing some complicated lock handoff gymnastics. */ v = RW_UNLOCKED; if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) { queue = TS_EXCLUSIVE_QUEUE; v |= (rw->rw_lock & RW_LOCK_READ_WAITERS); } else queue = TS_SHARED_QUEUE; /* Wake up all waiters for the specific queue. */ if (LOCK_LOG_TEST(&rw->lock_object, 0)) CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw, queue == TS_SHARED_QUEUE ? "read" : "write"); turnstile_broadcast(ts, queue); atomic_store_rel_ptr(&rw->rw_lock, v); turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); turnstile_chain_unlock(&rw->lock_object); } /* * Attempt to do a non-blocking upgrade from a read lock to a write * lock. This will only succeed if this thread holds a single read * lock. Returns true if the upgrade succeeded and false otherwise. */ int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; uintptr_t v, x, tid; struct turnstile *ts; int success; if (SCHEDULER_STOPPED()) return (1); rw = rwlock2rw(c); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(c, RA_RLOCKED, file, line); /* * Attempt to switch from one reader to a writer. If there * are any write waiters, then we will have to lock the * turnstile first to prevent races with another writer * calling turnstile_wait() before we have claimed this * turnstile. So, do the simple case of no waiters first. 
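 * (Editor's note, conceptual sketch: with exactly one reader and no waiter bits set, the upgrade reduces to the first cmpset below, effectively atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1), tid); only the waiter-bit variants need the turnstile handling.)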
*/ tid = (uintptr_t)curthread; success = 0; for (;;) { v = rw->rw_lock; if (RW_READERS(v) > 1) break; if (!(v & RW_LOCK_WAITERS)) { success = atomic_cmpset_ptr(&rw->rw_lock, v, tid); if (!success) continue; break; } /* * Ok, we think we have waiters, so lock the turnstile. */ ts = turnstile_trywait(&rw->lock_object); v = rw->rw_lock; if (RW_READERS(v) > 1) { turnstile_cancel(ts); break; } /* * Try to switch from one reader to a writer again. This time * we honor the current state of the waiters flags. * If we obtain the lock with the flags set, then claim * ownership of the turnstile. */ x = rw->rw_lock & RW_LOCK_WAITERS; success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x); if (success) { if (x) turnstile_claim(ts); else turnstile_cancel(ts); break; } turnstile_cancel(ts); } LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); if (success) { curthread->td_rw_rlocks--; WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); LOCKSTAT_RECORD0(rw__upgrade, rw); } return (success); } /* * Downgrade a write lock into a single read lock. */ void __rw_downgrade(volatile uintptr_t *c, const char *file, int line) { struct rwlock *rw; struct turnstile *ts; uintptr_t tid, v; int rwait, wwait; if (SCHEDULER_STOPPED()) return; rw = rwlock2rw(c); KASSERT(rw->rw_lock != RW_DESTROYED, ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line)); __rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line); #ifndef INVARIANTS if (rw_recursed(rw)) panic("downgrade of a recursed lock"); #endif WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); /* * Convert from a writer to a single reader. First we handle * the easy case with no waiters. If there are any waiters, we * lock the turnstile and "disown" the lock. */ tid = (uintptr_t)curthread; if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) goto out; /* * Ok, we think we have waiters, so lock the turnstile so we can * read the waiter flags without any races. */ turnstile_chain_lock(&rw->lock_object); v = rw->rw_lock & RW_LOCK_WAITERS; rwait = v & RW_LOCK_READ_WAITERS; wwait = v & RW_LOCK_WRITE_WAITERS; MPASS(rwait | wwait); /* * Downgrade from a write lock while preserving waiters flag * and give up ownership of the turnstile. */ ts = turnstile_lookup(&rw->lock_object); MPASS(ts != NULL); if (!wwait) v &= ~RW_LOCK_READ_WAITERS; atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); /* * Wake other readers if there are no writers pending. Otherwise they * won't be able to acquire the lock anyway. */ if (rwait && !wwait) { turnstile_broadcast(ts, TS_SHARED_QUEUE); turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); } else turnstile_disown(ts); turnstile_chain_unlock(&rw->lock_object); out: curthread->td_rw_rlocks++; LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); LOCKSTAT_RECORD0(rw__downgrade, rw); } #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef __rw_assert #endif /* * In the non-WITNESS case, rw_assert() can only detect that at least * *some* thread owns an rlock, but it cannot guarantee that *this* * thread owns an rlock. 
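 * (Editor's note: concretely, the RA_RLOCKED check below looks only at the shared bit and reader count in rw_lock, so any reader anywhere satisfies it -- including after this thread has dropped its own read lock.)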
*/ void __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line) { const struct rwlock *rw; if (panicstr != NULL) return; rw = rwlock2rw(c); switch (what) { case RA_LOCKED: case RA_LOCKED | RA_RECURSED: case RA_LOCKED | RA_NOTRECURSED: case RA_RLOCKED: case RA_RLOCKED | RA_RECURSED: case RA_RLOCKED | RA_NOTRECURSED: #ifdef WITNESS witness_assert(&rw->lock_object, what, file, line); #else /* * If some other thread has a write lock or we have one * and are asserting a read lock, fail. Also, if no one * has a lock at all, fail. */ if (rw->rw_lock == RW_UNLOCKED || (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || rw_wowner(rw) != curthread))) panic("Lock %s not %slocked @ %s:%d\n", rw->lock_object.lo_name, (what & RA_RLOCKED) ? "read " : "", file, line); if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { if (rw_recursed(rw)) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } #endif break; case RA_WLOCKED: case RA_WLOCKED | RA_RECURSED: case RA_WLOCKED | RA_NOTRECURSED: if (rw_wowner(rw) != curthread) panic("Lock %s not exclusively locked @ %s:%d\n", rw->lock_object.lo_name, file, line); if (rw_recursed(rw)) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rw->lock_object.lo_name, file, line); break; case RA_UNLOCKED: #ifdef WITNESS witness_assert(&rw->lock_object, what, file, line); #else /* * If we hold a write lock fail. We can't reliably check * to see if we hold a read lock or not. */ if (rw_wowner(rw) == curthread) panic("Lock %s exclusively locked @ %s:%d\n", rw->lock_object.lo_name, file, line); #endif break; default: panic("Unknown rw lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB void db_show_rwlock(const struct lock_object *lock) { const struct rwlock *rw; struct thread *td; rw = (const struct rwlock *)lock; db_printf(" state: "); if (rw->rw_lock == RW_UNLOCKED) db_printf("UNLOCKED\n"); else if (rw->rw_lock == RW_DESTROYED) { db_printf("DESTROYED\n"); return; } else if (rw->rw_lock & RW_LOCK_READ) db_printf("RLOCK: %ju locks\n", (uintmax_t)(RW_READERS(rw->rw_lock))); else { td = rw_wowner(rw); db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (rw_recursed(rw)) db_printf(" recursed: %u\n", rw->rw_recurse); } db_printf(" waiters: "); switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { case RW_LOCK_READ_WAITERS: db_printf("readers\n"); break; case RW_LOCK_WRITE_WAITERS: db_printf("writers\n"); break; case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS: db_printf("readers and writers\n"); break; default: db_printf("none\n"); break; } } #endif Index: projects/netbsd-tests-upstream-01-2017/sys/kern/kern_sx.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/kern_sx.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/kern_sx.c (revision 313399) @@ -1,1291 +1,1273 @@ /*- * Copyright (c) 2007 Attilio Rao * Copyright (c) 2001 Jason Evans * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * Shared/exclusive locks. This implementation attempts to ensure * deterministic lock granting behavior, so that slocks and xlocks are * interleaved. * * Priority propagation will not generally raise the priority of lock holders, * so should not be relied upon in combination with sx locks. */ #include "opt_ddb.h" #include "opt_hwpmc_hooks.h" #include "opt_no_adaptive_sx.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(SMP) && !defined(NO_ADAPTIVE_SX) #include #endif #ifdef DDB #include #endif #if defined(SMP) && !defined(NO_ADAPTIVE_SX) #define ADAPTIVE_SX #endif CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE); #ifdef HWPMC_HOOKS #include PMC_SOFT_DECLARE( , , lock, failed); #endif /* Handy macros for sleep queues. */ #define SQ_EXCLUSIVE_QUEUE 0 #define SQ_SHARED_QUEUE 1 /* * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We * drop Giant anytime we have to sleep or if we adaptively spin. */ #define GIANT_DECLARE \ int _giantcnt = 0; \ WITNESS_SAVE_DECL(Giant) \ #define GIANT_SAVE() do { \ if (mtx_owned(&Giant)) { \ WITNESS_SAVE(&Giant.lock_object, Giant); \ while (mtx_owned(&Giant)) { \ _giantcnt++; \ mtx_unlock(&Giant); \ } \ } \ } while (0) #define GIANT_RESTORE() do { \ if (_giantcnt > 0) { \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ mtx_lock(&Giant); \ WITNESS_RESTORE(&Giant.lock_object, Giant); \ } \ } while (0) /* * Returns true if an exclusive lock is recursed. It assumes * curthread currently has an exclusive lock. 
*/ #define sx_recursed(sx) ((sx)->sx_recurse != 0) static void assert_sx(const struct lock_object *lock, int what); #ifdef DDB static void db_show_sx(const struct lock_object *lock); #endif static void lock_sx(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_sx(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_sx(struct lock_object *lock); struct lock_class lock_class_sx = { .lc_name = "sx", .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, .lc_assert = assert_sx, #ifdef DDB .lc_ddb_show = db_show_sx, #endif .lc_lock = lock_sx, .lc_unlock = unlock_sx, #ifdef KDTRACE_HOOKS .lc_owner = owner_sx, #endif }; #ifndef INVARIANTS #define _sx_assert(sx, what, file, line) #endif #ifdef ADAPTIVE_SX static u_int asx_retries = 10; static u_int asx_loops = 10000; static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging"); SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, ""); SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, ""); -static struct lock_delay_config __read_mostly sx_delay = { - .initial = 1000, - .step = 500, - .min = 100, - .max = 5000, -}; +static struct lock_delay_config __read_mostly sx_delay; -SYSCTL_INT(_debug_sx, OID_AUTO, delay_initial, CTLFLAG_RW, &sx_delay.initial, +SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base, 0, ""); -SYSCTL_INT(_debug_sx, OID_AUTO, delay_step, CTLFLAG_RW, &sx_delay.step, - 0, ""); -SYSCTL_INT(_debug_sx, OID_AUTO, delay_min, CTLFLAG_RW, &sx_delay.min, - 0, ""); SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max, 0, ""); -static void -sx_delay_sysinit(void *dummy) -{ - - sx_delay.initial = mp_ncpus * 25; - sx_delay.step = (mp_ncpus * 25) / 2; - sx_delay.min = mp_ncpus * 5; - sx_delay.max = mp_ncpus * 25 * 10; -} -LOCK_DELAY_SYSINIT(sx_delay_sysinit); +LOCK_DELAY_SYSINIT_DEFAULT(sx_delay); #endif void assert_sx(const struct lock_object *lock, int what) { sx_assert((const struct sx *)lock, what); } void lock_sx(struct lock_object *lock, uintptr_t how) { struct sx *sx; sx = (struct sx *)lock; if (how) sx_slock(sx); else sx_xlock(sx); } uintptr_t unlock_sx(struct lock_object *lock) { struct sx *sx; sx = (struct sx *)lock; sx_assert(sx, SA_LOCKED | SA_NOTRECURSED); if (sx_xlocked(sx)) { sx_xunlock(sx); return (0); } else { sx_sunlock(sx); return (1); } } #ifdef KDTRACE_HOOKS int owner_sx(const struct lock_object *lock, struct thread **owner) { const struct sx *sx; uintptr_t x; sx = (const struct sx *)lock; x = sx->sx_lock; *owner = NULL; return ((x & SX_LOCK_SHARED) != 0 ? 
(SX_SHARERS(x) != 0) : ((*owner = (struct thread *)SX_OWNER(x)) != NULL)); } #endif void sx_sysinit(void *arg) { struct sx_args *sargs = arg; sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags); } void sx_init_flags(struct sx *sx, const char *description, int opts) { int flags; MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK | SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0); ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p", __func__, description, &sx->sx_lock)); flags = LO_SLEEPABLE | LO_UPGRADABLE; if (opts & SX_DUPOK) flags |= LO_DUPOK; if (opts & SX_NOPROFILE) flags |= LO_NOPROFILE; if (!(opts & SX_NOWITNESS)) flags |= LO_WITNESS; if (opts & SX_RECURSE) flags |= LO_RECURSABLE; if (opts & SX_QUIET) flags |= LO_QUIET; if (opts & SX_NEW) flags |= LO_NEW; flags |= opts & SX_NOADAPTIVE; lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); sx->sx_lock = SX_LOCK_UNLOCKED; sx->sx_recurse = 0; } void sx_destroy(struct sx *sx) { KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); KASSERT(sx->sx_recurse == 0, ("sx lock still recursed")); sx->sx_lock = SX_LOCK_DESTROYED; lock_destroy(&sx->lock_object); } int sx_try_slock_(struct sx *sx, const char *file, int line) { uintptr_t x; if (SCHEDULER_STOPPED()) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); for (;;) { x = sx->sx_lock; KASSERT(x != SX_LOCK_DESTROYED, ("sx_try_slock() of destroyed sx @ %s:%d", file, line)); if (!(x & SX_LOCK_SHARED)) break; if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER)) { LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_READER); TD_LOCKS_INC(curthread); return (1); } } LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); return (0); } int _sx_xlock(struct sx *sx, int opts, const char *file, int line) { uintptr_t tid, x; int error = 0; if (SCHEDULER_STOPPED()) return (0); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("sx_xlock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_xlock() of destroyed sx @ %s:%d", file, line)); WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); tid = (uintptr_t)curthread; x = SX_LOCK_UNLOCKED; if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) error = _sx_xlock_hard(sx, x, tid, opts, file, line); else LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_WRITER); if (!error) { LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } return (error); } int sx_try_xlock_(struct sx *sx, const char *file, int line) { int rval; if (SCHEDULER_STOPPED()) return (1); KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_try_xlock() of destroyed sx @ %s:%d", file, line)); if (sx_xlocked(sx) && (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) { sx->sx_recurse++; atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); rval = 1; } else rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, (uintptr_t)curthread); 
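/*
 * Editor's note -- illustrative sketch only, not part of this change.
 * The shared-acquire loop in sx_try_slock_() above is an optimistic
 * increment: read the word, check the shared bit, and compare-and-swap
 * in old + SX_ONE_SHARER.  A self-contained C11 analogue (the names,
 * bit layout, and increment below are all hypothetical) might read:
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

#define SHARED_BIT	((uintptr_t)0x1)	/* assumed: set while read-locked */
#define ONE_SHARER	((uintptr_t)0x10)	/* assumed reader-count increment */

static _Atomic uintptr_t sxw = SHARED_BIT;	/* unlocked: shared, zero readers */

static int
try_read_lock(void)
{
	uintptr_t x = atomic_load_explicit(&sxw, memory_order_relaxed);

	for (;;) {
		if ((x & SHARED_BIT) == 0)
			return (0);	/* write-locked: fail, never block */
		/* acquire on success, like atomic_cmpset_acq_ptr() */
		if (atomic_compare_exchange_weak_explicit(&sxw, &x,
		    x + ONE_SHARER, memory_order_acquire,
		    memory_order_relaxed))
			return (1);
	}
}
#endif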
LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); if (rval) { WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); if (!sx_recursed(sx)) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 0, 0, file, line, LOCKSTAT_WRITER); TD_LOCKS_INC(curthread); } return (rval); } void _sx_xunlock(struct sx *sx, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_xunlock() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_XLOCKED, file, line); WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line); _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line); TD_LOCKS_DEC(curthread); } /* * Try to do a non-blocking upgrade from a shared lock to an exclusive lock. * This will only succeed if this thread holds a single shared lock. * Return 1 if if the upgrade succeed, 0 otherwise. */ int sx_try_upgrade_(struct sx *sx, const char *file, int line) { uintptr_t x; int success; if (SCHEDULER_STOPPED()) return (1); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_SLOCKED, file, line); /* * Try to switch from one shared lock to an exclusive lock. We need * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that * we will wake up the exclusive waiters when we drop the lock. */ x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS; success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x, (uintptr_t)curthread | x); LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); if (success) { WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file, line); LOCKSTAT_RECORD0(sx__upgrade, sx); } return (success); } /* * Downgrade an unrecursed exclusive lock into a single shared lock. */ void sx_downgrade_(struct sx *sx, const char *file, int line) { uintptr_t x; int wakeup_swapper; if (SCHEDULER_STOPPED()) return; KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_downgrade() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line); #ifndef INVARIANTS if (sx_recursed(sx)) panic("downgrade of a recursed lock"); #endif WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); /* * Try to switch from an exclusive lock with no shared waiters * to one sharer with no shared waiters. If there are * exclusive waiters, we don't need to lock the sleep queue so * long as we preserve the flag. We do one quick try and if * that fails we grab the sleepq lock to keep the flags from * changing and do it the slow way. * * We have to lock the sleep queue if there are shared waiters * so we can wake them up. */ x = sx->sx_lock; if (!(x & SX_LOCK_SHARED_WAITERS) && atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | (x & SX_LOCK_EXCLUSIVE_WAITERS))) { LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); return; } /* * Lock the sleep queue so we can read the waiters bits * without any races and wakeup any shared waiters. */ sleepq_lock(&sx->lock_object); /* * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single * shared lock. If there are any shared waiters, wake them up. 
*/ wakeup_swapper = 0; x = sx->sx_lock; atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | (x & SX_LOCK_EXCLUSIVE_WAITERS)); if (x & SX_LOCK_SHARED_WAITERS) wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, SQ_SHARED_QUEUE); sleepq_release(&sx->lock_object); LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); LOCKSTAT_RECORD0(sx__downgrade, sx); if (wakeup_swapper) kick_proc0(); } /* * This function represents the so-called 'hard case' for sx_xlock * operation. All 'easy case' failures are redirected to this. Note * that ideally this would be a static function, but it needs to be * accessible from at least sx.h. */ int _sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts, const char *file, int line) { GIANT_DECLARE; #ifdef ADAPTIVE_SX volatile struct thread *owner; u_int i, spintries = 0; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif int error = 0; #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS uintptr_t state; u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return (0); #if defined(ADAPTIVE_SX) lock_delay_arg_init(&lda, &sx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif if (__predict_false(x == SX_LOCK_UNLOCKED)) x = SX_READ_VALUE(sx); /* If we already hold an exclusive lock, then recurse. */ if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) { KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0, ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n", sx->lock_object.lo_name, file, line)); sx->sx_recurse++; atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx); return (0); } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&sx->lock_object); state = x; #endif for (;;) { if (x == SX_LOCK_UNLOCKED) { if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) break; continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime); #ifdef ADAPTIVE_SX /* * If the lock is write locked and the owner is * running on another CPU, spin until the owner stops * running or the state of the lock changes. 
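 * (Editor's note: this spin is additionally gated per-lock by the SX_NOADAPTIVE flag checked below, and GIANT_SAVE() releases Giant first, matching the rule stated earlier in this file that Giant is dropped whenever we sleep or adaptively spin.)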
*/ if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { if ((x & SX_LOCK_SHARED) == 0) { owner = lv_sx_owner(x); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, sx, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); GIANT_SAVE(); do { lock_delay(&lda); x = SX_READ_VALUE(sx); owner = lv_sx_owner(x); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } } else if (SX_SHARERS(x) && spintries < asx_retries) { KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); GIANT_SAVE(); spintries++; for (i = 0; i < asx_loops; i++) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR4(KTR_LOCK, "%s: shared spinning on %p with %u and %u", __func__, sx, spintries, i); x = sx->sx_lock; if ((x & SX_LOCK_SHARED) == 0 || SX_SHARERS(x) == 0) break; cpu_spinwait(); #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif } KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); x = SX_READ_VALUE(sx); if (i != asx_loops) continue; } } #endif sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); /* * If the lock was released while spinning on the * sleep queue chain lock, try again. */ if (x == SX_LOCK_UNLOCKED) { sleepq_release(&sx->lock_object); continue; } #ifdef ADAPTIVE_SX /* * The current lock owner might have started executing * on another CPU (or the lock could have changed * owners) while we were waiting on the sleep queue * chain lock. If so, drop the sleep queue lock and try * again. */ if (!(x & SX_LOCK_SHARED) && (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { owner = (struct thread *)SX_OWNER(x); if (TD_IS_RUNNING(owner)) { sleepq_release(&sx->lock_object); continue; } } #endif /* * If an exclusive lock was released with both shared * and exclusive waiters and a shared waiter hasn't * woken up and acquired the lock yet, sx_lock will be * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS. * If we see that value, try to acquire it once. Note * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS * as there are other exclusive waiters still. If we * fail, restart the loop. */ if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) { if (atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS, tid | SX_LOCK_EXCLUSIVE_WAITERS)) { sleepq_release(&sx->lock_object); CTR2(KTR_LOCK, "%s: %p claimed by new writer", __func__, sx); break; } sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } /* * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag. If we fail, * then loop back and retry. */ if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { if (!atomic_cmpset_ptr(&sx->sx_lock, x, x | SX_LOCK_EXCLUSIVE_WAITERS)) { sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set excl waiters flag", __func__, sx); } /* * Since we have been unable to acquire the exclusive * lock and the exclusive waiters flag is set, we have * to sleep. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", __func__, sx); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&sx->lock_object); #endif GIANT_SAVE(); sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE); if (!(opts & SX_INTERRUPTIBLE)) sleepq_wait(&sx->lock_object, 0); else error = sleepq_wait_sig(&sx->lock_object, 0); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&sx->lock_object); sleep_cnt++; #endif if (error) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: interruptible sleep by %p suspended by signal", __func__, sx); break; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", __func__, sx); x = SX_READ_VALUE(sx); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&sx->lock_object); if (sleep_time) LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); #endif if (!error) LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, contested, waittime, file, line, LOCKSTAT_WRITER); GIANT_RESTORE(); return (error); } /* * This function represents the so-called 'hard case' for sx_xunlock * operation. All 'easy case' failures are redirected to this. Note * that ideally this would be a static function, but it needs to be * accessible from at least sx.h. */ void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line) { uintptr_t x; int queue, wakeup_swapper; if (SCHEDULER_STOPPED()) return; MPASS(!(sx->sx_lock & SX_LOCK_SHARED)); if (!sx_recursed(sx)) { LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER); if (atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) return; } else { /* The lock is recursed, unrecurse one level. */ if ((--sx->sx_recurse) == 0) atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx); return; } MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)); if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p contested", __func__, sx); sleepq_lock(&sx->lock_object); x = SX_LOCK_UNLOCKED; /* * The wake up algorithm here is quite simple and probably not * ideal. It gives precedence to shared waiters if they are * present. For this condition, we have to preserve the * state of the exclusive waiters flag. * If interruptible sleeps left the shared queue empty avoid a * starvation for the threads sleeping on the exclusive queue by giving * them precedence and cleaning up the shared waiters bit anyway. */ if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 && sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) { queue = SQ_SHARED_QUEUE; x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS); } else queue = SQ_EXCLUSIVE_QUEUE; /* Wake up all the waiters for the specific queue. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue", __func__, sx, queue == SQ_SHARED_QUEUE ? 
"shared" : "exclusive"); atomic_store_rel_ptr(&sx->sx_lock, x); wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue); sleepq_release(&sx->lock_object); if (wakeup_swapper) kick_proc0(); } int _sx_slock(struct sx *sx, int opts, const char *file, int line) { GIANT_DECLARE; #ifdef ADAPTIVE_SX volatile struct thread *owner; #endif #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; #endif uintptr_t x; int error = 0; #if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) struct lock_delay_arg lda; #endif #ifdef KDTRACE_HOOKS uintptr_t state; u_int sleep_cnt = 0; int64_t sleep_time = 0; int64_t all_time = 0; #endif if (SCHEDULER_STOPPED()) return (0); #if defined(ADAPTIVE_SX) lock_delay_arg_init(&lda, &sx_delay); #elif defined(KDTRACE_HOOKS) lock_delay_arg_init(&lda, NULL); #endif KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("sx_slock() by idle thread %p on sx %s @ %s:%d", curthread, sx->lock_object.lo_name, file, line)); KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_slock() of destroyed sx @ %s:%d", file, line)); WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL); #ifdef KDTRACE_HOOKS all_time -= lockstat_nsecs(&sx->lock_object); #endif x = SX_READ_VALUE(sx); #ifdef KDTRACE_HOOKS state = x; #endif /* * As with rwlocks, we don't make any attempt to try to block * shared locks once there is an exclusive waiter. */ for (;;) { /* * If no other thread has an exclusive lock then try to bump up * the count of sharers. Since we have to preserve the state * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the * shared lock loop back and retry. */ if (x & SX_LOCK_SHARED) { MPASS(!(x & SX_LOCK_SHARED_WAITERS)); if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", __func__, sx, (void *)x, (void *)(x + SX_ONE_SHARER)); break; } continue; } #ifdef KDTRACE_HOOKS lda.spin_cnt++; #endif #ifdef HWPMC_HOOKS PMC_SOFT_CALL( , , lock, failed); #endif lock_profile_obtain_lock_failed(&sx->lock_object, &contested, &waittime); #ifdef ADAPTIVE_SX /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. */ if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { owner = lv_sx_owner(x); if (TD_IS_RUNNING(owner)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR3(KTR_LOCK, "%s: spinning on %p held by %p", __func__, sx, owner); KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning", "lockname:\"%s\"", sx->lock_object.lo_name); GIANT_SAVE(); do { lock_delay(&lda); x = SX_READ_VALUE(sx); owner = lv_sx_owner(x); } while (owner != NULL && TD_IS_RUNNING(owner)); KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running"); continue; } } #endif /* * Some other thread already has an exclusive lock, so * start the process of blocking. */ sleepq_lock(&sx->lock_object); x = SX_READ_VALUE(sx); /* * The lock could have been released while we spun. * In this case loop back and retry. */ if (x & SX_LOCK_SHARED) { sleepq_release(&sx->lock_object); continue; } #ifdef ADAPTIVE_SX /* * If the owner is running on another CPU, spin until * the owner stops running or the state of the lock * changes. */ if (!(x & SX_LOCK_SHARED) && (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { owner = (struct thread *)SX_OWNER(x); if (TD_IS_RUNNING(owner)) { sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } } #endif /* * Try to set the SX_LOCK_SHARED_WAITERS flag. 
If we * fail to set it drop the sleep queue lock and loop * back. */ if (!(x & SX_LOCK_SHARED_WAITERS)) { if (!atomic_cmpset_ptr(&sx->sx_lock, x, x | SX_LOCK_SHARED_WAITERS)) { sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p set shared waiters flag", __func__, sx); } /* * Since we have been unable to acquire the shared lock, * we have to sleep. */ if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", __func__, sx); #ifdef KDTRACE_HOOKS sleep_time -= lockstat_nsecs(&sx->lock_object); #endif GIANT_SAVE(); sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE); if (!(opts & SX_INTERRUPTIBLE)) sleepq_wait(&sx->lock_object, 0); else error = sleepq_wait_sig(&sx->lock_object, 0); #ifdef KDTRACE_HOOKS sleep_time += lockstat_nsecs(&sx->lock_object); sleep_cnt++; #endif if (error) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: interruptible sleep by %p suspended by signal", __func__, sx); break; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", __func__, sx); x = SX_READ_VALUE(sx); } #ifdef KDTRACE_HOOKS all_time += lockstat_nsecs(&sx->lock_object); if (sleep_time) LOCKSTAT_RECORD4(sx__block, sx, sleep_time, LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); if (lda.spin_cnt > sleep_cnt) LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); #endif if (error == 0) { LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, contested, waittime, file, line, LOCKSTAT_READER); LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); WITNESS_LOCK(&sx->lock_object, 0, file, line); TD_LOCKS_INC(curthread); } GIANT_RESTORE(); return (error); } void _sx_sunlock(struct sx *sx, const char *file, int line) { uintptr_t x; int wakeup_swapper; if (SCHEDULER_STOPPED()) return; KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); _sx_assert(sx, SA_SLOCKED, file, line); WITNESS_UNLOCK(&sx->lock_object, 0, file, line); LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER); x = SX_READ_VALUE(sx); for (;;) { /* * We should never have sharers while at least one thread * holds a shared lock. */ KASSERT(!(x & SX_LOCK_SHARED_WAITERS), ("%s: waiting sharers", __func__)); /* * See if there is more than one shared lock held. If * so, just drop one and return. */ if (SX_SHARERS(x) > 1) { if (atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, x - SX_ONE_SHARER)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR4(KTR_LOCK, "%s: %p succeeded %p -> %p", __func__, sx, (void *)x, (void *)(x - SX_ONE_SHARER)); break; } continue; } /* * If there aren't any waiters for an exclusive lock, * then try to drop it quickly. */ if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) { MPASS(x == SX_SHARERS_LOCK(1)); x = SX_SHARERS_LOCK(1); if (atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, SX_LOCK_UNLOCKED)) { if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p last succeeded", __func__, sx); break; } continue; } /* * At this point, there should just be one sharer with * exclusive waiters. 
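 *
 * [Editorial aside, not part of the original change: a compact model
 * of the release loop above. Most releases are a pure atomic decrement
 * of the sharer count; only the last sharer with queued writers has to
 * take the queue-locked wakeup path (MODEL_* names hypothetical, C11
 * atomics instead of atomic(9)):
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	#define MODEL_ONE_SHARER	((uintptr_t)1 << 3)
 *	#define MODEL_SHARERS(x)	((x) >> 3)
 *
 *	// Returns 1 if the caller must run the slow wakeup path.
 *	static int
 *	model_sunlock(_Atomic uintptr_t *lk)
 *	{
 *		uintptr_t x = atomic_load(lk);
 *		while (MODEL_SHARERS(x) > 1) {
 *			// weak CAS refreshes 'x' on failure
 *			if (atomic_compare_exchange_weak(lk, &x,
 *			    x - MODEL_ONE_SHARER))
 *				return (0);	// dropped one reference
 *		}
 *		return (1);	// last sharer: wake writers under sleepq lock
 *	}
 *
 * End aside.]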
*/ MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS)); sleepq_lock(&sx->lock_object); /* * The wakeup semantic here is quite simple: * Just wake up all the exclusive waiters. * Note that the state of the lock could have changed, * so if it fails loop back and retry. */ if (!atomic_cmpset_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS, SX_LOCK_UNLOCKED)) { sleepq_release(&sx->lock_object); x = SX_READ_VALUE(sx); continue; } if (LOCK_LOG_TEST(&sx->lock_object, 0)) CTR2(KTR_LOCK, "%s: %p waking up all threads on " "exclusive queue", __func__, sx); wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, SQ_EXCLUSIVE_QUEUE); sleepq_release(&sx->lock_object); if (wakeup_swapper) kick_proc0(); break; } TD_LOCKS_DEC(curthread); } #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef _sx_assert #endif /* * In the non-WITNESS case, sx_assert() can only detect that at least * *some* thread owns an slock, but it cannot guarantee that *this* * thread owns an slock. */ void _sx_assert(const struct sx *sx, int what, const char *file, int line) { #ifndef WITNESS int slocked = 0; #endif if (panicstr != NULL) return; switch (what) { case SA_SLOCKED: case SA_SLOCKED | SA_NOTRECURSED: case SA_SLOCKED | SA_RECURSED: #ifndef WITNESS slocked = 1; /* FALLTHROUGH */ #endif case SA_LOCKED: case SA_LOCKED | SA_NOTRECURSED: case SA_LOCKED | SA_RECURSED: #ifdef WITNESS witness_assert(&sx->lock_object, what, file, line); #else /* * If some other thread has an exclusive lock or we * have one and are asserting a shared lock, fail. * Also, if no one has a lock at all, fail. */ if (sx->sx_lock == SX_LOCK_UNLOCKED || (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || sx_xholder(sx) != curthread))) panic("Lock %s not %slocked @ %s:%d\n", sx->lock_object.lo_name, slocked ? "share " : "", file, line); if (!(sx->sx_lock & SX_LOCK_SHARED)) { if (sx_recursed(sx)) { if (what & SA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } else if (what & SA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } #endif break; case SA_XLOCKED: case SA_XLOCKED | SA_NOTRECURSED: case SA_XLOCKED | SA_RECURSED: if (sx_xholder(sx) != curthread) panic("Lock %s not exclusively locked @ %s:%d\n", sx->lock_object.lo_name, file, line); if (sx_recursed(sx)) { if (what & SA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); } else if (what & SA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", sx->lock_object.lo_name, file, line); break; case SA_UNLOCKED: #ifdef WITNESS witness_assert(&sx->lock_object, what, file, line); #else /* * If we hold an exclusive lock, fail. We can't * reliably check to see if we hold a shared lock or * not.
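 *
 * [Editorial aside, not part of the original change: without WITNESS
 * the lock word records only a sharer count, never which threads hold
 * the shared references, hence the caveat above. Typical consumer
 * usage of the assertion API, sketched with a hypothetical softc:
 *
 *	static void
 *	foo_bump(struct foo_softc *sc)
 *	{
 *		sx_assert(&sc->sc_lock, SA_XLOCKED);	// writer required
 *		sc->sc_count++;
 *	}
 *
 * End aside.]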
*/ if (sx_xholder(sx) == curthread) panic("Lock %s exclusively locked @ %s:%d\n", sx->lock_object.lo_name, file, line); #endif break; default: panic("Unknown sx lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB static void db_show_sx(const struct lock_object *lock) { struct thread *td; const struct sx *sx; sx = (const struct sx *)lock; db_printf(" state: "); if (sx->sx_lock == SX_LOCK_UNLOCKED) db_printf("UNLOCKED\n"); else if (sx->sx_lock == SX_LOCK_DESTROYED) { db_printf("DESTROYED\n"); return; } else if (sx->sx_lock & SX_LOCK_SHARED) db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); else { td = sx_xholder(sx); db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (sx_recursed(sx)) db_printf(" recursed: %d\n", sx->sx_recurse); } db_printf(" waiters: "); switch(sx->sx_lock & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { case SX_LOCK_SHARED_WAITERS: db_printf("shared\n"); break; case SX_LOCK_EXCLUSIVE_WAITERS: db_printf("exclusive\n"); break; case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: db_printf("exclusive and shared\n"); break; default: db_printf("none\n"); } } /* * Check to see if a thread that is blocked on a sleep queue is actually * blocked on an sx lock. If so, output some details and return true. * If the lock has an exclusive owner, return that in *ownerp. */ int sx_chain(struct thread *td, struct thread **ownerp) { struct sx *sx; /* * Check to see if this thread is blocked on an sx lock. * First, we check the lock class. If that is ok, then we * compare the lock name against the wait message. */ sx = td->td_wchan; if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || sx->lock_object.lo_name != td->td_wmesg) return (0); /* We think we have an sx lock, so output some details. */ db_printf("blocked on sx \"%s\" ", td->td_wmesg); *ownerp = sx_xholder(sx); if (sx->sx_lock & SX_LOCK_SHARED) db_printf("SLOCK (count %ju)\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); else db_printf("XLOCK\n"); return (1); } #endif Index: projects/netbsd-tests-upstream-01-2017/sys/kern/kern_thread.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/kern_thread.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/kern_thread.c (revision 313399) @@ -1,1205 +1,1205 @@ /*- * Copyright (C) 2001 Julian Elischer . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "opt_witness.h" #include "opt_hwpmc_hooks.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include #include SDT_PROVIDER_DECLARE(proc); SDT_PROBE_DEFINE(proc, , , lwp__exit); /* * thread related storage. */ static uma_zone_t thread_zone; TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads); static struct mtx zombie_lock; MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN); static void thread_zombie(struct thread *); static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary); #define TID_BUFFER_SIZE 1024 struct mtx tid_lock; static struct unrhdr *tid_unrhdr; static lwpid_t tid_buffer[TID_BUFFER_SIZE]; static int tid_head, tid_tail; static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash"); struct tidhashhead *tidhashtbl; u_long tidhash; struct rwlock tidhash_lock; static lwpid_t tid_alloc(void) { lwpid_t tid; tid = alloc_unr(tid_unrhdr); if (tid != -1) return (tid); mtx_lock(&tid_lock); if (tid_head == tid_tail) { mtx_unlock(&tid_lock); return (-1); } tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); return (tid); } static void tid_free(lwpid_t tid) { lwpid_t tmp_tid = -1; mtx_lock(&tid_lock); if ((tid_tail + 1) % TID_BUFFER_SIZE == tid_head) { tmp_tid = tid_buffer[tid_head]; tid_head = (tid_head + 1) % TID_BUFFER_SIZE; } tid_buffer[tid_tail] = tid; tid_tail = (tid_tail + 1) % TID_BUFFER_SIZE; mtx_unlock(&tid_lock); if (tmp_tid != -1) free_unr(tid_unrhdr, tmp_tid); } /* * Prepare a thread for use. */ static int thread_ctor(void *mem, int size, void *arg, int flags) { struct thread *td; td = (struct thread *)mem; td->td_state = TDS_INACTIVE; td->td_oncpu = NOCPU; td->td_tid = tid_alloc(); /* * Note that td_critnest begins life as 1 because the thread is not * running and is thereby implicitly waiting to be on the receiving * end of a context switch. */ td->td_critnest = 1; td->td_lend_user_pri = PRI_MAX; EVENTHANDLER_INVOKE(thread_ctor, td); #ifdef AUDIT audit_thread_alloc(td); #endif umtx_thread_alloc(td); return (0); } /* * Reclaim a thread after use. */ static void thread_dtor(void *mem, int size, void *arg) { struct thread *td; td = (struct thread *)mem; #ifdef INVARIANTS /* Verify that this thread is in a safe state to free. */ switch (td->td_state) { case TDS_INHIBITED: case TDS_RUNNING: case TDS_CAN_RUN: case TDS_RUNQ: /* * We must never unlink a thread that is in one of * these states, because it is currently active. */ panic("bad state for thread unlinking"); /* NOTREACHED */ case TDS_INACTIVE: break; default: panic("bad thread state"); /* NOTREACHED */ } #endif #ifdef AUDIT audit_thread_free(td); #endif /* Free all OSD associated to this thread. 
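 *
 * [Editorial aside, not part of the original change, on tid_free()
 * above: freed TIDs are parked in a small ring so a just-exited
 * thread's ID is not handed out again immediately; only when the ring
 * wraps does the oldest ID return to the unr(9) allocator. A generic
 * sketch of that deferral buffer (hypothetical names, locking elided):
 *
 *	#define RING_SIZE	1024
 *	static int ring[RING_SIZE];
 *	static int head, tail;
 *
 *	// Park 'id'; returns the evicted oldest ID to really free,
 *	// or -1 if the ring still has room.
 *	static int
 *	ring_defer_free(int id)
 *	{
 *		int victim = -1;
 *
 *		if ((tail + 1) % RING_SIZE == head) {	// ring is full
 *			victim = ring[head];
 *			head = (head + 1) % RING_SIZE;
 *		}
 *		ring[tail] = id;
 *		tail = (tail + 1) % RING_SIZE;
 *		return (victim);
 *	}
 *
 * End aside.]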
*/ osd_thread_exit(td); EVENTHANDLER_INVOKE(thread_dtor, td); tid_free(td->td_tid); } /* * Initialize type-stable parts of a thread (when newly created). */ static int thread_init(void *mem, int size, int flags) { struct thread *td; td = (struct thread *)mem; td->td_sleepqueue = sleepq_alloc(); td->td_turnstile = turnstile_alloc(); td->td_rlqe = NULL; EVENTHANDLER_INVOKE(thread_init, td); umtx_thread_init(td); td->td_kstack = 0; td->td_sel = NULL; return (0); } /* * Tear down type-stable parts of a thread (just before being discarded). */ static void thread_fini(void *mem, int size) { struct thread *td; td = (struct thread *)mem; EVENTHANDLER_INVOKE(thread_fini, td); rlqentry_free(td->td_rlqe); turnstile_free(td->td_turnstile); sleepq_free(td->td_sleepqueue); umtx_thread_fini(td); seltdfini(td); } /* * For a newly created process, * link up all the structures and its initial threads etc. * called from: * {arch}/{arch}/machdep.c {arch}_init(), init386() etc. * proc_dtor() (should go away) * proc_init() */ void proc_linkup0(struct proc *p, struct thread *td) { TAILQ_INIT(&p->p_threads); /* all threads in proc */ proc_linkup(p, td); } void proc_linkup(struct proc *p, struct thread *td) { sigqueue_init(&p->p_sigqueue, p); p->p_ksi = ksiginfo_alloc(1); if (p->p_ksi != NULL) { /* XXX p_ksi may be null if ksiginfo zone is not ready */ p->p_ksi->ksi_flags = KSI_EXT | KSI_INS; } LIST_INIT(&p->p_mqnotifier); p->p_numthreads = 0; thread_link(td, p); } /* * Initialize global thread allocation resources. */ void threadinit(void) { mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF); /* * pid_max cannot be greater than PID_MAX. * leave one number for thread0. */ tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock); thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, thread_dtor, thread_init, thread_fini, - 16 - 1, UMA_ZONE_NOFREE); + 32 - 1, UMA_ZONE_NOFREE); tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash); rw_init(&tidhash_lock, "tidhash"); } /* * Place an unused thread on the zombie list. * Use the slpq as that must be unused by now. */ void thread_zombie(struct thread *td) { mtx_lock_spin(&zombie_lock); TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq); mtx_unlock_spin(&zombie_lock); } /* * Release a thread that has exited after cpu_throw(). */ void thread_stash(struct thread *td) { atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1); thread_zombie(td); } /* * Reap zombie resources. */ void thread_reap(void) { struct thread *td_first, *td_next; /* * Don't even bother to lock if none at this instant, * we really don't care about the next instant. */ if (!TAILQ_EMPTY(&zombie_threads)) { mtx_lock_spin(&zombie_lock); td_first = TAILQ_FIRST(&zombie_threads); if (td_first) TAILQ_INIT(&zombie_threads); mtx_unlock_spin(&zombie_lock); while (td_first) { td_next = TAILQ_NEXT(td_first, td_slpq); thread_cow_free(td_first); thread_free(td_first); td_first = td_next; } } } /* * Allocate a thread. 
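 *
 * [Editorial aside, not part of the original change, on thread_reap()
 * above: the pattern is an unlocked emptiness probe followed by
 * stealing the whole list under the spin lock, so the common
 * no-zombies case costs nothing and the lock is held only long enough
 * to detach the head. A generic sketch, where the node type and the
 * zlock/lock()/unlock()/free_node() names are hypothetical stand-ins
 * for the spin mutex and the real free routine:
 *
 *	struct node { struct node *next; };
 *	static struct node *zombies;	// protected by zlock
 *
 *	static void
 *	reap(void)
 *	{
 *		struct node *n, *next;
 *
 *		if (zombies == NULL)	// racy peek: a missed entry is
 *			return;		// simply reaped on the next call
 *		lock(&zlock);
 *		n = zombies;
 *		zombies = NULL;		// steal the entire list
 *		unlock(&zlock);
 *		for (; n != NULL; n = next) {	// free outside the lock
 *			next = n->next;
 *			free_node(n);
 *		}
 *	}
 *
 * End aside.]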
*/ struct thread * thread_alloc(int pages) { struct thread *td; thread_reap(); /* check if any zombies to get */ td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); if (!vm_thread_new(td, pages)) { uma_zfree(thread_zone, td); return (NULL); } cpu_thread_alloc(td); vm_domain_policy_init(&td->td_vm_dom_policy); return (td); } int thread_alloc_stack(struct thread *td, int pages) { KASSERT(td->td_kstack == 0, ("thread_alloc_stack called on a thread with kstack")); if (!vm_thread_new(td, pages)) return (0); cpu_thread_alloc(td); return (1); } /* * Deallocate a thread. */ void thread_free(struct thread *td) { lock_profile_thread_exit(td); if (td->td_cpuset) cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_kstack != 0) vm_thread_dispose(td); vm_domain_policy_cleanup(&td->td_vm_dom_policy); callout_drain(&td->td_slpcallout); uma_zfree(thread_zone, td); } void thread_cow_get_proc(struct thread *newtd, struct proc *p) { PROC_LOCK_ASSERT(p, MA_OWNED); newtd->td_ucred = crhold(p->p_ucred); newtd->td_limit = lim_hold(p->p_limit); newtd->td_cowgen = p->p_cowgen; } void thread_cow_get(struct thread *newtd, struct thread *td) { newtd->td_ucred = crhold(td->td_ucred); newtd->td_limit = lim_hold(td->td_limit); newtd->td_cowgen = td->td_cowgen; } void thread_cow_free(struct thread *td) { if (td->td_ucred != NULL) crfree(td->td_ucred); if (td->td_limit != NULL) lim_free(td->td_limit); } void thread_cow_update(struct thread *td) { struct proc *p; struct ucred *oldcred; struct plimit *oldlimit; p = td->td_proc; oldcred = NULL; oldlimit = NULL; PROC_LOCK(p); if (td->td_ucred != p->p_ucred) { oldcred = td->td_ucred; td->td_ucred = crhold(p->p_ucred); } if (td->td_limit != p->p_limit) { oldlimit = td->td_limit; td->td_limit = lim_hold(p->p_limit); } td->td_cowgen = p->p_cowgen; PROC_UNLOCK(p); if (oldcred != NULL) crfree(oldcred); if (oldlimit != NULL) lim_free(oldlimit); } /* * Discard the current thread and exit from its context. * Always called with scheduler locked. * * Because we can't free a thread while we're operating under its context, * push the current thread into our CPU's deadthread holder. This means * we needn't worry about someone else grabbing our context before we * do a cpu_throw(). */ void thread_exit(void) { uint64_t runtime, new_switchtime; struct thread *td; struct thread *td2; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT(p != NULL, ("thread exiting without a process")); CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td, (long)p->p_pid, td->td_name); KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending")); #ifdef AUDIT AUDIT_SYSCALL_EXIT(0, td); #endif /* * drop FPU & debug register state storage, or any other * architecture specific resources that * would not be on a new untouched process. */ cpu_thread_exit(td); /* * The last thread is left attached to the process * So that the whole bundle gets recycled. Skip * all this stuff if we never had threads. * EXIT clears all sign of other threads when * it goes to single threading, so the last thread always * takes the short path. */ if (p->p_flag & P_HADTHREADS) { if (p->p_numthreads > 1) { atomic_add_int(&td->td_proc->p_exitthreads, 1); thread_unlink(td); td2 = FIRST_THREAD_IN_PROC(p); sched_exit_thread(td2, td); /* * The test below is NOT true if we are the * sole exiting thread. 
P_STOPPED_SINGLE is unset * in exit1() after it is the only survivor. */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PCPU_SET(deadthread, td); } else { /* * The last thread is exiting.. but not through exit() */ panic ("thread_exit: Last thread exiting on its own"); } } #ifdef HWPMC_HOOKS /* * If this thread is part of a process that is being tracked by hwpmc(4), * inform the module of the thread's impending exit. */ if (PMC_PROC_IS_USING_PMCS(td->td_proc)) PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT); #endif PROC_UNLOCK(p); PROC_STATLOCK(p); thread_lock(td); PROC_SUNLOCK(p); /* Do the same timestamp bookkeeping that mi_switch() would do. */ new_switchtime = cpu_ticks(); runtime = new_switchtime - PCPU_GET(switchtime); td->td_runtime += runtime; td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); PCPU_INC(cnt.v_swtch); /* Save our resource usage in our process. */ td->td_ru.ru_nvcsw++; ruxagg(p, td); rucollect(&p->p_ru, &td->td_ru); PROC_STATUNLOCK(p); td->td_state = TDS_INACTIVE; #ifdef WITNESS witness_thread_exit(td); #endif CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td); sched_throw(td); panic("I'm a teapot!"); /* NOTREACHED */ } /* * Do any thread specific cleanups that may be needed in wait() * called with Giant, proc and schedlock not held. */ void thread_wait(struct proc *p) { struct thread *td; mtx_assert(&Giant, MA_NOTOWNED); KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()")); KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking")); td = FIRST_THREAD_IN_PROC(p); /* Lock the last thread so we spin until it exits cpu_throw(). */ thread_lock(td); thread_unlock(td); lock_profile_thread_exit(td); cpuset_rel(td->td_cpuset); td->td_cpuset = NULL; cpu_thread_clean(td); thread_cow_free(td); callout_drain(&td->td_slpcallout); thread_reap(); /* check for zombie threads etc. */ } /* * Link a thread to a process. * set up anything that needs to be initialized for it to * be used by the process. */ void thread_link(struct thread *td, struct proc *p) { /* * XXX This can't be enabled because it's called for proc0 before * its lock has been created. * PROC_LOCK_ASSERT(p, MA_OWNED); */ td->td_state = TDS_INACTIVE; td->td_proc = p; td->td_flags = TDF_INMEM; LIST_INIT(&td->td_contested); LIST_INIT(&td->td_lprof[0]); LIST_INIT(&td->td_lprof[1]); sigqueue_init(&td->td_sigqueue, p); callout_init(&td->td_slpcallout, 1); TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist); p->p_numthreads++; } /* * Called from: * thread_exit() */ void thread_unlink(struct thread *td) { struct proc *p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); TAILQ_REMOVE(&p->p_threads, td, td_plist); p->p_numthreads--; /* could clear a few other things here */ /* Must NOT clear links to proc! */ } static int calc_remaining(struct proc *p, int mode) { int remaining; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); if (mode == SINGLE_EXIT) remaining = p->p_numthreads; else if (mode == SINGLE_BOUNDARY) remaining = p->p_numthreads - p->p_boundary_count; else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC) remaining = p->p_numthreads - p->p_suspcount; else panic("calc_remaining: wrong mode %d", mode); return (remaining); } static int remain_for_mode(int mode) { return (mode == SINGLE_ALLPROC ? 
0 : 1); } static int weed_inhib(int mode, struct thread *td2, struct proc *p) { int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td2, MA_OWNED); wakeup_swapper = 0; switch (mode) { case SINGLE_EXIT: if (TD_IS_SUSPENDED(td2)) wakeup_swapper |= thread_unsuspend_one(td2, p, true); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: case SINGLE_NO_EXIT: if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) wakeup_swapper |= sleepq_abort(td2, ERESTART); break; case SINGLE_ALLPROC: /* * ALLPROC suspend tries to avoid spurious EINTR for * threads sleeping interruptible, by suspending the * thread directly, similarly to sig_suspend_threads(). * Since such sleep is not performed at the user * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP * is used to avoid immediate un-suspend. */ if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY | TDF_ALLPROCSUSP)) == 0) wakeup_swapper |= thread_unsuspend_one(td2, p, false); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0) { if ((td2->td_flags & TDF_SBDRY) == 0) { thread_suspend_one(td2); td2->td_flags |= TDF_ALLPROCSUSP; } else { wakeup_swapper |= sleepq_abort(td2, ERESTART); } } break; } return (wakeup_swapper); } /* * Enforce single-threading. * * Returns 1 if the caller must abort (another thread is waiting to * exit the process or similar). Process is locked! * Returns 0 when you are successfully the only thread running. * A process has successfully single threaded in the suspend mode when * there are no threads in user mode. Threads in the kernel must be * allowed to continue until they get to the user boundary. They may even * copy out their return values and data before suspending. They may however be * accelerated in reaching the user boundary as we will wake up * any sleeping threads that are interruptible (PCATCH). */ int thread_single(struct proc *p, int mode) { struct thread *td; struct thread *td2; int remaining, wakeup_swapper; td = curthread; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); /* * If allowing non-ALLPROC singlethreading for non-curproc * callers, calc_remaining() and remain_for_mode() should be * adjusted to also account for td->td_proc != p. For now * this is not implemented because it is not used. */ KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) || (mode != SINGLE_ALLPROC && td->td_proc == p), ("mode %d proc %p curproc %p", mode, p, td->td_proc)); mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC) return (0); /* Is someone already single threading?
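 *
 * (Editorial aside, not part of the original change: the per-mode
 * targets computed by calc_remaining() and remain_for_mode() above,
 * tabulated:
 *
 *	mode		"remaining" counts		done when
 *	SINGLE_EXIT	all threads			1 left (the caller)
 *	SINGLE_BOUNDARY	threads - boundary_count	1 left
 *	SINGLE_NO_EXIT	threads - suspcount		1 left
 *	SINGLE_ALLPROC	threads - suspcount		0 left (the caller
 *							lives in another
 *							process)
 *
 * End aside.)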
*/ if (p->p_singlethread != NULL && p->p_singlethread != td) return (1); if (mode == SINGLE_EXIT) { p->p_flag |= P_SINGLE_EXIT; p->p_flag &= ~P_SINGLE_BOUNDARY; } else { p->p_flag &= ~P_SINGLE_EXIT; if (mode == SINGLE_BOUNDARY) p->p_flag |= P_SINGLE_BOUNDARY; else p->p_flag &= ~P_SINGLE_BOUNDARY; } if (mode == SINGLE_ALLPROC) p->p_flag |= P_TOTAL_STOP; p->p_flag |= P_STOPPED_SINGLE; PROC_SLOCK(p); p->p_singlethread = td; remaining = calc_remaining(p, mode); while (remaining != remain_for_mode(mode)) { if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE) goto stopme; wakeup_swapper = 0; FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK; if (TD_IS_INHIBITED(td2)) { wakeup_swapper |= weed_inhib(mode, td2, p); #ifdef SMP } else if (TD_IS_RUNNING(td2) && td != td2) { forward_signal(td2); #endif } thread_unlock(td2); } if (wakeup_swapper) kick_proc0(); remaining = calc_remaining(p, mode); /* * Maybe we suspended some threads.. was it enough? */ if (remaining == remain_for_mode(mode)) break; stopme: /* * Wake us up when everyone else has suspended. * In the mean time we suspend as well. */ thread_suspend_switch(td, p); remaining = calc_remaining(p, mode); } if (mode == SINGLE_EXIT) { /* * Convert the process to an unthreaded process. The * SINGLE_EXIT is called by exit1() or execve(), in * both cases other threads must be retired. */ KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads")); p->p_singlethread = NULL; p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS); /* * Wait for any remaining threads to exit cpu_throw(). */ while (p->p_exitthreads != 0) { PROC_SUNLOCK(p); PROC_UNLOCK(p); sched_relinquish(td); PROC_LOCK(p); PROC_SLOCK(p); } } else if (mode == SINGLE_BOUNDARY) { /* * Wait until all suspended threads are removed from * the processors. The thread_suspend_check() * increments p_boundary_count while it is still * running, which makes it possible for the execve() * to destroy vmspace while our other threads are * still using the address space. * * We lock the thread, which is only allowed to * succeed after context switch code finished using * the address space. */ FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; thread_lock(td2); KASSERT((td2->td_flags & TDF_BOUNDARY) != 0, ("td %p not on boundary", td2)); KASSERT(TD_IS_SUSPENDED(td2), ("td %p is not suspended", td2)); thread_unlock(td2); } } PROC_SUNLOCK(p); return (0); } bool thread_suspend_check_needed(void) { struct proc *p; struct thread *td; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 && (td->td_dbgflags & TDB_SUSPEND) != 0)); } /* * Called in from locations that can safely check to see * whether we have to suspend or at least throttle for a * single-thread event (e.g. fork). * * Such locations include userret(). * If the "return_instead" argument is non zero, the thread must be able to * accept 0 (caller may continue), or 1 (caller must abort) as a result. * * The 'return_instead' argument tells the function if it may do a * thread_exit() or suspend, or whether the caller must abort and back * out instead. * * If the thread that set the single_threading request has set the * P_SINGLE_EXIT bit in the process flags then this call will never return * if 'return_instead' is false, but will exit. 
* * P_SINGLE_EXIT | return_instead == 0| return_instead != 0 *---------------+--------------------+--------------------- * 0 | returns 0 | returns 0 or 1 * | when ST ends | immediately *---------------+--------------------+--------------------- * 1 | thread exits | returns 1 * | | immediately * 0 = thread_exit() or suspension ok, * other = return error instead of stopping the thread. * * While a full suspension is under effect, even a single threading * thread would be suspended if it made this call (but it shouldn't). * This call should only be made from places where * thread_exit() would be safe as that may be the outcome unless * return_instead is set. */ int thread_suspend_check(int return_instead) { struct thread *td; struct proc *p; int wakeup_swapper; td = curthread; p = td->td_proc; mtx_assert(&Giant, MA_NOTOWNED); PROC_LOCK_ASSERT(p, MA_OWNED); while (thread_suspend_check_needed()) { if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { KASSERT(p->p_singlethread != NULL, ("singlethread not set")); /* * The only suspension in action is a * single-threading. Single threader need not stop. * It is safe to access p->p_singlethread unlocked * because it can only be set to our address by us. */ if (p->p_singlethread == td) return (0); /* Exempt from stopping. */ } if ((p->p_flag & P_SINGLE_EXIT) && return_instead) return (EINTR); /* Should we goto user boundary if we didn't come from there? */ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && (p->p_flag & P_SINGLE_BOUNDARY) && return_instead) return (ERESTART); /* * Ignore suspend requests if they are deferred. */ if ((td->td_flags & TDF_SBDRY) != 0) { KASSERT(return_instead, ("TDF_SBDRY set for unsafe thread_suspend_check")); KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) != (TDF_SEINTR | TDF_SERESTART), ("both TDF_SEINTR and TDF_SERESTART")); return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0); } /* * If the process is waiting for us to exit, * this thread should just suicide. * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE. */ if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) { PROC_UNLOCK(p); /* * Allow Linux emulation layer to do some work * before thread suicide. */ if (__predict_false(p->p_sysent->sv_thread_detach != NULL)) (p->p_sysent->sv_thread_detach)(td); umtx_thread_exit(td); kern_thr_exit(td); panic("stopped thread did not exit"); } PROC_SLOCK(p); thread_stopped(p); if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount + 1) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); if (wakeup_swapper) kick_proc0(); } } PROC_UNLOCK(p); thread_lock(td); /* * When a thread suspends, it just * gets taken off all queues. */ thread_suspend_one(td); if (return_instead == 0) { p->p_boundary_count++; td->td_flags |= TDF_BOUNDARY; } PROC_SUNLOCK(p); mi_switch(SW_INVOL | SWT_SUSPEND, NULL); thread_unlock(td); PROC_LOCK(p); } return (0); } void thread_suspend_switch(struct thread *td, struct proc *p) { KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); /* * We implement thread_suspend_one in stages here to avoid * dropping the proc lock while the thread lock is owned. 
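 *
 * [Editorial aside, not part of the original change: a typical caller
 * of thread_suspend_check() per the table above is a kernel loop that
 * must be able to back out to the user boundary. A hedged sketch; the
 * worker function is hypothetical:
 *
 *	for (;;) {
 *		error = do_interruptible_work(td);
 *		if (error != EWOULDBLOCK)
 *			break;
 *		PROC_LOCK(p);
 *		error = thread_suspend_check(1);	// return instead
 *		PROC_UNLOCK(p);				// of stopping here
 *		if (error != 0)
 *			break;		// EINTR/ERESTART: unwind to userret
 *	}
 *
 * End aside.]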
*/ if (p == td->td_proc) { thread_stopped(p); p->p_suspcount++; } PROC_UNLOCK(p); thread_lock(td); td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); PROC_SUNLOCK(p); DROP_GIANT(); mi_switch(SW_VOL | SWT_SUSPEND, NULL); thread_unlock(td); PICKUP_GIANT(); PROC_LOCK(p); PROC_SLOCK(p); } void thread_suspend_one(struct thread *td) { struct proc *p; p = td->td_proc; PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(!TD_IS_SUSPENDED(td), ("already suspended")); p->p_suspcount++; td->td_flags &= ~TDF_NEEDSUSPCHK; TD_SET_SUSPENDED(td); sched_sleep(td, 0); } static int thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary) { THREAD_LOCK_ASSERT(td, MA_OWNED); KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); td->td_flags &= ~TDF_ALLPROCSUSP; if (td->td_proc == p) { PROC_SLOCK_ASSERT(p, MA_OWNED); p->p_suspcount--; if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) { td->td_flags &= ~TDF_BOUNDARY; p->p_boundary_count--; } } return (setrunnable(td)); } /* * Allow all threads blocked by single threading to continue running. */ void thread_unsuspend(struct proc *p) { struct thread *td; int wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); wakeup_swapper = 0; if (!P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, true); } thread_unlock(td); } } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE && p->p_numthreads == p->p_suspcount) { /* * Stopping everything also did the job for the single * threading request. Now we've downgraded to single-threaded, * let it continue. */ if (p->p_singlethread->td_proc == p) { thread_lock(p->p_singlethread); wakeup_swapper = thread_unsuspend_one( p->p_singlethread, p, false); thread_unlock(p->p_singlethread); } } if (wakeup_swapper) kick_proc0(); } /* * End the single threading mode.. */ void thread_single_end(struct proc *p, int mode) { struct thread *td; int wakeup_swapper; KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY || mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT, ("invalid mode %d", mode)); PROC_LOCK_ASSERT(p, MA_OWNED); KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) || (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0), ("mode %d does not match P_TOTAL_STOP", mode)); KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread, ("thread_single_end from other thread %p %p", curthread, p->p_singlethread)); KASSERT(mode != SINGLE_BOUNDARY || (p->p_flag & P_SINGLE_BOUNDARY) != 0, ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag)); p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY | P_TOTAL_STOP); PROC_SLOCK(p); p->p_singlethread = NULL; wakeup_swapper = 0; /* * If there are other threads they may now run, * unless of course there is a blanket 'stop order' * on the process. The single threader must be allowed * to continue however as this is a bad place to stop. 
*/ if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); if (TD_IS_SUSPENDED(td)) { wakeup_swapper |= thread_unsuspend_one(td, p, mode == SINGLE_BOUNDARY); } thread_unlock(td); } } KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0, ("inconsistent boundary count %d", p->p_boundary_count)); PROC_SUNLOCK(p); if (wakeup_swapper) kick_proc0(); } struct thread * thread_find(struct proc *p, lwpid_t tid) { struct thread *td; PROC_LOCK_ASSERT(p, MA_OWNED); FOREACH_THREAD_IN_PROC(p, td) { if (td->td_tid == tid) break; } return (td); } /* Locate a thread by number; return with proc lock held. */ struct thread * tdfind(lwpid_t tid, pid_t pid) { #define RUN_THRESH 16 struct thread *td; int run = 0; rw_rlock(&tidhash_lock); LIST_FOREACH(td, TIDHASH(tid), td_hash) { if (td->td_tid == tid) { if (pid != -1 && td->td_proc->p_pid != pid) { td = NULL; break; } PROC_LOCK(td->td_proc); if (td->td_proc->p_state == PRS_NEW) { PROC_UNLOCK(td->td_proc); td = NULL; break; } if (run > RUN_THRESH) { if (rw_try_upgrade(&tidhash_lock)) { LIST_REMOVE(td, td_hash); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); return (td); } } break; } run++; } rw_runlock(&tidhash_lock); return (td); } void tidhash_add(struct thread *td) { rw_wlock(&tidhash_lock); LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash); rw_wunlock(&tidhash_lock); } void tidhash_remove(struct thread *td) { rw_wlock(&tidhash_lock); LIST_REMOVE(td, td_hash); rw_wunlock(&tidhash_lock); } Index: projects/netbsd-tests-upstream-01-2017/sys/kern/subr_lock.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/subr_lock.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/subr_lock.c (revision 313399) @@ -1,674 +1,700 @@ /*- * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This module holds the global variables and functions used to maintain * lock_object structures. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_mprof.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #include +SDT_PROVIDER_DEFINE(lock); +SDT_PROBE_DEFINE1(lock, , , starvation, "u_int"); + CTASSERT(LOCK_CLASS_MAX == 15); struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { &lock_class_mtx_spin, &lock_class_mtx_sleep, &lock_class_sx, &lock_class_rm, &lock_class_rm_sleepable, &lock_class_rw, &lock_class_lockmgr, }; void lock_init(struct lock_object *lock, struct lock_class *class, const char *name, const char *type, int flags) { int i; /* Check for double-init and zero object. */ KASSERT(flags & LO_NEW || !lock_initialized(lock), ("lock \"%s\" %p already initialized", name, lock)); /* Look up lock class to find its index. */ for (i = 0; i < LOCK_CLASS_MAX; i++) if (lock_classes[i] == class) { lock->lo_flags = i << LO_CLASSSHIFT; break; } KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class)); /* Initialize the lock object. */ lock->lo_name = name; lock->lo_flags |= flags | LO_INITIALIZED; LOCK_LOG_INIT(lock, 0); WITNESS_INIT(lock, (type != NULL) ? type : name); } void lock_destroy(struct lock_object *lock) { KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock)); WITNESS_DESTROY(lock); LOCK_LOG_DESTROY(lock, 0); lock->lo_flags &= ~LO_INITIALIZED; } +static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); +static SYSCTL_NODE(_debug_lock, OID_AUTO, delay, CTLFLAG_RD, NULL, + "lock delay"); + +static u_int __read_mostly starvation_limit = 131072; +SYSCTL_INT(_debug_lock_delay, OID_AUTO, starvation_limit, CTLFLAG_RW, + &starvation_limit, 0, ""); + +static u_int __read_mostly restrict_starvation = 0; +SYSCTL_INT(_debug_lock_delay, OID_AUTO, restrict_starvation, CTLFLAG_RW, + &restrict_starvation, 0, ""); + void lock_delay(struct lock_delay_arg *la) { - u_int i, delay, backoff, min, max; struct lock_delay_config *lc = la->config; + u_int i; - delay = la->delay; + la->delay <<= 1; + if (__predict_false(la->delay > lc->max)) + la->delay = lc->max; - if (delay == 0) - delay = lc->initial; - else { - delay += lc->step; - max = lc->max; - if (delay > max) - delay = max; + for (i = la->delay; i > 0; i--) + cpu_spinwait(); + + la->spin_cnt += la->delay; + if (__predict_false(la->spin_cnt > starvation_limit)) { + SDT_PROBE1(lock, , , starvation, la->delay); + if (restrict_starvation) + la->delay = lc->base; } +} - backoff = cpu_ticks() % delay; - min = lc->min; - if (backoff < min) - backoff = min; - for (i = 0; i < backoff; i++) - cpu_spinwait(); +static u_int +lock_roundup_2(u_int val) +{ + u_int res; - la->delay = delay; - la->spin_cnt += backoff; + for (res = 1; res <= val; res <<= 1) + continue; + + return (res); } +void +lock_delay_default_init(struct lock_delay_config *lc) +{ + + lc->base = lock_roundup_2(mp_ncpus) / 4; + lc->max = lc->base * 1024; +} + #ifdef DDB DB_SHOW_COMMAND(lock, db_show_lock) { struct lock_object *lock; struct lock_class *class; if (!have_addr) return; lock = (struct lock_object *)addr; if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) { db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock)); return; } class = LOCK_CLASS(lock); db_printf(" class: %s\n", class->lc_name); db_printf(" name: %s\n", lock->lo_name); class->lc_ddb_show(lock); } #endif #ifdef LOCK_PROFILING /* * One object per-thread for each lock the thread owns. Tracks individual * lock instances. 
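 *
 * (Editorial aside, not part of the original change, on the new
 * lock_delay() above: the delay now doubles on every call, capped at
 * lc->max, replacing the old fixed-step growth. Assuming the
 * companion lock_delay_arg_init() seeds la->delay with lc->base, the
 * defaults on an 8-CPU machine work out to:
 *
 *	lock_roundup_2(8) = 16		// smallest power of two > 8
 *	lc->base = 16 / 4 = 4
 *	lc->max  = 4 * 1024 = 4096
 *
 * so successive waits spin roughly 8, 16, 32, ... up to 4096 rounds
 * of cpu_spinwait(). Once the accumulated spin count exceeds
 * starvation_limit (131072 by default) the new lock:::starvation SDT
 * probe fires with the current delay, and if restrict_starvation is
 * set the delay is clamped back to lc->base so a starving thread
 * polls more often. End aside.)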
*/ struct lock_profile_object { LIST_ENTRY(lock_profile_object) lpo_link; struct lock_object *lpo_obj; const char *lpo_file; int lpo_line; uint16_t lpo_ref; uint16_t lpo_cnt; uint64_t lpo_acqtime; uint64_t lpo_waittime; u_int lpo_contest_locking; }; /* * One lock_prof for each (file, line, lock object) triple. */ struct lock_prof { SLIST_ENTRY(lock_prof) link; struct lock_class *class; const char *file; const char *name; int line; int ticks; uintmax_t cnt_wait_max; uintmax_t cnt_max; uintmax_t cnt_tot; uintmax_t cnt_wait; uintmax_t cnt_cur; uintmax_t cnt_contest_locking; }; SLIST_HEAD(lphead, lock_prof); #define LPROF_HASH_SIZE 4096 #define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) #define LPROF_CACHE_SIZE 4096 /* * Array of objects and profs for each type of object for each cpu. Spinlocks * are handled separately because a thread may be preempted and acquire a * spinlock while in the lock profiling code of a non-spinlock. In this way * we only need a critical section to protect the per-cpu lists. */ struct lock_prof_type { struct lphead lpt_lpalloc; struct lpohead lpt_lpoalloc; struct lphead lpt_hash[LPROF_HASH_SIZE]; struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; }; struct lock_prof_cpu { struct lock_prof_type lpc_types[2]; /* One for spin one for other. */ }; struct lock_prof_cpu *lp_cpu[MAXCPU]; volatile int __read_mostly lock_prof_enable; static volatile int lock_prof_resetting; #define LPROF_SBUF_SIZE 256 static int lock_prof_rejected; static int lock_prof_skipspin; static int lock_prof_skipcount; #ifndef USE_CPU_NANOSECONDS uint64_t nanoseconds(void) { struct bintime bt; uint64_t ns; binuptime(&bt); /* From bintime2timespec */ ns = bt.sec * (uint64_t)1000000000; ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; return (ns); } #endif static void lock_prof_init_type(struct lock_prof_type *type) { int i; SLIST_INIT(&type->lpt_lpalloc); LIST_INIT(&type->lpt_lpoalloc); for (i = 0; i < LPROF_CACHE_SIZE; i++) { SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], link); LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], lpo_link); } } static void lock_prof_init(void *arg) { int cpu; for (cpu = 0; cpu <= mp_maxid; cpu++) { lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, M_WAITOK | M_ZERO); lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); } } SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); static void lock_prof_reset_wait(void) { /* * Spin relinquishing our cpu so that quiesce_all_cpus may * complete. */ while (lock_prof_resetting) sched_relinquish(curthread); } static void lock_prof_reset(void) { struct lock_prof_cpu *lpc; int enabled, i, cpu; /* * We not only race with acquiring and releasing locks but also * thread exit. To be certain that threads exit without valid head * pointers they must see resetting set before enabled is cleared. * Otherwise a lock may not be removed from a per-thread list due * to disabled being set but not wait for reset() to remove it below. */ atomic_store_rel_int(&lock_prof_resetting, 1); enabled = lock_prof_enable; lock_prof_enable = 0; quiesce_all_cpus("profreset", 0); /* * Some objects may have migrated between CPUs. Clear all links * before we zero the structures. Some items may still be linked * into per-thread lists as well. 
*/ for (cpu = 0; cpu <= mp_maxid; cpu++) { lpc = lp_cpu[cpu]; for (i = 0; i < LPROF_CACHE_SIZE; i++) { LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); } } for (cpu = 0; cpu <= mp_maxid; cpu++) { lpc = lp_cpu[cpu]; bzero(lpc, sizeof(*lpc)); lock_prof_init_type(&lpc->lpc_types[0]); lock_prof_init_type(&lpc->lpc_types[1]); } atomic_store_rel_int(&lock_prof_resetting, 0); lock_prof_enable = enabled; } static void lock_prof_output(struct lock_prof *lp, struct sbuf *sb) { const char *p; for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); sbuf_printf(sb, "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n", lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000, lp->cnt_wait / 1000, lp->cnt_cur, lp->cnt_cur == 0 ? (uintmax_t)0 : lp->cnt_tot / (lp->cnt_cur * 1000), lp->cnt_cur == 0 ? (uintmax_t)0 : lp->cnt_wait / (lp->cnt_cur * 1000), (uintmax_t)0, lp->cnt_contest_locking, p, lp->line, lp->class->lc_name, lp->name); } static void lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, int spin, int t) { struct lock_prof_type *type; struct lock_prof *l; int cpu; dst->file = match->file; dst->line = match->line; dst->class = match->class; dst->name = match->name; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (lp_cpu[cpu] == NULL) continue; type = &lp_cpu[cpu]->lpc_types[spin]; SLIST_FOREACH(l, &type->lpt_hash[hash], link) { if (l->ticks == t) continue; if (l->file != match->file || l->line != match->line || l->name != match->name) continue; l->ticks = t; if (l->cnt_max > dst->cnt_max) dst->cnt_max = l->cnt_max; if (l->cnt_wait_max > dst->cnt_wait_max) dst->cnt_wait_max = l->cnt_wait_max; dst->cnt_tot += l->cnt_tot; dst->cnt_wait += l->cnt_wait; dst->cnt_cur += l->cnt_cur; dst->cnt_contest_locking += l->cnt_contest_locking; } } } static void lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, int t) { struct lock_prof *l; int i; for (i = 0; i < LPROF_HASH_SIZE; ++i) { SLIST_FOREACH(l, &type->lpt_hash[i], link) { struct lock_prof lp = {}; if (l->ticks == t) continue; lock_prof_sum(l, &lp, i, spin, t); lock_prof_output(&lp, sb); } } } static int dump_lock_prof_stats(SYSCTL_HANDLER_ARGS) { struct sbuf *sb; int error, cpu, t; int enabled; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req); sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n", "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); enabled = lock_prof_enable; lock_prof_enable = 0; quiesce_all_cpus("profstat", 0); t = ticks; for (cpu = 0; cpu <= mp_maxid; cpu++) { if (lp_cpu[cpu] == NULL) continue; lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); } lock_prof_enable = enabled; error = sbuf_finish(sb); /* Output a trailing NUL. 
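 *
 * (Editorial aside, not part of the original change: the profiling
 * counters are kept per CPU so the acquire/release hot paths never
 * write shared cache lines; dump_lock_prof_stats() merges them only
 * on read. The 'ticks' stamp is the deduplication trick: when
 * lock_prof_sum() folds every per-CPU record for one (file, line,
 * name) triple into a single output row, it stamps each source record
 * with the snapshot time t, so the later hash walks over the other
 * CPUs skip rows already merged, via the l->ticks == t test above.
 * End aside.)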
*/ if (error == 0) error = SYSCTL_OUT(req, "", 1); sbuf_delete(sb); return (error); } static int enable_lock_prof(SYSCTL_HANDLER_ARGS) { int error, v; v = lock_prof_enable; error = sysctl_handle_int(oidp, &v, v, req); if (error) return (error); if (req->newptr == NULL) return (error); if (v == lock_prof_enable) return (0); if (v == 1) lock_prof_reset(); lock_prof_enable = !!v; return (0); } static int reset_lock_prof_stats(SYSCTL_HANDLER_ARGS) { int error, v; v = 0; error = sysctl_handle_int(oidp, &v, 0, req); if (error) return (error); if (req->newptr == NULL) return (error); if (v == 0) return (0); lock_prof_reset(); return (0); } static struct lock_prof * lock_profile_lookup(struct lock_object *lo, int spin, const char *file, int line) { const char *unknown = "(unknown)"; struct lock_prof_type *type; struct lock_prof *lp; struct lphead *head; const char *p; u_int hash; p = file; if (p == NULL || *p == '\0') p = unknown; hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; hash &= LPROF_HASH_MASK; type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; head = &type->lpt_hash[hash]; SLIST_FOREACH(lp, head, link) { if (lp->line == line && lp->file == p && lp->name == lo->lo_name) return (lp); } lp = SLIST_FIRST(&type->lpt_lpalloc); if (lp == NULL) { lock_prof_rejected++; return (lp); } SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); lp->file = p; lp->line = line; lp->class = LOCK_CLASS(lo); lp->name = lo->lo_name; SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); return (lp); } static struct lock_profile_object * lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, int line) { struct lock_profile_object *l; struct lock_prof_type *type; struct lpohead *head; head = &curthread->td_lprof[spin]; LIST_FOREACH(l, head, lpo_link) if (l->lpo_obj == lo && l->lpo_file == file && l->lpo_line == line) return (l); type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; l = LIST_FIRST(&type->lpt_lpoalloc); if (l == NULL) { lock_prof_rejected++; return (NULL); } LIST_REMOVE(l, lpo_link); l->lpo_obj = lo; l->lpo_file = file; l->lpo_line = line; l->lpo_cnt = 0; LIST_INSERT_HEAD(head, l, lpo_link); return (l); } void lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line) { static int lock_prof_count; struct lock_profile_object *l; int spin; if (SCHEDULER_STOPPED()) return; /* don't reset the timer when/if recursing */ if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) return; if (lock_prof_skipcount && (++lock_prof_count % lock_prof_skipcount) != 0) return; spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; if (spin && lock_prof_skipspin == 1) return; critical_enter(); /* Recheck enabled now that we're in a critical section. */ if (lock_prof_enable == 0) goto out; l = lock_profile_object_lookup(lo, spin, file, line); if (l == NULL) goto out; l->lpo_cnt++; if (++l->lpo_ref > 1) goto out; l->lpo_contest_locking = contested; l->lpo_acqtime = nanoseconds(); if (waittime && (l->lpo_acqtime > waittime)) l->lpo_waittime = l->lpo_acqtime - waittime; else l->lpo_waittime = 0; out: critical_exit(); } void lock_profile_thread_exit(struct thread *td) { #ifdef INVARIANTS struct lock_profile_object *l; MPASS(curthread->td_critnest == 0); #endif /* * If lock profiling was disabled we have to wait for reset to * clear our pointers before we can exit safely. 
*/ lock_prof_reset_wait(); #ifdef INVARIANTS LIST_FOREACH(l, &td->td_lprof[0], lpo_link) printf("thread still holds lock acquired at %s:%d\n", l->lpo_file, l->lpo_line); LIST_FOREACH(l, &td->td_lprof[1], lpo_link) printf("thread still holds lock acquired at %s:%d\n", l->lpo_file, l->lpo_line); #endif MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL); MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL); } void lock_profile_release_lock(struct lock_object *lo) { struct lock_profile_object *l; struct lock_prof_type *type; struct lock_prof *lp; uint64_t curtime, holdtime; struct lpohead *head; int spin; if (SCHEDULER_STOPPED()) return; if (lo->lo_flags & LO_NOPROFILE) return; spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; head = &curthread->td_lprof[spin]; if (LIST_FIRST(head) == NULL) return; critical_enter(); /* Recheck enabled now that we're in a critical section. */ if (lock_prof_enable == 0 && lock_prof_resetting == 1) goto out; /* * If lock profiling is not enabled we still want to remove the * lpo from our queue. */ LIST_FOREACH(l, head, lpo_link) if (l->lpo_obj == lo) break; if (l == NULL) goto out; if (--l->lpo_ref > 0) goto out; lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); if (lp == NULL) goto release; curtime = nanoseconds(); if (curtime < l->lpo_acqtime) goto release; holdtime = curtime - l->lpo_acqtime; /* * Record if the lock has been held longer now than ever * before. */ if (holdtime > lp->cnt_max) lp->cnt_max = holdtime; if (l->lpo_waittime > lp->cnt_wait_max) lp->cnt_wait_max = l->lpo_waittime; lp->cnt_tot += holdtime; lp->cnt_wait += l->lpo_waittime; lp->cnt_contest_locking += l->lpo_contest_locking; lp->cnt_cur += l->lpo_cnt; release: LIST_REMOVE(l, lpo_link); type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); out: critical_exit(); } -static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling"); SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, &lock_prof_rejected, 0, "Number of rejected profiling records"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); #endif Index: projects/netbsd-tests-upstream-01-2017/sys/kern/subr_sfbuf.c =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/kern/subr_sfbuf.c (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/kern/subr_sfbuf.c (revision 313399) @@ -1,240 +1,241 @@ /*- * Copyright (c) 2014 Gleb Smirnoff * Copyright (c) 2003, 2005 Alan L. Cox * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include +#include #include #include #include #include #include #include #ifndef NSFBUFS #define NSFBUFS (512 + maxusers * 16) #endif static int nsfbufs; static int nsfbufspeak; static int nsfbufsused; SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0, "Maximum number of sendfile(2) sf_bufs available"); SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0, "Number of sendfile(2) sf_bufs at peak usage"); SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0, "Number of sendfile(2) sf_bufs in use"); static void sf_buf_init(void *arg); SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL); LIST_HEAD(sf_head, sf_buf); /* * A hash table of active sendfile(2) buffers */ static struct sf_head *sf_buf_active; static u_long sf_buf_hashmask; #define SF_BUF_HASH(m) (((m) - vm_page_array) & sf_buf_hashmask) static TAILQ_HEAD(, sf_buf) sf_buf_freelist; static u_int sf_buf_alloc_want; /* * A lock used to synchronize access to the hash table and free list */ static struct mtx sf_buf_lock; /* * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-)) */ static void sf_buf_init(void *arg) { struct sf_buf *sf_bufs; vm_offset_t sf_base; int i; #ifdef SFBUF_OPTIONAL_DIRECT_MAP if (SFBUF_OPTIONAL_DIRECT_MAP) return; #endif nsfbufs = NSFBUFS; TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs); sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask); TAILQ_INIT(&sf_buf_freelist); sf_base = kva_alloc(nsfbufs * PAGE_SIZE); sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_WAITOK | M_ZERO); for (i = 0; i < nsfbufs; i++) { sf_bufs[i].kva = sf_base + i * PAGE_SIZE; TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry); } sf_buf_alloc_want = 0; mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF); } /* * Get an sf_buf from the freelist. May block if none are available. 
*/ struct sf_buf * sf_buf_alloc(struct vm_page *m, int flags) { struct sf_head *hash_list; struct sf_buf *sf; int error; #ifdef SFBUF_OPTIONAL_DIRECT_MAP if (SFBUF_OPTIONAL_DIRECT_MAP) return ((struct sf_buf *)m); #endif KASSERT(curthread->td_pinned > 0 || (flags & SFB_CPUPRIVATE) == 0, ("sf_buf_alloc(SFB_CPUPRIVATE): curthread not pinned")); hash_list = &sf_buf_active[SF_BUF_HASH(m)]; mtx_lock(&sf_buf_lock); LIST_FOREACH(sf, hash_list, list_entry) { if (sf->m == m) { sf->ref_count++; if (sf->ref_count == 1) { TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry); nsfbufsused++; nsfbufspeak = imax(nsfbufspeak, nsfbufsused); } #if defined(SMP) && defined(SFBUF_CPUSET) sf_buf_shootdown(sf, flags); #endif goto done; } } while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) { if (flags & SFB_NOWAIT) goto done; sf_buf_alloc_want++; SFSTAT_INC(sf_allocwait); error = msleep(&sf_buf_freelist, &sf_buf_lock, (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0); sf_buf_alloc_want--; /* * If we got a signal, don't risk going back to sleep. */ if (error) goto done; } TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry); if (sf->m != NULL) LIST_REMOVE(sf, list_entry); LIST_INSERT_HEAD(hash_list, sf, list_entry); sf->ref_count = 1; sf->m = m; nsfbufsused++; nsfbufspeak = imax(nsfbufspeak, nsfbufsused); sf_buf_map(sf, flags); done: mtx_unlock(&sf_buf_lock); return (sf); } /* * Remove a reference from the given sf_buf, adding it to the free * list when its reference count reaches zero. A freed sf_buf still, * however, retains its virtual-to-physical mapping until it is * recycled or reactivated by sf_buf_alloc(9). */ void sf_buf_free(struct sf_buf *sf) { #ifdef SFBUF_OPTIONAL_DIRECT_MAP if (SFBUF_OPTIONAL_DIRECT_MAP) return; #endif mtx_lock(&sf_buf_lock); sf->ref_count--; if (sf->ref_count == 0) { TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry); nsfbufsused--; if (sf_buf_unmap(sf)) { sf->m = NULL; LIST_REMOVE(sf, list_entry); } if (sf_buf_alloc_want > 0) wakeup(&sf_buf_freelist); } mtx_unlock(&sf_buf_lock); } void sf_buf_ref(struct sf_buf *sf) { #ifdef SFBUF_OPTIONAL_DIRECT_MAP if (SFBUF_OPTIONAL_DIRECT_MAP) return; #endif mtx_lock(&sf_buf_lock); KASSERT(sf->ref_count > 0, ("%s: sf %p not allocated", __func__, sf)); sf->ref_count++; mtx_unlock(&sf_buf_lock); } #ifdef SFBUF_PROCESS_PAGE /* * Run callback function on sf_buf that holds a certain page. */ boolean_t sf_buf_process_page(vm_page_t m, void (*cb)(struct sf_buf *)) { struct sf_head *hash_list; struct sf_buf *sf; hash_list = &sf_buf_active[SF_BUF_HASH(m)]; mtx_lock(&sf_buf_lock); LIST_FOREACH(sf, hash_list, list_entry) { if (sf->m == m) { cb(sf); mtx_unlock(&sf_buf_lock); return (TRUE); } } mtx_unlock(&sf_buf_lock); return (FALSE); } #endif /* SFBUF_PROCESS_PAGE */ Index: projects/netbsd-tests-upstream-01-2017/sys/modules/dtb/allwinner/Makefile =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/modules/dtb/allwinner/Makefile (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/modules/dtb/allwinner/Makefile (revision 313399) @@ -1,17 +1,26 @@ # $FreeBSD$ # All the dts files for allwinner systems we support. 
DTS= \ - bananapi.dts \ - bananapim2.dts \ - cubieboard.dts \ - cubieboard2.dts \ nanopi-neo.dts \ - olimex-a20-som-evb.dts \ - olinuxino-lime.dts \ orangepi-plus-2e.dts \ - pcduino3.dts \ sinovoip-bpi-m3.dts \ + sun4i-a10-cubieboard.dts \ + sun4i-a10-olinuxino-lime.dts \ + sun6i-a31s-sinovoip-bpi-m2.dts \ sun5i-a13-olinuxino.dts \ - sun5i-r8-chip.dts + sun5i-r8-chip.dts \ + sun7i-a20-bananapi.dts \ + sun7i-a20-cubieboard2.dts \ + sun7i-a20-olimex-som-evb.dts \ + sun7i-a20-pcduino3.dts + +LINKS= \ + ${DTBDIR}/sun4i-a10-cubieboard.dtb ${DTBDIR}/cubieboard.dtb \ + ${DTBDIR}/sun4i-a10-olinuxino-lime.dtb ${DTBDIR}/olinuxino-lime.dtb \ + ${DTBDIR}/sun6i-a31s-sinovoip-bpi-m2.dtb ${DTBDIR}/bananapim2.dtb \ + ${DTBDIR}/sun7i-a20-bananapi.dtb ${DTBDIR}/bananapi.dtb \ + ${DTBDIR}/sun7i-a20-cubieboard2.dtb ${DTBDIR}/cubieboard2.dtb \ + ${DTBDIR}/sun7i-a20-olimex-som-evb.dtb ${DTBDIR}/olimex-a20-som-evb.dtb \ + ${DTBDIR}/sun7i-a20-pcduino3.dtb ${DTBDIR}/pcduino3.dtb .include Index: projects/netbsd-tests-upstream-01-2017/sys/sys/lock.h =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/sys/lock.h (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/sys/lock.h (revision 313399) @@ -1,337 +1,341 @@ /*- * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Berkeley Software Design Inc's name may not be used to endorse or * promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from BSDI Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp * $FreeBSD$ */ #ifndef _SYS_LOCK_H_ #define _SYS_LOCK_H_ #include #include #include struct lock_list_entry; struct thread; /* * Lock classes. Each lock has a class which describes characteristics * common to all types of locks of a given class. * * Spin locks in general must always protect against preemption, as it is * an error to perform any type of context switch while holding a spin lock. * Also, for an individual lock to be recursable, its class must allow * recursion and the lock itself must explicitly allow recursion. * * The 'lc_ddb_show' function pointer is used to dump class-specific * data for the 'show lock' DDB command. 
The 'lc_lock' and * 'lc_unlock' function pointers are used in sleep(9) and cv_wait(9) * to lock and unlock locks while blocking on a sleep queue. The * return value of 'lc_unlock' will be passed to 'lc_lock' on resume * to allow communication of state between the two routines. */ struct lock_class { const char *lc_name; u_int lc_flags; void (*lc_assert)(const struct lock_object *lock, int what); void (*lc_ddb_show)(const struct lock_object *lock); void (*lc_lock)(struct lock_object *lock, uintptr_t how); int (*lc_owner)(const struct lock_object *lock, struct thread **owner); uintptr_t (*lc_unlock)(struct lock_object *lock); }; #define LC_SLEEPLOCK 0x00000001 /* Sleep lock. */ #define LC_SPINLOCK 0x00000002 /* Spin lock. */ #define LC_SLEEPABLE 0x00000004 /* Sleeping allowed with this lock. */ #define LC_RECURSABLE 0x00000008 /* Locks of this type may recurse. */ #define LC_UPGRADABLE 0x00000010 /* Upgrades and downgrades permitted. */ #define LO_CLASSFLAGS 0x0000ffff /* Class specific flags. */ #define LO_INITIALIZED 0x00010000 /* Lock has been initialized. */ #define LO_WITNESS 0x00020000 /* Should witness monitor this lock. */ #define LO_QUIET 0x00040000 /* Don't log locking operations. */ #define LO_RECURSABLE 0x00080000 /* Lock may recurse. */ #define LO_SLEEPABLE 0x00100000 /* Lock may be held while sleeping. */ #define LO_UPGRADABLE 0x00200000 /* Lock may be upgraded/downgraded. */ #define LO_DUPOK 0x00400000 /* Don't check for duplicate acquires */ #define LO_IS_VNODE 0x00800000 /* Tell WITNESS about a VNODE lock */ #define LO_CLASSMASK 0x0f000000 /* Class index bitmask. */ #define LO_NOPROFILE 0x10000000 /* Don't profile this lock */ #define LO_NEW 0x20000000 /* Don't check for double-init */ /* * Lock classes are statically assigned an index into the global lock_classes * array. Debugging code looks up the lock class for a given lock object * by indexing the array. */ #define LO_CLASSSHIFT 24 #define LO_CLASSINDEX(lock) ((((lock)->lo_flags) & LO_CLASSMASK) >> LO_CLASSSHIFT) #define LOCK_CLASS(lock) (lock_classes[LO_CLASSINDEX((lock))]) #define LOCK_CLASS_MAX (LO_CLASSMASK >> LO_CLASSSHIFT) /* * Option flags passed to lock operations that witness also needs to know * about or that are generic across all locks. */ #define LOP_NEWORDER 0x00000001 /* Define a new lock order. */ #define LOP_QUIET 0x00000002 /* Don't log locking operations. */ #define LOP_TRYLOCK 0x00000004 /* Don't check lock order. */ #define LOP_EXCLUSIVE 0x00000008 /* Exclusive lock. */ #define LOP_DUPOK 0x00000010 /* Don't check for duplicate acquires */ /* Flags passed to witness_assert. */ #define LA_MASKASSERT 0x000000ff /* Mask for witness defined asserts. */ #define LA_UNLOCKED 0x00000000 /* Lock is unlocked. */ #define LA_LOCKED 0x00000001 /* Lock is at least share locked. */ #define LA_SLOCKED 0x00000002 /* Lock is exactly share locked. */ #define LA_XLOCKED 0x00000004 /* Lock is exclusively locked. */ #define LA_RECURSED 0x00000008 /* Lock is recursed. */ #define LA_NOTRECURSED 0x00000010 /* Lock is not recursed. */ #ifdef _KERNEL /* * If any of WITNESS, INVARIANTS, or KTR_LOCK KTR tracing has been enabled, * then turn on LOCK_DEBUG. When this option is on, extra debugging * facilities such as tracking the file and line number of lock operations * are enabled. Also, mutex locking operations are not inlined to avoid * bloat from all the extra debugging code.
We also have to turn on all the * calling conventions for this debugging code in modules so that modules can * work with both debug and non-debug kernels. */ #if defined(KLD_MODULE) || defined(WITNESS) || defined(INVARIANTS) || defined(INVARIANT_SUPPORT) || defined(LOCK_PROFILING) || (defined(KTR) && (KTR_COMPILE & KTR_LOCK)) #define LOCK_DEBUG 1 #else #define LOCK_DEBUG 0 #endif /* * In the LOCK_DEBUG case, use the filename and line numbers for debugging * operations. Otherwise, use default values to avoid the unneeded bloat. */ #if LOCK_DEBUG > 0 #define LOCK_FILE __FILE__ #define LOCK_LINE __LINE__ #else #define LOCK_FILE NULL #define LOCK_LINE 0 #endif /* * Macros for KTR_LOCK tracing. * * opname - name of this operation (LOCK/UNLOCK/SLOCK, etc.) * lo - struct lock_object * for this lock * flags - flags passed to the lock operation * recurse - this lock's recursion level (or 0 if class is not recursable) * result - result of a try lock operation * file - file name * line - line number */ #define LOCK_LOG_TEST(lo, flags) \ (((flags) & LOP_QUIET) == 0 && ((lo)->lo_flags & LO_QUIET) == 0) #define LOCK_LOG_LOCK(opname, lo, flags, recurse, file, line) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR6(KTR_LOCK, opname " (%s) %s %p r = %d at %s:%d", \ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \ (lo), (u_int)(recurse), (file), (line)); \ } while (0) #define LOCK_LOG_TRY(opname, lo, flags, result, file, line) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR6(KTR_LOCK, "TRY_" opname " (%s) %s %p result=%d at %s:%d",\ LOCK_CLASS(lo)->lc_name, (lo)->lo_name, \ (lo), (u_int)(result), (file), (line)); \ } while (0) #define LOCK_LOG_INIT(lo, flags) do { \ if (LOCK_LOG_TEST((lo), (flags))) \ CTR4(KTR_LOCK, "%s: %p (%s) %s", __func__, (lo), \ LOCK_CLASS(lo)->lc_name, (lo)->lo_name); \ } while (0) #define LOCK_LOG_DESTROY(lo, flags) LOCK_LOG_INIT(lo, flags) #define lock_initialized(lo) ((lo)->lo_flags & LO_INITIALIZED) /* * Helpful macros for quickly coming up with assertions with informative * panic messages.
*/ #define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__) #define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__) #define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line) #define MPASS4(ex, what, file, line) \ KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line)) extern struct lock_class lock_class_mtx_sleep; extern struct lock_class lock_class_mtx_spin; extern struct lock_class lock_class_sx; extern struct lock_class lock_class_rw; extern struct lock_class lock_class_rm; extern struct lock_class lock_class_rm_sleepable; extern struct lock_class lock_class_lockmgr; extern struct lock_class *lock_classes[]; struct lock_delay_config { - u_int initial; - u_int step; - u_int min; + u_int base; u_int max; }; struct lock_delay_arg { struct lock_delay_config *config; u_int delay; u_int spin_cnt; }; static inline void -lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc) { +lock_delay_arg_init(struct lock_delay_arg *la, struct lock_delay_config *lc) +{ la->config = lc; - la->delay = 0; + la->delay = lc->base; la->spin_cnt = 0; } #define LOCK_DELAY_SYSINIT(func) \ SYSINIT(func##_ld, SI_SUB_LOCK, SI_ORDER_ANY, func, NULL) +#define LOCK_DELAY_SYSINIT_DEFAULT(lc) \ + SYSINIT(lock_delay_##lc##_ld, SI_SUB_LOCK, SI_ORDER_ANY, \ + lock_delay_default_init, &lc) + void lock_init(struct lock_object *, struct lock_class *, const char *, const char *, int); void lock_destroy(struct lock_object *); void lock_delay(struct lock_delay_arg *); +void lock_delay_default_init(struct lock_delay_config *); void spinlock_enter(void); void spinlock_exit(void); void witness_init(struct lock_object *, const char *); void witness_destroy(struct lock_object *); int witness_defineorder(struct lock_object *, struct lock_object *); void witness_checkorder(struct lock_object *, int, const char *, int, struct lock_object *); void witness_lock(struct lock_object *, int, const char *, int); void witness_upgrade(struct lock_object *, int, const char *, int); void witness_downgrade(struct lock_object *, int, const char *, int); void witness_unlock(struct lock_object *, int, const char *, int); void witness_save(struct lock_object *, const char **, int *); void witness_restore(struct lock_object *, const char *, int); int witness_list_locks(struct lock_list_entry **, int (*)(const char *, ...)); int witness_warn(int, struct lock_object *, const char *, ...); void witness_assert(const struct lock_object *, int, const char *, int); void witness_display_spinlock(struct lock_object *, struct thread *, int (*)(const char *, ...)); int witness_line(struct lock_object *); void witness_norelease(struct lock_object *); void witness_releaseok(struct lock_object *); const char *witness_file(struct lock_object *); void witness_thread_exit(struct thread *); #ifdef WITNESS /* Flags for witness_warn(). */ #define WARN_GIANTOK 0x01 /* Giant is exempt from this check. */ #define WARN_PANIC 0x02 /* Panic if check fails. */ #define WARN_SLEEPOK 0x04 /* Sleepable locks are exempt from check. 
*/ #define WITNESS_INIT(lock, type) \ witness_init((lock), (type)) #define WITNESS_DESTROY(lock) \ witness_destroy(lock) #define WITNESS_CHECKORDER(lock, flags, file, line, interlock) \ witness_checkorder((lock), (flags), (file), (line), (interlock)) #define WITNESS_DEFINEORDER(lock1, lock2) \ witness_defineorder((struct lock_object *)(lock1), \ (struct lock_object *)(lock2)) #define WITNESS_LOCK(lock, flags, file, line) \ witness_lock((lock), (flags), (file), (line)) #define WITNESS_UPGRADE(lock, flags, file, line) \ witness_upgrade((lock), (flags), (file), (line)) #define WITNESS_DOWNGRADE(lock, flags, file, line) \ witness_downgrade((lock), (flags), (file), (line)) #define WITNESS_UNLOCK(lock, flags, file, line) \ witness_unlock((lock), (flags), (file), (line)) #define WITNESS_CHECK(flags, lock, fmt, ...) \ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__) #define WITNESS_WARN(flags, lock, fmt, ...) \ witness_warn((flags), (lock), (fmt), ## __VA_ARGS__) #define WITNESS_SAVE_DECL(n) \ const char * __CONCAT(n, __wf); \ int __CONCAT(n, __wl) #define WITNESS_SAVE(lock, n) \ witness_save((lock), &__CONCAT(n, __wf), &__CONCAT(n, __wl)) #define WITNESS_RESTORE(lock, n) \ witness_restore((lock), __CONCAT(n, __wf), __CONCAT(n, __wl)) #define WITNESS_NORELEASE(lock) \ witness_norelease(&(lock)->lock_object) #define WITNESS_RELEASEOK(lock) \ witness_releaseok(&(lock)->lock_object) #define WITNESS_FILE(lock) \ witness_file(lock) #define WITNESS_LINE(lock) \ witness_line(lock) #else /* WITNESS */ #define WITNESS_INIT(lock, type) (void)0 #define WITNESS_DESTROY(lock) (void)0 #define WITNESS_DEFINEORDER(lock1, lock2) 0 #define WITNESS_CHECKORDER(lock, flags, file, line, interlock) (void)0 #define WITNESS_LOCK(lock, flags, file, line) (void)0 #define WITNESS_UPGRADE(lock, flags, file, line) (void)0 #define WITNESS_DOWNGRADE(lock, flags, file, line) (void)0 #define WITNESS_UNLOCK(lock, flags, file, line) (void)0 #define WITNESS_CHECK(flags, lock, fmt, ...) 0 #define WITNESS_WARN(flags, lock, fmt, ...) (void)0 #define WITNESS_SAVE_DECL(n) (void)0 #define WITNESS_SAVE(lock, n) (void)0 #define WITNESS_RESTORE(lock, n) (void)0 #define WITNESS_NORELEASE(lock) (void)0 #define WITNESS_RELEASEOK(lock) (void)0 #define WITNESS_FILE(lock) ("?") #define WITNESS_LINE(lock) (0) #endif /* WITNESS */ #endif /* _KERNEL */ #endif /* _SYS_LOCK_H_ */ Index: projects/netbsd-tests-upstream-01-2017/sys/sys/rwlock.h =================================================================== --- projects/netbsd-tests-upstream-01-2017/sys/sys/rwlock.h (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/sys/sys/rwlock.h (revision 313399) @@ -1,289 +1,286 @@ /*- * Copyright (c) 2006 John Baldwin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_RWLOCK_H_ #define _SYS_RWLOCK_H_ #include #include #include #include #ifdef _KERNEL #include #include #endif /* * The rw_lock field consists of several fields. The low bit indicates * if the lock is locked with a read (shared) or write (exclusive) lock. * A value of 0 indicates a write lock, and a value of 1 indicates a read * lock. Bit 1 is a boolean indicating if there are any threads waiting * for a read lock. Bit 2 is a boolean indicating if there are any threads * waiting for a write lock. The rest of the variable's definition is * dependent on the value of the first bit. For a write lock, it is a * pointer to the thread holding the lock, similar to the mtx_lock field of * mutexes. For read locks, it is a count of read locks that are held. * * When the lock is not locked by any thread, it is encoded as a read lock * with zero waiters. */ #define RW_LOCK_READ 0x01 #define RW_LOCK_READ_WAITERS 0x02 #define RW_LOCK_WRITE_WAITERS 0x04 #define RW_LOCK_WRITE_SPINNER 0x08 +#define RW_LOCK_WRITER_RECURSED 0x10 #define RW_LOCK_FLAGMASK \ (RW_LOCK_READ | RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS | \ - RW_LOCK_WRITE_SPINNER) + RW_LOCK_WRITE_SPINNER | RW_LOCK_WRITER_RECURSED) #define RW_LOCK_WAITERS (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS) #define RW_OWNER(x) ((x) & ~RW_LOCK_FLAGMASK) #define RW_READERS_SHIFT 4 #define RW_READERS(x) (RW_OWNER((x)) >> RW_READERS_SHIFT) #define RW_READERS_LOCK(x) ((x) << RW_READERS_SHIFT | RW_LOCK_READ) #define RW_ONE_READER (1 << RW_READERS_SHIFT) #define RW_UNLOCKED RW_READERS_LOCK(0) #define RW_DESTROYED (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS) #ifdef _KERNEL #define rw_recurse lock_object.lo_data #define RW_READ_VALUE(x) ((x)->rw_lock) /* Very simple operations on rw_lock. */ /* Try to obtain a write lock once. */ #define _rw_write_lock(rw, tid) \ atomic_cmpset_acq_ptr(&(rw)->rw_lock, RW_UNLOCKED, (tid)) #define _rw_write_lock_fetch(rw, vp, tid) \ atomic_fcmpset_acq_ptr(&(rw)->rw_lock, vp, (tid)) /* Release a write lock quickly if there are no waiters. */ #define _rw_write_unlock(rw, tid) \ atomic_cmpset_rel_ptr(&(rw)->rw_lock, (tid), RW_UNLOCKED) /* * Full lock operations that are suitable to be inlined in non-debug * kernels. If the lock cannot be acquired or released trivially then * the work is deferred to another function. */ /* Acquire a write lock. */ #define __rw_wlock(rw, tid, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ uintptr_t _v = RW_UNLOCKED; \ \ if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) || \ !_rw_write_lock_fetch((rw), &_v, _tid))) \ _rw_wlock_hard((rw), _v, _tid, (file), (line)); \ } while (0) /* Release a write lock. 
*/ #define __rw_wunlock(rw, tid, file, line) do { \ uintptr_t _tid = (uintptr_t)(tid); \ \ - if ((rw)->rw_recurse) \ - (rw)->rw_recurse--; \ - else { \ - if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) ||\ - !_rw_write_unlock((rw), _tid))) \ - _rw_wunlock_hard((rw), _tid, (file), (line)); \ - } \ + if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) || \ + !_rw_write_unlock((rw), _tid))) \ + _rw_wunlock_hard((rw), _tid, (file), (line)); \ } while (0) /* * Function prototypes. Routines that start with _ are not part of the * external API and should not be called directly. Wrapper macros should * be used instead. */ void _rw_init_flags(volatile uintptr_t *c, const char *name, int opts); void _rw_destroy(volatile uintptr_t *c); void rw_sysinit(void *arg); void rw_sysinit_flags(void *arg); int _rw_wowned(const volatile uintptr_t *c); void _rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line); int __rw_try_wlock(volatile uintptr_t *c, const char *file, int line); void _rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line); void __rw_rlock(volatile uintptr_t *c, const char *file, int line); int __rw_try_rlock(volatile uintptr_t *c, const char *file, int line); void _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line); void __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, const char *file, int line); void __rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file, int line); int __rw_try_upgrade(volatile uintptr_t *c, const char *file, int line); void __rw_downgrade(volatile uintptr_t *c, const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) void __rw_assert(const volatile uintptr_t *c, int what, const char *file, int line); #endif /* * Top-level macros to provide lock cookie once the actual rwlock is passed. * They will also prevent passing a malformed object to the rwlock KPI by * failing compilation as the rw_lock reserved member will not be found. */ #define rw_init(rw, n) \ _rw_init_flags(&(rw)->rw_lock, n, 0) #define rw_init_flags(rw, n, o) \ _rw_init_flags(&(rw)->rw_lock, n, o) #define rw_destroy(rw) \ _rw_destroy(&(rw)->rw_lock) #define rw_wowned(rw) \ _rw_wowned(&(rw)->rw_lock) #define _rw_wlock(rw, f, l) \ _rw_wlock_cookie(&(rw)->rw_lock, f, l) #define _rw_try_wlock(rw, f, l) \ __rw_try_wlock(&(rw)->rw_lock, f, l) #define _rw_wunlock(rw, f, l) \ _rw_wunlock_cookie(&(rw)->rw_lock, f, l) #define _rw_rlock(rw, f, l) \ __rw_rlock(&(rw)->rw_lock, f, l) #define _rw_try_rlock(rw, f, l) \ __rw_try_rlock(&(rw)->rw_lock, f, l) #define _rw_runlock(rw, f, l) \ _rw_runlock_cookie(&(rw)->rw_lock, f, l) #define _rw_wlock_hard(rw, v, t, f, l) \ __rw_wlock_hard(&(rw)->rw_lock, v, t, f, l) #define _rw_wunlock_hard(rw, t, f, l) \ __rw_wunlock_hard(&(rw)->rw_lock, t, f, l) #define _rw_try_upgrade(rw, f, l) \ __rw_try_upgrade(&(rw)->rw_lock, f, l) #define _rw_downgrade(rw, f, l) \ __rw_downgrade(&(rw)->rw_lock, f, l) #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define _rw_assert(rw, w, f, l) \ __rw_assert(&(rw)->rw_lock, w, f, l) #endif /* * Public interface for lock operations. 
*/ #ifndef LOCK_DEBUG #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 || defined(RWLOCK_NOINLINE) #define rw_wlock(rw) _rw_wlock((rw), LOCK_FILE, LOCK_LINE) #define rw_wunlock(rw) _rw_wunlock((rw), LOCK_FILE, LOCK_LINE) #else #define rw_wlock(rw) \ __rw_wlock((rw), curthread, LOCK_FILE, LOCK_LINE) #define rw_wunlock(rw) \ __rw_wunlock((rw), curthread, LOCK_FILE, LOCK_LINE) #endif #define rw_rlock(rw) _rw_rlock((rw), LOCK_FILE, LOCK_LINE) #define rw_runlock(rw) _rw_runlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_rlock(rw) _rw_try_rlock((rw), LOCK_FILE, LOCK_LINE) #define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_try_wlock(rw) _rw_try_wlock((rw), LOCK_FILE, LOCK_LINE) #define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE) #define rw_unlock(rw) do { \ if (rw_wowned(rw)) \ rw_wunlock(rw); \ else \ rw_runlock(rw); \ } while (0) #define rw_sleep(chan, rw, pri, wmesg, timo) \ _sleep((chan), &(rw)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) #define rw_initialized(rw) lock_initialized(&(rw)->lock_object) struct rw_args { void *ra_rw; const char *ra_desc; }; struct rw_args_flags { void *ra_rw; const char *ra_desc; int ra_flags; }; #define RW_SYSINIT(name, rw, desc) \ static struct rw_args name##_args = { \ (rw), \ (desc), \ }; \ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rw_sysinit, &name##_args); \ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock)) #define RW_SYSINIT_FLAGS(name, rw, desc, flags) \ static struct rw_args_flags name##_args = { \ (rw), \ (desc), \ (flags), \ }; \ SYSINIT(name##_rw_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ rw_sysinit_flags, &name##_args); \ SYSUNINIT(name##_rw_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ _rw_destroy, __DEVOLATILE(void *, &(rw)->rw_lock)) /* * Options passed to rw_init_flags(). */ #define RW_DUPOK 0x01 #define RW_NOPROFILE 0x02 #define RW_NOWITNESS 0x04 #define RW_QUIET 0x08 #define RW_RECURSE 0x10 #define RW_NEW 0x20 /* * The INVARIANTS-enabled rw_assert() functionality. * * The constants need to be defined for INVARIANT_SUPPORT infrastructure * support as _rw_assert() itself uses them and the latter implies that * _rw_assert() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define RA_LOCKED LA_LOCKED #define RA_RLOCKED LA_SLOCKED #define RA_WLOCKED LA_XLOCKED #define RA_UNLOCKED LA_UNLOCKED #define RA_RECURSED LA_RECURSED #define RA_NOTRECURSED LA_NOTRECURSED #endif #ifdef INVARIANTS #define rw_assert(rw, what) _rw_assert((rw), (what), LOCK_FILE, LOCK_LINE) #else #define rw_assert(rw, what) #endif #endif /* _KERNEL */ #endif /* !_SYS_RWLOCK_H_ */ Index: projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/fibs_test.sh =================================================================== --- projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/fibs_test.sh (revision 313398) +++ projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/fibs_test.sh (revision 313399) @@ -1,680 +1,738 @@ # # Copyright (c) 2014 Spectra Logic Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions, and the following disclaimer, # without modification. # 2. 
Redistributions in binary form must reproduce at minimum a disclaimer # substantially similar to the "NO WARRANTY" disclaimer below # ("Disclaimer") and any redistribution must be conditioned upon # including a substantially similar Disclaimer requirement for further # binary redistribution. # # NO WARRANTY # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGES. # # Authors: Alan Somers (Spectra Logic Corporation) # # $FreeBSD$ # All of the tests in this file require the test-suite config variable "fibs" # to be defined to a space-delimited list of FIBs that may be used for testing. # arpresolve should check the interface fib for routes to a target when # creating an ARP table entry. This is a regression test for kern/167947, where # arpresolve only checked the default route. # # Outline: # Create two tap(4) interfaces # Simulate a crossover cable between them by using net/socat # Use nping (from security/nmap) to send an ICMP echo request from one # interface to the other, spoofing the source IP. The source IP must be # spoofed, or else it will already have an entry in the arp table. # Check whether an arp entry exists for the spoofed IP atf_test_case arpresolve_checks_interface_fib cleanup arpresolve_checks_interface_fib_head() { atf_set "descr" "arpresolve should check the interface fib, not the default fib, for routes" atf_set "require.user" "root" atf_set "require.config" "fibs" atf_set "require.progs" "socat nping" } arpresolve_checks_interface_fib_body() { # Configure the TAP interfaces to use RFC5737 nonrouteable addresses # and a non-default fib ADDR0="192.0.2.2" ADDR1="192.0.2.3" SUBNET="192.0.2.0" # Due to bug TBD (regressed by multiple_fibs_on_same_subnet) we need # different subnet masks, or FIB1 won't have a subnet route. MASK0="24" MASK1="25" # Spoof a MAC that is reserved per RFC7042 SPOOF_ADDR="192.0.2.4" SPOOF_MAC="00:00:5E:00:53:00" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure TAP interfaces setup_tap "$FIB0" inet ${ADDR0} ${MASK0} TAP0=$TAP setup_tap "$FIB1" inet ${ADDR1} ${MASK1} TAP1=$TAP # Simulate a crossover cable socat /dev/${TAP0} /dev/${TAP1} & SOCAT_PID=$!
echo ${SOCAT_PID} >> "processes_to_kill" # Send an ICMP echo request with a spoofed source IP setfib 2 nping -c 1 -e ${TAP0} -S ${SPOOF_ADDR} \ --source-mac ${SPOOF_MAC} --icmp --icmp-type "echo-request" \ --icmp-code 0 --icmp-id 0xdead --icmp-seq 1 --data 0xbeef \ ${ADDR1} # For informational and debugging purposes only, look for the # characteristic error message dmesg | grep "llinfo.*${SPOOF_ADDR}" # Check that the ARP entry exists atf_check -o match:"${SPOOF_ADDR}.*expires" setfib 3 arp ${SPOOF_ADDR} } arpresolve_checks_interface_fib_cleanup() { if [ -f processes_to_kill ]; then for pid in $(cat processes_to_kill); do kill "${pid}" done rm -f processes_to_kill fi cleanup_tap } # Regression test for kern/187549 atf_test_case loopback_and_network_routes_on_nondefault_fib cleanup loopback_and_network_routes_on_nondefault_fib_head() { atf_set "descr" "When creating and deleting loopback IPv4 routes, use the interface's fib" atf_set "require.user" "root" atf_set "require.config" "fibs" } loopback_and_network_routes_on_nondefault_fib_body() { # Configure the TAP interface to use an RFC5737 nonrouteable address # and a non-default fib ADDR="192.0.2.2" SUBNET="192.0.2.0" MASK="24" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 1 # Configure a TAP interface setup_tap ${FIB0} inet ${ADDR} ${MASK} # Check whether the host route exists in only the correct FIB setfib ${FIB0} netstat -rn -f inet | grep -q "^${ADDR}.*UHS.*lo0" if [ 0 -ne $? ]; then setfib ${FIB0} netstat -rn -f inet atf_fail "Host route did not appear in the correct FIB" fi setfib 0 netstat -rn -f inet | grep -q "^${ADDR}.*UHS.*lo0" if [ 0 -eq $? ]; then setfib 0 netstat -rn -f inet atf_fail "Host route appeared in the wrong FIB" fi # Check whether the network route exists in only the correct FIB setfib ${FIB0} netstat -rn -f inet | \ grep -q "^${SUBNET}/${MASK}.*${TAPD}" if [ 0 -ne $? ]; then setfib ${FIB0} netstat -rn -f inet atf_fail "Network route did not appear in the correct FIB" fi setfib 0 netstat -rn -f inet | \ grep -q "^${SUBNET}/${MASK}.*${TAPD}" if [ 0 -eq $? ]; then setfib 0 netstat -rn -f inet atf_fail "Network route appeared in the wrong FIB" fi } loopback_and_network_routes_on_nondefault_fib_cleanup() { cleanup_tap } atf_test_case loopback_and_network_routes_on_nondefault_fib_inet6 cleanup loopback_and_network_routes_on_nondefault_fib_inet6_head() { atf_set "descr" "When creating and deleting loopback IPv6 routes, use the interface's fib" atf_set "require.user" "root" atf_set "require.config" "fibs" } loopback_and_network_routes_on_nondefault_fib_inet6_body() { atf_expect_fail "PR196361 IPv6 network routes don't respect net.add_addr_allfibs=0" # Configure the TAP interface to use a nonrouteable RFC3849 # address and a non-default fib ADDR="2001:db8::2" SUBNET="2001:db8::" MASK="64" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 1 # Configure a TAP interface setup_tap ${FIB0} inet6 ${ADDR} ${MASK} # Check whether the host route exists in only the correct FIB setfib ${FIB0} netstat -rn -f inet6 | grep -q "^${ADDR}.*UHS.*lo0" if [ 0 -ne $? ]; then setfib ${FIB0} netstat -rn -f inet6 atf_fail "Host route did not appear in the correct FIB" fi setfib 0 netstat -rn -f inet6 | grep -q "^${ADDR}.*UHS.*lo0" if [ 0 -eq $? 
]; then setfib 0 netstat -rn -f inet6 atf_fail "Host route appeared in the wrong FIB" fi # Check whether the network route exists in only the correct FIB setfib ${FIB0} netstat -rn -f inet6 | \ grep -q "^${SUBNET}/${MASK}.*${TAPD}" if [ 0 -ne $? ]; then setfib ${FIB0} netstat -rn -f inet6 atf_fail "Network route did not appear in the correct FIB" fi setfib 0 netstat -rn -f inet6 | \ grep -q "^${SUBNET}/${MASK}.*${TAPD}" if [ 0 -eq $? ]; then setfib 0 netstat -rn -f inet6 atf_fail "Network route appeared in the wrong FIB" fi } loopback_and_network_routes_on_nondefault_fib_inet6_cleanup() { cleanup_tap } # Regression test for kern/187552 atf_test_case default_route_with_multiple_fibs_on_same_subnet cleanup default_route_with_multiple_fibs_on_same_subnet_head() { atf_set "descr" "Multiple interfaces on the same subnet but with different fibs can both have default IPv4 routes" atf_set "require.user" "root" atf_set "require.config" "fibs" } default_route_with_multiple_fibs_on_same_subnet_body() { # Configure the TAP interfaces to use RFC5737 nonrouteable addresses # and a non-default fib ADDR0="192.0.2.2" ADDR1="192.0.2.3" GATEWAY="192.0.2.1" SUBNET="192.0.2.0" MASK="24" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure TAP interfaces setup_tap "$FIB0" inet ${ADDR0} ${MASK} TAP0=$TAP setup_tap "$FIB1" inet ${ADDR1} ${MASK} TAP1=$TAP # Attempt to add default routes setfib ${FIB0} route add default ${GATEWAY} setfib ${FIB1} route add default ${GATEWAY} # Verify that the default route exists for both fibs, with their # respective interfaces. atf_check -o match:"^default.*${TAP0}$" \ setfib ${FIB0} netstat -rn -f inet atf_check -o match:"^default.*${TAP1}$" \ setfib ${FIB1} netstat -rn -f inet } default_route_with_multiple_fibs_on_same_subnet_cleanup() { cleanup_tap } atf_test_case default_route_with_multiple_fibs_on_same_subnet_inet6 cleanup default_route_with_multiple_fibs_on_same_subnet_inet6_head() { atf_set "descr" "Multiple interfaces on the same subnet but with different fibs can both have default IPv6 routes" atf_set "require.user" "root" atf_set "require.config" "fibs" } default_route_with_multiple_fibs_on_same_subnet_inet6_body() { # Configure the TAP interfaces to use nonrouteable RFC3849 # addresses and non-default FIBs ADDR0="2001:db8::2" ADDR1="2001:db8::3" GATEWAY="2001:db8::1" SUBNET="2001:db8::" MASK="64" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure TAP interfaces setup_tap "$FIB0" inet6 ${ADDR0} ${MASK} TAP0=$TAP setup_tap "$FIB1" inet6 ${ADDR1} ${MASK} TAP1=$TAP # Attempt to add default routes setfib ${FIB0} route -6 add default ${GATEWAY} setfib ${FIB1} route -6 add default ${GATEWAY} # Verify that the default route exists for both fibs, with their # respective interfaces. atf_check -o match:"^default.*${TAP0}$" \ setfib ${FIB0} netstat -rn -f inet6 atf_check -o match:"^default.*${TAP1}$" \ setfib ${FIB1} netstat -rn -f inet6 } default_route_with_multiple_fibs_on_same_subnet_inet6_cleanup() { cleanup_tap } # Regression test for PR kern/189089 # Create two tap interfaces and assign them both the same IP address but with # different netmasks, and both on the default FIB. Then remove one's IP # address. Hopefully the machine won't panic.
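For reference, the alias removal that these tests drive through ifconfig(8) reduces to a single SIOCDIFADDR ioctl on an AF_INET socket, which for IPv4 lands in the kernel's in_control() path that PR kern/189089 concerns. The sketch below is an illustration only, not part of the committed test suite; the helper name delete_alias() is hypothetical and error handling is abbreviated.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

/* Syscall-level equivalent of "ifconfig tapN -alias A.B.C.D". */
static void
delete_alias(const char *ifname, const char *addr)
{
	struct ifreq ifr;
	struct sockaddr_in *sin;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	sin = (struct sockaddr_in *)&ifr.ifr_addr;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	if (inet_pton(AF_INET, addr, &sin->sin_addr) != 1)
		errx(1, "invalid address %s", addr);
	/* Remove the address; this exercises the removal path under test. */
	if (ioctl(s, SIOCDIFADDR, &ifr) != 0)
		err(1, "SIOCDIFADDR");
	close(s);
}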
atf_test_case same_ip_multiple_ifaces_fib0 cleanup same_ip_multiple_ifaces_fib0_head() { atf_set "descr" "Can remove an IPv4 alias from an interface when the same IPv4 is also assigned to another interface." atf_set "require.user" "root" atf_set "require.config" "fibs" } same_ip_multiple_ifaces_fib0_body() { ADDR="192.0.2.2" MASK0="24" MASK1="32" # Unlike most of the tests in this file, this is applicable regardless # of net.add_addr_allfibs # Setup the interfaces, then remove one alias. It should not panic. setup_tap 0 inet ${ADDR} ${MASK0} TAP0=${TAP} setup_tap 0 inet ${ADDR} ${MASK1} TAP1=${TAP} ifconfig ${TAP1} -alias ${ADDR} # Do it again, in the opposite order. It should not panic. setup_tap 0 inet ${ADDR} ${MASK0} TAP0=${TAP} setup_tap 0 inet ${ADDR} ${MASK1} TAP1=${TAP} ifconfig ${TAP0} -alias ${ADDR} } same_ip_multiple_ifaces_fib0_cleanup() { cleanup_tap } # Regression test for PR kern/189088 # Test that removing an IP address works even if the same IP is assigned to a # different interface, on a different FIB. Tests the same code whose # panic was regressed by same_ip_multiple_ifaces_fib0. # Create two tap interfaces and assign them both the same IP address but with # different netmasks, and on different FIBs. Then remove one's IP # address. Hopefully the machine won't panic. Also, the IP's hostroute should # disappear from the correct fib. atf_test_case same_ip_multiple_ifaces cleanup same_ip_multiple_ifaces_head() { atf_set "descr" "Can remove an IPv4 alias from an interface when the same address is also assigned to another interface, on non-default FIBs." atf_set "require.user" "root" atf_set "require.config" "fibs" } same_ip_multiple_ifaces_body() { atf_expect_fail "kern/189088 Assigning the same IP to multiple interfaces in different FIBs creates a host route for only one" ADDR="192.0.2.2" MASK0="24" MASK1="32" # Unlike most of the tests in this file, this is applicable regardless # of net.add_addr_allfibs get_fibs 2 # Setup the interfaces, then remove one alias. It should not panic. setup_tap ${FIB0} inet ${ADDR} ${MASK0} TAP0=${TAP} setup_tap ${FIB1} inet ${ADDR} ${MASK1} TAP1=${TAP} ifconfig ${TAP1} -alias ${ADDR} atf_check -o not-match:"^${ADDR}[[:space:]]" \ setfib ${FIB1} netstat -rn -f inet # Do it again, in the opposite order. It should not panic. setup_tap ${FIB0} inet ${ADDR} ${MASK0} TAP0=${TAP} setup_tap ${FIB1} inet ${ADDR} ${MASK1} TAP1=${TAP} ifconfig ${TAP0} -alias ${ADDR} atf_check -o not-match:"^${ADDR}[[:space:]]" \ setfib ${FIB0} netstat -rn -f inet } same_ip_multiple_ifaces_cleanup() { # Due to PR kern/189088, we must destroy the interfaces in LIFO order # in order for the routes to be correctly cleaned up. for TAPD in `tail -r "tap_devices_to_cleanup"`; do echo ifconfig ${TAPD} destroy ifconfig ${TAPD} destroy done } atf_test_case same_ip_multiple_ifaces_inet6 cleanup same_ip_multiple_ifaces_inet6_head() { atf_set "descr" "Can remove an IPv6 alias from an interface when the same address is also assigned to another interface, on non-default FIBs." atf_set "require.user" "root" atf_set "require.config" "fibs" } same_ip_multiple_ifaces_inet6_body() { ADDR="2001:db8::2" MASK0="64" MASK1="128" # Unlike most of the tests in this file, this is applicable regardless # of net.add_addr_allfibs get_fibs 2 # Setup the interfaces, then remove one alias. It should not panic.
setup_tap ${FIB0} inet6 ${ADDR} ${MASK0} TAP0=${TAP} setup_tap ${FIB1} inet6 ${ADDR} ${MASK1} TAP1=${TAP} atf_check -s exit:0 ifconfig ${TAP1} inet6 ${ADDR} -alias atf_check -o not-match:"^${ADDR}[[:space:]]" \ setfib ${FIB1} netstat -rn -f inet6 ifconfig ${TAP1} destroy ifconfig ${TAP0} destroy # Do it again, in the opposite order. It should not panic. setup_tap ${FIB0} inet6 ${ADDR} ${MASK0} TAP0=${TAP} setup_tap ${FIB1} inet6 ${ADDR} ${MASK1} TAP1=${TAP} atf_check -s exit:0 ifconfig ${TAP0} inet6 ${ADDR} -alias atf_check -o not-match:"^${ADDR}[[:space:]]" \ setfib ${FIB0} netstat -rn -f inet6 } same_ip_multiple_ifaces_inet6_cleanup() { cleanup_tap } # Regression test for kern/187550 atf_test_case subnet_route_with_multiple_fibs_on_same_subnet cleanup subnet_route_with_multiple_fibs_on_same_subnet_head() { atf_set "descr" "Multiple FIBs can have IPv4 subnet routes for the same subnet" atf_set "require.user" "root" atf_set "require.config" "fibs" } subnet_route_with_multiple_fibs_on_same_subnet_body() { # Configure the TAP interfaces to use RFC5737 nonrouteable addresses # and a non-default fib ADDR0="192.0.2.2" ADDR1="192.0.2.3" SUBNET="192.0.2.0" MASK="24" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure TAP interfaces setup_tap "$FIB0" inet ${ADDR0} ${MASK} setup_tap "$FIB1" inet ${ADDR1} ${MASK} # Check that a subnet route exists on both fibs atf_check -o ignore setfib "$FIB0" route get $ADDR1 atf_check -o ignore setfib "$FIB1" route get $ADDR0 } subnet_route_with_multiple_fibs_on_same_subnet_cleanup() { cleanup_tap } atf_test_case subnet_route_with_multiple_fibs_on_same_subnet_inet6 cleanup subnet_route_with_multiple_fibs_on_same_subnet_inet6_head() { atf_set "descr" "Multiple FIBs can have IPv6 subnet routes for the same subnet" atf_set "require.user" "root" atf_set "require.config" "fibs" } subnet_route_with_multiple_fibs_on_same_subnet_inet6_body() { # Configure the TAP interfaces to use RFC3849 nonrouteable addresses # and a non-default fib ADDR0="2001:db8::2" ADDR1="2001:db8::3" SUBNET="2001:db8::" MASK="64" # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure TAP interfaces setup_tap "$FIB0" inet6 ${ADDR0} ${MASK} setup_tap "$FIB1" inet6 ${ADDR1} ${MASK} # Check that a subnet route exists on both fibs atf_check -o ignore setfib "$FIB0" route -6 get $ADDR1 atf_check -o ignore setfib "$FIB1" route -6 get $ADDR0 } subnet_route_with_multiple_fibs_on_same_subnet_inet6_cleanup() { cleanup_tap } # Test that source address selection works correctly for UDP packets with # SO_DONTROUTE set that are sent on non-default FIBs. # This bug was discovered with "setfib 1 netperf -t UDP_STREAM -H some_host" # Regression test for kern/187553 # # The root cause was that ifa_ifwithnet() did not have a fib argument. It # would return an address from an interface on any FIB that had a subnet route # for the destination. If more than one were available, it would choose the # most specific. This is most easily tested by creating a FIB without a # default route, then trying to send a UDP packet with SO_DONTROUTE set to an # address which is not routable on that FIB. Absent the fix for this bug, # in_pcbladdr would choose an interface on any FIB with a default route. With # the fix, you will get EHOSTUNREACH or ENETUNREACH.
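As an aside, the per-socket analogue of wrapping a command with setfib(1) is the FreeBSD-specific SO_SETFIB socket option; combined with SO_DONTROUTE it exercises exactly the in_pcbladdr()/ifa_ifwithnet() path described above. The following is a minimal sketch for illustration, not the committed helper (that helper, udp_dontroute.c, appears later in this revision); the function name send_dontroute() is hypothetical and error handling is abbreviated.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
send_dontroute(const char *ip, int fib)
{
	struct sockaddr_in dst;
	int one = 1;
	int s;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		err(1, "socket");
	/* Per-socket equivalent of running under setfib(1). */
	if (setsockopt(s, SOL_SOCKET, SO_SETFIB, &fib, sizeof(fib)) != 0)
		err(1, "setsockopt(SO_SETFIB)");
	/* Restrict the send to destinations on a directly attached subnet. */
	if (setsockopt(s, SOL_SOCKET, SO_DONTROUTE, &one, sizeof(one)) != 0)
		err(1, "setsockopt(SO_DONTROUTE)");
	memset(&dst, 0, sizeof(dst));
	dst.sin_len = sizeof(dst);
	dst.sin_family = AF_INET;
	dst.sin_port = htons(46120);	/* same arbitrary port the test uses */
	if (inet_pton(AF_INET, ip, &dst.sin_addr) != 1)
		errx(1, "invalid address %s", ip);
	/*
	 * Before the fix, source selection could pick an interface on the
	 * wrong FIB; with it, an unreachable destination fails here instead.
	 */
	if (sendto(s, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		err(1, "sendto");
	close(s);
}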
atf_test_case udp_dontroute cleanup udp_dontroute_head() { atf_set "descr" "Source address selection for UDP packets with SO_DONTROUTE on non-default FIBs works" atf_set "require.user" "root" atf_set "require.config" "fibs" } udp_dontroute_body() { # Configure the TAP interface to use an RFC5737 nonrouteable address # and a non-default fib ADDR0="192.0.2.2" ADDR1="192.0.2.3" SUBNET="192.0.2.0" MASK="24" # Use a different IP on the same subnet as the target TARGET="192.0.2.100" SRCDIR=`atf_get_srcdir` # Check system configuration if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then atf_skip "This test requires net.add_addr_allfibs=0" fi get_fibs 2 # Configure the TAP interfaces setup_tap ${FIB0} inet ${ADDR0} ${MASK} TARGET_TAP=${TAP} setup_tap ${FIB1} inet ${ADDR1} ${MASK} # Send a UDP packet with SO_DONTROUTE. In the failure case, it will # return ENETUNREACH, or send the packet to the wrong tap atf_check -o ignore setfib ${FIB0} \ ${SRCDIR}/udp_dontroute ${TARGET} /dev/${TARGET_TAP} cleanup_tap # Repeat, but this time target the other tap setup_tap ${FIB0} inet ${ADDR0} ${MASK} setup_tap ${FIB1} inet ${ADDR1} ${MASK} TARGET_TAP=${TAP} atf_check -o ignore setfib ${FIB1} \ ${SRCDIR}/udp_dontroute ${TARGET} /dev/${TARGET_TAP} } udp_dontroute_cleanup() { cleanup_tap } +atf_test_case udp_dontroute6 cleanup +udp_dontroute6_head() +{ + atf_set "descr" "Source address selection for UDP IPv6 packets with SO_DONTROUTE on non-default FIBs works" + atf_set "require.user" "root" + atf_set "require.config" "fibs" +} +udp_dontroute6_body() +{ + # Configure the TAP interface to use an RFC3849 nonrouteable address + # and a non-default fib + ADDR0="2001:db8::2" + ADDR1="2001:db8::3" + SUBNET="2001:db8::" + MASK="64" + # Use a different IP on the same subnet as the target + TARGET="2001:db8::100" + SRCDIR=`atf_get_srcdir` + + atf_expect_fail "PR196361 IPv6 network routes don't respect net.add_addr_allfibs=0" + + # Check system configuration + if [ 0 != `sysctl -n net.add_addr_allfibs` ]; then + atf_skip "This test requires net.add_addr_allfibs=0" + fi + get_fibs 2 + + # Configure the TAP interfaces. Use no_dad so the addresses will be + # ready right away and won't be marked as tentative until DAD + # completes. + setup_tap ${FIB0} inet6 ${ADDR0} ${MASK} no_dad + TARGET_TAP=${TAP} + setup_tap ${FIB1} inet6 ${ADDR1} ${MASK} no_dad + + # Send a UDP packet with SO_DONTROUTE. 
+	# return ENETUNREACH, or send the packet to the wrong tap
+	atf_check -o ignore setfib ${FIB0} \
+		${SRCDIR}/udp_dontroute -6 ${TARGET} /dev/${TARGET_TAP}
+	cleanup_tap
+
+	# Repeat, but this time target the other tap
+	setup_tap ${FIB0} inet6 ${ADDR0} ${MASK} no_dad
+	setup_tap ${FIB1} inet6 ${ADDR1} ${MASK} no_dad
+	TARGET_TAP=${TAP}
+
+	atf_check -o ignore setfib ${FIB1} \
+		${SRCDIR}/udp_dontroute -6 ${TARGET} /dev/${TARGET_TAP}
+}
+
+udp_dontroute6_cleanup()
+{
+	cleanup_tap
+}
+
+
atf_init_test_cases()
{
	atf_add_test_case arpresolve_checks_interface_fib
	atf_add_test_case loopback_and_network_routes_on_nondefault_fib
	atf_add_test_case loopback_and_network_routes_on_nondefault_fib_inet6
	atf_add_test_case default_route_with_multiple_fibs_on_same_subnet
	atf_add_test_case default_route_with_multiple_fibs_on_same_subnet_inet6
	atf_add_test_case same_ip_multiple_ifaces_fib0
	atf_add_test_case same_ip_multiple_ifaces
	atf_add_test_case same_ip_multiple_ifaces_inet6
	atf_add_test_case subnet_route_with_multiple_fibs_on_same_subnet
	atf_add_test_case subnet_route_with_multiple_fibs_on_same_subnet_inet6
	atf_add_test_case udp_dontroute
+	atf_add_test_case udp_dontroute6
}

# Looks up one or more fibs from the configuration data and validates them.
# Returns the results in the env variables FIB0, FIB1, etc.
# parameter numfibs	The number of fibs to lookup
get_fibs()
{
	NUMFIBS=$1
	net_fibs=`sysctl -n net.fibs`
	i=0
	while [ $i -lt "$NUMFIBS" ]; do
		fib=`atf_config_get "fibs" | \
			awk -v i=$(( i + 1 )) '{print $i}'`
		echo "fib is ${fib}"
		eval FIB${i}=${fib}
		if [ "$fib" -ge "$net_fibs" ]; then
			atf_skip "The ${i}th configured fib is ${fib}, which is not less than net.fibs, which is ${net_fibs}"
		fi
		i=$(( $i + 1 ))
	done
}

# Creates a new tap(4) interface, registers it for cleanup, and returns the
# name via the environment variable TAP
get_tap()
{
	local TAPN=0
	while ! ifconfig tap${TAPN} create > /dev/null 2>&1; do
		if [ "$TAPN" -ge 8 ]; then
			atf_skip "Could not create a tap(4) interface"
		else
			TAPN=$(($TAPN + 1))
		fi
	done
	local TAPD=tap${TAPN}
	# Record the TAP device so we can clean it up later
	echo ${TAPD} >> "tap_devices_to_cleanup"
	TAP=${TAPD}
}

# Create a tap(4) interface, configure it, and register it for cleanup.
# parameters:
# fib
# Protocol (inet or inet6)
# IP address
# Netmask in number of bits (e.g. 24 or 8)
+# Extra flags
# Return: the tap interface name as the env variable TAP
setup_tap()
{
	local FIB=$1
	local PROTO=$2
	local ADDR=$3
	local MASK=$4
+	local FLAGS=$5
	get_tap
-	echo setfib ${FIB} ifconfig $TAP ${PROTO} ${ADDR}/${MASK} fib $FIB
-	setfib ${FIB} ifconfig $TAP ${PROTO} ${ADDR}/${MASK} fib $FIB
+	echo setfib ${FIB} ifconfig $TAP ${PROTO} ${ADDR}/${MASK} fib $FIB $FLAGS
+	setfib ${FIB} ifconfig $TAP ${PROTO} ${ADDR}/${MASK} fib $FIB $FLAGS
}

cleanup_tap()
{
	if [ -f tap_devices_to_cleanup ]; then
		for tap_device in $(cat tap_devices_to_cleanup); do
			echo ifconfig "${tap_device}" destroy
			ifconfig "${tap_device}" destroy 2>/dev/null || true
		done
		rm -f tap_devices_to_cleanup
	fi
}

Index: projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/udp_dontroute.c
===================================================================
--- projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/udp_dontroute.c	(revision 313398)
+++ projects/netbsd-tests-upstream-01-2017/tests/sys/netinet/udp_dontroute.c	(revision 313399)
@@ -1,111 +1,138 @@
/*
 * Copyright (c) 2014 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Alan Somers (Spectra Logic Corporation)
 *
 * $FreeBSD$
 */

#include
#include
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include

/*
 * Sends a single UDP packet to the provided address, with SO_DONTROUTE set.
 * I couldn't find a way to do this with builtin utilities like nc(1)
 */
int
main(int argc, char **argv)
{
-	struct sockaddr_in dst;
+	struct sockaddr_storage dst;
	int s, t;
	int opt;
	int ret;
	ssize_t len;
	const char* sendbuf = "Hello, World!";
	const size_t buflen = 80;
	char recvbuf[buflen];
+	bool v6 = false;
+	const char *addr, *tapdev;
+	const uint16_t port = 46120;

-	if (argc != 3) {
-		fprintf(stderr, "Usage: %s ip_address tapdev\n", argv[0]);
+	bzero(&dst, sizeof(dst));
+	if (argc < 3 || argc > 4) {
+		fprintf(stderr, "Usage: %s [-6] ip_address tapdev\n", argv[0]);
		exit(2);
	}

-	t = open(argv[2], O_RDWR | O_NONBLOCK);
+	if (strcmp("-6", argv[1]) == 0) {
+		v6 = true;
+		addr = argv[2];
+		tapdev = argv[3];
+	} else {
+		addr = argv[1];
+		tapdev = argv[2];
+	}
+
+	t = open(tapdev, O_RDWR | O_NONBLOCK);
	if (t < 0)
		err(EXIT_FAILURE, "open");
-	s = socket(PF_INET, SOCK_DGRAM, 0);
+	if (v6)
+		s = socket(PF_INET6, SOCK_DGRAM, 0);
+	else
+		s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		err(EXIT_FAILURE, "socket");
	opt = 1;
	ret = setsockopt(s, SOL_SOCKET, SO_DONTROUTE, &opt, sizeof(opt));
	if (ret == -1)
		err(EXIT_FAILURE, "setsockopt(SO_DONTROUTE)");
-	dst.sin_len = sizeof(dst);
-	dst.sin_family = AF_INET;
-	dst.sin_port = htons(46120);
-	dst.sin_addr.s_addr = inet_addr(argv[1]);
-	if (dst.sin_addr.s_addr == htonl(INADDR_NONE)) {
-		fprintf(stderr, "Invalid address: %s\n", argv[1]);
-		exit(2);
+	if (v6) {
+		struct sockaddr_in6 *dst6 = ((struct sockaddr_in6*)&dst);
+
+		dst.ss_len = sizeof(struct sockaddr_in6);
+		dst.ss_family = AF_INET6;
+		dst6->sin6_port = htons(port);
+		ret = inet_pton(AF_INET6, addr, &dst6->sin6_addr);
+	} else {
+		struct sockaddr_in *dst4 = ((struct sockaddr_in*)&dst);
+
+		dst.ss_len = sizeof(struct sockaddr_in);
+		dst.ss_family = AF_INET;
+		dst4->sin_port = htons(port);
+		ret = inet_pton(AF_INET, addr, &dst4->sin_addr);
	}
+	if (ret != 1)
+		err(EXIT_FAILURE, "inet_pton returned %d", ret);
+
	ret = sendto(s, sendbuf, strlen(sendbuf), 0, (struct sockaddr*)&dst,
-	    dst.sin_len);
+	    dst.ss_len);
	if (ret == -1)
		err(EXIT_FAILURE, "sendto");

	/* Verify that the packet went to the desired tap device */
	len = read(t, recvbuf, buflen);
	if (len == 0)
		errx(EXIT_FAILURE, "read returned EOF");
	else if (len < 0 && errno == EAGAIN)
		errx(EXIT_FAILURE, "Did not receive any packets");
	else if (len < 0)
		err(EXIT_FAILURE, "read");

	/*
	 * If read returned anything at all, consider it a success.  The packet
	 * should be an Ethernet frame containing an ARP request for
	 * ip_address.  We won't bother to decode it.
	 */
	return (0);
}

Index: projects/netbsd-tests-upstream-01-2017/usr.bin/sed/tests/sed2_test.sh
===================================================================
--- projects/netbsd-tests-upstream-01-2017/usr.bin/sed/tests/sed2_test.sh	(revision 313398)
+++ projects/netbsd-tests-upstream-01-2017/usr.bin/sed/tests/sed2_test.sh	(revision 313399)
@@ -1,61 +1,59 @@
#
# Copyright 2017 Dell EMC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# $FreeBSD$
#

atf_test_case inplace_hardlink_src
inplace_hardlink_src_head()
{
	atf_set "descr" "Verify -i works with a hardlinked source file"
}
inplace_hardlink_src_body()
{
	echo foo > a
	atf_check ln a b
	atf_check sed -i '' -e 's,foo,bar,g' b
	atf_check -o 'inline:bar\n' -s exit:0 cat b
}

atf_test_case inplace_symlink_src
inplace_symlink_src_head()
{
	atf_set "descr" "Verify -i works with a symlinked source file"
}
inplace_symlink_src_body()
{
-	atf_expect_fail "Check for S_IFREG reverted in r312404"
-
	echo foo > a
	atf_check ln -s a b
	atf_check -e not-empty -s not-exit:0 sed -i '' -e 's,foo,bar,g' b
}

atf_init_test_cases()
{
	atf_add_test_case inplace_hardlink_src
	atf_add_test_case inplace_symlink_src
}

Index: projects/netbsd-tests-upstream-01-2017
===================================================================
--- projects/netbsd-tests-upstream-01-2017	(revision 313398)
+++ projects/netbsd-tests-upstream-01-2017	(revision 313399)

Property changes on: projects/netbsd-tests-upstream-01-2017
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r313381-313398
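
A rough sketch of how the pieces above compose when driven by hand, mirroring
udp_dontroute6_body.  The interface and fib numbers are hypothetical; it
assumes a kernel booted with at least three fibs (net.fibs), root privileges,
and the udp_dontroute helper built from the source above:

	sysctl net.add_addr_allfibs=0
	ifconfig tap0 create
	setfib 2 ifconfig tap0 inet6 2001:db8::2/64 fib 2 no_dad
	setfib 2 ./udp_dontroute -6 2001:db8::100 /dev/tap0
	ifconfig tap0 destroy

Each step corresponds to one helper in fibs_test.sh: get_tap creates the
interface, setup_tap assigns the address on the chosen fib (with no_dad the
address is usable immediately instead of being tentative during DAD), and the
atf_check invocations assert that udp_dontroute's frame arrives on the
expected tap.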