Index: user/ngie/release-pkg-fix-tests/bin/sh/parser.c =================================================================== --- user/ngie/release-pkg-fix-tests/bin/sh/parser.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/bin/sh/parser.c (revision 299055) @@ -1,2115 +1,2116 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Kenneth Almquist. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifndef lint #if 0 static char sccsid[] = "@(#)parser.c 8.7 (Berkeley) 5/16/95"; #endif #endif /* not lint */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "shell.h" #include "parser.h" #include "nodes.h" #include "expand.h" /* defines rmescapes() */ #include "syntax.h" #include "options.h" #include "input.h" #include "output.h" #include "var.h" #include "error.h" #include "memalloc.h" #include "mystring.h" #include "alias.h" #include "show.h" #include "eval.h" #include "exec.h" /* to check for special builtins */ #ifndef NO_HISTORY #include "myhistedit.h" #endif /* * Shell command parser. 
*/ #define PROMPTLEN 128 /* values of checkkwd variable */ #define CHKALIAS 0x1 #define CHKKWD 0x2 #define CHKNL 0x4 /* values returned by readtoken */ #include "token.h" struct heredoc { struct heredoc *next; /* next here document in list */ union node *here; /* redirection node */ char *eofmark; /* string indicating end of input */ int striptabs; /* if set, strip leading tabs */ }; struct parser_temp { struct parser_temp *next; void *data; }; static struct heredoc *heredoclist; /* list of here documents to read */ static int doprompt; /* if set, prompt the user */ static int needprompt; /* true if interactive and at start of line */ static int lasttoken; /* last token read */ static int tokpushback; /* last token pushed back */ static char *wordtext; /* text of last word returned by readtoken */ static int checkkwd; static struct nodelist *backquotelist; static union node *redirnode; static struct heredoc *heredoc; static int quoteflag; /* set if (part of) last token was quoted */ static int startlinno; /* line # where last token started */ static int funclinno; /* line # where the current function started */ static struct parser_temp *parser_temp; #define NOEOFMARK ((const char *)&heredoclist) static union node *list(int); static union node *andor(void); static union node *pipeline(void); static union node *command(void); static union node *simplecmd(union node **, union node *); static union node *makename(void); static union node *makebinary(int type, union node *n1, union node *n2); static void parsefname(void); static void parseheredoc(void); static int peektoken(void); static int readtoken(void); static int xxreadtoken(void); static int readtoken1(int, const char *, const char *, int); static int noexpand(char *); static void consumetoken(int); static void synexpect(int) __dead2; static void synerror(const char *) __dead2; static void setprompt(int); static int pgetc_linecont(void); static void * parser_temp_alloc(size_t len) { struct parser_temp *t; INTOFF; t = ckmalloc(sizeof(*t)); t->data = NULL; t->next = parser_temp; parser_temp = t; t->data = ckmalloc(len); INTON; return t->data; } static void * parser_temp_realloc(void *ptr, size_t len) { struct parser_temp *t; INTOFF; t = parser_temp; if (ptr != t->data) error("bug: parser_temp_realloc misused"); t->data = ckrealloc(t->data, len); INTON; return t->data; } static void parser_temp_free_upto(void *ptr) { struct parser_temp *t; int done = 0; INTOFF; while (parser_temp != NULL && !done) { t = parser_temp; parser_temp = t->next; done = t->data == ptr; ckfree(t->data); ckfree(t); } INTON; if (!done) error("bug: parser_temp_free_upto misused"); } static void parser_temp_free_all(void) { struct parser_temp *t; INTOFF; while (parser_temp != NULL) { t = parser_temp; parser_temp = t->next; ckfree(t->data); ckfree(t); } INTON; } /* * Read and parse a command. Returns NEOF on end of file. (NULL is a * valid parse tree indicating a blank line.) */ union node * parsecmd(int interact) { int t; /* This assumes the parser is not re-entered, * which could happen if we add command substitution on PS1/PS2. */ parser_temp_free_all(); heredoclist = NULL; tokpushback = 0; checkkwd = 0; doprompt = interact; if (doprompt) setprompt(1); else setprompt(0); needprompt = 0; t = readtoken(); if (t == TEOF) return NEOF; if (t == TNL) return NULL; tokpushback++; return list(1); } /* * Read and parse words for wordexp. * Returns a list of NARG nodes; NULL if there are no words. 
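 *
 * A minimal caller-side sketch (hypothetical, not part of this file;
 * emit_word() is a made-up consumer): the returned nodes are chained
 * through narg.next, so a caller would walk the list roughly as
 *
 *	union node *n;
 *
 *	for (n = parsewordexp(); n != NULL; n = n->narg.next)
 *		emit_word(n->narg.text);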
*/ union node * parsewordexp(void) { union node *n, *first = NULL, **pnext; int t; /* This assumes the parser is not re-entered, * which could happen if we add command substitution on PS1/PS2. */ parser_temp_free_all(); heredoclist = NULL; tokpushback = 0; checkkwd = 0; doprompt = 0; setprompt(0); needprompt = 0; pnext = &first; while ((t = readtoken()) != TEOF) { if (t != TWORD) synexpect(TWORD); n = makename(); *pnext = n; pnext = &n->narg.next; } return first; } static union node * list(int nlflag) { union node *ntop, *n1, *n2, *n3; int tok; checkkwd = CHKNL | CHKKWD | CHKALIAS; if (!nlflag && tokendlist[peektoken()]) return NULL; ntop = n1 = NULL; for (;;) { n2 = andor(); tok = readtoken(); if (tok == TBACKGND) { if (n2 != NULL && n2->type == NPIPE) { n2->npipe.backgnd = 1; } else if (n2 != NULL && n2->type == NREDIR) { n2->type = NBACKGND; } else { n3 = (union node *)stalloc(sizeof (struct nredir)); n3->type = NBACKGND; n3->nredir.n = n2; n3->nredir.redirect = NULL; n2 = n3; } } if (ntop == NULL) ntop = n2; else if (n1 == NULL) { n1 = makebinary(NSEMI, ntop, n2); ntop = n1; } else { n3 = makebinary(NSEMI, n1->nbinary.ch2, n2); n1->nbinary.ch2 = n3; n1 = n3; } switch (tok) { case TBACKGND: case TSEMI: tok = readtoken(); /* FALLTHROUGH */ case TNL: if (tok == TNL) { parseheredoc(); if (nlflag) return ntop; } else if (tok == TEOF && nlflag) { parseheredoc(); return ntop; } else { tokpushback++; } checkkwd = CHKNL | CHKKWD | CHKALIAS; if (!nlflag && tokendlist[peektoken()]) return ntop; break; case TEOF: if (heredoclist) parseheredoc(); else pungetc(); /* push back EOF on input */ return ntop; default: if (nlflag) synexpect(-1); tokpushback++; return ntop; } } } static union node * andor(void) { union node *n; int t; n = pipeline(); for (;;) { if ((t = readtoken()) == TAND) { t = NAND; } else if (t == TOR) { t = NOR; } else { tokpushback++; return n; } n = makebinary(t, n, pipeline()); } } static union node * pipeline(void) { union node *n1, *n2, *pipenode; struct nodelist *lp, *prev; int negate, t; negate = 0; checkkwd = CHKNL | CHKKWD | CHKALIAS; TRACE(("pipeline: entered\n")); while (readtoken() == TNOT) negate = !negate; tokpushback++; n1 = command(); if (readtoken() == TPIPE) { pipenode = (union node *)stalloc(sizeof (struct npipe)); pipenode->type = NPIPE; pipenode->npipe.backgnd = 0; lp = (struct nodelist *)stalloc(sizeof (struct nodelist)); pipenode->npipe.cmdlist = lp; lp->n = n1; do { prev = lp; lp = (struct nodelist *)stalloc(sizeof (struct nodelist)); checkkwd = CHKNL | CHKKWD | CHKALIAS; t = readtoken(); tokpushback++; if (t == TNOT) lp->n = pipeline(); else lp->n = command(); prev->next = lp; } while (readtoken() == TPIPE); lp->next = NULL; n1 = pipenode; } tokpushback++; if (negate) { n2 = (union node *)stalloc(sizeof (struct nnot)); n2->type = NNOT; n2->nnot.com = n1; return n2; } else return n1; } static union node * command(void) { union node *n1, *n2; union node *ap, **app; union node *cp, **cpp; union node *redir, **rpp; int t; int is_subshell; checkkwd = CHKNL | CHKKWD | CHKALIAS; is_subshell = 0; redir = NULL; n1 = NULL; rpp = &redir; /* Check for redirection which may precede command */ while (readtoken() == TREDIR) { *rpp = n2 = redirnode; rpp = &n2->nfile.next; parsefname(); } tokpushback++; switch (readtoken()) { case TIF: n1 = (union node *)stalloc(sizeof (struct nif)); n1->type = NIF; if ((n1->nif.test = list(0)) == NULL) synexpect(-1); consumetoken(TTHEN); n1->nif.ifpart = list(0); n2 = n1; while (readtoken() == TELIF) { n2->nif.elsepart = (union node 
*)stalloc(sizeof (struct nif)); n2 = n2->nif.elsepart; n2->type = NIF; if ((n2->nif.test = list(0)) == NULL) synexpect(-1); consumetoken(TTHEN); n2->nif.ifpart = list(0); } if (lasttoken == TELSE) n2->nif.elsepart = list(0); else { n2->nif.elsepart = NULL; tokpushback++; } consumetoken(TFI); checkkwd = CHKKWD | CHKALIAS; break; case TWHILE: case TUNTIL: t = lasttoken; if ((n1 = list(0)) == NULL) synexpect(-1); consumetoken(TDO); n1 = makebinary((t == TWHILE)? NWHILE : NUNTIL, n1, list(0)); consumetoken(TDONE); checkkwd = CHKKWD | CHKALIAS; break; case TFOR: if (readtoken() != TWORD || quoteflag || ! goodname(wordtext)) synerror("Bad for loop variable"); n1 = (union node *)stalloc(sizeof (struct nfor)); n1->type = NFOR; n1->nfor.var = wordtext; while (readtoken() == TNL) ; if (lasttoken == TWORD && ! quoteflag && equal(wordtext, "in")) { app = ≈ while (readtoken() == TWORD) { n2 = makename(); *app = n2; app = &n2->narg.next; } *app = NULL; n1->nfor.args = ap; if (lasttoken != TNL && lasttoken != TSEMI) synexpect(-1); } else { static char argvars[5] = { CTLVAR, VSNORMAL|VSQUOTE, '@', '=', '\0' }; n2 = (union node *)stalloc(sizeof (struct narg)); n2->type = NARG; n2->narg.text = argvars; n2->narg.backquote = NULL; n2->narg.next = NULL; n1->nfor.args = n2; /* * Newline or semicolon here is optional (but note * that the original Bourne shell only allowed NL). */ if (lasttoken != TNL && lasttoken != TSEMI) tokpushback++; } checkkwd = CHKNL | CHKKWD | CHKALIAS; if ((t = readtoken()) == TDO) t = TDONE; else if (t == TBEGIN) t = TEND; else synexpect(-1); n1->nfor.body = list(0); consumetoken(t); checkkwd = CHKKWD | CHKALIAS; break; case TCASE: n1 = (union node *)stalloc(sizeof (struct ncase)); n1->type = NCASE; consumetoken(TWORD); n1->ncase.expr = makename(); while (readtoken() == TNL); if (lasttoken != TWORD || ! equal(wordtext, "in")) synerror("expecting \"in\""); cpp = &n1->ncase.cases; checkkwd = CHKNL | CHKKWD, readtoken(); while (lasttoken != TESAC) { *cpp = cp = (union node *)stalloc(sizeof (struct nclist)); cp->type = NCLIST; app = &cp->nclist.pattern; if (lasttoken == TLP) readtoken(); for (;;) { *app = ap = makename(); checkkwd = CHKNL | CHKKWD; if (readtoken() != TPIPE) break; app = &ap->narg.next; readtoken(); } ap->narg.next = NULL; if (lasttoken != TRP) synexpect(TRP); cp->nclist.body = list(0); checkkwd = CHKNL | CHKKWD | CHKALIAS; if ((t = readtoken()) != TESAC) { if (t == TENDCASE) ; else if (t == TFALLTHRU) cp->type = NCLISTFALLTHRU; else synexpect(TENDCASE); checkkwd = CHKNL | CHKKWD, readtoken(); } cpp = &cp->nclist.next; } *cpp = NULL; checkkwd = CHKKWD | CHKALIAS; break; case TLP: n1 = (union node *)stalloc(sizeof (struct nredir)); n1->type = NSUBSHELL; n1->nredir.n = list(0); n1->nredir.redirect = NULL; consumetoken(TRP); checkkwd = CHKKWD | CHKALIAS; is_subshell = 1; break; case TBEGIN: n1 = list(0); consumetoken(TEND); checkkwd = CHKKWD | CHKALIAS; break; /* A simple command must have at least one redirection or word. 
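 * For example, a bare ">out" (a redirection with no words) still
 * parses as a simple command here, while a lone ";" or "&" has
 * neither a word nor a redirection and is rejected via synexpect(-1).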
*/ case TBACKGND: case TSEMI: case TAND: case TOR: case TPIPE: case TENDCASE: case TFALLTHRU: case TEOF: case TNL: case TRP: if (!redir) synexpect(-1); case TWORD: tokpushback++; n1 = simplecmd(rpp, redir); return n1; default: synexpect(-1); } /* Now check for redirection which may follow command */ while (readtoken() == TREDIR) { *rpp = n2 = redirnode; rpp = &n2->nfile.next; parsefname(); } tokpushback++; *rpp = NULL; if (redir) { if (!is_subshell) { n2 = (union node *)stalloc(sizeof (struct nredir)); n2->type = NREDIR; n2->nredir.n = n1; n1 = n2; } n1->nredir.redirect = redir; } return n1; } static union node * simplecmd(union node **rpp, union node *redir) { union node *args, **app; union node **orig_rpp = rpp; union node *n = NULL; int special; int savecheckkwd; /* If we don't have any redirections already, then we must reset */ /* rpp to be the address of the local redir variable. */ if (redir == NULL) rpp = &redir; args = NULL; app = &args; /* * We save the incoming value, because we need this for shell * functions. There can not be a redirect or an argument between * the function name and the open parenthesis. */ orig_rpp = rpp; savecheckkwd = CHKALIAS; for (;;) { checkkwd = savecheckkwd; if (readtoken() == TWORD) { n = makename(); *app = n; app = &n->narg.next; if (savecheckkwd != 0 && !isassignment(wordtext)) savecheckkwd = 0; } else if (lasttoken == TREDIR) { *rpp = n = redirnode; rpp = &n->nfile.next; parsefname(); /* read name of redirection file */ } else if (lasttoken == TLP && app == &args->narg.next && rpp == orig_rpp) { /* We have a function */ consumetoken(TRP); funclinno = plinno; /* * - Require plain text. * - Functions with '/' cannot be called. * - Reject name=(). * - Reject ksh extended glob patterns. */ if (!noexpand(n->narg.text) || quoteflag || strchr(n->narg.text, '/') || strchr("!%*+-=?@}~", n->narg.text[strlen(n->narg.text) - 1])) synerror("Bad function name"); rmescapes(n->narg.text); if (find_builtin(n->narg.text, &special) >= 0 && special) synerror("Cannot override a special builtin with a function"); n->type = NDEFUN; n->narg.next = command(); funclinno = 0; return n; } else { tokpushback++; break; } } *app = NULL; *rpp = NULL; n = (union node *)stalloc(sizeof (struct ncmd)); n->type = NCMD; n->ncmd.args = args; n->ncmd.redirect = redir; return n; } static union node * makename(void) { union node *n; n = (union node *)stalloc(sizeof (struct narg)); n->type = NARG; n->narg.next = NULL; n->narg.text = wordtext; n->narg.backquote = backquotelist; return n; } static union node * makebinary(int type, union node *n1, union node *n2) { union node *n; n = (union node *)stalloc(sizeof (struct nbinary)); n->type = type; n->nbinary.ch1 = n1; n->nbinary.ch2 = n2; return (n); } void forcealias(void) { checkkwd |= CHKALIAS; } void fixredir(union node *n, const char *text, int err) { TRACE(("Fix redir %s %d\n", text, err)); if (!err) n->ndup.vname = NULL; if (is_digit(text[0]) && text[1] == '\0') n->ndup.dupfd = digit_val(text[0]); else if (text[0] == '-' && text[1] == '\0') n->ndup.dupfd = -1; else { if (err) synerror("Bad fd number"); else n->ndup.vname = makename(); } } static void parsefname(void) { union node *n = redirnode; consumetoken(TWORD); if (n->type == NHERE) { struct heredoc *here = heredoc; struct heredoc *p; if (quoteflag == 0) n->type = NXHERE; TRACE(("Here document %d\n", n->type)); if (here->striptabs) { while (*wordtext == '\t') wordtext++; } if (! 
noexpand(wordtext)) synerror("Illegal eof marker for << redirection"); rmescapes(wordtext); here->eofmark = wordtext; here->next = NULL; if (heredoclist == NULL) heredoclist = here; else { for (p = heredoclist ; p->next ; p = p->next); p->next = here; } } else if (n->type == NTOFD || n->type == NFROMFD) { fixredir(n, wordtext, 0); } else { n->nfile.fname = makename(); } } /* * Input any here documents. */ static void parseheredoc(void) { struct heredoc *here; union node *n; while (heredoclist) { here = heredoclist; heredoclist = here->next; if (needprompt) { setprompt(2); needprompt = 0; } readtoken1(pgetc(), here->here->type == NHERE? SQSYNTAX : DQSYNTAX, here->eofmark, here->striptabs); n = makename(); here->here->nhere.doc = n; } } static int peektoken(void) { int t; t = readtoken(); tokpushback++; return (t); } static int readtoken(void) { int t; struct alias *ap; #ifdef DEBUG int alreadyseen = tokpushback; #endif top: t = xxreadtoken(); /* * eat newlines */ if (checkkwd & CHKNL) { while (t == TNL) { parseheredoc(); t = xxreadtoken(); } } /* * check for keywords and aliases */ if (t == TWORD && !quoteflag) { const char * const *pp; if (checkkwd & CHKKWD) for (pp = parsekwd; *pp; pp++) { if (**pp == *wordtext && equal(*pp, wordtext)) { lasttoken = t = pp - parsekwd + KWDOFFSET; TRACE(("keyword %s recognized\n", tokname[t])); goto out; } } if (checkkwd & CHKALIAS && (ap = lookupalias(wordtext, 1)) != NULL) { pushstring(ap->val, strlen(ap->val), ap); goto top; } } out: if (t != TNOT) checkkwd = 0; #ifdef DEBUG if (!alreadyseen) TRACE(("token %s %s\n", tokname[t], t == TWORD ? wordtext : "")); else TRACE(("reread token %s %s\n", tokname[t], t == TWORD ? wordtext : "")); #endif return (t); } /* * Read the next input token. * If the token is a word, we set backquotelist to the list of cmds in * backquotes. We set quoteflag to true if any part of the word was * quoted. * If the token is TREDIR, then we set redirnode to a structure containing * the redirection. * In all cases, the variable startlinno is set to the number of the line * on which the token starts. * * [Change comment: here documents and internal procedures] * [Readtoken shouldn't have any arguments. Perhaps we should make the * word parsing code into a separate routine. In this case, readtoken * doesn't need to have any internal procedures, but parseword does. * We could also make parseoperator in essence the main routine, and * have parseword (readtoken1?) handle both words and redirection.] 
*/ #define RETURN(token) return lasttoken = token static int xxreadtoken(void) { int c; if (tokpushback) { tokpushback = 0; return lasttoken; } if (needprompt) { setprompt(2); needprompt = 0; } startlinno = plinno; for (;;) { /* until token or start of word found */ c = pgetc_macro(); switch (c) { case ' ': case '\t': continue; case '#': while ((c = pgetc()) != '\n' && c != PEOF); pungetc(); continue; case '\\': if (pgetc() == '\n') { startlinno = ++plinno; if (doprompt) setprompt(2); else setprompt(0); continue; } pungetc(); /* FALLTHROUGH */ default: return readtoken1(c, BASESYNTAX, (char *)NULL, 0); case '\n': plinno++; needprompt = doprompt; RETURN(TNL); case PEOF: RETURN(TEOF); case '&': if (pgetc_linecont() == '&') RETURN(TAND); pungetc(); RETURN(TBACKGND); case '|': if (pgetc_linecont() == '|') RETURN(TOR); pungetc(); RETURN(TPIPE); case ';': c = pgetc_linecont(); if (c == ';') RETURN(TENDCASE); else if (c == '&') RETURN(TFALLTHRU); pungetc(); RETURN(TSEMI); case '(': RETURN(TLP); case ')': RETURN(TRP); } } #undef RETURN } #define MAXNEST_static 8 struct tokenstate { const char *syntax; /* *SYNTAX */ int parenlevel; /* levels of parentheses in arithmetic */ enum tokenstate_category { TSTATE_TOP, TSTATE_VAR_OLD, /* ${var+-=?}, inherits dquotes */ TSTATE_VAR_NEW, /* other ${var...}, own dquote state */ TSTATE_ARITH } category; }; /* * Check to see whether we are at the end of the here document. When this * is called, c is set to the first character of the next input line. If * we are at the end of the here document, this routine sets the c to PEOF. * The new value of c is returned. */ static int checkend(int c, const char *eofmark, int striptabs) { if (striptabs) { while (c == '\t') c = pgetc(); } if (c == *eofmark) { int c2; const char *q; for (q = eofmark + 1; c2 = pgetc(), *q != '\0' && c2 == *q; q++) ; if ((c2 == PEOF || c2 == '\n') && *q == '\0') { c = PEOF; if (c2 == '\n') { plinno++; needprompt = doprompt; } } else { pungetc(); pushstring(eofmark + 1, q - (eofmark + 1), NULL); } } else if (c == '\n' && *eofmark == '\0') { c = PEOF; plinno++; needprompt = doprompt; } return (c); } /* * Parse a redirection operator. The variable "out" points to a string * specifying the fd to be redirected. The variable "c" contains the * first character of the redirection operator. */ static void parseredir(char *out, int c) { char fd = *out; union node *np; np = (union node *)stalloc(sizeof (struct nfile)); if (c == '>') { np->nfile.fd = 1; c = pgetc_linecont(); if (c == '>') np->type = NAPPEND; else if (c == '&') np->type = NTOFD; else if (c == '|') np->type = NCLOBBER; else { np->type = NTO; pungetc(); } } else { /* c == '<' */ np->nfile.fd = 0; c = pgetc_linecont(); if (c == '<') { if (sizeof (struct nfile) != sizeof (struct nhere)) { np = (union node *)stalloc(sizeof (struct nhere)); np->nfile.fd = 0; } np->type = NHERE; heredoc = (struct heredoc *)stalloc(sizeof (struct heredoc)); heredoc->here = np; if ((c = pgetc_linecont()) == '-') { heredoc->striptabs = 1; } else { heredoc->striptabs = 0; pungetc(); } } else if (c == '&') np->type = NFROMFD; else if (c == '>') np->type = NFROMTO; else { np->type = NFROM; pungetc(); } } if (fd != '\0') np->nfile.fd = digit_val(fd); redirnode = np; } /* * Called to parse command substitutions. 
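 *
 * parsebackq() below protects itself with the shell's setjmp-based
 * exception handling: it saves the current handler, the current input
 * file and the pending here-document list, installs its own jmploc,
 * and restores everything if an error unwinds out of the nested
 * parse.  A stand-alone sketch of that pattern (simplified,
 * illustrative names only):
 *
 *	#include <setjmp.h>
 *
 *	static jmp_buf *handler;
 *
 *	static void fail(void) { longjmp(*handler, 1); }
 *
 *	static int
 *	guarded(void)
 *	{
 *		jmp_buf loc, *saved = handler;
 *
 *		if (setjmp(loc)) {
 *			handler = saved;	-- error path: restore state
 *			return (-1);
 *		}
 *		handler = &loc;
 *		fail();				-- may longjmp back to setjmp
 *		handler = saved;
 *		return (0);
 *	}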
*/ static char * parsebackq(char *out, struct nodelist **pbqlist, int oldstyle, int dblquote, int quoted) { struct nodelist **nlpp; union node *n; char *volatile str; struct jmploc jmploc; struct jmploc *const savehandler = handler; size_t savelen; int saveprompt; const int bq_startlinno = plinno; char *volatile ostr = NULL; struct parsefile *const savetopfile = getcurrentfile(); struct heredoc *const saveheredoclist = heredoclist; struct heredoc *here; str = NULL; if (setjmp(jmploc.loc)) { popfilesupto(savetopfile); if (str) ckfree(str); if (ostr) ckfree(ostr); heredoclist = saveheredoclist; handler = savehandler; if (exception == EXERROR) { startlinno = bq_startlinno; synerror("Error in command substitution"); } longjmp(handler->loc, 1); } INTOFF; savelen = out - stackblock(); if (savelen > 0) { str = ckmalloc(savelen); memcpy(str, stackblock(), savelen); } handler = &jmploc; heredoclist = NULL; INTON; if (oldstyle) { /* We must read until the closing backquote, giving special treatment to some slashes, and then push the string and reread it as input, interpreting it normally. */ char *oout; int c; int olen; STARTSTACKSTR(oout); for (;;) { if (needprompt) { setprompt(2); needprompt = 0; } CHECKSTRSPACE(2, oout); c = pgetc_linecont(); if (c == '`') break; switch (c) { case '\\': c = pgetc(); if (c != '\\' && c != '`' && c != '$' && (!dblquote || c != '"')) USTPUTC('\\', oout); break; case '\n': plinno++; needprompt = doprompt; break; case PEOF: startlinno = plinno; synerror("EOF in backquote substitution"); break; default: break; } USTPUTC(c, oout); } USTPUTC('\0', oout); olen = oout - stackblock(); INTOFF; ostr = ckmalloc(olen); memcpy(ostr, stackblock(), olen); setinputstring(ostr, 1); INTON; } nlpp = pbqlist; while (*nlpp) nlpp = &(*nlpp)->next; *nlpp = (struct nodelist *)stalloc(sizeof (struct nodelist)); (*nlpp)->next = NULL; if (oldstyle) { saveprompt = doprompt; doprompt = 0; } n = list(0); if (oldstyle) { if (peektoken() != TEOF) synexpect(-1); doprompt = saveprompt; } else consumetoken(TRP); (*nlpp)->n = n; if (oldstyle) { /* * Start reading from old file again, ignoring any pushed back * tokens left from the backquote parsing */ popfile(); tokpushback = 0; } STARTSTACKSTR(out); CHECKSTRSPACE(savelen + 1, out); INTOFF; if (str) { memcpy(out, str, savelen); STADJUST(savelen, out); ckfree(str); str = NULL; } if (ostr) { ckfree(ostr); ostr = NULL; } here = saveheredoclist; if (here != NULL) { while (here->next != NULL) here = here->next; here->next = heredoclist; heredoclist = saveheredoclist; } handler = savehandler; INTON; if (quoted) USTPUTC(CTLBACKQ | CTLQUOTE, out); else USTPUTC(CTLBACKQ, out); return out; } /* * Called to parse a backslash escape sequence inside $'...'. * The backslash has already been read. 
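/*
 * For \uXXXX and \UXXXXXXXX escapes the routine below emits UTF-8 by
 * hand, since wctomb() cannot be trusted after a locale change.  A
 * self-contained sketch of the same encoding ranges (illustration
 * only, not the shell's code):
 *
 *	static int
 *	utf8_encode(unsigned int v, unsigned char *buf)
 *	{
 *		if (v <= 0x7f) {
 *			buf[0] = v;
 *			return (1);
 *		} else if (v <= 0x7ff) {
 *			buf[0] = 0xc0 | (v >> 6);
 *			buf[1] = 0x80 | (v & 0x3f);
 *			return (2);
 *		} else if (v <= 0xffff) {
 *			buf[0] = 0xe0 | (v >> 12);
 *			buf[1] = 0x80 | ((v >> 6) & 0x3f);
 *			buf[2] = 0x80 | (v & 0x3f);
 *			return (3);
 *		} else if (v <= 0x10ffff) {
 *			buf[0] = 0xf0 | (v >> 18);
 *			buf[1] = 0x80 | ((v >> 12) & 0x3f);
 *			buf[2] = 0x80 | ((v >> 6) & 0x3f);
 *			buf[3] = 0x80 | (v & 0x3f);
 *			return (4);
 *		}
 *		return (0);
 *	}
 */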
*/ static char * readcstyleesc(char *out) { int c, vc, i, n; unsigned int v; c = pgetc(); switch (c) { case '\0': synerror("Unterminated quoted string"); case '\n': plinno++; if (doprompt) setprompt(2); else setprompt(0); return out; case '\\': case '\'': case '"': v = c; break; case 'a': v = '\a'; break; case 'b': v = '\b'; break; case 'e': v = '\033'; break; case 'f': v = '\f'; break; case 'n': v = '\n'; break; case 'r': v = '\r'; break; case 't': v = '\t'; break; case 'v': v = '\v'; break; case 'x': v = 0; for (;;) { c = pgetc(); if (c >= '0' && c <= '9') v = (v << 4) + c - '0'; else if (c >= 'A' && c <= 'F') v = (v << 4) + c - 'A' + 10; else if (c >= 'a' && c <= 'f') v = (v << 4) + c - 'a' + 10; else break; } pungetc(); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': v = c - '0'; c = pgetc(); if (c >= '0' && c <= '7') { v <<= 3; v += c - '0'; c = pgetc(); if (c >= '0' && c <= '7') { v <<= 3; v += c - '0'; } else pungetc(); } else pungetc(); break; case 'c': c = pgetc(); if (c < 0x3f || c > 0x7a || c == 0x60) synerror("Bad escape sequence"); if (c == '\\' && pgetc() != '\\') synerror("Bad escape sequence"); if (c == '?') v = 127; else v = c & 0x1f; break; case 'u': case 'U': n = c == 'U' ? 8 : 4; v = 0; for (i = 0; i < n; i++) { c = pgetc(); if (c >= '0' && c <= '9') v = (v << 4) + c - '0'; else if (c >= 'A' && c <= 'F') v = (v << 4) + c - 'A' + 10; else if (c >= 'a' && c <= 'f') v = (v << 4) + c - 'a' + 10; else synerror("Bad escape sequence"); } if (v == 0 || (v >= 0xd800 && v <= 0xdfff)) synerror("Bad escape sequence"); /* We really need iconv here. */ if (initial_localeisutf8 && v > 127) { CHECKSTRSPACE(4, out); /* * We cannot use wctomb() as the locale may have * changed. */ if (v <= 0x7ff) { USTPUTC(0xc0 | v >> 6, out); USTPUTC(0x80 | (v & 0x3f), out); return out; } else if (v <= 0xffff) { USTPUTC(0xe0 | v >> 12, out); USTPUTC(0x80 | ((v >> 6) & 0x3f), out); USTPUTC(0x80 | (v & 0x3f), out); return out; } else if (v <= 0x10ffff) { USTPUTC(0xf0 | v >> 18, out); USTPUTC(0x80 | ((v >> 12) & 0x3f), out); USTPUTC(0x80 | ((v >> 6) & 0x3f), out); USTPUTC(0x80 | (v & 0x3f), out); return out; } } if (v > 127) v = '?'; break; default: synerror("Bad escape sequence"); } vc = (char)v; /* * We can't handle NUL bytes. * POSIX says we should skip till the closing quote. */ if (vc == '\0') { while ((c = pgetc()) != '\'') { if (c == '\\') c = pgetc(); if (c == PEOF) synerror("Unterminated quoted string"); if (c == '\n') { plinno++; if (doprompt) setprompt(2); else setprompt(0); } } pungetc(); return out; } if (SQSYNTAX[vc] == CCTL) USTPUTC(CTLESC, out); USTPUTC(vc, out); return out; } /* * If eofmark is NULL, read a word or a redirection symbol. If eofmark * is not NULL, read a here document. In the latter case, eofmark is the * word which marks the end of the document and striptabs is true if * leading tabs should be stripped from the document. The argument firstc * is the first character of the input token or document. * * Because C does not have internal subroutines, I have simulated them * using goto's to implement the subroutine linkage. The following macros * will run code that appears at the end of readtoken1. 
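 *
 * A tiny stand-alone illustration of that goto-as-subroutine idiom
 * (made-up names; the real PARSESUB/PARSEARITH macros follow below):
 *
 *	#define CALLSUB() {goto sub; sub_return:;}
 *
 *	static int
 *	f(int x)
 *	{
 *		CALLSUB();		-- runs the code at "sub:"
 *		return (x);
 *
 *	sub:
 *		x *= 2;			-- shares f()'s local variables
 *		goto sub_return;	-- "returns" to the macro site
 *	}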
*/ #define PARSESUB() {goto parsesub; parsesub_return:;} #define PARSEARITH() {goto parsearith; parsearith_return:;} static int readtoken1(int firstc, char const *initialsyntax, const char *eofmark, int striptabs) { int c = firstc; char *out; int len; struct nodelist *bqlist; int quotef; int newvarnest; int level; int synentry; struct tokenstate state_static[MAXNEST_static]; int maxnest = MAXNEST_static; struct tokenstate *state = state_static; int sqiscstyle = 0; startlinno = plinno; quotef = 0; bqlist = NULL; newvarnest = 0; level = 0; state[level].syntax = initialsyntax; state[level].parenlevel = 0; state[level].category = TSTATE_TOP; STARTSTACKSTR(out); loop: { /* for each line, until end of word */ if (eofmark && eofmark != NOEOFMARK) /* set c to PEOF if at end of here document */ c = checkend(c, eofmark, striptabs); for (;;) { /* until end of line or end of word */ CHECKSTRSPACE(4, out); /* permit 4 calls to USTPUTC */ synentry = state[level].syntax[c]; switch(synentry) { case CNL: /* '\n' */ if (state[level].syntax == BASESYNTAX) goto endword; /* exit outer loop */ USTPUTC(c, out); plinno++; if (doprompt) setprompt(2); else setprompt(0); c = pgetc(); goto loop; /* continue outer loop */ case CSBACK: if (sqiscstyle) { out = readcstyleesc(out); break; } /* FALLTHROUGH */ case CWORD: USTPUTC(c, out); break; case CCTL: if (eofmark == NULL || initialsyntax != SQSYNTAX) USTPUTC(CTLESC, out); USTPUTC(c, out); break; case CBACK: /* backslash */ c = pgetc(); if (c == PEOF) { USTPUTC('\\', out); pungetc(); } else if (c == '\n') { plinno++; if (doprompt) setprompt(2); else setprompt(0); } else { if (state[level].syntax == DQSYNTAX && c != '\\' && c != '`' && c != '$' && (c != '"' || (eofmark != NULL && newvarnest == 0)) && (c != '}' || state[level].category != TSTATE_VAR_OLD)) USTPUTC('\\', out); if ((eofmark == NULL || newvarnest > 0) && state[level].syntax == BASESYNTAX) USTPUTC(CTLQUOTEMARK, out); if (SQSYNTAX[c] == CCTL) USTPUTC(CTLESC, out); USTPUTC(c, out); if ((eofmark == NULL || newvarnest > 0) && state[level].syntax == BASESYNTAX && state[level].category == TSTATE_VAR_OLD) USTPUTC(CTLQUOTEEND, out); quotef++; } break; case CSQUOTE: USTPUTC(CTLQUOTEMARK, out); state[level].syntax = SQSYNTAX; sqiscstyle = 0; break; case CDQUOTE: USTPUTC(CTLQUOTEMARK, out); state[level].syntax = DQSYNTAX; break; case CENDQUOTE: if (eofmark != NULL && newvarnest == 0) USTPUTC(c, out); else { if (state[level].category == TSTATE_VAR_OLD) USTPUTC(CTLQUOTEEND, out); state[level].syntax = BASESYNTAX; quotef++; } break; case CVAR: /* '$' */ PARSESUB(); /* parse substitution */ break; case CENDVAR: /* '}' */ if (level > 0 && ((state[level].category == TSTATE_VAR_OLD && state[level].syntax == state[level - 1].syntax) || (state[level].category == TSTATE_VAR_NEW && state[level].syntax == BASESYNTAX))) { if (state[level].category == TSTATE_VAR_NEW) newvarnest--; level--; USTPUTC(CTLENDVAR, out); } else { USTPUTC(c, out); } break; case CLP: /* '(' in arithmetic */ state[level].parenlevel++; USTPUTC(c, out); break; case CRP: /* ')' in arithmetic */ if (state[level].parenlevel > 0) { USTPUTC(c, out); --state[level].parenlevel; } else { if (pgetc_linecont() == ')') { if (level > 0 && state[level].category == TSTATE_ARITH) { level--; USTPUTC(CTLENDARI, out); } else USTPUTC(')', out); } else { /* * unbalanced parens * (don't 2nd guess - no error) */ pungetc(); USTPUTC(')', out); } } break; case CBQUOTE: /* '`' */ out = parsebackq(out, &bqlist, 1, state[level].syntax == DQSYNTAX && (eofmark == NULL || newvarnest > 0), 
state[level].syntax == DQSYNTAX || state[level].syntax == ARISYNTAX); break; case CEOF: goto endword; /* exit outer loop */ case CIGN: break; default: if (level == 0) goto endword; /* exit outer loop */ USTPUTC(c, out); } c = pgetc_macro(); } } endword: if (state[level].syntax == ARISYNTAX) synerror("Missing '))'"); if (state[level].syntax != BASESYNTAX && eofmark == NULL) synerror("Unterminated quoted string"); if (state[level].category == TSTATE_VAR_OLD || state[level].category == TSTATE_VAR_NEW) { startlinno = plinno; synerror("Missing '}'"); } if (state != state_static) parser_temp_free_upto(state); USTPUTC('\0', out); len = out - stackblock(); out = stackblock(); if (eofmark == NULL) { if ((c == '>' || c == '<') && quotef == 0 && len <= 2 && (*out == '\0' || is_digit(*out))) { parseredir(out, c); return lasttoken = TREDIR; } else { pungetc(); } } quoteflag = quotef; backquotelist = bqlist; grabstackblock(len); wordtext = out; return lasttoken = TWORD; /* end of readtoken routine */ /* * Parse a substitution. At this point, we have read the dollar sign * and nothing else. */ parsesub: { int subtype; int typeloc; int flags; char *p; static const char types[] = "}-+?="; int linno; int length; int c1; c = pgetc_linecont(); if (c == '(') { /* $(command) or $((arith)) */ if (pgetc_linecont() == '(') { PARSEARITH(); } else { pungetc(); out = parsebackq(out, &bqlist, 0, state[level].syntax == DQSYNTAX && (eofmark == NULL || newvarnest > 0), state[level].syntax == DQSYNTAX || state[level].syntax == ARISYNTAX); } } else if (c == '{' || is_name(c) || is_special(c)) { USTPUTC(CTLVAR, out); typeloc = out - stackblock(); USTPUTC(VSNORMAL, out); subtype = VSNORMAL; flags = 0; if (c == '{') { c = pgetc_linecont(); subtype = 0; } varname: if (!is_eof(c) && is_name(c)) { length = 0; do { STPUTC(c, out); c = pgetc_linecont(); length++; } while (!is_eof(c) && is_in_name(c)); if (length == 6 && strncmp(out - length, "LINENO", length) == 0) { /* Replace the variable name with the * current line number. */ STADJUST(-6, out); CHECKSTRSPACE(11, out); linno = plinno; if (funclinno != 0) linno -= funclinno - 1; length = snprintf(out, 11, "%d", linno); if (length > 10) length = 10; out += length; flags |= VSLINENO; } } else if (is_digit(c)) { if (subtype != VSNORMAL) { do { STPUTC(c, out); c = pgetc_linecont(); } while (is_digit(c)); } else { USTPUTC(c, out); c = pgetc_linecont(); } } else if (is_special(c)) { c1 = c; c = pgetc_linecont(); if (subtype == 0 && c1 == '#') { subtype = VSLENGTH; if (strchr(types, c) == NULL && c != ':' && c != '#' && c != '%') goto varname; c1 = c; c = pgetc_linecont(); if (c1 != '}' && c == '}') { pungetc(); c = c1; goto varname; } pungetc(); c = c1; c1 = '#'; subtype = 0; } USTPUTC(c1, out); } else { subtype = VSERROR; if (c == '}') pungetc(); else if (c == '\n' || c == PEOF) synerror("Unexpected end of line in substitution"); else if (BASESYNTAX[c] != CCTL) USTPUTC(c, out); } if (subtype == 0) { switch (c) { case ':': flags |= VSNUL; c = pgetc_linecont(); /*FALLTHROUGH*/ default: p = strchr(types, c); if (p == NULL) { if (c == '\n' || c == PEOF) synerror("Unexpected end of line in substitution"); if (flags == VSNUL) STPUTC(':', out); if (BASESYNTAX[c] != CCTL) STPUTC(c, out); subtype = VSERROR; } else subtype = p - types + VSNORMAL; break; case '%': case '#': { int cc = c; subtype = c == '#' ? 
VSTRIMLEFT : VSTRIMRIGHT; c = pgetc_linecont(); if (c == cc) subtype++; else pungetc(); break; } } } else if (subtype != VSERROR) { if (subtype == VSLENGTH && c != '}') subtype = VSERROR; pungetc(); } STPUTC('=', out); if (state[level].syntax == DQSYNTAX || state[level].syntax == ARISYNTAX) flags |= VSQUOTE; *(stackblock() + typeloc) = subtype | flags; if (subtype != VSNORMAL) { if (level + 1 >= maxnest) { maxnest *= 2; if (state == state_static) { state = parser_temp_alloc( maxnest * sizeof(*state)); memcpy(state, state_static, MAXNEST_static * sizeof(*state)); } else state = parser_temp_realloc(state, maxnest * sizeof(*state)); } level++; state[level].parenlevel = 0; if (subtype == VSMINUS || subtype == VSPLUS || subtype == VSQUESTION || subtype == VSASSIGN) { /* * For operators that were in the Bourne shell, * inherit the double-quote state. */ state[level].syntax = state[level - 1].syntax; state[level].category = TSTATE_VAR_OLD; } else { /* * The other operators take a pattern, * so go to BASESYNTAX. * Also, ' and " are now special, even * in here documents. */ state[level].syntax = BASESYNTAX; state[level].category = TSTATE_VAR_NEW; newvarnest++; } } } else if (c == '\'' && state[level].syntax == BASESYNTAX) { /* $'cstylequotes' */ USTPUTC(CTLQUOTEMARK, out); state[level].syntax = SQSYNTAX; sqiscstyle = 1; } else { USTPUTC('$', out); pungetc(); } goto parsesub_return; } /* * Parse an arithmetic expansion (indicate start of one and set state) */ parsearith: { if (level + 1 >= maxnest) { maxnest *= 2; if (state == state_static) { state = parser_temp_alloc( maxnest * sizeof(*state)); memcpy(state, state_static, MAXNEST_static * sizeof(*state)); } else state = parser_temp_realloc(state, maxnest * sizeof(*state)); } level++; state[level].syntax = ARISYNTAX; state[level].parenlevel = 0; state[level].category = TSTATE_ARITH; USTPUTC(CTLARI, out); if (state[level - 1].syntax == DQSYNTAX) USTPUTC('"',out); else USTPUTC(' ',out); goto parsearith_return; } } /* end of readtoken */ /* * Returns true if the text contains nothing to expand (no dollar signs * or backquotes). */ static int noexpand(char *text) { char *p; char c; p = text; while ((c = *p++) != '\0') { if ( c == CTLQUOTEMARK) continue; if (c == CTLESC) p++; else if (BASESYNTAX[(int)c] == CCTL) return 0; } return 1; } /* * Return true if the argument is a legal variable name (a letter or * underscore followed by zero or more letters, underscores, and digits). */ int goodname(const char *name) { const char *p; p = name; if (! is_name(*p)) return 0; while (*++p) { if (! is_in_name(*p)) return 0; } return 1; } int isassignment(const char *p) { if (!is_name(*p)) return 0; p++; for (;;) { if (*p == '=') return 1; else if (!is_in_name(*p)) return 0; p++; } } static void consumetoken(int token) { if (readtoken() != token) synexpect(token); } /* * Called when an unexpected token is read during the parse. The argument * is the token that is expected, or -1 if more than one type of token can * occur at this point. 
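 *
 * For instance (token names illustrative), a missing ')' might be
 * reported through
 *
 *	fmtstr(msg, 64, "%s unexpected (expecting %s)", "newline", ")");
 *
 * with synerror() below prefixing the script name and line number.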
*/ static void synexpect(int token) { char msg[64]; if (token >= 0) { fmtstr(msg, 64, "%s unexpected (expecting %s)", tokname[lasttoken], tokname[token]); } else { fmtstr(msg, 64, "%s unexpected", tokname[lasttoken]); } synerror(msg); } static void synerror(const char *msg) { if (commandname) outfmt(out2, "%s: %d: ", commandname, startlinno); else if (arg0) outfmt(out2, "%s: ", arg0); outfmt(out2, "Syntax error: %s\n", msg); error((char *)NULL); } static void setprompt(int which) { whichprompt = which; if (which == 0) return; #ifndef NO_HISTORY if (!el) #endif { out2str(getprompt(NULL)); flushout(out2); } } static int pgetc_linecont(void) { int c; while ((c = pgetc_macro()) == '\\') { c = pgetc(); if (c == '\n') { plinno++; if (doprompt) setprompt(2); else setprompt(0); } else { pungetc(); /* Allow the backslash to be pushed back. */ pushstring("\\", 1, NULL); return (pgetc()); } } return (c); } /* * called by editline -- any expansions to the prompt * should be added here. */ char * getprompt(void *unused __unused) { static char ps[PROMPTLEN]; const char *fmt; const char *pwd; int i, trim; static char internal_error[] = "??"; /* * Select prompt format. */ switch (whichprompt) { case 0: fmt = ""; break; case 1: fmt = ps1val(); break; case 2: fmt = ps2val(); break; default: return internal_error; } /* * Format prompt string. */ for (i = 0; (i < 127) && (*fmt != '\0'); i++, fmt++) if (*fmt == '\\') switch (*++fmt) { /* * Hostname. * * \h specifies just the local hostname, * \H specifies fully-qualified hostname. */ case 'h': case 'H': ps[i] = '\0'; gethostname(&ps[i], PROMPTLEN - i); /* Skip to end of hostname. */ trim = (*fmt == 'h') ? '.' : '\0'; - while ((ps[i+1] != '\0') && (ps[i+1] != trim)) + while ((ps[i] != '\0') && (ps[i] != trim)) i++; + --i; break; /* * Working directory. * * \W specifies just the final component, * \w specifies the entire path. */ case 'W': case 'w': pwd = lookupvar("PWD"); - if (pwd == NULL) + if (pwd == NULL || *pwd == '\0') pwd = "?"; if (*fmt == 'W' && *pwd == '/' && pwd[1] != '\0') strlcpy(&ps[i], strrchr(pwd, '/') + 1, PROMPTLEN - i); else strlcpy(&ps[i], pwd, PROMPTLEN - i); /* Skip to end of path. */ while (ps[i + 1] != '\0') i++; break; /* * Superuser status. * * '$' for normal users, '#' for root. */ case '$': ps[i] = (geteuid() != 0) ? '$' : '#'; break; /* * A literal \. */ case '\\': ps[i] = '\\'; break; /* * Emit unrecognized formats verbatim. 
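 *
 * Putting the escapes above together (an illustrative example, not
 * output captured from this build): with PS1='\h:\w \$ ' on host
 * "foo.example.org", in /usr/src, as a non-root user, the prompt
 * comes out as "foo:/usr/src $ ": \h keeps only the hostname up to
 * the first '.', \w expands $PWD, and \$ becomes '$' ('#' for root).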
*/ default: ps[i++] = '\\'; ps[i] = *fmt; break; } else ps[i] = *fmt; ps[i] = '\0'; return (ps); } const char * expandstr(const char *ps) { union node n; struct jmploc jmploc; struct jmploc *const savehandler = handler; const int saveprompt = doprompt; struct parsefile *const savetopfile = getcurrentfile(); struct parser_temp *const saveparser_temp = parser_temp; const char *result = NULL; if (!setjmp(jmploc.loc)) { handler = &jmploc; parser_temp = NULL; setinputstring(ps, 1); doprompt = 0; readtoken1(pgetc(), DQSYNTAX, NOEOFMARK, 0); if (backquotelist != NULL) error("Command substitution not allowed here"); n.narg.type = NARG; n.narg.next = NULL; n.narg.text = wordtext; n.narg.backquote = backquotelist; expandarg(&n, NULL, 0); result = stackblock(); INTOFF; } handler = savehandler; doprompt = saveprompt; popfilesupto(savetopfile); if (parser_temp != saveparser_temp) { parser_temp_free_all(); parser_temp = saveparser_temp; } if (result != NULL) { INTON; } else if (exception == EXINT) raise(SIGINT); return result; } Index: user/ngie/release-pkg-fix-tests/share/mk/local.dirdeps.mk =================================================================== --- user/ngie/release-pkg-fix-tests/share/mk/local.dirdeps.mk (revision 299054) +++ user/ngie/release-pkg-fix-tests/share/mk/local.dirdeps.mk (revision 299055) @@ -1,203 +1,204 @@ # $FreeBSD$ .if !target(_DIRDEP_USE) # we are the 1st makefile .if !defined(MK_CLANG) .include "${SRCTOP}/share/mk/src.opts.mk" .endif # DEP_MACHINE is set before we get here, this may not be. DEP_RELDIR ?= ${RELDIR} # making universe is special .if defined(UNIVERSE_GUARD) # these should be done by now DIRDEPS_FILTER+= N*.host .endif # pseudo machines get no qualification .for m in host common M_dep_qual_fixes += C;($m),[^/.,]*$$;\1; .endfor #.info M_dep_qual_fixes=${M_dep_qual_fixes} # Cheat for including src.libnames.mk ____: # Pull in _INTERNALLIBS .include # Host libraries should mostly be excluded from the build so the # host version in /usr/lib is used. Internal libraries need to be # allowed to be built though since they are never installed. _need_host_libs= .for lib in ${_INTERNALLIBS} _need_host_libs+= ${LIB${lib:tu}DIR:S,^${OBJTOP}/,,} .endfor N_host_libs:= ${cd ${SRCTOP} && echo lib/lib*:L:sh:${_need_host_libs:${M_ListToSkip}}:${M_ListToSkip}} DIRDEPS_FILTER.host = \ ${N_host_libs} \ Ninclude* \ Nlib/csu* \ Nlib/[mn]* \ Ngnu/lib/csu* \ Ngnu/lib/lib[a-r]* \ + Nsecure/lib* \ Nusr.bin/xinstall* \ DIRDEPS_FILTER+= \ Nbin/cat.host \ ${DIRDEPS_FILTER.xtras:U} .endif # reset this each time DIRDEPS_FILTER.xtras= .if ${DEP_MACHINE:Npkgs*} != "" DIRDEPS_FILTER.xtras+= Nusr.bin/clang/clang.host .endif .if ${DEP_MACHINE} != "host" # this is how we can handle optional dependencies .if ${DEP_RELDIR} == "lib/libc" DIRDEPS += lib/libc_nonshared .if ${MK_SSP:Uno} != "no" DIRDEPS += gnu/lib/libssp/libssp_nonshared .endif .else DIRDEPS_FILTER.xtras+= Nlib/libc_nonshared .endif # some optional things .if ${MK_CTF} == "yes" && ${DEP_RELDIR:Mcddl/usr.bin/ctf*} == "" DIRDEPS += \ cddl/usr.bin/ctfconvert.host \ cddl/usr.bin/ctfmerge.host .endif # Bootstrap support. Give hints to DIRDEPS if there is no Makefile.depend* # generated yet. This can be based on things such as SRC files and LIBADD. # These hints will not factor into the final Makefile.depend as only what is # used will be added in and handled via [local.]gendirdeps.mk. This is not # done for MACHINE=host builds. # XXX: Include this in local.autodep.mk as well for gendirdeps without filemon. 
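# As a purely illustrative example: a directory with SRCS=foo.c and
# LIBADD=util, but no Makefile.depend yet, would be seeded with the
# C_DIRDEPS list below (include, lib/libc, lib/libcompiler_rt, ...) plus
# lib/libutil from the LIBADD handling, enough to bootstrap the build
# until the real dependencies are generated.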
# Only do this for main build target .if ${RELDIR} == ${DEP_RELDIR} && !defined(_RECURSING_PROGS) .for _depfile in ${.MAKE.DEPENDFILE_PREFERENCE:T} .if !defined(_have_depfile) && exists(${.CURDIR}/${_depfile}) _have_depfile= .endif .endfor .if !defined(_have_depfile) # KMOD does not use any stdlibs. .if !defined(KMOD) # Gather PROGS dependencies first .if !empty(PROGS) _PROGS_LIBADD= _PROGS_DPADD= _PROGS_SRCS= .for _prog in ${PROGS} .for s in . _ .if !empty(LIBADD${s}${_prog}) _PROGS_LIBADD+= ${LIBADD${s}${_prog}} .endif .if !empty(DPADD${s}${_prog}) _PROGS_DPADD+= ${DPADD${s}${_prog}} .endif .if !empty(SRCS${s}${_prog}) _PROGS_SRCS+= ${SRCS${s}${_prog}} .endif .endfor # .for s in . _ # Add in assumed source (bsd.prog.mk) .if !target(${_prog}) .if defined(PROG_CXX) _PROGS_SRCS+= ${_prog}.cc .else _PROGS_SRCS+= ${_prog}.c .endif .endif # !target(${_prog}) .endfor # .for _prog in ${PROGS} .endif # !empty(PROGS) _SRCS= ${SRCS} ${_PROGS_SRCS} # Has C files. The C_DIRDEPS are shared with C++ files as well. C_DIRDEPS= \ gnu/lib/csu \ gnu/lib/libgcc \ include \ include/arpa \ include/protocols \ include/rpc \ include/rpcsvc \ include/xlocale \ lib/${CSU_DIR} \ lib/libc \ lib/libcompiler_rt \ .if ${MK_GSSAPI} != "no" C_DIRDEPS+= include/gssapi .endif .if !empty(_SRCS:M*.c) DIRDEPS+= ${C_DIRDEPS} .endif # Has C++ files .if !empty(_SRCS:M*.cc) || !empty(_SRCS:M*.C) || !empty(_SRCS:M*.cpp) || \ !empty(_SRCS:M*.cxx) DIRDEPS+= ${C_DIRDEPS} .if ${MK_CLANG} == "yes" DIRDEPS+= lib/libc++ lib/libcxxrt .else DIRDEPS+= gnu/lib/libstdc++ gnu/lib/libsupc++ .endif # XXX: Clang and GCC always adds -lm currently, even when not needed. DIRDEPS+= lib/msun .endif # CXX .endif # !defined(KMOD) # Has yacc files. .if !empty(_SRCS:M*.y) DIRDEPS+= usr.bin/yacc.host .endif _DPADD= ${DPADD} ${_PROGS_DPADD} .if !empty(_DPADD) # Taken from meta.autodep.mk (where it only does something with # BUILD_AT_LEVEL0, which we don't use). # This only works for DPADD with full OBJ/SRC paths, which is mostly just # _INTERNALLIBS. _DP_DIRDEPS= \ ${_DPADD:O:u:M${OBJTOP}*:H:N.:tA:C,${OBJTOP}[^/]*/,,:N.:O:u} \ ${_DPADD:O:u:M${OBJROOT}*:N${OBJTOP}*:N${STAGE_ROOT}/*:H:S,${OBJROOT},,:C,^([^/]+)/(.*),\2.\1,:S,${HOST_TARGET}$,host,:N.*:O:u} # Resolve the paths to RELDIRs .if !empty(_DP_DIRDEPS) DIRDEPS+= ${_DP_DIRDEPS:C,^,${SRCTOP}/,:tA:C,^${SRCTOP}/,,} .endif .endif # !empty(DPADD) _ALL_LIBADD= ${LIBADD} ${_PROGS_LIBADD} .if !empty(_ALL_LIBADD) # Also handle LIBADD for non-internal libraries. 
.for _lib in ${_ALL_LIBADD:O:u} _lib${_lib}reldir= ${LIB${_lib:tu}DIR:C,${OBJTOP}/,,} .if defined(LIB${_lib:tu}DIR) && ${DIRDEPS:M${_lib${_lib}reldir}} == "" && \ exists(${SRCTOP}/${_lib${_lib}reldir}) DIRDEPS+= ${_lib${_lib}reldir} .endif .endfor .endif # !empty(LIBADD) .endif # no Makefile.depend* .endif # ${RELDIR} == ${DEP_RELDIR} .endif # ${DEP_MACHINE} != "host" .if ${MK_STAGING} == "yes" # we need targets/pseudo/stage to prep the stage tree .if ${DEP_RELDIR} != "targets/pseudo/stage" DIRDEPS += targets/pseudo/stage .endif .endif DEP_MACHINE_ARCH = ${MACHINE_ARCH.${DEP_MACHINE}} CSU_DIR.${DEP_MACHINE_ARCH} ?= csu/${DEP_MACHINE_ARCH} CSU_DIR := ${CSU_DIR.${DEP_MACHINE_ARCH}} BOOT_MACHINE_DIR:= ${BOOT_MACHINE_DIR.${DEP_MACHINE}} KERNEL_NAME:= ${KERNEL_NAME.${DEP_MACHINE}} Index: user/ngie/release-pkg-fix-tests/sys/dev/acpi_support/atk0110.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/acpi_support/atk0110.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/acpi_support/atk0110.c (revision 299055) @@ -1,358 +1,358 @@ /* $NetBSD: atk0110.c,v 1.4 2010/02/11 06:54:57 cnst Exp $ */ /* $OpenBSD: atk0110.c,v 1.1 2009/07/23 01:38:16 cnst Exp $ */ /* * Copyright (c) 2009, 2010 Constantine A. Murenin * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* * ASUSTeK AI Booster (ACPI ASOC ATK0110). * * This code was originally written for OpenBSD after the techniques * described in the Linux's asus_atk0110.c and FreeBSD's Takanori Watanabe's * acpi_aiboost.c were verified to be accurate on the actual hardware kindly * provided by Sam Fourman Jr. It was subsequently ported from OpenBSD to * DragonFly BSD, to NetBSD's sysmon_envsys(9) and to FreeBSD's sysctl(9). * * -- Constantine A. 
Murenin */ #define _COMPONENT ACPI_OEM ACPI_MODULE_NAME("aibs"); ACPI_SERIAL_DECL(aibs, "aibs"); #define AIBS_MORE_SENSORS #define AIBS_VERBOSE enum aibs_type { AIBS_VOLT, AIBS_TEMP, AIBS_FAN }; struct aibs_sensor { ACPI_INTEGER v; ACPI_INTEGER i; ACPI_INTEGER l; ACPI_INTEGER h; enum aibs_type t; }; struct aibs_softc { - struct device *sc_dev; + device_t sc_dev; ACPI_HANDLE sc_ah; struct aibs_sensor *sc_asens_volt; struct aibs_sensor *sc_asens_temp; struct aibs_sensor *sc_asens_fan; }; static int aibs_probe(device_t); static int aibs_attach(device_t); static int aibs_detach(device_t); static int aibs_sysctl(SYSCTL_HANDLER_ARGS); static void aibs_attach_sif(struct aibs_softc *, enum aibs_type); static device_method_t aibs_methods[] = { DEVMETHOD(device_probe, aibs_probe), DEVMETHOD(device_attach, aibs_attach), DEVMETHOD(device_detach, aibs_detach), { NULL, NULL } }; static driver_t aibs_driver = { "aibs", aibs_methods, sizeof(struct aibs_softc) }; static devclass_t aibs_devclass; DRIVER_MODULE(aibs, acpi, aibs_driver, aibs_devclass, NULL, NULL); MODULE_DEPEND(aibs, acpi, 1, 1, 1); static char* aibs_hids[] = { "ATK0110", NULL }; static int aibs_probe(device_t dev) { if (acpi_disabled("aibs") || ACPI_ID_PROBE(device_get_parent(dev), dev, aibs_hids) == NULL) return ENXIO; device_set_desc(dev, "ASUSTeK AI Booster (ACPI ASOC ATK0110)"); return 0; } static int aibs_attach(device_t dev) { struct aibs_softc *sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_ah = acpi_get_handle(dev); aibs_attach_sif(sc, AIBS_VOLT); aibs_attach_sif(sc, AIBS_TEMP); aibs_attach_sif(sc, AIBS_FAN); return 0; } static void aibs_attach_sif(struct aibs_softc *sc, enum aibs_type st) { ACPI_STATUS s; ACPI_BUFFER b; ACPI_OBJECT *bp, *o; int i, n; const char *node; char name[] = "?SIF"; struct aibs_sensor *as; struct sysctl_oid *so; switch (st) { case AIBS_VOLT: node = "volt"; name[0] = 'V'; break; case AIBS_TEMP: node = "temp"; name[0] = 'T'; break; case AIBS_FAN: node = "fan"; name[0] = 'F'; break; default: return; } b.Length = ACPI_ALLOCATE_BUFFER; s = AcpiEvaluateObjectTyped(sc->sc_ah, name, NULL, &b, ACPI_TYPE_PACKAGE); if (ACPI_FAILURE(s)) { device_printf(sc->sc_dev, "%s not found\n", name); return; } bp = b.Pointer; o = bp->Package.Elements; if (o[0].Type != ACPI_TYPE_INTEGER) { device_printf(sc->sc_dev, "%s[0]: invalid type\n", name); AcpiOsFree(b.Pointer); return; } n = o[0].Integer.Value; if (bp->Package.Count - 1 < n) { device_printf(sc->sc_dev, "%s: invalid package\n", name); AcpiOsFree(b.Pointer); return; } else if (bp->Package.Count - 1 > n) { int on = n; #ifdef AIBS_MORE_SENSORS n = bp->Package.Count - 1; #endif device_printf(sc->sc_dev, "%s: malformed package: %i/%i" ", assume %i\n", name, on, bp->Package.Count - 1, n); } if (n < 1) { device_printf(sc->sc_dev, "%s: no members in the package\n", name); AcpiOsFree(b.Pointer); return; } as = malloc(sizeof(*as) * n, M_DEVBUF, M_NOWAIT | M_ZERO); if (as == NULL) { device_printf(sc->sc_dev, "%s: malloc fail\n", name); AcpiOsFree(b.Pointer); return; } switch (st) { case AIBS_VOLT: sc->sc_asens_volt = as; break; case AIBS_TEMP: sc->sc_asens_temp = as; break; case AIBS_FAN: sc->sc_asens_fan = as; break; } /* sysctl subtree for sensors of this type */ so = SYSCTL_ADD_NODE(device_get_sysctl_ctx(sc->sc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)), st, node, CTLFLAG_RD, NULL, NULL); for (i = 0, o++; i < n; i++, o++) { ACPI_OBJECT *oi; char si[3]; const char *desc; /* acpica5 automatically evaluates the referenced package */ if (o[0].Type != 
ACPI_TYPE_PACKAGE) { device_printf(sc->sc_dev, "%s: %i: not a package: %i type\n", name, i, o[0].Type); continue; } oi = o[0].Package.Elements; if (o[0].Package.Count != 5 || oi[0].Type != ACPI_TYPE_INTEGER || oi[1].Type != ACPI_TYPE_STRING || oi[2].Type != ACPI_TYPE_INTEGER || oi[3].Type != ACPI_TYPE_INTEGER || oi[4].Type != ACPI_TYPE_INTEGER) { device_printf(sc->sc_dev, "%s: %i: invalid package\n", name, i); continue; } as[i].i = oi[0].Integer.Value; desc = oi[1].String.Pointer; as[i].l = oi[2].Integer.Value; as[i].h = oi[3].Integer.Value; as[i].t = st; #ifdef AIBS_VERBOSE device_printf(sc->sc_dev, "%c%i: " "0x%08"PRIx64" %20s %5"PRIi64" / %5"PRIi64" " "0x%"PRIx64"\n", name[0], i, (uint64_t)as[i].i, desc, (int64_t)as[i].l, (int64_t)as[i].h, (uint64_t)oi[4].Integer.Value); #endif snprintf(si, sizeof(si), "%i", i); SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev), SYSCTL_CHILDREN(so), i, si, CTLTYPE_INT | CTLFLAG_RD, sc, st, aibs_sysctl, st == AIBS_TEMP ? "IK" : "I", desc); } AcpiOsFree(b.Pointer); } static int aibs_detach(device_t dev) { struct aibs_softc *sc = device_get_softc(dev); if (sc->sc_asens_volt != NULL) free(sc->sc_asens_volt, M_DEVBUF); if (sc->sc_asens_temp != NULL) free(sc->sc_asens_temp, M_DEVBUF); if (sc->sc_asens_fan != NULL) free(sc->sc_asens_fan, M_DEVBUF); return 0; } #ifdef AIBS_VERBOSE #define ddevice_printf(x...) device_printf(x) #else #define ddevice_printf(x...) #endif static int aibs_sysctl(SYSCTL_HANDLER_ARGS) { struct aibs_softc *sc = arg1; enum aibs_type st = arg2; int i = oidp->oid_number; ACPI_STATUS rs; ACPI_OBJECT p, *bp; ACPI_OBJECT_LIST mp; ACPI_BUFFER b; char *name; struct aibs_sensor *as; ACPI_INTEGER v, l, h; int so[3]; switch (st) { case AIBS_VOLT: name = "RVLT"; as = sc->sc_asens_volt; break; case AIBS_TEMP: name = "RTMP"; as = sc->sc_asens_temp; break; case AIBS_FAN: name = "RFAN"; as = sc->sc_asens_fan; break; default: return ENOENT; } if (as == NULL) return ENOENT; l = as[i].l; h = as[i].h; p.Type = ACPI_TYPE_INTEGER; p.Integer.Value = as[i].i; mp.Count = 1; mp.Pointer = &p; b.Length = ACPI_ALLOCATE_BUFFER; ACPI_SERIAL_BEGIN(aibs); rs = AcpiEvaluateObjectTyped(sc->sc_ah, name, &mp, &b, ACPI_TYPE_INTEGER); if (ACPI_FAILURE(rs)) { ddevice_printf(sc->sc_dev, "%s: %i: evaluation failed\n", name, i); ACPI_SERIAL_END(aibs); return EIO; } bp = b.Pointer; v = bp->Integer.Value; AcpiOsFree(b.Pointer); ACPI_SERIAL_END(aibs); switch (st) { case AIBS_VOLT: break; case AIBS_TEMP: v += 2732; l += 2732; h += 2732; break; case AIBS_FAN: break; } so[0] = v; so[1] = l; so[2] = h; return sysctl_handle_opaque(oidp, &so, sizeof(so), req); } Index: user/ngie/release-pkg-fix-tests/sys/dev/buslogic/btreg.h =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/buslogic/btreg.h (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/buslogic/btreg.h (revision 299055) @@ -1,702 +1,702 @@ /*- * Generic register and struct definitions for the BusLogic * MultiMaster SCSI host adapters. Product specific probe and * attach routines can be found in: * sys/dev/buslogic/bt_isa.c BT-54X, BT-445 cards * sys/dev/buslogic/bt_mca.c BT-64X, SDC3211B, SDC3211F * sys/dev/buslogic/bt_eisa.c BT-74X, BT-75x cards, SDC3222F * sys/dev/buslogic/bt_pci.c BT-946, BT-948, BT-956, BT-958 cards * * Copyright (c) 1998, 1999 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _BTREG_H_ #define _BTREG_H_ #include #define BT_MAXTRANSFER_SIZE 0xffffffff /* limited by 32bit counter */ #define BT_NSEG 32 /* The number of dma segments supported. * BT_NSEG can be maxed out at 8192 entries, * but the kernel will never need to transfer * such a large request. To reduce the * driver's memory consumption, we reduce the * max to 32. 16 would work if all transfers * are paged alined since the kernel will only * generate at most a 64k transfer, but to * handle non-page aligned transfers, you need * 17, so we round to the next power of two * to make allocating SG space easy and * efficient. */ #define ALL_TARGETS (~0) /* * Control Register pp. 1-8, 1-9 (Write Only) */ #define CONTROL_REG 0x00 #define HARD_RESET 0x80 /* Hard Reset - return to POST state */ #define SOFT_RESET 0x40 /* Soft Reset - Clears Adapter state */ #define RESET_INTR 0x20 /* Reset/Ack Interrupt */ #define RESET_SBUS 0x10 /* Drive SCSI bus reset signal */ /* * Status Register pp. 1-9, 1-10 (Read Only) */ #define STATUS_REG 0x00 #define DIAG_ACTIVE 0x80 /* Performing Internal Diags */ #define DIAG_FAIL 0x40 /* Internal Diags failed */ #define INIT_REQUIRED 0x20 /* MBOXes need initialization */ #define HA_READY 0x10 /* HA ready for new commands */ #define CMD_REG_BUSY 0x08 /* HA busy with last cmd byte */ #define DATAIN_REG_READY 0x04 /* Data-in Byte available */ #define STATUS_REG_RSVD 0x02 #define CMD_INVALID 0x01 /* Invalid Command detected */ /* * Command/Parameter Register pp. 1-10, 1-11 (Write Only) */ #define COMMAND_REG 0x01 /* * Data in Register p. 1-11 (Read Only) */ #define DATAIN_REG 0x01 /* * Interrupt Status Register pp. 1-12 -> 1-14 (Read Only) */ #define INTSTAT_REG 0x02 #define INTR_PENDING 0x80 /* There is a pending INTR */ #define INTSTAT_REG_RSVD 0x70 #define SCSI_BUS_RESET 0x08 /* Bus Reset detected */ #define CMD_COMPLETE 0x04 #define OMB_READY 0x02 /* Outgoin Mailbox Ready */ #define IMB_LOADED 0x01 /* Incoming Mailbox loaded */ /* * Definitions for the "undocumented" geometry register */ typedef enum { GEOM_NODISK, GEOM_64x32, GEOM_128x32, GEOM_255x32 } disk_geom_t; #define GEOMETRY_REG 0x03 #define DISK0_GEOMETRY 0x03 #define DISK1_GEOMETRY 0x0c #define EXTENDED_TRANSLATION 0x80 #define GEOMETRY_DISK0(g_reg) (greg & DISK0_GEOMETRY) #define GEOMETRY_DISK1(g_reg) ((greg & DISK1_GEOMETRY) >> 2) #define BT_NREGS (4) /* * Opcodes for Adapter commands. 
* pp 1-18 -> 1-20 */ typedef enum { BOP_TEST_CMDC_INTR = 0x00, BOP_INITIALIZE_24BMBOX = 0x01, BOP_START_MBOX = 0x02, BOP_EXECUTE_BIOS_CMD = 0x03, BOP_INQUIRE_BOARD_ID = 0x04, BOP_ENABLE_OMBR_INT = 0x05, BOP_SET_SEL_TIMOUT = 0x06, BOP_SET_TIME_ON_BUS = 0x07, BOP_SET_TIME_OFF_BUS = 0x08, BOP_SET_BUS_TRANS_RATE = 0x09, BOP_INQUIRE_INST_LDEVS = 0x0A, BOP_INQUIRE_CONFIG = 0x0B, BOP_ENABLE_TARGET_MODE = 0x0C, BOP_INQUIRE_SETUP_INFO = 0x0D, BOP_WRITE_LRAM = 0x1A, BOP_READ_LRAM = 0x1B, BOP_WRITE_CHIP_FIFO = 0x1C, BOP_READ_CHIP_FIFO = 0x1C, BOP_ECHO_DATA_BYTE = 0x1F, BOP_ADAPTER_DIAGNOSTICS = 0x20, BOP_SET_ADAPTER_OPTIONS = 0x21, BOP_INQUIRE_INST_HDEVS = 0x23, BOP_INQUIRE_TARG_DEVS = 0x24, BOP_DISABLE_HAC_INTR = 0x25, BOP_INITIALIZE_32BMBOX = 0x81, BOP_EXECUTE_SCSI_CMD = 0x83, BOP_INQUIRE_FW_VER_3DIG = 0x84, BOP_INQUIRE_FW_VER_4DIG = 0x85, BOP_INQUIRE_PCI_INFO = 0x86, BOP_INQUIRE_MODEL = 0x8B, BOP_TARG_SYNC_INFO = 0x8C, BOP_INQUIRE_ESETUP_INFO = 0x8D, BOP_ENABLE_STRICT_RR = 0x8F, BOP_STORE_LRAM = 0x90, BOP_FETCH_LRAM = 0x91, BOP_SAVE_TO_EEPROM = 0x92, BOP_UPLOAD_AUTO_SCSI = 0x94, BOP_MODIFY_IO_ADDR = 0x95, BOP_SET_CCB_FORMAT = 0x96, BOP_FLASH_ROM_DOWNLOAD = 0x97, BOP_FLASH_WRITE_ENABLE = 0x98, BOP_WRITE_INQ_BUFFER = 0x9A, BOP_READ_INQ_BUFFER = 0x9B, BOP_FLASH_UP_DOWNLOAD = 0xA7, BOP_READ_SCAM_DATA = 0xA8, BOP_WRITE_SCAM_DATA = 0xA9 } bt_op_t; /************** Definitions of Multi-byte commands and responses ************/ typedef struct { u_int8_t num_mboxes; u_int8_t base_addr[3]; } init_24b_mbox_params_t; typedef struct { u_int8_t board_type; #define BOARD_TYPE_NON_MCA 0x41 #define BOARD_TYPE_MCA 0x42 u_int8_t cust_features; #define FEATURES_STANDARD 0x41 u_int8_t firmware_rev_major; u_int8_t firmware_rev_minor; } board_id_data_t; typedef struct { u_int8_t enable; } enable_ombr_intr_params_t; typedef struct { u_int8_t enable; u_int8_t reserved; u_int8_t timeout[2]; /* timeout in milliseconds */ } set_selto_parmas_t; typedef struct { u_int8_t time; /* time in milliseconds (2-15) */ } set_timeon_bus_params_t; typedef struct { u_int8_t time; /* time in milliseconds (2-15) */ } set_timeoff_bus_params_t; typedef struct { u_int8_t rate; } set_bus_trasfer_rate_params_t; typedef struct { u_int8_t targets[8]; } installed_ldevs_data_t; typedef struct { u_int8_t dma_chan; #define DMA_CHAN_5 0x20 #define DMA_CHAN_6 0x40 #define DMA_CHAN_7 0x80 u_int8_t irq; #define IRQ_9 0x01 #define IRQ_10 0x02 #define IRQ_11 0x04 #define IRQ_12 0x08 #define IRQ_14 0x20 #define IRQ_15 0x40 u_int8_t scsi_id; } config_data_t; typedef struct { u_int8_t enable; } target_mode_params_t; typedef struct { u_int8_t offset : 4, period : 3, sync : 1; } targ_syncinfo_t; typedef enum { HAB_ISA = 'A', HAB_MCA = 'B', HAB_EISA = 'C', HAB_NUBUS = 'D', HAB_VESA = 'E', HAB_PCI = 'F' } ha_type_t; typedef struct { u_int8_t initiate_sync : 1, parity_enable : 1, : 6; u_int8_t bus_transfer_rate; u_int8_t time_on_bus; u_int8_t time_off_bus; u_int8_t num_mboxes; u_int8_t mbox_base_addr[3]; targ_syncinfo_t low_syncinfo[8]; /* For fast and ultra, use 8C */ u_int8_t low_discinfo; u_int8_t customer_sig; u_int8_t letter_d; u_int8_t ha_type; u_int8_t low_wide_allowed; u_int8_t low_wide_active; targ_syncinfo_t high_syncinfo[8]; u_int8_t high_discinfo; u_int8_t high_wide_allowed; u_int8_t high_wide_active; } setup_data_t; typedef struct { u_int8_t phys_addr[3]; } write_adapter_lram_params_t; typedef struct { u_int8_t phys_addr[3]; } read_adapter_lram_params_t; typedef struct { u_int8_t phys_addr[3]; } write_chip_fifo_params_t; typedef struct { u_int8_t 
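/*
 * The typed parameter and reply structures in this section pair with the
 * bt_op_t opcodes above; both are handed to bt_cmd(), declared near the
 * end of this header.  As a rough sketch (assuming bt_cmd() returns 0 on
 * success), fetching the board ID might look like:
 *
 *	board_id_data_t id;
 *
 *	if (bt_cmd(bt, BOP_INQUIRE_BOARD_ID, NULL, 0,
 *	    (u_int8_t *)&id, sizeof(id), DEFAULT_CMD_TIMEOUT) == 0)
 *		printf("%s: firmware %c.%c\n", bt_name(bt),
 *		    id.firmware_rev_major, id.firmware_rev_minor);
 */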
phys_addr[3]; } read_chip_fifo_params_t; typedef struct { u_int8_t length; /* Excludes this member */ u_int8_t low_disc_disable; u_int8_t low_busy_retry_disable; u_int8_t high_disc_disable; u_int8_t high_busy_retry_disable; } set_adapter_options_params_t; typedef struct { u_int8_t targets[8]; } installed_hdevs_data_t; typedef struct { u_int8_t low_devs; u_int8_t high_devs; } target_devs_data_t; typedef struct { u_int8_t enable; } enable_hac_interrupt_params_t; typedef struct { u_int8_t num_boxes; u_int8_t base_addr[4]; } init_32b_mbox_params_t; typedef u_int8_t fw_ver_3dig_data_t; typedef u_int8_t fw_ver_4dig_data_t; typedef struct { u_int8_t offset; u_int8_t response_len; } fetch_lram_params_t; #define AUTO_SCSI_BYTE_OFFSET 64 typedef struct { u_int8_t factory_sig[2]; u_int8_t auto_scsi_data_size; /* 2 -> 64 bytes */ u_int8_t model_num[6]; u_int8_t adapter_ioport; u_int8_t floppy_enabled :1, floppy_secondary :1, level_trigger :1, :2, system_ram_area :3; u_int8_t dma_channel :7, dma_autoconf :1; u_int8_t irq_channel :7, irq_autoconf :1; u_int8_t dma_trans_rate; u_int8_t scsi_id; u_int8_t low_termination :1, scsi_parity :1, high_termination :1, req_ack_filter :1, fast_sync :1, bus_reset :1, :1, active_negation :1; u_int8_t bus_on_delay; u_int8_t bus_off_delay; u_int8_t bios_enabled :1, int19h_redirect :1, extended_trans :1, removable_drives :1, :1, morethan2disks :1, interrupt_mode :1, floptical_support:1; u_int8_t low_device_enabled; u_int8_t high_device_enabled; u_int8_t low_wide_permitted; u_int8_t high_wide_permitted; u_int8_t low_fast_permitted; u_int8_t high_fast_permitted; u_int8_t low_sync_permitted; u_int8_t high_sync_permitted; u_int8_t low_disc_permitted; u_int8_t high_disc_permitted; u_int8_t low_send_start_unit; u_int8_t high_send_start_unit; u_int8_t low_ignore_in_bios_scan; u_int8_t high_ignore_in_bios_scan; u_int8_t pci_int_pin :2, host_ioport :2, round_robin :1, vesa_bus_over_33 :1, vesa_burst_write :1, vesa_burst_read :1; u_int8_t low_ultra_permitted; u_int8_t high_ultra_permitted; u_int8_t reserved[5]; u_int8_t auto_scsi_max_lun; u_int8_t :1, scam_dominant :1, scam_enabled :1, scam_level2 :1, :4; u_int8_t int13_extensions :1, :1, cdrom_boot :1, :2, multi_boot :1, :2; u_int8_t boot_target_id :4, boot_channel :4; u_int8_t force_dev_scan :1, :7; u_int8_t low_tagged_lun_independance; u_int8_t high_tagged_lun_independance; u_int8_t low_renegotiate_after_cc; u_int8_t high_renegotiate_after_cc; u_int8_t reserverd2[10]; u_int8_t manufacturing_diagnotic[2]; u_int8_t checksum[2]; } auto_scsi_data_t; struct bt_isa_port { u_int16_t addr; u_int8_t probed; u_int8_t bio; }; extern struct bt_isa_port bt_isa_ports[]; #define BT_NUM_ISAPORTS 6 typedef enum { BIO_330 = 0, BIO_334 = 1, BIO_230 = 2, BIO_234 = 3, BIO_130 = 4, BIO_134 = 5, BIO_DISABLED = 6, BIO_DISABLED2 = 7 } isa_compat_io_t; typedef struct { u_int8_t io_port; u_int8_t irq_num; u_int8_t low_byte_term :1, high_byte_term :1, :2, jp1_status :1, jp2_status :1, jp3_status :1, :1; u_int8_t reserved; } pci_info_data_t; typedef struct { u_int8_t ascii_model[5]; /* Fifth byte is always 0 */ } ha_model_data_t; typedef struct { u_int8_t sync_rate[16]; /* Sync in 10ns units */ } target_sync_info_data_t; typedef struct { u_int8_t bus_type; u_int8_t bios_addr; u_int16_t max_sg; u_int8_t num_mboxes; u_int8_t mbox_base[4]; u_int8_t :2, sync_neg10MB :1, floppy_disable :1, floppy_secondary_port :1, burst_mode_enabled :1, level_trigger_ints :1, :1; u_int8_t fw_ver_bytes_2_to_4[3]; u_int8_t wide_bus :1, diff_bus :1, scam_capable :1, ultra_scsi 
:1, auto_term :1, :3; } esetup_info_data_t; typedef struct { u_int32_t len; u_int32_t addr; } bt_sg_t; /********************** Mail Box definitions *******************************/ typedef enum { BMBO_FREE = 0x0, /* MBO intry is free */ BMBO_START = 0x1, /* MBO activate entry */ BMBO_ABORT = 0x2 /* MBO abort entry */ } bt_mbo_action_code_t; typedef struct bt_mbox_out { u_int32_t ccb_addr; u_int8_t reserved[3]; u_int8_t action_code; } bt_mbox_out_t; typedef enum { BMBI_FREE = 0x0, /* MBI entry is free */ BMBI_OK = 0x1, /* completed without error */ BMBI_ABORT = 0x2, /* aborted ccb */ BMBI_NOT_FOUND = 0x3, /* Tried to abort invalid CCB */ BMBI_ERROR = 0x4 /* Completed with error */ } bt_mbi_comp_code_t; typedef struct bt_mbox_in { u_int32_t ccb_addr; u_int8_t btstat; u_int8_t sdstat; u_int8_t reserved; u_int8_t comp_code; } bt_mbox_in_t; /***************** Compiled Probe Information *******************************/ struct bt_probe_info { int drq; int irq; }; /****************** Hardware CCB definition *********************************/ typedef enum { INITIATOR_CCB = 0x00, INITIATOR_SG_CCB = 0x02, INITIATOR_CCB_WRESID = 0x03, INITIATOR_SG_CCB_WRESID = 0x04, INITIATOR_BUS_DEV_RESET = 0x81 } bt_ccb_opcode_t; typedef enum { BTSTAT_NOERROR = 0x00, BTSTAT_LINKED_CMD_COMPLETE = 0x0A, BTSTAT_LINKED_CMD_FLAG_COMPLETE = 0x0B, BTSTAT_DATAUNDERUN_ERROR = 0x0C, BTSTAT_SELTIMEOUT = 0x11, BTSTAT_DATARUN_ERROR = 0x12, BTSTAT_UNEXPECTED_BUSFREE = 0x13, BTSTAT_INVALID_PHASE = 0x14, BTSTAT_INVALID_ACTION_CODE = 0x15, BTSTAT_INVALID_OPCODE = 0x16, BTSTAT_LINKED_CCB_LUN_MISMATCH = 0x17, BTSTAT_INVALID_CCB_OR_SG_PARAM = 0x1A, BTSTAT_AUTOSENSE_FAILED = 0x1B, BTSTAT_TAGGED_MSG_REJECTED = 0x1C, BTSTAT_UNSUPPORTED_MSG_RECEIVED = 0x1D, BTSTAT_HARDWARE_FAILURE = 0x20, BTSTAT_TARGET_IGNORED_ATN = 0x21, BTSTAT_HA_SCSI_BUS_RESET = 0x22, BTSTAT_OTHER_SCSI_BUS_RESET = 0x23, BTSTAT_INVALID_RECONNECT = 0x24, BTSTAT_HA_BDR = 0x25, BTSTAT_ABORT_QUEUE_GENERATED = 0x26, BTSTAT_HA_SOFTWARE_ERROR = 0x27, BTSTAT_HA_WATCHDOG_ERROR = 0x28, BTSTAT_SCSI_PERROR_DETECTED = 0x30 } btstat_t; struct bt_hccb { u_int8_t opcode; u_int8_t :3, datain :1, dataout :1, wide_tag_enable :1, /* Wide Lun CCB format */ wide_tag_type :2; /* Wide Lun CCB format */ u_int8_t cmd_len; u_int8_t sense_len; int32_t data_len; /* residuals can be negative */ u_int32_t data_addr; u_int8_t reserved[2]; u_int8_t btstat; u_int8_t sdstat; u_int8_t target_id; u_int8_t target_lun :5, tag_enable :1, tag_type :2; u_int8_t scsi_cdb[12]; u_int8_t reserved2[6]; u_int32_t sense_addr; }; typedef enum { BCCB_FREE = 0x0, BCCB_ACTIVE = 0x1, BCCB_DEVICE_RESET = 0x2, BCCB_RELEASE_SIMQ = 0x4 } bccb_flags_t; struct bt_ccb { struct bt_hccb hccb; SLIST_ENTRY(bt_ccb) links; u_int32_t flags; union ccb *ccb; bus_dmamap_t dmamap; struct callout timer; bt_sg_t *sg_list; u_int32_t sg_list_phys; }; struct sg_map_node { bus_dmamap_t sg_dmamap; bus_addr_t sg_physaddr; bt_sg_t* sg_vaddr; SLIST_ENTRY(sg_map_node) links; }; struct bt_softc { - struct device *dev; + device_t dev; struct resource *port; struct resource *irq; struct resource *drq; void *ih; struct mtx lock; struct cam_sim *sim; struct cam_path *path; bt_mbox_out_t *cur_outbox; bt_mbox_in_t *cur_inbox; bt_mbox_out_t *last_outbox; bt_mbox_in_t *last_inbox; struct bt_ccb *bt_ccb_array; SLIST_HEAD(,bt_ccb) free_bt_ccbs; LIST_HEAD(,ccb_hdr) pending_ccbs; u_int active_ccbs; u_int32_t bt_ccb_physbase; bt_mbox_in_t *in_boxes; bt_mbox_out_t *out_boxes; struct scsi_sense_data *sense_buffers; u_int32_t sense_buffers_physbase; struct bt_ccb 
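/*
 * Work is posted to the adapter through the bt_mbox_out_t ring declared
 * just above (cur_outbox/last_outbox): the CCB's bus address and an
 * action code (BMBO_START or BMBO_ABORT) are written into the current
 * slot and the adapter is then kicked.  A simplified sketch (the final
 * doorbell write is an assumption here, not something defined in this
 * header):
 *
 *	bt->cur_outbox->ccb_addr = ccb_physaddr;
 *	bt->cur_outbox->action_code = BMBO_START;
 *	bt_outb(bt, COMMAND_REG, BOP_START_MBOX);
 *
 * Completions are reported through bt_mbox_in_t entries, whose comp_code
 * field carries one of the bt_mbi_comp_code_t values.
 */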
*recovery_bccb; u_int num_boxes; bus_dma_tag_t parent_dmat; /* * All dmat's derive from * the dmat defined by our * bus. */ bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */ bus_dma_tag_t mailbox_dmat; /* dmat for our mailboxes */ bus_dmamap_t mailbox_dmamap; bus_dma_tag_t ccb_dmat; /* dmat for our ccb array */ bus_dmamap_t ccb_dmamap; bus_dma_tag_t sg_dmat; /* dmat for our sg segments */ bus_dma_tag_t sense_dmat; /* dmat for our sense buffers */ bus_dmamap_t sense_dmamap; SLIST_HEAD(, sg_map_node) sg_maps; bus_addr_t mailbox_physbase; bus_addr_t mailbox_addrlimit; u_int num_ccbs; /* Number of CCBs malloc'd */ u_int max_ccbs; /* Maximum allocatable CCBs */ u_int max_sg; u_int scsi_id; u_int32_t extended_trans :1, wide_bus :1, diff_bus :1, ultra_scsi :1, extended_lun :1, strict_rr :1, tag_capable :1, wide_lun_ccb :1, resource_shortage :1, level_trigger_ints:1, :22; u_int16_t tags_permitted; u_int16_t disc_permitted; u_int16_t sync_permitted; u_int16_t fast_permitted; u_int16_t ultra_permitted; u_int16_t wide_permitted; u_int8_t init_level; volatile u_int8_t command_cmp; volatile u_int8_t latched_status; u_int32_t bios_addr; char firmware_ver[6]; char model[5]; }; #define BT_TEMP_UNIT 0xFF /* Unit for probes */ void bt_init_softc(device_t dev, struct resource *port, struct resource *irq, struct resource *drq); void bt_free_softc(device_t dev); int bt_port_probe(device_t dev, struct bt_probe_info *info); int bt_probe(device_t dev); int bt_fetch_adapter_info(device_t dev); int bt_init(device_t dev); int bt_attach(device_t dev); void bt_intr(void *arg); int bt_check_probed_iop(u_int ioport); void bt_mark_probed_bio(isa_compat_io_t port); void bt_mark_probed_iop(u_int ioport); void bt_find_probe_range(int ioport, int *port_index, int *max_port_index); int bt_iop_from_bio(isa_compat_io_t bio_index); #define DEFAULT_CMD_TIMEOUT 100000 /* 10 sec */ int bt_cmd(struct bt_softc *bt, bt_op_t opcode, u_int8_t *params, u_int param_len, u_int8_t *reply_data, u_int reply_len, u_int cmd_timeout); #define bt_name(bt) device_get_nameunit(bt->dev) #define bt_inb(bt, reg) \ bus_read_1((bt)->port, reg) #define bt_outb(bt, reg, value) \ bus_write_1((bt)->port, reg, value) #endif /* _BT_H_ */ Index: user/ngie/release-pkg-fix-tests/sys/dev/bwn/if_bwn.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/bwn/if_bwn.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/bwn/if_bwn.c (revision 299055) @@ -1,6888 +1,6881 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. */ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, "Broadcom debugging printfs"); #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_msi_disable = 0; /* MSI disabled */ TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static void bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static void bwn_sprom_bugfixes(device_t); static int bwn_init(struct bwn_softc *); static void bwn_parent(struct ieee80211com *); static void bwn_start(struct bwn_softc *); static int bwn_transmit(struct ieee80211com *, struct mbuf *); static int bwn_attach_core(struct bwn_mac *); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, int); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ieee80211com *); static void bwn_update_promisc(struct ieee80211com *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void bwn_scan_start(struct ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct 
ieee80211vap *); static void bwn_stop(struct bwn_softc *); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static int bwn_dma_mask2type(uint64_t); static uint64_t bwn_dma_mask(struct bwn_mac *); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int 
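/*
 * The bwn_dma_32_* and bwn_dma_64_* routines declared here are parallel
 * implementations of the same per-ring descriptor operations; ring setup
 * installs one set or the other as function pointers on the ring
 * (dr->getdesc, dr->setdesc, dr->start_transfer, ...) according to the
 * DMA addressing type derived from bwn_dma_mask()/bwn_dma_mask2type().
 */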
bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static int bwn_dma_gettype(struct bwn_mac *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static void bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int 
bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static void bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_sysctl_node(struct bwn_softc *); static struct resource_spec bwn_res_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec bwn_res_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 
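/* Each entry is { frequency (MHz), IEEE channel number, max TX power };
 * bwn_addchan() below copies the power into ic_maxregpower and doubles
 * it for ic_maxpower, which net80211 keeps in half-dBm units. */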
5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; static const struct siba_devid bwn_devs[] = { SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"), SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"), SIBA_DEV(BROADCOM, 
80211, 7, "Revision 7"), SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"), SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"), SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"), SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"), SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"), SIBA_DEV(BROADCOM, 80211, 16, "Revision 16") }; static int bwn_probe(device_t dev) { int i; for (i = 0; i < nitems(bwn_devs); i++) { if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor && siba_get_device(dev) == bwn_devs[i].sd_device && siba_get_revid(dev) == bwn_devs[i].sd_rev) return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc = device_get_softc(dev); int error, i, msic, reg; sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } if (!TAILQ_EMPTY(&sc->sc_maclist)) { if (siba_get_pci_device(dev) != 0x4313 && siba_get_pci_device(dev) != 0x431a && siba_get_pci_device(dev) != 0x4321) { device_printf(sc->sc_dev, "skip 802.11 cores\n"); return (ENODEV); } } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail0; bwn_led_attach(mac); device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_method.dma.dmatype); else device_printf(sc->sc_dev, "PIO\n"); /* * setup PCI resources and interrupt. 
*/ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { msic = pci_msi_count(dev); if (bootverbose) device_printf(sc->sc_dev, "MSI count : %d\n", msic); } else msic = 0; mac->mac_intr_spec = bwn_res_spec_legacy; if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) { if (pci_alloc_msi(dev, &msic) == 0) { device_printf(sc->sc_dev, "Using %d MSI messages\n", msic); mac->mac_intr_spec = bwn_res_spec_msi; mac->mac_msi = 1; } } error = bus_alloc_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (error) { device_printf(sc->sc_dev, "couldn't allocate IRQ resources (%d)\n", error); goto fail1; } if (mac->mac_msi == 0) error = bus_setup_intr(dev, mac->mac_res_irq[0], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[0]); else { for (i = 0; i < BWN_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, mac->mac_res_irq[i], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[i]); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", error); break; } } } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * calls attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail1: if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) pci_release_msi(dev); fail0: free(mac, M_DEVBUF); return (error); } static int bwn_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ IEEE80211_ADDR_COPY(ic->ic_macaddr, bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ? siba_sprom_get_mac_80211a(sc->sc_dev) : siba_sprom_get_mac_80211bg(sc->sc_dev)); /* call MI attach routine. 
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; int i; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); for (i = 0; i < BWN_MSI_MESSAGES; i++) { if (mac->mac_intrhand[i] != NULL) { bus_teardown_intr(dev, mac->mac_res_irq[i], mac->mac_intrhand[i]); mac->mac_intrhand[i] = NULL; } } bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (mac->mac_msi != 0) pci_release_msi(dev); mbufq_drain(&sc->sc_snd); BWN_LOCK_DESTROY(sc); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { #define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \ ((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \ (siba_get_pci_device(dev) == _device) && \ (siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \ (siba_get_pci_subdevice(dev) == _subdevice)) if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE && siba_get_pci_subdevice(dev) == 0x4e && siba_get_pci_revid(dev) > 0x40) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL); if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL && siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST); if (siba_get_type(dev) == SIBA_TYPE_PCI) { if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) || BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) || BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) || BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010)) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST); } #undef BWN_ISDEV } static void bwn_parent(struct 
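/* net80211 "parent" callback: brings the hardware up when the first vap
 * starts running, tears it down when the last one stops, and otherwise
 * just re-syncs the promiscuous-mode filter. */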
ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
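/* Transmission is dispatched to one of two paths chosen at attach time:
 * descriptor-ring DMA (bwn_dma_tx_start) when BWN_MAC_FLAG_DMA is set,
 * or the programmed-I/O FIFO path (bwn_pio_tx_start) otherwise. */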
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (siba_get_revid(sc->sc_dev) >= 8) { /* * XXX please removes m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto 
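/* A frame occupies two ring slots (BWN_TX_SLOTS_PER_FRAME): the first
 * carries the bwn_txhdr built by bwn_set_txhdr(), the second the mbuf
 * payload.  On any failure the slot counters are rolled back from the
 * backup[] values saved on entry (see the fail: label below). */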
fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint32_t high; KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("unsupported revision %d", siba_get_revid(sc->sc_dev))); siba_powerup(sc->sc_dev, 0); high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); bwn_reset_core(mac, (high & BWN_TGSHIGH_HAVE_2GHZ) ? BWN_TGSLOW_SUPPORT_G : 0); error = bwn_phy_getinfo(mac, high); if (error) goto fail; have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0; have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 
1 : 0; if (siba_get_pci_device(sc->sc_dev) != 0x4312 && siba_get_pci_device(sc->sc_dev) != 0x4319 && siba_get_pci_device(sc->sc_dev) != 0x4324) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* XXX turns off PHY A because it's not supported */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { have_a = 0; have_bg = 1; } if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } bwn_reset_core(mac, have_bg ? 
BWN_TGSLOW_SUPPORT_G : 0); error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); fail: siba_powerdown(sc->sc_dev); return (error); } void bwn_reset_core(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; uint32_t low, ctl; flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET); siba_dev_up(sc->sc_dev, flags); DELAY(2000); low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) & ~BWN_TGSLOW_PHYRESET; siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, low & ~SIBA_TGSLOW_FGC); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(1000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (flags & BWN_TGSLOW_SUPPORT_G) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); } static int bwn_phy_getinfo(struct bwn_mac *mac, int tgshigh) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = (tgshigh & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 4) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ if (siba_get_chipid(sc->sc_dev) == 0x4317) { if (siba_get_chiprev(sc->sc_dev) == 0) tmp = 0x3205017f; else if (siba_get_chiprev(sc->sc_dev) == 1) tmp = 0x4205017f; else tmp = 0x5205017f; } else { BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; } phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, 
BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((siba_get_revid(sc->sc_dev) >= 3) && (siba_get_revid(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT | IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT | IEEE80211_CHAN_A) static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; if (have_bg) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, IEEE80211_CHAN_G); if (mac->mac_phy.type == BWN_PHYTYPE_N) { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_n, IEEE80211_CHAN_HTA); } else { if (have_a) bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, IEEE80211_CHAN_A); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? ENXIO : 0); } uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == 
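/* All shared-memory accesses go through a control word of the form
 * (way << 16) | offset written to BWN_SHM_CONTROL by bwn_shm_ctlword().
 * For BWN_SHARED the byte offset is converted to a 32-bit word index
 * (offset >> 2); offsets that are 2-byte but not 4-byte aligned are
 * handled by splitting the access across BWN_SHM_DATA_UNALIGNED and
 * BWN_SHM_DATA, as the read/write helpers above and below do. */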
BWN_SHARED) {
		KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__));
		if (offset & 0x0003) {
			bwn_shm_ctlword(mac, way, offset >> 2);
			BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value);
			return;
		}
		offset >>= 2;
	}
	bwn_shm_ctlword(mac, way, offset);
	BWN_WRITE_2(mac, BWN_SHM_DATA, value);
}

static void
bwn_addchan(struct ieee80211_channel *c, int freq, int flags, int ieee,
    int txpow)
{

	c->ic_freq = freq;
	c->ic_flags = flags;
	c->ic_ieee = ieee;
	c->ic_minpower = 0;
	c->ic_maxpower = 2 * txpow;
	c->ic_maxregpower = txpow;
}

static void
bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
    const struct bwn_channelinfo *ci, int flags)
{
	struct ieee80211_channel *c;
	int i;

	c = &chans[*nchans];
	for (i = 0; i < ci->nchannels; i++) {
		const struct bwn_channel *hc;

		hc = &ci->channels[i];
		if (*nchans >= maxchans)
			break;
		bwn_addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
		c++, (*nchans)++;

		if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
			/* g channels have a separate b-only entry */
			if (*nchans >= maxchans)
				break;
			c[0] = c[-1];
			c[-1].ic_flags = IEEE80211_CHAN_B;
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTG) {
			/* HT g channels have a separate g-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_G;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
		if (flags == IEEE80211_CHAN_HTA) {
			/* HT a channels have a separate a-only entry */
			if (*nchans >= maxchans)
				break;
			c[-1].ic_flags = IEEE80211_CHAN_A;
			c[0] = c[-1];
			c[0].ic_flags &= ~IEEE80211_CHAN_HT;
			c[0].ic_flags |= IEEE80211_CHAN_HT20;	/* HT20 */
			c++, (*nchans)++;
		}
	}
}

static int
bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
    const struct ieee80211_bpf_params *params)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct bwn_softc *sc = ic->ic_softc;
	struct bwn_mac *mac = sc->sc_curmac;
	int error;

	if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 ||
	    mac->mac_status < BWN_MAC_STATUS_STARTED) {
		m_freem(m);
		return (ENETDOWN);
	}

	BWN_LOCK(sc);
	if (bwn_tx_isfull(sc, m)) {
		m_freem(m);
		BWN_UNLOCK(sc);
		return (ENOBUFS);
	}
	error = bwn_tx_start(sc, ni, m);
	if (error == 0)
		sc->sc_watchdog_timer = 5;
	BWN_UNLOCK(sc);
	return (error);
}

/*
 * Callback from the 802.11 layer to update the slot time
 * based on the current setting. We use it to notify the
 * firmware of ERP changes and the f/w takes care of things
 * like slot time and preamble.
 */
static void
bwn_updateslot(struct ieee80211com *ic)
{
	struct bwn_softc *sc = ic->ic_softc;
	struct bwn_mac *mac;

	BWN_LOCK(sc);
	if (sc->sc_flags & BWN_FLAG_RUNNING) {
		mac = (struct bwn_mac *)sc->sc_curmac;
		bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic));
	}
	BWN_UNLOCK(sc);
}

/*
 * Callback from the 802.11 layer after a promiscuous mode change.
 * Note this interface does not check the operating mode as this
 * is an internal callback and we are expected to honor the current
 * state (e.g. this is used for setting the interface in promiscuous
 * mode when operating in hostap mode to do ACS).
 */
static void
bwn_update_promisc(struct ieee80211com *ic)
{
	struct bwn_softc *sc = ic->ic_softc;
	struct bwn_mac *mac = sc->sc_curmac;

	BWN_LOCK(sc);
	mac = sc->sc_curmac;
	if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
		if (ic->ic_promisc > 0)
			sc->sc_filters |= BWN_MACCTL_PROMISC;
		else
			sc->sc_filters &= ~BWN_MACCTL_PROMISC;
		bwn_set_opmode(mac);
	}
	BWN_UNLOCK(sc);
}

/*
 * Callback from the 802.11 layer to update WME parameters.
*/ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct wmeParams *wmep; int i; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); return (vap); } static void 
bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); siba_powerup(sc->sc_dev, 0); if (!siba_dev_isup(sc->sc_dev)) bwn_reset_core(mac, mac->mac_phy.gmode ? BWN_TGSLOW_SUPPORT_G : 0); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 
1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); siba_pcicore_intr(sc->sc_dev); siba_fix_imcfglobug(sc->sc_dev); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, siba_get_revid(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) && (siba_get_pcicore_revid(sc->sc_dev) <= 10)) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); siba_powerup(sc->sc_dev, !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initializatin */ mac->mac_status = BWN_MAC_STATUS_INITED; return (error); fail0: siba_powerdown(sc->sc_dev); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (siba_get_revid(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); siba_powerdown(sc->sc_dev); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (siba_get_revid(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (siba_get_revid(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00); 
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); siba_write_4(sc->sc_dev, SIBA_TGSLOW, siba_read_4(sc->sc_dev, SIBA_TGSLOW) | 0x00100000); BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev)); return (error); } /* read hostflags */ uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, 
BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (siba_get_revid(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (siba_get_revid(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = siba_get_revid(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; int type; uint16_t base; type = bwn_dma_mask2type(bwn_dma_mask(mac)); base = bwn_dma_base(type, idx); if (type == BWN_DMA_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static uint64_t bwn_dma_mask(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_BIT_MASK(64)); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_BIT_MASK(32)); return (BWN_DMA_BIT_MASK(30)); } static int bwn_dma_mask2type(uint64_t dmamask) { if (dmamask == BWN_DMA_BIT_MASK(30)) return (BWN_DMA_30BIT); if (dmamask == BWN_DMA_BIT_MASK(32)) return (BWN_DMA_32BIT); if (dmamask == 
BWN_DMA_BIT_MASK(64)) return (BWN_DMA_64BIT); KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (BWN_DMA_30BIT); } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BWN_DMA_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx, int type) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = type; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(type, controller_index); dr->dr_index = controller_index; if (type == BWN_DMA_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET; } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS can be divided by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = malloc((dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac), M_DEVBUF, M_NOWAIT | M_ZERO); KASSERT(dr->dr_txhdr_cache != NULL, ("%s:%d: fail", __func__, __LINE__)); /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) { 
device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto fail1; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail1; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: free(dr->dr_txhdr_cache, M_DEVBUF); fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); free((*dr)->dr_txhdr_cache, M_DEVBUF); free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; uint32_t addr, addrext, ctl; int slot; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK); addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30; addr |= siba_dma_translation(sc->sc_dev); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { 
uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; int slot; uint32_t ctl0 = 0, ctl1 = 0; uint32_t addrlo, addrhi; uint32_t addrext; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addrlo = (uint32_t) (dmaaddr & 0xffffffff); addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK); addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; addrhi |= (siba_dma_translation(sc->sc_dev) << 1); if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return 
(-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_softc *sc = dr->dr_mac->mac_sc; uint64_t ring64; uint32_t addrext, ring32, value; uint32_t trans = siba_dma_translation(sc->sc_dev); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA64_TXENABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA32_TXENABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) 
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (siba_get_revid(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; bwn_phy_exit(mac); siba_gpio_set(sc->sc_dev, 0); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t mask = 0x1f, set = 0xf, value; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f); if (siba_get_chipid(sc->sc_dev) == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (siba_get_revid(sc->sc_dev) >= 2) mask |= 0x0010; value = siba_gpio_get(sc->sc_dev); if (value == -1) return (0); siba_gpio_set(sc->sc_dev, (value & mask) | set); return (0); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int 
bwn_phy_init(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	int error;

	mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac);
	mac->mac_phy.rf_onoff(mac, 1);
	error = mac->mac_phy.init(mac);
	if (error) {
		device_printf(sc->sc_dev, "PHY init failed\n");
		goto fail0;
	}
	error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac));
	if (error) {
		device_printf(sc->sc_dev, "failed to switch default channel\n");
		goto fail1;
	}
	return (0);
fail1:
	if (mac->mac_phy.exit)
		mac->mac_phy.exit(mac);
fail0:
	mac->mac_phy.rf_onoff(mac, 0);
	return (error);
}

static void
bwn_set_txantenna(struct bwn_mac *mac, int antenna)
{
	uint16_t ant;
	uint16_t tmp;

	ant = bwn_ant2phy(antenna);

	/* For ACK/CTS */
	tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL);
	tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp);
	/* For Probe Responses */
	tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL);
	tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp);
}

static void
bwn_set_opmode(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint32_t ctl;
	uint16_t cfp_pretbtt;

	ctl = BWN_READ_4(mac, BWN_MACCTL);
	ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL |
	    BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS |
	    BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC);
	ctl |= BWN_MACCTL_STA;

	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		ctl |= BWN_MACCTL_HOSTAP;
	else if (ic->ic_opmode == IEEE80211_M_IBSS)
		ctl &= ~BWN_MACCTL_STA;
	ctl |= sc->sc_filters;

	if (siba_get_revid(sc->sc_dev) <= 4)
		ctl |= BWN_MACCTL_PROMISC;

	BWN_WRITE_4(mac, BWN_MACCTL, ctl);

	cfp_pretbtt = 2;
	if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) {
		if (siba_get_chipid(sc->sc_dev) == 0x4306 &&
		    siba_get_chiprev(sc->sc_dev) == 3)
			cfp_pretbtt = 100;
		else
			cfp_pretbtt = 50;
	}
	BWN_WRITE_2(mac, 0x612, cfp_pretbtt);
}

static int
bwn_dma_gettype(struct bwn_mac *mac)
{
	uint32_t tmp;
	uint16_t base;

	tmp = BWN_READ_4(mac, SIBA_TGSHIGH);
	if (tmp & SIBA_TGSHIGH_DMA64)
		return (BWN_DMA_64BIT);
	base = bwn_dma_base(0, 0);
	BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK);
	tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL);
	if (tmp & BWN_DMA32_TXADDREXT_MASK)
		return (BWN_DMA_32BIT);
	return (BWN_DMA_30BIT);
}

static void
bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{

	if (!error) {
		KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
		*((bus_addr_t *)arg) = seg->ds_addr;
	}
}

void
bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon)
{
	struct bwn_phy *phy = &mac->mac_phy;
	struct bwn_softc *sc = mac->mac_sc;
	unsigned int i, max_loop;
	uint16_t value;
	uint32_t buffer[5] = {
		0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000
	};

	if (ofdm) {
		max_loop = 0x1e;
		buffer[0] = 0x000201cc;
	} else {
		max_loop = 0xfa;
		buffer[0] = 0x000b846e;
	}

	BWN_ASSERT_LOCKED(mac->mac_sc);

	for (i = 0; i < 5; i++)
		bwn_ram_write(mac, i * 4, buffer[i]);

	BWN_WRITE_2(mac, 0x0568, 0x0000);
	BWN_WRITE_2(mac, 0x07c0,
	    (siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100);
	value = (ofdm ?
0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_N || phy->type == BWN_PHYTYPE_LP || phy->type == BWN_PHYTYPE_LCN) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); /* XXX TODO: n phy pa override? */ switch (phy->type) { case BWN_PHYTYPE_N: case BWN_PHYTYPE_LCN: BWN_WRITE_2(mac, 0x0502, 0x00d0); break; case BWN_PHYTYPE_LP: BWN_WRITE_2(mac, 0x0502, 0x0050); break; default: BWN_WRITE_2(mac, 0x0502, 0x0030); break; } /* flush */ BWN_READ_2(mac, 0x0502); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t state; state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) device_printf(sc->sc_dev, "warn: firmware state (%d)\n", state); mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (siba_get_revid(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } } static int bwn_fw_gets(struct 
bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = siba_get_revid(sc->sc_dev); const char *filename; uint32_t high; int error; /* microcode */ if (rev >= 5 && rev <= 10) filename = "ucode5"; else if (rev >= 11 && rev <= 12) filename = "ucode11"; else if (rev == 13) filename = "ucode13"; else if (rev == 14) filename = "ucode14"; else if (rev >= 15) filename = "ucode15"; else { device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); return (EOPNOTSUPP); } /* initvals */ high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if 
(mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0; offset &= BWN_FWINITVALS_OFFSET_MASK; if (offset >= 0x1000) goto fail; if (bit32) { if (array_size < sizeof(iv->data.d32)) goto fail; array_size -= sizeof(iv->data.d32); BWN_WRITE_4(mac, offset, be32toh(iv->data.d32)); iv = GET_NEXTIV32(iv); } else { if (array_size < sizeof(iv->data.d16)) goto fail; array_size -= sizeof(iv->data.d16); BWN_WRITE_2(mac, offset, be16toh(iv->data.d16)); iv = GET_NEXTIV16(iv); } } if (array_size != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "initvals: invalid format\n"); return (EPROTO); #undef GET_NEXTIV16 #undef GET_NEXTIV32 } int bwn_switch_channel(struct bwn_mac *mac, int chan) { struct bwn_phy *phy = &(mac->mac_phy); struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t channelcookie, savedcookie; int error; if (chan == 0xffff) chan = phy->get_default_chan(mac); channelcookie = chan; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) channelcookie |= 0x100; savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie); error = phy->switch_channel(mac, chan); if (error) goto fail; mac->mac_phy.chan = chan; DELAY(8000); return (0); fail: device_printf(sc->sc_dev, "failed to switch channel\n"); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie); return (error); } static uint16_t bwn_ant2phy(int antenna) { switch (antenna) { case BWN_ANT0: return (BWN_TX_PHY_ANT0); case BWN_ANT1: return (BWN_TX_PHY_ANT1); case BWN_ANT2: return (BWN_TX_PHY_ANT2); case BWN_ANT3: return (BWN_TX_PHY_ANT3); case BWN_ANTAUTO: return (BWN_TX_PHY_ANT01AUTO); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_wme_load(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: 
fail", __func__, __LINE__)); bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]), bwn_wme_shm_offsets[i]); bwn_mac_enable(mac); } static void bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p, uint16_t shm_offset) { #define SM(_v, _f) (((_v) << _f##_S) & _f) struct bwn_softc *sc = mac->mac_sc; uint16_t params[BWN_NR_WMEPARAMS]; int slot, tmp; unsigned int i; slot = BWN_READ_2(mac, BWN_RNG) & SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); memset(¶ms, 0, sizeof(params)); DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d " "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit, p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn); params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32; params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX); params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn; params[BWN_WMEPARAM_BSLOTS] = slot; params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn; for (i = 0; i < N(params); i++) { if (i == BWN_WMEPARAM_STATUS) { tmp = bwn_shm_read_2(mac, BWN_SHARED, shm_offset + (i * 2)); tmp |= 0x100; bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), tmp); } else { bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), params[i]); } } } static void bwn_mac_write_bssid(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; int i; uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2]; bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid); memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN); memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -= start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] |= 
((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (siba_get_revid(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); device_printf(sc->sc_dev, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) bwn_phy_reset(down_dev); up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? 
"2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } static void bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; siba_write_4(sc->sc_dev, SIBA_TGSLOW, ((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) | BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, (siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC) | BWN_TGSLOW_PHYRESET); DELAY(1000); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... */ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. */ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); 
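	/*
	 * NB (editorial note, inferred from the surrounding code): RX
	 * completions are only consumed from DMA engine 0 via mac_reason[0]
	 * above; these assertions appear to verify that no other engine ever
	 * reports BWN_DMAINTR_RX_DONE.
	 */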
KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } 
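	/*
	 * NB (editorial note, inferred from the surrounding code): the loop
	 * above spins until bit 0x08 of BWN_PS_STATUS clears, which appears
	 * to indicate that the power-save (PMQ) queue has been drained; the
	 * write below then seemingly acknowledges the PMQ condition.
	 */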
BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); + DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, + "%s: stat0=0x%08x, stat1=0x%08x\n", + __func__, + stat0, + stat1); + stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " "(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } macstat = le32toh(rxhdr->mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = 
m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { - struct bwn_dma_ring *dr; - struct bwn_dmadesc_generic *desc; - struct bwn_dmadesc_meta *meta; - struct bwn_pio_txqueue *tq; - struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; - struct ieee80211_node *ni; - struct ieee80211vap *vap; - int retrycnt = 0, slot; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { - if (status->ack) { - dr = bwn_dma_parse_cookie(mac, status, - status->cookie, &slot); - if (dr == NULL) { - device_printf(sc->sc_dev, - "failed to parse cookie\n"); - return; - } - while (1) { - dr->getdesc(dr, slot, &desc, &meta); - if (meta->mt_islast) { - ni = meta->mt_ni; - vap = ni->ni_vap; - ieee80211_ratectl_tx_complete(vap, ni, - status->ack ? - IEEE80211_RATECTL_TX_SUCCESS : - IEEE80211_RATECTL_TX_FAILURE, - &retrycnt, 0); - break; - } - slot = bwn_dma_nextslot(dr, slot); - } - } bwn_dma_handle_txeof(mac, status); } else { - if (status->ack) { - tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); - if (tq == NULL) { - device_printf(sc->sc_dev, - "failed to parse cookie\n"); - return; - } - ni = tp->tp_ni; - vap = ni->ni_vap; - ieee80211_ratectl_tx_complete(vap, ni, - status->ack ? - IEEE80211_RATECTL_TX_SUCCESS : - IEEE80211_RATECTL_TX_FAILURE, - &retrycnt, 0); - } bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO8_RXDATA); else siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO_RXDATA); len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } macstat = le32toh(rxhdr.mac_status); if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3), prq->prq_base + BWN_PIO8_RXDATA); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1), prq->prq_base + BWN_PIO_RXDATA); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, 
phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); static int rx_mac_dec_rpt = 0; BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); phystat3 = le16toh(rxhdr->phy_status3); macstat = le32toh(rxhdr->mac_status); chanstat = le16toh(rxhdr->channel); phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50) device_printf(sc->sc_dev, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); /* XXX calculating RSSI & noise & antenna */ if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); rssi = rxhdr->phy.abg.rssi; /* XXX incorrect RSSI calculation? */ noise = mac->mac_stats.link_noise; BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; + int retrycnt = 0; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); + /* Just count full frame retries for now */ + retrycnt = status->framecnt - 1; + ieee80211_ratectl_tx_complete(meta->mt_ni->ni_vap, meta->mt_ni, + status->ack ? 
+ IEEE80211_RATECTL_TX_SUCCESS : + IEEE80211_RATECTL_TX_FAILURE, + &retrycnt, 0); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; + int retrycnt = 0; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ + + /* Just count full frame retries for now */ + retrycnt = status->framecnt - 1; + ieee80211_ratectl_tx_complete(tp->tp_ni->ni_vap, tp->tp_ni, + status->ack ? + IEEE80211_RATECTL_TX_SUCCESS : + IEEE80211_RATECTL_TX_FAILURE, + &retrycnt, 0); + if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; int result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; struct ieee80211_frame_cts *cts; struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; unsigned int len; uint32_t macctl = 0; int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; /* * Find TX rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { + /* XXX TODO: don't fall back to CCK rates for OFDM */ rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; + /* Note: this maps the select ieee80211 rate to hardware rate */ rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); + /* XXX rate/rate_fb is the hardware rate */ if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ bwn_plcp_genhdr(BWN_ISOLDFMT(mac) ? (struct bwn_plcp4 *)(&txhdr->body.old.plcp) : (struct bwn_plcp4 *)(&txhdr->body.new.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb); txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM : BWN_TX_EFT_FB_CCK; txhdr->chan = phy->chan; phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM : BWN_TX_PHY_ENC_CCK; + /* XXX preamble? 
obey net80211 */ if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) phyctl |= BWN_TX_PHY_SHORTPRMBL; /* XXX TX antenna selection */ switch (bwn_antenna_sanitize(mac, 0)) { case 0: phyctl |= BWN_TX_PHY_ANT01AUTO; break; case 1: phyctl |= BWN_TX_PHY_ANT0; break; case 2: phyctl |= BWN_TX_PHY_ANT1; break; case 3: phyctl |= BWN_TX_PHY_ANT2; break; case 4: phyctl |= BWN_TX_PHY_ANT3; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (!ismcast) macctl |= BWN_TX_MAC_ACK; macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) macctl |= BWN_TX_MAC_LONGFRAME; if (ic->ic_flags & IEEE80211_F_USEPROT) { /* XXX RTS rate is always 1MB??? */ + /* XXX TODO: don't fall back to CCK rates for OFDM */ rts_rate = BWN_CCK_RATE_1MB; rts_rate_fb = bwn_get_fbrate(rts_rate); + /* XXX 'rate' here is hardware rate now, not the net80211 rate */ protdur = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort) + + ieee80211_ack_duration(ic->ic_rt, rate, isshort); if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { cts = (struct ieee80211_frame_cts *)(BWN_ISOLDFMT(mac) ? (txhdr->body.old.rts_frame) : (txhdr->body.new.rts_frame)); mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, protdur); KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_CTSTOSELF; len = sizeof(struct ieee80211_frame_cts); } else { rts = (struct ieee80211_frame_rts *)(BWN_ISOLDFMT(mac) ? (txhdr->body.old.rts_frame) : (txhdr->body.new.rts_frame)); + /* XXX rate/rate_fb is the hardware rate */ protdur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, protdur); KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_RTSCTS; len = sizeof(struct ieee80211_frame_rts); } len += IEEE80211_CRC_LEN; bwn_plcp_genhdr((struct bwn_plcp4 *)((BWN_ISOLDFMT(mac)) ? &txhdr->body.old.rts_plcp : &txhdr->body.new.rts_plcp), len, rts_rate); bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len, rts_rate_fb); protwh = (struct ieee80211_frame *)(BWN_ISOLDFMT(mac) ? (&txhdr->body.old.rts_frame) : (&txhdr->body.new.rts_frame)); txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur; if (BWN_ISOFDMRATE(rts_rate)) { txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM; txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate); } else { txhdr->eftypes |= BWN_TX_EFT_RTS_CCK; txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate); } txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ? 
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; } if (BWN_ISOLDFMT(mac)) txhdr->body.old.cookie = htole16(cookie); else txhdr->body.new.cookie = htole16(cookie); txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = siba_sprom_get_ant_bg(sc->sc_dev); else mask = siba_sprom_get_ant_a(sc->sc_dev); if (!(mask & (1 << (n - 1)))) return (0); return (n); } +/* + * Return a fallback rate for the given rate. + * + * Note: Don't fall back from OFDM to CCK. + */ static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { + /* CCK */ case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); + + /* OFDM */ case BWN_OFDM_RATE_6MB: - return (BWN_CCK_RATE_5MB); + return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); siba_write_multi_4(sc->sc_dev, data, (len & ~3), tq->tq_base + BWN_PIO8_TXDATA); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct 
bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); siba_write_multi_2(sc->sc_dev, data, (len & ~1), tq->tq_base + BWN_PIO_TXDATA); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_LOCK(sc); if (mac && mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if 
(reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; tsf += le16toh(rxhdr->mac_time); if (low_mactime_now < le16toh(rxhdr->mac_time)) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; bus_addr_t lowaddr = 0; int error; if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) return (0); KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__)); mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->dmatype = bwn_dma_gettype(mac); if (dma->dmatype == BWN_DMA_30BIT) lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT; else if 
(dma->dmatype == BWN_DMA_32BIT) lowaddr = BUS_SPACE_MAXADDR_32BIT; else lowaddr = BUS_SPACE_MAXADDR; /* * Create top level DMA tag */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. 
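	 * (In other words, as the code below suggests: the hardware may
	 * deliver the same TX-status event twice with an identical sequence
	 * number; when the slot index is out of range, the code asserts that
	 * the sequence number matches the last one seen, prints a warning,
	 * and returns NULL so the duplicate status is dropped.)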
*/ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static void bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; uint16_t val[BWN_LED_MAX]; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (siba_get_pci_subvendor(sc->sc_dev) == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; val[0] = siba_sprom_get_gpio0(sc->sc_dev); val[1] = siba_sprom_get_gpio1(sc->sc_dev); val[2] = siba_sprom_get_gpio2(sc->sc_dev); val[3] = siba_sprom_get_gpio3(sc->sc_dev); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->led_act = led_act[i]; } else { if (val[i] & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val[i] & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; 
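/* 5 GHz (11a) operation is not yet supported (see the TODO above), so this LED stays off. */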
break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? "ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. 
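Sysctl nodes are attached to sc_curmac only.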
*/ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); Index: user/ngie/release-pkg-fix-tests/sys/dev/esp/esp_pci.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/esp/esp_pci.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/esp/esp_pci.c (revision 299055) @@ -1,654 +1,654 @@ /*- * Copyright (c) 2011 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* $NetBSD: pcscp.c,v 1.45 2010/11/13 13:52:08 uebayasi Exp $ */ /*- * Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center; Izumi Tsutsui. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * esp_pci.c: device dependent code for AMD Am53c974 (PCscsi-PCI) * written by Izumi Tsutsui * * Technical manual available at * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/19113.pdf */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PCI_DEVICE_ID_AMD53C974 0x20201022 struct esp_pci_softc { struct ncr53c9x_softc sc_ncr53c9x; /* glue to MI code */ - struct device *sc_dev; + device_t sc_dev; struct resource *sc_res[2]; #define ESP_PCI_RES_INTR 0 #define ESP_PCI_RES_IO 1 bus_dma_tag_t sc_pdmat; bus_dma_tag_t sc_xferdmat; /* DMA tag for transfers */ bus_dmamap_t sc_xferdmam; /* DMA map for transfers */ void *sc_ih; /* interrupt handler */ size_t sc_dmasize; /* DMA size */ void **sc_dmaaddr; /* DMA address */ size_t *sc_dmalen; /* DMA length */ int sc_active; /* DMA state */ int sc_datain; /* DMA Data Direction */ }; static struct resource_spec esp_pci_res_spec[] = { { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* ESP_PCI_RES_INTR */ { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE }, /* ESP_PCI_RES_IO */ { -1, 0 } }; #define READ_DMAREG(sc, reg) \ bus_read_4((sc)->sc_res[ESP_PCI_RES_IO], (reg)) #define WRITE_DMAREG(sc, reg, var) \ bus_write_4((sc)->sc_res[ESP_PCI_RES_IO], (reg), (var)) #define READ_ESPREG(sc, reg) \ bus_read_1((sc)->sc_res[ESP_PCI_RES_IO], (reg) << 2) #define WRITE_ESPREG(sc, reg, val) \ bus_write_1((sc)->sc_res[ESP_PCI_RES_IO], (reg) << 2, (val)) static int esp_pci_probe(device_t); static int esp_pci_attach(device_t); static int esp_pci_detach(device_t); static int esp_pci_suspend(device_t); static int esp_pci_resume(device_t); static device_method_t esp_pci_methods[] = { DEVMETHOD(device_probe, esp_pci_probe), DEVMETHOD(device_attach, esp_pci_attach), DEVMETHOD(device_detach, esp_pci_detach), DEVMETHOD(device_suspend, esp_pci_suspend), DEVMETHOD(device_resume, esp_pci_resume), DEVMETHOD_END }; static driver_t esp_pci_driver = { "esp", esp_pci_methods, sizeof(struct esp_pci_softc) }; DRIVER_MODULE(esp, pci, esp_pci_driver, esp_devclass, 0, 0); MODULE_DEPEND(esp, pci, 1, 1, 1); /* * Functions and the switch for the MI code */ static void esp_pci_dma_go(struct ncr53c9x_softc *); static int esp_pci_dma_intr(struct ncr53c9x_softc *); static int esp_pci_dma_isactive(struct 
ncr53c9x_softc *); static int esp_pci_dma_isintr(struct ncr53c9x_softc *); static void esp_pci_dma_reset(struct ncr53c9x_softc *); static int esp_pci_dma_setup(struct ncr53c9x_softc *, void **, size_t *, int, size_t *); static void esp_pci_dma_stop(struct ncr53c9x_softc *); static void esp_pci_write_reg(struct ncr53c9x_softc *, int, uint8_t); static uint8_t esp_pci_read_reg(struct ncr53c9x_softc *, int); static void esp_pci_xfermap(void *arg, bus_dma_segment_t *segs, int nseg, int error); static struct ncr53c9x_glue esp_pci_glue = { esp_pci_read_reg, esp_pci_write_reg, esp_pci_dma_isintr, esp_pci_dma_reset, esp_pci_dma_intr, esp_pci_dma_setup, esp_pci_dma_go, esp_pci_dma_stop, esp_pci_dma_isactive, }; static int esp_pci_probe(device_t dev) { if (pci_get_devid(dev) == PCI_DEVICE_ID_AMD53C974) { device_set_desc(dev, "AMD Am53C974 Fast-SCSI"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach this instance, and then all the sub-devices */ static int esp_pci_attach(device_t dev) { struct esp_pci_softc *esc; struct ncr53c9x_softc *sc; int error; esc = device_get_softc(dev); sc = &esc->sc_ncr53c9x; NCR_LOCK_INIT(sc); esc->sc_dev = dev; sc->sc_glue = &esp_pci_glue; pci_enable_busmaster(dev); error = bus_alloc_resources(dev, esp_pci_res_spec, esc->sc_res); if (error != 0) { device_printf(dev, "failed to allocate resources\n"); bus_release_resources(dev, esp_pci_res_spec, esc->sc_res); return (error); } error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &esc->sc_pdmat); if (error != 0) { device_printf(dev, "cannot create parent DMA tag\n"); goto fail_res; } /* * XXX More of this should be in ncr53c9x_attach(), but * XXX should we really poke around the chip that much in * XXX the MI code? Think about this more... */ /* * Set up static configuration info. * * XXX we should read the configuration from the EEPROM. */ sc->sc_id = 7; sc->sc_cfg1 = sc->sc_id | NCRCFG1_PARENB; sc->sc_cfg2 = NCRCFG2_SCSI2 | NCRCFG2_FE; sc->sc_cfg3 = NCRAMDCFG3_IDM | NCRAMDCFG3_FCLK; sc->sc_cfg4 = NCRAMDCFG4_GE12NS | NCRAMDCFG4_RADE; sc->sc_rev = NCR_VARIANT_AM53C974; sc->sc_features = NCR_F_FASTSCSI | NCR_F_DMASELECT; sc->sc_cfg3_fscsi = NCRAMDCFG3_FSCSI; sc->sc_freq = 40; /* MHz */ /* * This is the value used to start sync negotiations * Note that the NCR register "SYNCTP" is programmed * in "clocks per byte", and has a minimum value of 4. * The SCSI period used in negotiation is one-fourth * of the time (in nanoseconds) needed to transfer one byte. * Since the chip's clock is given in MHz, we have the following * formula: 4 * period = (1000 / freq) * 4 */ sc->sc_minsync = 1000 / sc->sc_freq; sc->sc_maxxfer = DFLTPHYS; /* see below */ sc->sc_maxoffset = 15; sc->sc_extended_geom = 1; #define MDL_SEG_SIZE 0x1000 /* 4kbyte per segment */ /* * Create the DMA tag and map for the data transfers. * * Note: given that bus_dma(9) only adheres to the requested alignment * for the first segment (and that also only for bus_dmamem_alloc()ed * DMA maps) we can't use the Memory Descriptor List. However, also * when not using the MDL, the maximum transfer size apparently is * limited to 4k so we have to split transfers up, which plain sucks. 
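The transfer tag created below is therefore limited to a single segment of at most MDL_SEG_SIZE (4 kbyte) per map.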
*/ error = bus_dma_tag_create(esc->sc_pdmat, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MDL_SEG_SIZE, 1, MDL_SEG_SIZE, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->sc_lock, &esc->sc_xferdmat); if (error != 0) { device_printf(dev, "cannot create transfer DMA tag\n"); goto fail_pdmat; } error = bus_dmamap_create(esc->sc_xferdmat, 0, &esc->sc_xferdmam); if (error != 0) { device_printf(dev, "cannot create transfer DMA map\n"); goto fail_xferdmat; } error = bus_setup_intr(dev, esc->sc_res[ESP_PCI_RES_INTR], INTR_MPSAFE | INTR_TYPE_CAM, NULL, ncr53c9x_intr, sc, &esc->sc_ih); if (error != 0) { device_printf(dev, "cannot set up interrupt\n"); goto fail_xferdmam; } /* Do the common parts of attachment. */ sc->sc_dev = esc->sc_dev; error = ncr53c9x_attach(sc); if (error != 0) { device_printf(esc->sc_dev, "ncr53c9x_attach failed\n"); goto fail_intr; } return (0); fail_intr: bus_teardown_intr(esc->sc_dev, esc->sc_res[ESP_PCI_RES_INTR], esc->sc_ih); fail_xferdmam: bus_dmamap_destroy(esc->sc_xferdmat, esc->sc_xferdmam); fail_xferdmat: bus_dma_tag_destroy(esc->sc_xferdmat); fail_pdmat: bus_dma_tag_destroy(esc->sc_pdmat); fail_res: bus_release_resources(dev, esp_pci_res_spec, esc->sc_res); NCR_LOCK_DESTROY(sc); return (error); } static int esp_pci_detach(device_t dev) { struct ncr53c9x_softc *sc; struct esp_pci_softc *esc; int error; esc = device_get_softc(dev); sc = &esc->sc_ncr53c9x; bus_teardown_intr(esc->sc_dev, esc->sc_res[ESP_PCI_RES_INTR], esc->sc_ih); error = ncr53c9x_detach(sc); if (error != 0) return (error); bus_dmamap_destroy(esc->sc_xferdmat, esc->sc_xferdmam); bus_dma_tag_destroy(esc->sc_xferdmat); bus_dma_tag_destroy(esc->sc_pdmat); bus_release_resources(dev, esp_pci_res_spec, esc->sc_res); NCR_LOCK_DESTROY(sc); return (0); } static int esp_pci_suspend(device_t dev) { return (ENXIO); } static int esp_pci_resume(device_t dev) { return (ENXIO); } static void esp_pci_xfermap(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct esp_pci_softc *esc = (struct esp_pci_softc *)arg; if (error != 0) return; KASSERT(nsegs == 1, ("%s: bad transfer segment count %d", __func__, nsegs)); KASSERT(segs[0].ds_len <= MDL_SEG_SIZE, ("%s: bad transfer segment length %ld", __func__, (long)segs[0].ds_len)); /* Program the DMA Starting Physical Address. */ WRITE_DMAREG(esc, DMA_SPA, segs[0].ds_addr); } /* * Glue functions */ static uint8_t esp_pci_read_reg(struct ncr53c9x_softc *sc, int reg) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; return (READ_ESPREG(esc, reg)); } static void esp_pci_write_reg(struct ncr53c9x_softc *sc, int reg, uint8_t v) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; WRITE_ESPREG(esc, reg, v); } static int esp_pci_dma_isintr(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; return (READ_ESPREG(esc, NCR_STAT) & NCRSTAT_INT) != 0; } static void esp_pci_dma_reset(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE); esc->sc_active = 0; } static int esp_pci_dma_intr(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; bus_dma_tag_t xferdmat; bus_dmamap_t xferdmam; size_t dmasize; int datain, i, resid, trans; uint32_t dmastat; char *p = NULL; xferdmat = esc->sc_xferdmat; xferdmam = esc->sc_xferdmam; datain = esc->sc_datain; dmastat = READ_DMAREG(esc, DMA_STAT); if ((dmastat & DMASTAT_ERR) != 0) { /* XXX not tested... */ WRITE_DMAREG(esc, DMA_CMD, DMACMD_ABORT | (datain != 0 ? 
DMACMD_DIR : 0)); device_printf(esc->sc_dev, "DMA error detected; Aborting.\n"); bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(xferdmat, xferdmam); return (-1); } if ((dmastat & DMASTAT_ABT) != 0) { /* XXX what should be done? */ device_printf(esc->sc_dev, "DMA aborted.\n"); WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ? DMACMD_DIR : 0)); esc->sc_active = 0; return (0); } KASSERT(esc->sc_active != 0, ("%s: DMA wasn't active", __func__)); /* DMA has stopped. */ esc->sc_active = 0; dmasize = esc->sc_dmasize; if (dmasize == 0) { /* A "Transfer Pad" operation completed. */ NCR_DMA(("%s: discarded %d bytes (tcl=%d, tcm=%d)\n", __func__, READ_ESPREG(esc, NCR_TCL) | (READ_ESPREG(esc, NCR_TCM) << 8), READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM))); return (0); } resid = 0; /* * If a transfer onto the SCSI bus gets interrupted by the device * (e.g. for a SAVEPOINTER message), the data in the FIFO counts * as residual since the ESP counter registers get decremented as * bytes are clocked into the FIFO. */ if (datain == 0 && (resid = (READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF)) != 0) NCR_DMA(("%s: empty esp FIFO of %d ", __func__, resid)); if ((sc->sc_espstat & NCRSTAT_TC) == 0) { /* * "Terminal count" is off, so read the residue * out of the ESP counter registers. */ if (datain != 0) { resid = READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF; while (resid > 1) resid = READ_ESPREG(esc, NCR_FFLAG) & NCRFIFO_FF; WRITE_DMAREG(esc, DMA_CMD, DMACMD_BLAST | DMACMD_DIR); for (i = 0; i < 0x8000; i++) /* XXX 0x8000 ? */ if ((READ_DMAREG(esc, DMA_STAT) & DMASTAT_BCMP) != 0) break; /* See the below comments... */ if (resid != 0) p = *esc->sc_dmaaddr; } resid += READ_ESPREG(esc, NCR_TCL) | (READ_ESPREG(esc, NCR_TCM) << 8) | (READ_ESPREG(esc, NCR_TCH) << 16); } else while ((dmastat & DMASTAT_DONE) == 0) dmastat = READ_DMAREG(esc, DMA_STAT); WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ? DMACMD_DIR : 0)); /* Sync the transfer buffer. */ bus_dmamap_sync(xferdmat, xferdmam, datain != 0 ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(xferdmat, xferdmam); trans = dmasize - resid; /* * From the technical manual notes: * * "In some odd byte conditions, one residual byte will be left * in the SCSI FIFO, and the FIFO flags will never count to 0. * When this happens, the residual byte should be retrieved * via PIO following completion of the BLAST operation." */ if (p != NULL) { p += trans; *p = READ_ESPREG(esc, NCR_FIFO); trans++; } if (trans < 0) { /* transferred < 0 ? */ #if 0 /* * This situation can happen in perfectly normal operation * if the ESP is reselected while using DMA to select * another target. As such, don't print the warning. */ device_printf(dev, "xfer (%d) > req (%d)\n", trans, dmasize); #endif trans = dmasize; } NCR_DMA(("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n", __func__, READ_ESPREG(esc, NCR_TCL), READ_ESPREG(esc, NCR_TCM), READ_ESPREG(esc, NCR_TCH), trans, resid)); *esc->sc_dmalen -= trans; *esc->sc_dmaaddr = (char *)*esc->sc_dmaaddr + trans; return (0); } static int esp_pci_dma_setup(struct ncr53c9x_softc *sc, void **addr, size_t *len, int datain, size_t *dmasize) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; int error; WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | (datain != 0 ? 
DMACMD_DIR : 0)); *dmasize = esc->sc_dmasize = ulmin(*dmasize, MDL_SEG_SIZE); esc->sc_dmaaddr = addr; esc->sc_dmalen = len; esc->sc_datain = datain; /* * There's no need to set up DMA for a "Transfer Pad" operation. */ if (*dmasize == 0) return (0); /* Set the transfer length. */ WRITE_DMAREG(esc, DMA_STC, *dmasize); /* * Load the transfer buffer and program the DMA address. * Note that the NCR53C9x core can't handle EINPROGRESS so we set * BUS_DMA_NOWAIT. */ error = bus_dmamap_load(esc->sc_xferdmat, esc->sc_xferdmam, *esc->sc_dmaaddr, *dmasize, esp_pci_xfermap, sc, BUS_DMA_NOWAIT); return (error); } static void esp_pci_dma_go(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; int datain; datain = esc->sc_datain; /* No DMA transfer for a "Transfer Pad" operation */ if (esc->sc_dmasize == 0) return; /* Sync the transfer buffer. */ bus_dmamap_sync(esc->sc_xferdmat, esc->sc_xferdmam, datain != 0 ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); /* Set the DMA engine to the IDLE state. */ /* XXX DMA Transfer Interrupt Enable bit is broken? */ WRITE_DMAREG(esc, DMA_CMD, DMACMD_IDLE | /* DMACMD_INTE | */ (datain != 0 ? DMACMD_DIR : 0)); /* Issue a DMA start command. */ WRITE_DMAREG(esc, DMA_CMD, DMACMD_START | /* DMACMD_INTE | */ (datain != 0 ? DMACMD_DIR : 0)); esc->sc_active = 1; } static void esp_pci_dma_stop(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; /* DMA stop */ /* XXX what should we do here ? */ WRITE_DMAREG(esc, DMA_CMD, DMACMD_ABORT | (esc->sc_datain != 0 ? DMACMD_DIR : 0)); bus_dmamap_unload(esc->sc_xferdmat, esc->sc_xferdmam); esc->sc_active = 0; } static int esp_pci_dma_isactive(struct ncr53c9x_softc *sc) { struct esp_pci_softc *esc = (struct esp_pci_softc *)sc; /* XXX should we check esc->sc_active? */ if ((READ_DMAREG(esc, DMA_CMD) & DMACMD_CMD) != DMACMD_IDLE) return (1); return (0); } Index: user/ngie/release-pkg-fix-tests/sys/dev/fdc/fdcvar.h =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/fdc/fdcvar.h (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/fdc/fdcvar.h (revision 299055) @@ -1,94 +1,94 @@ /*- * Copyright (c) 2004-2005 M. Warner Losh. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /* XXX should audit this file to see if additional copyrights needed */ enum fdc_type { FDC_NE765, FDC_ENHANCED, FDC_UNKNOWN = -1 }; /* * Per controller structure (softc). */ struct fdc_data { int fdcu; /* our unit number */ int dmachan; int flags; #define FDC_HASDMA 0x01 #define FDC_STAT_VALID 0x08 #define FDC_HAS_FIFO 0x10 #define FDC_NEEDS_RESET 0x20 #define FDC_NODMA 0x40 /* Don't do DMA */ #define FDC_NOFAST 0x80 /* Don't register isr as a fast one */ #define FDC_KTHREAD_EXIT 0x1000 /* request worker thread to stop */ #define FDC_KTHREAD_ALIVE 0x2000 /* worker thread is alive */ struct fd_data *fd; /* The active drive */ int retry; #ifndef PC98 int fdout; /* mirror of the w/o digital output reg */ #endif u_int status[7]; /* copy of the registers */ enum fdc_type fdct; /* chip version of FDC */ int fdc_errs; /* number of logged errors */ struct bio_queue_head head; struct bio *bp; /* active buffer */ struct resource *res_irq, *res_drq; int rid_irq, rid_drq; #define FDC_MAXREG 8 int ridio[FDC_MAXREG]; struct resource *resio[FDC_MAXREG]; bus_space_tag_t iot; bus_space_handle_t ioh[FDC_MAXREG]; int ioff[FDC_MAXREG]; void *fdc_intr; - struct device *fdc_dev; + device_t fdc_dev; struct mtx fdc_mtx; struct proc *fdc_thread; }; extern devclass_t fdc_devclass; enum fdc_device_ivars { FDC_IVAR_FDUNIT, FDC_IVAR_FDTYPE, }; __BUS_ACCESSOR(fdc, fdunit, FDC, FDUNIT, int); __BUS_ACCESSOR(fdc, fdtype, FDC, FDTYPE, int); void fdc_release_resources(struct fdc_data *); int fdc_attach(device_t); void fdc_start_worker(device_t); int fdc_hints_probe(device_t); int fdc_detach(device_t dev); device_t fdc_add_child(device_t, const char *, int); int fdc_initial_reset(device_t, struct fdc_data *); int fdc_print_child(device_t, device_t); int fdc_read_ivar(device_t, device_t, int, uintptr_t *); int fdc_write_ivar(device_t, device_t, int, uintptr_t); int fdc_isa_alloc_resources(device_t, struct fdc_data *); Index: user/ngie/release-pkg-fix-tests/sys/dev/hptiop/hptiop.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/hptiop/hptiop.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/hptiop/hptiop.c (revision 299055) @@ -1,2854 +1,2854 @@ /* * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const char driver_name[] = "hptiop"; static const char driver_version[] = "v1.9"; static devclass_t hptiop_devclass; static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec); static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba, u_int32_t req); static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams); static int hptiop_rescan_bus(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba); static int hptiop_get_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_get_config *config); static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config); static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba); static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams); static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void 
hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs); static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg); static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba); static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); static int hptiop_probe(device_t dev); static int hptiop_attach(device_t dev); static int hptiop_detach(device_t dev); static int hptiop_shutdown(device_t dev); static void hptiop_action(struct cam_sim *sim, union ccb *ccb); static void hptiop_poll(struct cam_sim *sim); static void hptiop_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); static void hptiop_pci_intr(void *arg); static void hptiop_release_resource(struct hpt_iop_hba *hba); static void hptiop_reset_adapter(void *argv); static d_open_t hptiop_open; static d_close_t hptiop_close; static d_ioctl_t hptiop_ioctl; static struct cdevsw hptiop_cdevsw = { .d_open = hptiop_open, .d_close = hptiop_close, .d_ioctl = hptiop_ioctl, .d_name = driver_name, .d_version = D_VERSION, }; #define hba_from_dev(dev) \ ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev))) #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value) #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\ hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset)) static int hptiop_open(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); if (hba==NULL) return ENXIO; if (hba->flag & HPT_IOCTL_FLAG_OPEN) return EBUSY; hba->flag |= HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_close(ioctl_dev_t dev, int flags, int devtype, ioctl_thread_t proc) { struct hpt_iop_hba *hba = hba_from_dev(dev); hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; return 0; } static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data, int flags, ioctl_thread_t proc) { int ret = EFAULT; struct hpt_iop_hba *hba = hba_from_dev(dev); mtx_lock(&Giant); switch (cmd) { case HPT_DO_IOCONTROL: ret = hba->ops->do_ioctl(hba, (struct hpt_iop_ioctl_param *)data); break; case HPT_SCAN_BUS: ret = hptiop_rescan_bus(hba); break; } 
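/* Any other ioctl code leaves ret at its initial EFAULT. */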
mtx_unlock(&Giant); return ret; } static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) { u_int64_t p; u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); if (outbound_tail != outbound_head) { bus_space_read_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, outbound_q[outbound_tail]), (u_int32_t *)&p, 2); outbound_tail++; if (outbound_tail == MVIOP_QUEUE_LEN) outbound_tail = 0; BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); return p; } else return 0; } static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) { u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); u_int32_t head = inbound_head + 1; if (head == MVIOP_QUEUE_LEN) head = 0; bus_space_write_region_4(hba->bar2t, hba->bar2h, offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), (u_int32_t *)&p, 2); BUS_SPACE_WRT4_MV2(inbound_head, head); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); } static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MV2(inbound_msg, msg); BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg) { BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg); BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a); } static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) { u_int32_t req=0; int i; for (i = 0; i < millisec; i++) { req = BUS_SPACE_RD4_ITL(inbound_queue); if (req != IOPMU_QUEUE_EMPTY) break; DELAY(1000); } if (req!=IOPMU_QUEUE_EMPTY) { BUS_SPACE_WRT4_ITL(outbound_queue, req); BUS_SPACE_RD4_ITL(outbound_intstatus); return 0; } return -1; } static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba, u_int32_t millisec) { if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) return -1; return 0; } static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, u_int32_t index) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req=0; union ccb *ccb; u_int8_t *cdb; u_int32_t result, temp, dxfer; u_int64_t temp64; if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { srb = hba->srb[index & ~(u_int32_t) (IOPMU_QUEUE_ADDR_HOST_BIT | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; req = (struct hpt_iop_request_scsi_command *)srb; if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) result = IOP_RESULT_SUCCESS; else result = req->header.result; } else { srb = hba->srb[index & ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; result = req->header.result; } dxfer = req->dataxfer_length; goto srb_complete; } /*iop req*/ temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, type)); result = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, result)); switch(temp) { case IOP_REQUEST_TYPE_IOCTL_COMMAND: { temp64 = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); wakeup((void *)((unsigned long)hba->u.itl.mu + index)); 
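/* The waiter in hptiop_post_ioctl_command_itl() sleeps on this address. */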
break; } case IOP_REQUEST_TYPE_SCSI_COMMAND: bus_space_read_region_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); srb = (struct hpt_iop_srb *)(unsigned long)temp64; dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, dataxfer_length)); srb_complete: ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } switch (result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (dxfer < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - dxfer; else ccb->csio.sense_resid = 0; if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ bus_space_read_region_1(hba->bar0t, hba->bar0h, index + offsetof(struct hpt_iop_request_scsi_command, sg_list), (u_int8_t *)&ccb->csio.sense_data, MIN(dxfer, sizeof(ccb->csio.sense_data))); } else { memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(dxfer, sizeof(ccb->csio.sense_data))); } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) BUS_SPACE_WRT4_ITL(outbound_queue, index); ccb->csio.resid = ccb->csio.dxfer_len - dxfer; hptiop_free_srb(hba, srb); xpt_done(ccb); break; } } static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) { u_int32_t req, temp; while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_request_callback_itl(hba, req); else { struct hpt_iop_request_header *p; p = (struct hpt_iop_request_header *) ((char *)hba->u.itl.mu + req); temp = bus_space_read_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, flags)); if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { u_int64_t temp64; bus_space_read_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) { hptiop_request_callback_itl(hba, req); } else { temp64 = 1; bus_space_write_region_4(hba->bar0t, hba->bar0h,req + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); } } else hptiop_request_callback_itl(hba, req); } } } static int hptiop_intr_itl(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_ITL(outbound_intstatus); if (status & IOPMU_OUTBOUND_INT_MSG0) { u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); KdPrint(("hptiop: received outbound msg %x\n", msg)); BUS_SPACE_WRT4_ITL(outbound_intstatus, 
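/* clear the MSG0 interrupt status bit */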
IOPMU_OUTBOUND_INT_MSG0); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_itl(hba); ret = 1; } return ret; } static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, u_int64_t _tag) { u_int32_t context = (u_int32_t)_tag; if (context & MVIOP_CMD_TYPE_SCSI) { struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); } else if (context & MVIOP_CMD_TYPE_IOCTL) { struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup(req); } else if (context & (MVIOP_CMD_TYPE_SET_CONFIG | MVIOP_CMD_TYPE_GET_CONFIG)) hba->config_done = 1; else { device_printf(hba->pcidev, "wrong callback type\n"); } } static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba, u_int32_t _tag) { u_int32_t req_type = _tag & 0xf; struct hpt_iop_srb *srb; struct hpt_iop_request_scsi_command *req; union ccb *ccb; u_int8_t *cdb; switch (req_type) { case IOP_REQUEST_TYPE_GET_CONFIG: case IOP_REQUEST_TYPE_SET_CONFIG: hba->config_done = 1; break; case IOP_REQUEST_TYPE_SCSI_COMMAND: srb = hba->srb[(_tag >> 4) & 0xff]; req = (struct hpt_iop_request_scsi_command *)srb; ccb = (union ccb *)srb->ccb; callout_stop(&srb->timeout); if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
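SYNCHRONIZE CACHE is always completed as CAM_REQ_CMP here, bypassing the result handling below.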
*/ ccb->ccb_h.status = CAM_REQ_CMP; goto scsi_done; } if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) req->header.result = IOP_RESULT_SUCCESS; switch (req->header.result) { case IOP_RESULT_SUCCESS: switch (ccb->ccb_h.flags & CAM_DIR_MASK) { case CAM_DIR_IN: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; case CAM_DIR_OUT: bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(hba->io_dmat, srb->dma_map); break; } ccb->ccb_h.status = CAM_REQ_CMP; break; case IOP_RESULT_BAD_TARGET: ccb->ccb_h.status = CAM_DEV_NOT_THERE; break; case IOP_RESULT_BUSY: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_INVALID_REQUEST: ccb->ccb_h.status = CAM_REQ_INVALID; break; case IOP_RESULT_FAIL: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; case IOP_RESULT_RESET: ccb->ccb_h.status = CAM_BUSY; break; case IOP_RESULT_CHECK_CONDITION: memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); if (req->dataxfer_length < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - req->dataxfer_length; else ccb->csio.sense_resid = 0; memcpy(&ccb->csio.sense_data, &req->sg_list, MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; break; default: ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } scsi_done: ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; hptiop_free_srb(hba, srb); xpt_done(ccb); break; case IOP_REQUEST_TYPE_IOCTL_COMMAND: if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT) hba->config_done = 1; else hba->config_done = -1; wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr); break; default: device_printf(hba->pcidev, "wrong callback type\n"); break; } } static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) { u_int64_t req; while ((req = hptiop_mv_outbound_read(hba))) { if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { hptiop_request_callback_mv(hba, req); } } } } static int hptiop_intr_mv(struct hpt_iop_hba * hba) { u_int32_t status; int ret = 0; status = BUS_SPACE_RD4_MV0(outbound_doorbell); if (status) BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); if (status & MVIOP_MU_OUTBOUND_INT_MSG) { u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); KdPrint(("hptiop: received outbound msg %x\n", msg)); hptiop_os_message_callback(hba, msg); ret = 1; } if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { hptiop_drain_outbound_queue_mv(hba); ret = 1; } return ret; } static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba) { u_int32_t status, _tag, cptr; int ret = 0; if (hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); } status = BUS_SPACE_RD4_MVFREY2(f0_doorbell); if (status) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status); if (status & CPU_TO_F0_DRBL_MSG_A_BIT) { u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a); hptiop_os_message_callback(hba, msg); } ret = 1; } status = BUS_SPACE_RD4_MVFREY2(isr_cause); if (status) { BUS_SPACE_WRT4_MVFREY2(isr_cause, status); do { cptr = *hba->u.mvfrey.outlist_cptr & 0xff; while (hba->u.mvfrey.outlist_rptr != cptr) { hba->u.mvfrey.outlist_rptr++; if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) { hba->u.mvfrey.outlist_rptr = 0; } _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val; hptiop_request_callback_mvfrey(hba, _tag); ret = 2; } } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff)); } if 
(hba->initialized) { BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); } return ret; } static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, u_int32_t req32, u_int32_t millisec) { u_int32_t i; u_int64_t temp64; BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); for (i = 0; i < millisec; i++) { hptiop_intr_itl(hba); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_header, context), (u_int32_t *)&temp64, 2); if (temp64) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i; u_int64_t phy_addr; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy | (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; ((struct hpt_iop_request_get_config *)req)->header.flags |= IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT; hptiop_mv_inbound_write(phy_addr, hba); BUS_SPACE_RD4_MV0(outbound_intmask); for (i = 0; i < millisec; i++) { hptiop_intr_mv(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba, void *req, u_int32_t millisec) { u_int32_t i, index; u_int64_t phy_addr; struct hpt_iop_request_header *reqhdr = (struct hpt_iop_request_header *)req; hba->config_done = 0; phy_addr = hba->ctlcfgcmd_phy; reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); reqhdr->context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); for (i = 0; i < millisec; i++) { hptiop_intr_mvfrey(hba); if (hba->config_done) return 0; DELAY(1000); } return -1; } static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, u_int32_t msg, u_int32_t millisec) { u_int32_t i; hba->msg_done = 0; hba->ops->post_msg(hba, msg); for (i=0; iops->iop_intr(hba); if (hba->msg_done) break; DELAY(1000); } return hba->msg_done? 
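/* 0 on success, -1 on timeout */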
0 : -1; } static int hptiop_get_config_itl(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { u_int32_t req32; config->header.size = sizeof(struct hpt_iop_request_get_config); config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_header) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } bus_space_read_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_get_config) >> 2); BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_get_config_mv(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_get_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: get config send cmd failed")); return -1; } *config = *req; return 0; } static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba, struct hpt_iop_request_get_config * config) { struct hpt_iop_request_get_config *info = hba->u.mvfrey.config; if (info->header.size != sizeof(struct hpt_iop_request_get_config) || info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) { KdPrint(("hptiop: header size %x/%x type %x/%x", info->header.size, (int)sizeof(struct hpt_iop_request_get_config), info->header.type, IOP_REQUEST_TYPE_GET_CONFIG)); return -1; } config->interface_version = info->interface_version; config->firmware_version = info->firmware_version; config->max_requests = info->max_requests; config->request_size = info->request_size; config->max_sg_count = info->max_sg_count; config->data_transfer_length = info->data_transfer_length; config->alignment_mask = info->alignment_mask; config->max_devices = info->max_devices; config->sdram_size = info->sdram_size; KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x", config->max_requests, config->request_size, config->data_transfer_length, config->max_devices, config->sdram_size)); return 0; } static int hptiop_set_config_itl(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { u_int32_t req32; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return -1; config->header.size = sizeof(struct hpt_iop_request_set_config); config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; config->header.result = IOP_RESULT_PENDING; config->header.context = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)config, sizeof(struct hpt_iop_request_set_config) >> 2); if (hptiop_send_sync_request_itl(hba, req32, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } static int hptiop_set_config_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct 
hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.flags = 0; req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; if (hptiop_send_sync_request_mv(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_set_config *config) { struct hpt_iop_request_set_config *req; if (!(req = hba->ctlcfg_ptr)) return -1; memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), (u_int8_t *)config + sizeof(struct hpt_iop_request_header), sizeof(struct hpt_iop_request_set_config) - sizeof(struct hpt_iop_request_header)); req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; req->header.size = sizeof(struct hpt_iop_request_set_config); req->header.result = IOP_RESULT_PENDING; if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) { KdPrint(("hptiop: set config send cmd failed")); return -1; } return 0; } static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, u_int32_t req32, struct hpt_iop_ioctl_param *pParams) { u_int64_t temp64; struct hpt_iop_request_ioctl_command req; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; req.header.result = IOP_RESULT_PENDING; req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req.inbuf_size = pParams->nInBufferSize; req.outbuf_size = pParams->nOutBufferSize; req.bytes_returned = 0; bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); hptiop_lock_adapter(hba); BUS_SPACE_WRT4_ITL(inbound_queue, req32); BUS_SPACE_RD4_ITL(outbound_intstatus); bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); while (temp64) { if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) break; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + offsetof(struct hpt_iop_request_ioctl_command, header.context), (u_int32_t *)&temp64, 2); } hptiop_unlock_adapter(hba); return 0; } static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { if (copyin((u_int8_t *)user + i, &byte, 1)) return -1; bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); } return 0; } static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) { unsigned char byte; int i; for (i=0; i<size; i++) { byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); if (copyout(&byte, (u_int8_t *)user + i, 1)) return -1; } return 0; } static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams) { u_int32_t req32; u_int32_t result; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (req32 == IOPMU_QUEUE_EMPTY) return EFAULT; if
(pParams->nInBufferSize) if (hptiop_bus_space_copyin(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf), (void *)pParams->lpInBuffer, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) goto invalid; result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + offsetof(struct hpt_iop_request_ioctl_command, header.result)); if (result == IOP_RESULT_SUCCESS) { if (pParams->nOutBufferSize) if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, buf) + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) { if (hptiop_bus_space_copyout(hba, req32 + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), (void *)pParams->lpBytesReturned, sizeof(unsigned long))) goto invalid; } BUS_SPACE_WRT4_ITL(outbound_queue, req32); return 0; } else{ invalid: BUS_SPACE_WRT4_ITL(outbound_queue, req32); return EFAULT; } } static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t req_phy; int size = 0; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; - size = size > 3 ? 
3 : size; + size = imin(3, size); req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; hptiop_mv_inbound_write(req_phy, hba); BUS_SPACE_RD4_MV0(outbound_intmask); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mv(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if (pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_request_ioctl_command *req, struct hpt_iop_ioctl_param *pParams) { u_int64_t phy_addr; u_int32_t index; phy_addr = hba->ctlcfgcmd_phy; if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > (hba->max_request_size - offsetof(struct hpt_iop_request_ioctl_command, buf))) { device_printf(hba->pcidev, "request size beyond max value"); return -1; } req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); req->inbuf_size = pParams->nInBufferSize; req->outbuf_size = pParams->nOutBufferSize; req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + pParams->nInBufferSize; req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; req->header.result = IOP_RESULT_PENDING; req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST | IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((phy_addr >> 16) & 0xffff0000); req->header.context = ((phy_addr & 0xffffffff) << 32 ) | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = phy_addr; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); while (hba->config_done == 0) { if (hptiop_sleep(hba, req, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0) continue; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); } return 0; } static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param *pParams) { struct hpt_iop_request_ioctl_command *req; if ((pParams->Magic != HPT_IOCTL_MAGIC) && (pParams->Magic != HPT_IOCTL_MAGIC32)) return EFAULT; req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); hba->config_done = 0; hptiop_lock_adapter(hba); if (pParams->nInBufferSize) if (copyin((void *)pParams->lpInBuffer, req->buf, pParams->nInBufferSize)) goto invalid; if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams)) goto invalid; if (hba->config_done == 1) { if 
(pParams->nOutBufferSize) if (copyout(req->buf + ((pParams->nInBufferSize + 3) & ~3), (void *)pParams->lpOutBuffer, pParams->nOutBufferSize)) goto invalid; if (pParams->lpBytesReturned) if (copyout(&req->bytes_returned, (void*)pParams->lpBytesReturned, sizeof(u_int32_t))) goto invalid; hptiop_unlock_adapter(hba); return 0; } else{ invalid: hptiop_unlock_adapter(hba); return EFAULT; } } static int hptiop_rescan_bus(struct hpt_iop_hba * hba) { union ccb *ccb; if ((ccb = xpt_alloc_ccb()) == NULL) return(ENOMEM); if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_free_ccb(ccb); return(EIO); } xpt_rescan(ccb); return(0); } static bus_dmamap_callback_t hptiop_map_srb; static bus_dmamap_callback_t hptiop_post_scsi_command; static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg; static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop base adrress.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.itl.mu = (struct hpt_iopmu_itl *) rman_get_virtual(hba->bar0_res); if (!hba->u.itl.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc mem res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mv.regs = (struct hpt_iopmv_regs *) rman_get_virtual(hba->bar0_res); if (!hba->u.mv.regs) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); if (!hba->u.mv.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba) { hba->bar0_rid = 0x10; hba->bar0_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); if (hba->bar0_res == NULL) { device_printf(hba->pcidev, "failed to get iop bar0.\n"); return -1; } hba->bar0t = rman_get_bustag(hba->bar0_res); hba->bar0h = rman_get_bushandle(hba->bar0_res); hba->u.mvfrey.config = (struct hpt_iop_request_get_config *) rman_get_virtual(hba->bar0_res); if (!hba->u.mvfrey.config) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); return -1; } 
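/*
 * Editor's aside, not part of the driver: a minimal stand-alone model of the
 * inbound-list write-pointer update that the mvfrey paths above repeat
 * (hptiop_send_sync_request_mvfrey, hptiop_post_ioctl_command_mvfrey).  The
 * low 14 bits (mask 0x3fff) index the circular list; on wrap the index bits
 * are cleared and a generation bit is flipped, presumably so the controller
 * can tell a fresh pass over the list from the previous one.  The 0x4000
 * toggle value is an assumption chosen to sit just above the 0x3fff mask;
 * the driver's real constant is CL_POINTER_TOGGLE, whose value is not shown
 * in this diff.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_INDEX_MASK		0x3fffu
#define RING_TOGGLE_BIT		0x4000u	/* assumed CL_POINTER_TOGGLE value */

static uint32_t
advance_wptr(uint32_t wptr, uint32_t list_count, uint32_t *index)
{
	wptr++;
	*index = wptr & RING_INDEX_MASK;
	if (*index == list_count) {		/* walked past the last slot */
		*index = 0;
		wptr &= ~RING_INDEX_MASK;	/* restart the index bits */
		wptr ^= RING_TOGGLE_BIT;	/* flip the generation bit */
	}
	return (wptr);
}

int
main(void)
{
	uint32_t wptr = 0, index;
	int i;

	/* With a 4-entry list the generation bit flips every fourth post. */
	for (i = 0; i < 10; i++) {
		wptr = advance_wptr(wptr, 4, &index);
		printf("wptr=%#x index=%u\n", wptr, index);
	}
	return (0);
}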
hba->bar2_rid = 0x18; hba->bar2_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); if (hba->bar2_res == NULL) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); device_printf(hba->pcidev, "failed to get iop bar2.\n"); return -1; } hba->bar2t = rman_get_bustag(hba->bar2_res); hba->bar2h = rman_get_bushandle(hba->bar2_res); hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res); if (!hba->u.mvfrey.mu) { bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); return -1; } return 0; } static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); } static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba) { if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); } static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) { if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 0x800 - 0x8, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, MVIOP_IOCTLCFG_SIZE, hptiop_mv_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba) { u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl); list_count >>= 16; if (list_count == 0) { return -1; } hba->u.mvfrey.list_count = list_count; hba->u.mvfrey.internal_mem_size = 0x800 + list_count * sizeof(struct mvfrey_inlist_entry) + list_count * sizeof(struct mvfrey_outlist_entry) + sizeof(int); if (bus_dma_tag_create(hba->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, hba->u.mvfrey.internal_mem_size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &hba->ctlcfg_dmat)) { device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); return -1; } if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->ctlcfg_dmamap) != 0) { device_printf(hba->pcidev, "bus_dmamem_alloc failed!\n"); bus_dma_tag_destroy(hba->ctlcfg_dmat); return -1; } if (bus_dmamap_load(hba->ctlcfg_dmat, hba->ctlcfg_dmamap, hba->ctlcfg_ptr, hba->u.mvfrey.internal_mem_size, hptiop_mvfrey_map_ctlcfg, hba, 0)) { device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); if (hba->ctlcfg_dmat) { bus_dmamem_free(hba->ctlcfg_dmat, 
hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return -1; } return 0; } static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) { return 0; } static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba) { if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } return 0; } static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba) { u_int32_t i = 100; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000)) return -1; /* wait 100ms for MCU ready */ while(i--) { DELAY(1000); } BUS_SPACE_WRT4_MVFREY2(inbound_base, hba->u.mvfrey.inlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(inbound_base_high, (hba->u.mvfrey.inlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_base, hba->u.mvfrey.outlist_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_base_high, (hba->u.mvfrey.outlist_phy >> 16) >> 16); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base, hba->u.mvfrey.outlist_cptr_phy & 0xffffffff); BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high, (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16); hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1) | CL_POINTER_TOGGLE; hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1; return 0; } /* * CAM driver interface */ static device_method_t driver_methods[] = { /* Device interface */ DEVMETHOD(device_probe, hptiop_probe), DEVMETHOD(device_attach, hptiop_attach), DEVMETHOD(device_detach, hptiop_detach), DEVMETHOD(device_shutdown, hptiop_shutdown), { 0, 0 } }; static struct hptiop_adapter_ops hptiop_itl_ops = { .family = INTEL_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_itl, .internal_memalloc = 0, .internal_memfree = hptiop_internal_memfree_itl, .alloc_pci_res = hptiop_alloc_pci_res_itl, .release_pci_res = hptiop_release_pci_res_itl, .enable_intr = hptiop_enable_intr_itl, .disable_intr = hptiop_disable_intr_itl, .get_config = hptiop_get_config_itl, .set_config = hptiop_set_config_itl, .iop_intr = hptiop_intr_itl, .post_msg = hptiop_post_msg_itl, .post_req = hptiop_post_req_itl, .do_ioctl = hptiop_do_ioctl_itl, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mv_ops = { .family = MV_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mv, .internal_memalloc = hptiop_internal_memalloc_mv, .internal_memfree = hptiop_internal_memfree_mv, .alloc_pci_res = hptiop_alloc_pci_res_mv, .release_pci_res = hptiop_release_pci_res_mv, .enable_intr = hptiop_enable_intr_mv, .disable_intr = hptiop_disable_intr_mv, .get_config = hptiop_get_config_mv, .set_config = hptiop_set_config_mv, .iop_intr = hptiop_intr_mv, .post_msg = hptiop_post_msg_mv, .post_req = hptiop_post_req_mv, .do_ioctl = hptiop_do_ioctl_mv, .reset_comm = 0, }; static struct hptiop_adapter_ops hptiop_mvfrey_ops = { .family = MVFREY_BASED_IOP, .iop_wait_ready = hptiop_wait_ready_mvfrey, .internal_memalloc = hptiop_internal_memalloc_mvfrey, .internal_memfree = hptiop_internal_memfree_mvfrey, .alloc_pci_res = hptiop_alloc_pci_res_mvfrey, .release_pci_res = hptiop_release_pci_res_mvfrey, .enable_intr = hptiop_enable_intr_mvfrey, .disable_intr = 
hptiop_disable_intr_mvfrey, .get_config = hptiop_get_config_mvfrey, .set_config = hptiop_set_config_mvfrey, .iop_intr = hptiop_intr_mvfrey, .post_msg = hptiop_post_msg_mvfrey, .post_req = hptiop_post_req_mvfrey, .do_ioctl = hptiop_do_ioctl_mvfrey, .reset_comm = hptiop_reset_comm_mvfrey, }; static driver_t hptiop_pci_driver = { driver_name, driver_methods, sizeof(struct hpt_iop_hba) }; DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); MODULE_DEPEND(hptiop, cam, 1, 1, 1); static int hptiop_probe(device_t dev) { struct hpt_iop_hba *hba; u_int32_t id; static char buf[256]; int sas = 0; struct hptiop_adapter_ops *ops; if (pci_get_vendor(dev) != 0x1103) return (ENXIO); id = pci_get_device(dev); switch (id) { case 0x4520: case 0x4521: case 0x4522: sas = 1; case 0x3620: case 0x3622: case 0x3640: ops = &hptiop_mvfrey_ops; break; case 0x4210: case 0x4211: case 0x4310: case 0x4311: case 0x4320: case 0x4321: case 0x4322: sas = 1; case 0x3220: case 0x3320: case 0x3410: case 0x3520: case 0x3510: case 0x3511: case 0x3521: case 0x3522: case 0x3530: case 0x3540: case 0x3560: ops = &hptiop_itl_ops; break; case 0x3020: case 0x3120: case 0x3122: ops = &hptiop_mv_ops; break; default: return (ENXIO); } device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)); sprintf(buf, "RocketRAID %x %s Controller\n", id, sas ? "SAS" : "SATA"); device_set_desc_copy(dev, buf); hba = (struct hpt_iop_hba *)device_get_softc(dev); bzero(hba, sizeof(struct hpt_iop_hba)); hba->ops = ops; KdPrint(("hba->ops=%p\n", hba->ops)); return 0; } static int hptiop_attach(device_t dev) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); struct hpt_iop_request_get_config iop_config; struct hpt_iop_request_set_config set_config; int rid = 0; struct cam_devq *devq; struct ccb_setasync ccb; u_int32_t unit = device_get_unit(dev); device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", unit, driver_version); KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), hba->ops)); pci_enable_busmaster(dev); hba->pcidev = dev; hba->pciunit = unit; if (hba->ops->alloc_pci_res(hba)) return ENXIO; if (hba->ops->iop_wait_ready(hba, 2000)) { device_printf(dev, "adapter is not ready\n"); goto release_pci_res; } mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF); if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->parent_dmat /* tag */)) { device_printf(dev, "alloc parent_dmat failed\n"); goto release_pci_res; } if (hba->ops->family == MV_BASED_IOP) { if (hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } } if (hba->ops->get_config(hba, &iop_config)) { device_printf(dev, "get iop config failed.\n"); goto get_config_failed; } hba->firmware_version = iop_config.firmware_version; hba->interface_version = iop_config.interface_version; hba->max_requests = iop_config.max_requests; hba->max_devices = iop_config.max_devices; hba->max_request_size = iop_config.request_size; hba->max_sg_count = iop_config.max_sg_count; if (hba->ops->family == MVFREY_BASED_IOP) { if 
(hba->ops->internal_memalloc(hba)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_parent_tag; } if (hba->ops->reset_comm(hba)) { device_printf(dev, "reset comm failed\n"); goto get_config_failed; } } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 4, /* alignment */ BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ hba->max_sg_count, /* nsegments */ 0x20000, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &hba->lock, /* lockfuncarg */ &hba->io_dmat /* tag */)) { device_printf(dev, "alloc io_dmat failed\n"); goto get_config_failed; } if (bus_dma_tag_create(hba->parent_dmat,/* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &hba->srb_dmat /* tag */)) { device_printf(dev, "alloc srb_dmat failed\n"); goto destroy_io_dmat; } if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &hba->srb_dmamap) != 0) { device_printf(dev, "srb bus_dmamem_alloc failed!\n"); goto destroy_srb_dmat; } if (bus_dmamap_load(hba->srb_dmat, hba->srb_dmamap, hba->uncached_ptr, (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, hptiop_map_srb, hba, 0)) { device_printf(dev, "bus_dmamap_load failed!\n"); goto srb_dmamem_free; } if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { device_printf(dev, "cam_simq_alloc failed\n"); goto srb_dmamap_unload; } hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, hba, unit, &hba->lock, hba->max_requests - 1, 1, devq); if (!hba->sim) { device_printf(dev, "cam_sim_alloc failed\n"); cam_simq_free(devq); goto srb_dmamap_unload; } hptiop_lock_adapter(hba); if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "xpt_bus_register failed\n"); goto free_cam_sim; } if (xpt_create_path(&hba->path, /*periph */ NULL, cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "xpt_create_path failed\n"); goto deregister_xpt_bus; } hptiop_unlock_adapter(hba); bzero(&set_config, sizeof(set_config)); set_config.iop_id = unit; set_config.vbus_id = cam_sim_path(hba->sim); set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; if (hba->ops->set_config(hba, &set_config)) { device_printf(dev, "set iop config failed.\n"); goto free_hba_path; } xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); ccb.callback = hptiop_async; ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); rid = 0; if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE, NULL, hptiop_pci_intr, hba, &hba->irq_handle)) { device_printf(dev, "allocate intr function failed!\n"); goto free_irq_resource; } if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { device_printf(dev, "fail to start background task\n"); goto teartown_irq_resource; } hba->ops->enable_intr(hba); hba->initialized = 1; hba->ioctl_dev = 
make_dev(&hptiop_cdevsw, unit, UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); return 0; teartown_irq_resource: bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); free_irq_resource: bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); hptiop_lock_adapter(hba); free_hba_path: xpt_free_path(hba->path); deregister_xpt_bus: xpt_bus_deregister(cam_sim_path(hba->sim)); free_cam_sim: cam_sim_free(hba->sim, /*free devq*/ TRUE); hptiop_unlock_adapter(hba); srb_dmamap_unload: if (hba->uncached_ptr) bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); srb_dmamem_free: if (hba->uncached_ptr) bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap); destroy_srb_dmat: if (hba->srb_dmat) bus_dma_tag_destroy(hba->srb_dmat); destroy_io_dmat: if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); get_config_failed: hba->ops->internal_memfree(hba); destroy_parent_tag: if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); release_pci_res: if (hba->ops->release_pci_res) hba->ops->release_pci_res(hba); return ENXIO; } static int hptiop_detach(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int i; int error = EBUSY; hptiop_lock_adapter(hba); for (i = 0; i < hba->max_devices; i++) if (hptiop_os_query_remove_device(hba, i)) { device_printf(dev, "%d file system is busy. id=%d", hba->pciunit, i); goto out; } if ((error = hptiop_shutdown(dev)) != 0) goto out; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) goto out; hptiop_unlock_adapter(hba); hptiop_release_resource(hba); return (0); out: hptiop_unlock_adapter(hba); return error; } static int hptiop_shutdown(device_t dev) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); int error = 0; if (hba->flag & HPT_IOCTL_FLAG_OPEN) { device_printf(dev, "%d device is busy", hba->pciunit); return EBUSY; } hba->ops->disable_intr(hba); if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) error = EBUSY; return error; } static void hptiop_pci_intr(void *arg) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; hptiop_lock_adapter(hba); hba->ops->iop_intr(hba); hptiop_unlock_adapter(hba); } static void hptiop_poll(struct cam_sim *sim) { struct hpt_iop_hba *hba; hba = cam_sim_softc(sim); hba->ops->iop_intr(hba); } static void hptiop_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg) { } static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_ITL(outbound_intmask, ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); } static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG; BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); } static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) { u_int32_t int_mask; int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); BUS_SPACE_RD4_ITL(outbound_intstatus); } static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) { u_int32_t 
int_mask; int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); BUS_SPACE_RD4_MV0(outbound_intmask); } static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba) { BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0); BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable); BUS_SPACE_WRT4_MVFREY2(isr_enable, 0); BUS_SPACE_RD4_MVFREY2(isr_enable); BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0); BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable); } static void hptiop_reset_adapter(void *argv) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv; if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000)) return; hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000); } static void *hptiop_get_srb(struct hpt_iop_hba * hba) { struct hpt_iop_srb * srb; if (hba->srb_list) { srb = hba->srb_list; hba->srb_list = srb->next; return srb; } return NULL; } static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) { srb->next = hba->srb_list; hba->srb_list = srb; } static void hptiop_action(struct cam_sim *sim, union ccb *ccb) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); struct hpt_iop_srb * srb; int error; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: if (ccb->ccb_h.target_lun != 0 || ccb->ccb_h.target_id >= hba->max_devices || (ccb->ccb_h.flags & CAM_CDB_PHYS)) { ccb->ccb_h.status = CAM_TID_INVALID; xpt_done(ccb); return; } if ((srb = hptiop_get_srb(hba)) == NULL) { device_printf(hba->pcidev, "srb allocated failed"); ccb->ccb_h.status = CAM_REQ_CMP_ERR; xpt_done(ccb); return; } srb->ccb = ccb; error = bus_dmamap_load_ccb(hba->io_dmat, srb->dma_map, ccb, hptiop_post_scsi_command, srb, 0); if (error && error != EINPROGRESS) { device_printf(hba->pcidev, "%d bus_dmamap_load error %d", hba->pciunit, error); xpt_freeze_simq(hba->sim, 1); ccb->ccb_h.status = CAM_REQ_CMP_ERR; hptiop_free_srb(hba, srb); xpt_done(ccb); return; } return; case XPT_RESET_BUS: device_printf(hba->pcidev, "reset adapter"); hba->msg_done = 0; hptiop_reset_adapter(hba); break; case XPT_GET_TRAN_SETTINGS: case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET; cpi->hba_eng_cnt = 0; cpi->max_target = hba->max_devices; cpi->max_lun = 0; cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->initiator_id = hba->max_devices; cpi->base_transfer_speed = 3300; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); return; } static void hptiop_post_req_itl(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx; union ccb *ccb = srb->ccb; u_int8_t *cdb; if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; KdPrint(("ccb=%p %x-%x-%x\n", ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { u_int32_t iop_req32; struct 
hpt_iop_request_scsi_command req; iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); if (iop_req32 == IOPMU_QUEUE_EMPTY) { device_printf(hba->pcidev, "invalid req offset\n"); ccb->ccb_h.status = CAM_BUSY; bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req.sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req.cdb, ccb->csio.cdb_len); req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req.header.flags = 0; req.header.result = IOP_RESULT_PENDING; req.header.context = (u_int64_t)(unsigned long)srb; req.dataxfer_length = ccb->csio.dxfer_len; req.channel = 0; req.target = ccb->ccb_h.target_id; req.lun = ccb->ccb_h.target_lun; bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32, (u_int8_t *)&req, req.header.size); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32); } else { struct hpt_iop_request_scsi_command *req; req = (struct hpt_iop_request_scsi_command *)srb; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + nsegs*sizeof(struct hpt_iopsg); req->header.context = (u_int64_t)srb->index | IOPMU_QUEUE_ADDR_HOST_BIT; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); } if (hba->firmware_version > 0x01020000 || hba->interface_version > 0x01020000) { u_int32_t size_bits; if (req->header.size < 256) size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT; else if (req->header.size < 512) size_bits = IOPMU_QUEUE_ADDR_HOST_BIT; else size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT | IOPMU_QUEUE_ADDR_HOST_BIT; BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr | size_bits); } else BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr |IOPMU_QUEUE_ADDR_HOST_BIT); } } static void hptiop_post_req_mv(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, size; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = 
ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.context = (u_int64_t)srb->index << MVIOP_REQUEST_NUMBER_START_BIT | MVIOP_CMD_TYPE_SCSI; req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; size = req->header.size >> 8; hptiop_mv_inbound_write(req_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT - | (size > 3 ? 3 : size), hba); + | imin(3, size), hba); } static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs) { int idx, index; union ccb *ccb = srb->ccb; u_int8_t *cdb; struct hpt_iop_request_scsi_command *req; u_int64_t req_phy; req = (struct hpt_iop_request_scsi_command *)srb; req_phy = srb->phy_addr; if (ccb->csio.dxfer_len && nsegs > 0) { struct hpt_iopsg *psg = req->sg_list; for (idx = 0; idx < nsegs; idx++, psg++) { psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1; psg->size = segs[idx].ds_len; psg->eot = 0; } psg[-1].eot = 1; } if (ccb->ccb_h.flags & CAM_CDB_POINTER) cdb = ccb->csio.cdb_io.cdb_ptr; else cdb = ccb->csio.cdb_io.cdb_bytes; bcopy(cdb, req->cdb, ccb->csio.cdb_len); req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND; req->header.result = IOP_RESULT_PENDING; req->dataxfer_length = ccb->csio.dxfer_len; req->channel = 0; req->target = ccb->ccb_h.target_id; req->lun = ccb->ccb_h.target_lun; req->header.size = sizeof(struct hpt_iop_request_scsi_command) - sizeof(struct hpt_iopsg) + nsegs * sizeof(struct hpt_iopsg); if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREREAD); } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) bus_dmamap_sync(hba->io_dmat, srb->dma_map, BUS_DMASYNC_PREWRITE); req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT | IOP_REQUEST_FLAG_ADDR_BITS | ((req_phy >> 16) & 0xffff0000); req->header.context = ((req_phy & 0xffffffff) << 32 ) | srb->index << 4 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type; hba->u.mvfrey.inlist_wptr++; index = hba->u.mvfrey.inlist_wptr & 0x3fff; if (index == hba->u.mvfrey.list_count) { index = 0; hba->u.mvfrey.inlist_wptr &= ~0x3fff; hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE; } hba->u.mvfrey.inlist[index].addr = req_phy; hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4; BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr); BUS_SPACE_RD4_MVFREY2(inbound_write_ptr); if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) { callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba); } } static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg; union ccb *ccb = srb->ccb; struct hpt_iop_hba *hba = srb->hba; if (error || nsegs > hba->max_sg_count) { KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n", ccb->ccb_h.func_code, ccb->ccb_h.target_id, (uintmax_t)ccb->ccb_h.target_lun, nsegs)); ccb->ccb_h.status = CAM_BUSY; 
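/*
 * Editor's aside, not part of the driver: a stand-alone check of the change
 * this revision makes in the two hunks above, where the open-coded clamp
 * "size > 3 ? 3 : size" becomes imin(3, size) before the value is OR-ed into
 * the low bits of the request address passed to hptiop_mv_inbound_write().
 * imin() is defined locally here so the sketch compiles in userland; in the
 * kernel it comes from the libkern helpers.
 */
#include <assert.h>

static int
imin(int a, int b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	int size;

	/* The two spellings agree for every encoded size value. */
	for (size = 0; size < 16; size++)
		assert(imin(3, size) == (size > 3 ? 3 : size));
	return (0);
}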
bus_dmamap_unload(hba->io_dmat, srb->dma_map); hptiop_free_srb(hba, srb); xpt_done(ccb); return; } hba->ops->post_req(hba, srb, segs, nsegs); } static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); } static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg; char *p; u_int64_t phy; u_int32_t list_count = hba->u.mvfrey.list_count; phy = ((u_int64_t)segs->ds_addr + 0x1F) & ~(u_int64_t)0x1F; p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F) & ~0x1F); hba->ctlcfgcmd_phy = phy; hba->ctlcfg_ptr = p; p += 0x800; phy += 0x800; hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p; hba->u.mvfrey.inlist_phy = phy; p += list_count * sizeof(struct mvfrey_inlist_entry); phy += list_count * sizeof(struct mvfrey_inlist_entry); hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p; hba->u.mvfrey.outlist_phy = phy; p += list_count * sizeof(struct mvfrey_outlist_entry); phy += list_count * sizeof(struct mvfrey_outlist_entry); hba->u.mvfrey.outlist_cptr = (u_int32_t *)p; hba->u.mvfrey.outlist_cptr_phy = phy; } static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F; struct hpt_iop_srb *srb, *tmp_srb; int i; if (error || nsegs == 0) { device_printf(hba->pcidev, "hptiop_map_srb error"); return; } /* map srb */ srb = (struct hpt_iop_srb *) (((unsigned long)hba->uncached_ptr + 0x1F) & ~(unsigned long)0x1F); for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { tmp_srb = (struct hpt_iop_srb *) ((char *)srb + i * HPT_SRB_MAX_SIZE); if (((unsigned long)tmp_srb & 0x1F) == 0) { if (bus_dmamap_create(hba->io_dmat, 0, &tmp_srb->dma_map)) { device_printf(hba->pcidev, "dmamap create failed"); return; } bzero(tmp_srb, sizeof(struct hpt_iop_srb)); tmp_srb->hba = hba; tmp_srb->index = i; if (hba->ctlcfg_ptr == 0) {/*itl iop*/ tmp_srb->phy_addr = (u_int64_t)(u_int32_t) (phy_addr >> 5); if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G) tmp_srb->srb_flag = HPT_SRB_FLAG_HIGH_MEM_ACESS; } else { tmp_srb->phy_addr = phy_addr; } callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0); hptiop_free_srb(hba, tmp_srb); hba->srb[i] = tmp_srb; phy_addr += HPT_SRB_MAX_SIZE; } else { device_printf(hba->pcidev, "invalid alignment"); return; } } } static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg) { hba->msg_done = 1; } static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba, int target_id) { struct cam_periph *periph = NULL; struct cam_path *path; int status, retval = 0; status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0); if (status == CAM_REQ_CMP) { if ((periph = cam_periph_find(path, "da")) != NULL) { if (periph->refcount >= 1) { device_printf(hba->pcidev, "%d ," "target_id=0x%x," "refcount=%d", hba->pciunit, target_id, periph->refcount); retval = -1; } } xpt_free_path(path); } return retval; } static void hptiop_release_resource(struct hpt_iop_hba *hba) { int i; if (hba->ioctl_dev) destroy_dev(hba->ioctl_dev); if (hba->path) { struct ccb_setasync ccb; xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); ccb.ccb_h.func_code = XPT_SASYNC_CB; ccb.event_enable = 0; ccb.callback = hptiop_async; 
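/*
 * Editor's aside, not part of the driver: a stand-alone sketch of the
 * align-up idiom used just above by hptiop_map_srb() and the map_ctlcfg
 * callbacks, which round both the bus address and the matching virtual
 * pointer up to the next 32-byte boundary with (x + 0x1F) & ~0x1F so every
 * request handed to the controller starts on a 32-byte-aligned address.
 * HPT_ALIGN is a local name for illustration only.
 */
#include <assert.h>
#include <stdint.h>

#define HPT_ALIGN	32u

static uint64_t
align_up_32(uint64_t x)
{
	return ((x + (HPT_ALIGN - 1)) & ~(uint64_t)(HPT_ALIGN - 1));
}

int
main(void)
{
	assert(align_up_32(0x1000) == 0x1000);		/* already aligned */
	assert(align_up_32(0x1001) == 0x1020);		/* rounded up */
	assert((align_up_32(0x12345) & 0x1F) == 0);	/* always 32-byte aligned */
	return (0);
}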
ccb.callback_arg = hba->sim; xpt_action((union ccb *)&ccb); xpt_free_path(hba->path); } if (hba->irq_handle) bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle); if (hba->sim) { hptiop_lock_adapter(hba); xpt_bus_deregister(cam_sim_path(hba->sim)); cam_sim_free(hba->sim, TRUE); hptiop_unlock_adapter(hba); } if (hba->ctlcfg_dmat) { bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); bus_dmamem_free(hba->ctlcfg_dmat, hba->ctlcfg_ptr, hba->ctlcfg_dmamap); bus_dma_tag_destroy(hba->ctlcfg_dmat); } for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) { struct hpt_iop_srb *srb = hba->srb[i]; if (srb->dma_map) bus_dmamap_destroy(hba->io_dmat, srb->dma_map); callout_drain(&srb->timeout); } if (hba->srb_dmat) { bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap); bus_dma_tag_destroy(hba->srb_dmat); } if (hba->io_dmat) bus_dma_tag_destroy(hba->io_dmat); if (hba->parent_dmat) bus_dma_tag_destroy(hba->parent_dmat); if (hba->irq_res) bus_release_resource(hba->pcidev, SYS_RES_IRQ, 0, hba->irq_res); if (hba->bar0_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar0_rid, hba->bar0_res); if (hba->bar2_res) bus_release_resource(hba->pcidev, SYS_RES_MEMORY, hba->bar2_rid, hba->bar2_res); mtx_destroy(&hba->lock); } Index: user/ngie/release-pkg-fix-tests/sys/dev/ioat/ioat.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/ioat/ioat.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/ioat/ioat.c (revision 299055) @@ -1,2087 +1,2087 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ioat.h" #include "ioat_hw.h" #include "ioat_internal.h" #ifndef BUS_SPACE_MAXADDR_40BIT #define BUS_SPACE_MAXADDR_40BIT 0xFFFFFFFFFFULL #endif #define IOAT_INTR_TIMO (hz / 10) #define IOAT_REFLK (&ioat->submit_lock) static int ioat_probe(device_t device); static int ioat_attach(device_t device); static int ioat_detach(device_t device); static int ioat_setup_intr(struct ioat_softc *ioat); static int ioat_teardown_intr(struct ioat_softc *ioat); static int ioat3_attach(device_t device); static int ioat_start_channel(struct ioat_softc *ioat); static int ioat_map_pci_bar(struct ioat_softc *ioat); static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ioat_interrupt_handler(void *arg); static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); static int chanerr_to_errno(uint32_t); static void ioat_process_events(struct ioat_softc *ioat); static inline uint32_t ioat_get_active(struct ioat_softc *ioat); static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); static void ioat_free_ring(struct ioat_softc *, uint32_t size, struct ioat_descriptor **); static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc); static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *, int mflags); static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index); static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *, uint32_t size, boolean_t need_dscr, int mflags); static int ring_grow(struct ioat_softc *, uint32_t oldorder, struct ioat_descriptor **); static int ring_shrink(struct ioat_softc *, uint32_t oldorder, struct ioat_descriptor **); static void ioat_halted_debug(struct ioat_softc *, uint32_t); static void ioat_timer_callback(void *arg); static void dump_descriptor(void *hw_desc); static void ioat_submit_single(struct ioat_softc *ioat); static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error); static int ioat_reset_hw(struct ioat_softc *ioat); static void ioat_reset_hw_task(void *, int); static void ioat_setup_sysctl(device_t device); static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); static inline struct ioat_softc *ioat_get(struct ioat_softc *, enum ioat_ref_kind); static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind); static inline void _ioat_putn(struct ioat_softc *, uint32_t, enum ioat_ref_kind, boolean_t); static inline void ioat_putn(struct ioat_softc *, uint32_t, enum ioat_ref_kind); static inline void ioat_putn_locked(struct ioat_softc *, uint32_t, enum ioat_ref_kind); static void ioat_drain_locked(struct ioat_softc *); #define ioat_log_message(v, ...) do { \ if ((v) <= g_ioat_debug_level) { \ device_printf(ioat->device, __VA_ARGS__); \ } \ } while (0) MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); static int g_force_legacy_interrupts; SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); int g_ioat_debug_level = 0; SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 0, "Set log level (0-3) for ioat(4). 
Higher is more verbose."); /* * OS <-> Driver interface structures */ static device_method_t ioat_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ioat_probe), DEVMETHOD(device_attach, ioat_attach), DEVMETHOD(device_detach, ioat_detach), - { 0, 0 } + DEVMETHOD_END }; static driver_t ioat_pci_driver = { "ioat", ioat_pci_methods, sizeof(struct ioat_softc), }; static devclass_t ioat_devclass; DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); MODULE_VERSION(ioat, 1); /* * Private data structures */ static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; static int ioat_channel_index = 0; SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, "Number of IOAT channels attached"); static struct _pcsid { u_int32_t type; const char *desc; } pci_ids[] = { { 0x34308086, "TBG IOAT Ch0" }, { 0x34318086, "TBG IOAT Ch1" }, { 0x34328086, "TBG IOAT Ch2" }, { 0x34338086, "TBG IOAT Ch3" }, { 0x34298086, "TBG IOAT Ch4" }, { 0x342a8086, "TBG IOAT Ch5" }, { 0x342b8086, "TBG IOAT Ch6" }, { 0x342c8086, "TBG IOAT Ch7" }, { 0x37108086, "JSF IOAT Ch0" }, { 0x37118086, "JSF IOAT Ch1" }, { 0x37128086, "JSF IOAT Ch2" }, { 0x37138086, "JSF IOAT Ch3" }, { 0x37148086, "JSF IOAT Ch4" }, { 0x37158086, "JSF IOAT Ch5" }, { 0x37168086, "JSF IOAT Ch6" }, { 0x37178086, "JSF IOAT Ch7" }, { 0x37188086, "JSF IOAT Ch0 (RAID)" }, { 0x37198086, "JSF IOAT Ch1 (RAID)" }, { 0x3c208086, "SNB IOAT Ch0" }, { 0x3c218086, "SNB IOAT Ch1" }, { 0x3c228086, "SNB IOAT Ch2" }, { 0x3c238086, "SNB IOAT Ch3" }, { 0x3c248086, "SNB IOAT Ch4" }, { 0x3c258086, "SNB IOAT Ch5" }, { 0x3c268086, "SNB IOAT Ch6" }, { 0x3c278086, "SNB IOAT Ch7" }, { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, { 0x0e208086, "IVB IOAT Ch0" }, { 0x0e218086, "IVB IOAT Ch1" }, { 0x0e228086, "IVB IOAT Ch2" }, { 0x0e238086, "IVB IOAT Ch3" }, { 0x0e248086, "IVB IOAT Ch4" }, { 0x0e258086, "IVB IOAT Ch5" }, { 0x0e268086, "IVB IOAT Ch6" }, { 0x0e278086, "IVB IOAT Ch7" }, { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, { 0x2f208086, "HSW IOAT Ch0" }, { 0x2f218086, "HSW IOAT Ch1" }, { 0x2f228086, "HSW IOAT Ch2" }, { 0x2f238086, "HSW IOAT Ch3" }, { 0x2f248086, "HSW IOAT Ch4" }, { 0x2f258086, "HSW IOAT Ch5" }, { 0x2f268086, "HSW IOAT Ch6" }, { 0x2f278086, "HSW IOAT Ch7" }, { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, { 0x0c508086, "BWD IOAT Ch0" }, { 0x0c518086, "BWD IOAT Ch1" }, { 0x0c528086, "BWD IOAT Ch2" }, { 0x0c538086, "BWD IOAT Ch3" }, { 0x6f508086, "BDXDE IOAT Ch0" }, { 0x6f518086, "BDXDE IOAT Ch1" }, { 0x6f528086, "BDXDE IOAT Ch2" }, { 0x6f538086, "BDXDE IOAT Ch3" }, { 0x6f208086, "BDX IOAT Ch0" }, { 0x6f218086, "BDX IOAT Ch1" }, { 0x6f228086, "BDX IOAT Ch2" }, { 0x6f238086, "BDX IOAT Ch3" }, { 0x6f248086, "BDX IOAT Ch4" }, { 0x6f258086, "BDX IOAT Ch5" }, { 0x6f268086, "BDX IOAT Ch6" }, { 0x6f278086, "BDX IOAT Ch7" }, { 0x6f2e8086, "BDX IOAT Ch0 (RAID)" }, { 0x6f2f8086, "BDX IOAT Ch1 (RAID)" }, { 0x00000000, NULL } }; /* * OS <-> Driver linkage functions */ static int ioat_probe(device_t device) { struct _pcsid *ep; u_int32_t type; type = pci_get_devid(device); for (ep = pci_ids; ep->type; ep++) { if (ep->type == type) { device_set_desc(device, ep->desc); return (0); } } return (ENXIO); } static int ioat_attach(device_t device) { struct ioat_softc *ioat; int error; ioat = DEVICE2SOFTC(device); ioat->device = device; error = ioat_map_pci_bar(ioat); if (error != 0) goto err; ioat->version = ioat_read_cbver(ioat); if (ioat->version < 
IOAT_VER_3_0) { error = ENODEV; goto err; } error = ioat3_attach(device); if (error != 0) goto err; error = pci_enable_busmaster(device); if (error != 0) goto err; error = ioat_setup_intr(ioat); if (error != 0) goto err; error = ioat_reset_hw(ioat); if (error != 0) goto err; ioat_process_events(ioat); ioat_setup_sysctl(device); ioat->chan_idx = ioat_channel_index; ioat_channel[ioat_channel_index++] = ioat; ioat_test_attach(); err: if (error != 0) ioat_detach(device); return (error); } static int ioat_detach(device_t device) { struct ioat_softc *ioat; ioat = DEVICE2SOFTC(device); ioat_test_detach(); taskqueue_drain(taskqueue_thread, &ioat->reset_task); mtx_lock(IOAT_REFLK); ioat->quiescing = TRUE; ioat->destroying = TRUE; wakeup(&ioat->quiescing); ioat_channel[ioat->chan_idx] = NULL; ioat_drain_locked(ioat); mtx_unlock(IOAT_REFLK); ioat_teardown_intr(ioat); callout_drain(&ioat->timer); pci_disable_busmaster(device); if (ioat->pci_resource != NULL) bus_release_resource(device, SYS_RES_MEMORY, ioat->pci_resource_id, ioat->pci_resource); if (ioat->ring != NULL) ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); if (ioat->comp_update != NULL) { bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, ioat->comp_update_map); bus_dma_tag_destroy(ioat->comp_update_tag); } bus_dma_tag_destroy(ioat->hw_desc_tag); return (0); } static int ioat_teardown_intr(struct ioat_softc *ioat) { if (ioat->tag != NULL) bus_teardown_intr(ioat->device, ioat->res, ioat->tag); if (ioat->res != NULL) bus_release_resource(ioat->device, SYS_RES_IRQ, rman_get_rid(ioat->res), ioat->res); pci_release_msi(ioat->device); return (0); } static int ioat_start_channel(struct ioat_softc *ioat) { uint64_t status; uint32_t chanerr; int i; ioat_acquire(&ioat->dmaengine); ioat_null(&ioat->dmaengine, NULL, NULL, 0); ioat_release(&ioat->dmaengine); for (i = 0; i < 100; i++) { DELAY(1); status = ioat_get_chansts(ioat); if (is_ioat_idle(status)) return (0); } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_log_message(0, "could not start channel: " "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr, IOAT_CHANERR_STR); return (ENXIO); } /* * Initialize Hardware */ static int ioat3_attach(device_t device) { struct ioat_softc *ioat; struct ioat_descriptor **ring; struct ioat_descriptor *next; struct ioat_dma_hw_descriptor *dma_hw_desc; int i, num_descriptors; int error; uint8_t xfercap; error = 0; ioat = DEVICE2SOFTC(device); ioat->capabilities = ioat_read_dmacapability(ioat); ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities, IOAT_DMACAP_STR); xfercap = ioat_read_xfercap(ioat); ioat->max_xfer_size = 1 << xfercap; ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_SUPPORTED) != 0; if (ioat->intrdelay_supported) ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK; /* TODO: need to check DCA here if we ever do XOR/PQ */ mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF); callout_init(&ioat->timer, 1); TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat); /* Establish lock order for Witness */ mtx_lock(&ioat->submit_lock); mtx_lock(&ioat->cleanup_lock); mtx_unlock(&ioat->cleanup_lock); mtx_unlock(&ioat->submit_lock); ioat->is_resize_pending = FALSE; ioat->is_completion_pending = FALSE; ioat->is_reset_pending = FALSE; ioat->is_channel_running = FALSE; bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, 
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, &ioat->comp_update_tag); error = bus_dmamem_alloc(ioat->comp_update_tag, (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map); if (ioat->comp_update == NULL) return (ENOMEM); error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 0); if (error != 0) return (error); ioat->ring_size_order = IOAT_MIN_ORDER; num_descriptors = 1 << ioat->ring_size_order; bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0, BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct ioat_dma_hw_descriptor), 1, sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL, &ioat->hw_desc_tag); ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, M_ZERO | M_WAITOK); if (ioat->ring == NULL) return (ENOMEM); ring = ioat->ring; for (i = 0; i < num_descriptors; i++) { ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK); if (ring[i] == NULL) return (ENOMEM); ring[i]->id = i; } for (i = 0; i < num_descriptors - 1; i++) { next = ring[i + 1]; dma_hw_desc = ring[i]->u.dma; dma_hw_desc->next = next->hw_desc_bus_addr; } ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr; ioat->head = ioat->hw_head = 0; ioat->tail = 0; ioat->last_seen = 0; return (0); } static int ioat_map_pci_bar(struct ioat_softc *ioat) { ioat->pci_resource_id = PCIR_BAR(0); ioat->pci_resource = bus_alloc_resource_any(ioat->device, SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); if (ioat->pci_resource == NULL) { ioat_log_message(0, "unable to allocate pci resource\n"); return (ENODEV); } ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); return (0); } static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { struct ioat_softc *ioat = arg; KASSERT(error == 0, ("%s: error:%d", __func__, error)); ioat->comp_update_bus_addr = seg[0].ds_addr; } static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; KASSERT(error == 0, ("%s: error:%d", __func__, error)); baddr = arg; *baddr = segs->ds_addr; } /* * Interrupt setup and handlers */ static int ioat_setup_intr(struct ioat_softc *ioat) { uint32_t num_vectors; int error; boolean_t use_msix; boolean_t force_legacy_interrupts; use_msix = FALSE; force_legacy_interrupts = FALSE; if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { num_vectors = 1; pci_alloc_msix(ioat->device, &num_vectors); if (num_vectors == 1) use_msix = TRUE; } if (use_msix) { ioat->rid = 1; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_ACTIVE); } else { ioat->rid = 0; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_SHAREABLE | RF_ACTIVE); } if (ioat->res == NULL) { ioat_log_message(0, "bus_alloc_resource failed\n"); return (ENOMEM); } ioat->tag = NULL; error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag); if (error != 0) { ioat_log_message(0, "bus_setup_intr failed\n"); return (error); } ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); return (0); } static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat) { u_int32_t pciid; pciid = pci_get_devid(ioat->device); switch (pciid) { /* BWD: */ case 0x0c508086: case 0x0c518086: case 0x0c528086: case 0x0c538086: /* BDXDE: */ case 0x6f508086: case 0x6f518086: case 0x6f528086: case 
0x6f538086: return (TRUE); } return (FALSE); } static void ioat_interrupt_handler(void *arg) { struct ioat_softc *ioat = arg; ioat->stats.interrupts++; ioat_process_events(ioat); } static int chanerr_to_errno(uint32_t chanerr) { if (chanerr == 0) return (0); if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0) return (EFAULT); if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0) return (EIO); /* This one is probably our fault: */ if ((chanerr & IOAT_CHANERR_NDADDERR) != 0) return (EIO); return (EIO); } static void ioat_process_events(struct ioat_softc *ioat) { struct ioat_descriptor *desc; struct bus_dmadesc *dmadesc; uint64_t comp_update, status; uint32_t completed, chanerr; int error; mtx_lock(&ioat->cleanup_lock); completed = 0; comp_update = *ioat->comp_update; status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; CTR0(KTR_IOAT, __func__); if (status == ioat->last_seen) { /* * If we landed in process_events and nothing has been * completed, check for a timeout due to channel halt. */ comp_update = ioat_get_chansts(ioat); goto out; } while (1) { desc = ioat_get_ring_entry(ioat, ioat->tail); dmadesc = &desc->bus_dmadesc; CTR1(KTR_IOAT, "completing desc %d", ioat->tail); if (dmadesc->callback_fn != NULL) dmadesc->callback_fn(dmadesc->callback_arg, 0); completed++; ioat->tail++; if (desc->hw_desc_bus_addr == status) break; } ioat->last_seen = desc->hw_desc_bus_addr; if (ioat->head == ioat->tail) { ioat->is_completion_pending = FALSE; callout_reset(&ioat->timer, IOAT_INTR_TIMO, ioat_timer_callback, ioat); } ioat->stats.descriptors_processed += completed; out: ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); mtx_unlock(&ioat->cleanup_lock); if (completed != 0) { ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF); wakeup(&ioat->tail); } if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update)) return; ioat->stats.channel_halts++; /* * Fatal programming error on this DMA channel. Flush any outstanding * work with error status and restart the engine. 
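 * Specifically, the recovery path below completes every still-active descriptor with an errno derived from CHANERR (via chanerr_to_errno()), clears the CHANERR register, and queues ioat_reset_hw_task() on the thread taskqueue to reset and restart the channel.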
*/ ioat_log_message(0, "Channel halted due to fatal programming error\n"); mtx_lock(&ioat->submit_lock); mtx_lock(&ioat->cleanup_lock); ioat->quiescing = TRUE; chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_halted_debug(ioat, chanerr); ioat->stats.last_halt_chanerr = chanerr; while (ioat_get_active(ioat) > 0) { desc = ioat_get_ring_entry(ioat, ioat->tail); dmadesc = &desc->bus_dmadesc; CTR1(KTR_IOAT, "completing err desc %d", ioat->tail); if (dmadesc->callback_fn != NULL) dmadesc->callback_fn(dmadesc->callback_arg, chanerr_to_errno(chanerr)); ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF); ioat->tail++; ioat->stats.descriptors_processed++; ioat->stats.descriptors_error++; } /* Clear error status */ ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); mtx_unlock(&ioat->cleanup_lock); mtx_unlock(&ioat->submit_lock); ioat_log_message(0, "Resetting channel to recover from error\n"); error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task); KASSERT(error == 0, ("%s: taskqueue_enqueue failed: %d", __func__, error)); } static void ioat_reset_hw_task(void *ctx, int pending __unused) { struct ioat_softc *ioat; int error; ioat = ctx; ioat_log_message(1, "%s: Resetting channel\n", __func__); error = ioat_reset_hw(ioat); KASSERT(error == 0, ("%s: reset failed: %d", __func__, error)); (void)error; } /* * User API functions */ bus_dmaengine_t ioat_get_dmaengine(uint32_t index, int flags) { struct ioat_softc *ioat; KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0, ("invalid flags: 0x%08x", flags)); KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK), ("invalid wait | nowait")); if (index >= ioat_channel_index) return (NULL); ioat = ioat_channel[index]; if (ioat == NULL || ioat->destroying) return (NULL); if (ioat->quiescing) { if ((flags & M_NOWAIT) != 0) return (NULL); mtx_lock(IOAT_REFLK); while (ioat->quiescing && !ioat->destroying) msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0); mtx_unlock(IOAT_REFLK); if (ioat->destroying) return (NULL); } /* * There's a race here between the quiescing check and HW reset or * module destroy. 
*/ return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine); } void ioat_put_dmaengine(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); ioat_put(ioat, IOAT_DMAENGINE_REF); } int ioat_get_hwversion(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->version); } size_t ioat_get_max_io_size(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->max_xfer_size); } int ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); if (!ioat->intrdelay_supported) return (ENODEV); if (delay > ioat->intrdelay_max) return (ERANGE); ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay); ioat->cached_intrdelay = ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK; return (0); } uint16_t ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); return (ioat->intrdelay_max); } void ioat_acquire(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); mtx_lock(&ioat->submit_lock); CTR0(KTR_IOAT, __func__); } int ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags) { struct ioat_softc *ioat; int error; ioat = to_ioat_softc(dmaengine); ioat_acquire(dmaengine); error = ioat_reserve_space(ioat, n, mflags); if (error != 0) ioat_release(dmaengine); return (error); } void ioat_release(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR0(KTR_IOAT, __func__); ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head); mtx_unlock(&ioat->submit_lock); } static struct ioat_descriptor * ioat_op_generic(struct ioat_softc *ioat, uint8_t op, uint32_t size, uint64_t src, uint64_t dst, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_generic_hw_descriptor *hw_desc; struct ioat_descriptor *desc; int mflags; mtx_assert(&ioat->submit_lock, MA_OWNED); KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0, ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS)); if ((flags & DMA_NO_WAIT) != 0) mflags = M_NOWAIT; else mflags = M_WAITOK; if (size > ioat->max_xfer_size) { ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n", __func__, ioat->max_xfer_size, (unsigned)size); return (NULL); } if (ioat_reserve_space(ioat, 1, mflags) != 0) return (NULL); desc = ioat_get_ring_entry(ioat, ioat->head); hw_desc = desc->u.generic; hw_desc->u.control_raw = 0; hw_desc->u.control_generic.op = op; hw_desc->u.control_generic.completion_update = 1; if ((flags & DMA_INT_EN) != 0) hw_desc->u.control_generic.int_enable = 1; if ((flags & DMA_FENCE) != 0) hw_desc->u.control_generic.fence = 1; hw_desc->size = size; hw_desc->src_addr = src; hw_desc->dest_addr = dst; desc->bus_dmadesc.callback_fn = callback_fn; desc->bus_dmadesc.callback_arg = callback_arg; return (desc); } struct bus_dmadesc * ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; hw_desc->u.control.null = 1; ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, 
bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); if (((src | dst) & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1, bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n", __func__); return (NULL); } if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) { ioat_log_message(0, "%s: Addresses must be page-aligned\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.dma; if (src2 != src1 + PAGE_SIZE) { hw_desc->u.control.src_page_break = 1; hw_desc->next_src_addr = src2; } if (dst2 != dst1 + PAGE_SIZE) { hw_desc->u.control.dest_page_break = 1; hw_desc->next_dest_addr = dst2; } if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_crc32_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; uint32_t teststore; uint8_t op; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) { ioat_log_message(0, "%s: Device lacks MOVECRC capability\n", __func__); return (NULL); } if (((src | dst) & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n", __func__); return (NULL); } teststore = (flags & _DMA_CRC_TESTSTORE); if (teststore == _DMA_CRC_TESTSTORE) { ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__); return (NULL); } if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) { ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n", __func__); return (NULL); } switch (teststore) { case DMA_CRC_STORE: op = IOAT_OP_MOVECRC_STORE; break; case DMA_CRC_TEST: op = IOAT_OP_MOVECRC_TEST; break; default: KASSERT(teststore == 0, ("bogus")); op = IOAT_OP_MOVECRC; break; } if ((flags & DMA_CRC_INLINE) == 0 && (crcptr & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of crcptr invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn, callback_arg, flags & ~_DMA_CRC_FLAGS); if (desc == NULL) return (NULL); hw_desc = desc->u.crc32; if ((flags & DMA_CRC_INLINE) == 0) hw_desc->crc_address = crcptr; else hw_desc->u.control.crc_location = 1; if (initialseed != NULL) { hw_desc->u.control.use_seed = 1; hw_desc->seed = *initialseed; } if 
(g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_crc32_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; uint32_t teststore; uint8_t op; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) { ioat_log_message(0, "%s: Device lacks CRC capability\n", __func__); return (NULL); } if ((src & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of src invalid\n", __func__); return (NULL); } teststore = (flags & _DMA_CRC_TESTSTORE); if (teststore == _DMA_CRC_TESTSTORE) { ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__); return (NULL); } if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) { ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n", __func__); return (NULL); } switch (teststore) { case DMA_CRC_STORE: op = IOAT_OP_CRC_STORE; break; case DMA_CRC_TEST: op = IOAT_OP_CRC_TEST; break; default: KASSERT(teststore == 0, ("bogus")); op = IOAT_OP_CRC; break; } if ((flags & DMA_CRC_INLINE) == 0 && (crcptr & (0xffffffull << 40)) != 0) { ioat_log_message(0, "%s: High 24 bits of crcptr invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn, callback_arg, flags & ~_DMA_CRC_FLAGS); if (desc == NULL) return (NULL); hw_desc = desc->u.crc32; if ((flags & DMA_CRC_INLINE) == 0) hw_desc->crc_address = crcptr; else hw_desc->u.control.crc_location = 1; if (initialseed != NULL) { hw_desc->u.control.use_seed = 1; hw_desc->seed = *initialseed; } if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_fill_hw_descriptor *hw_desc; struct ioat_descriptor *desc; struct ioat_softc *ioat; CTR0(KTR_IOAT, __func__); ioat = to_ioat_softc(dmaengine); if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) { ioat_log_message(0, "%s: Device lacks BFILL capability\n", __func__); return (NULL); } if ((dst & (0xffffull << 48)) != 0) { ioat_log_message(0, "%s: High 16 bits of dst invalid\n", __func__); return (NULL); } desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst, callback_fn, callback_arg, flags); if (desc == NULL) return (NULL); hw_desc = desc->u.fill; if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); ioat_submit_single(ioat); return (&desc->bus_dmadesc); } /* * Ring Management */ static inline uint32_t ioat_get_active(struct ioat_softc *ioat) { return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); } static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat) { return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); } static struct ioat_descriptor * ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags) { struct ioat_generic_hw_descriptor *hw_desc; struct ioat_descriptor *desc; int error, busdmaflag; error = ENOMEM; hw_desc = NULL; if ((mflags & M_WAITOK) != 0) busdmaflag = BUS_DMA_WAITOK; else busdmaflag = BUS_DMA_NOWAIT; desc = malloc(sizeof(*desc), M_IOAT, mflags); if (desc == NULL) goto out; bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc, BUS_DMA_ZERO | 
busdmaflag, &ioat->hw_desc_map); if (hw_desc == NULL) goto out; memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc)); desc->u.generic = hw_desc; error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr, busdmaflag); if (error) goto out; out: if (error) { ioat_free_ring_entry(ioat, desc); return (NULL); } return (desc); } static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc) { if (desc == NULL) return; if (desc->u.generic) bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic, ioat->hw_desc_map); free(desc, M_IOAT); } /* * Reserves space in this IOAT descriptor ring by ensuring enough slots remain * for 'num_descs'. * * If mflags contains M_WAITOK, blocks until enough space is available. * * Returns zero on success, or an errno on error. If num_descs is beyond the * maximum ring size, returns EINVAL; if allocation would block and mflags * contains M_NOWAIT, returns EAGAIN. * * Must be called with the submit_lock held; returns with the lock held. The * lock may be dropped to allocate the ring. * * (The submit_lock is needed to add any entries to the ring, so callers are * assured enough room is available.) */ static int ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) { struct ioat_descriptor **new_ring; uint32_t order; int error; mtx_assert(&ioat->submit_lock, MA_OWNED); error = 0; if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) { error = EINVAL; goto out; } if (ioat->quiescing) { error = ENXIO; goto out; } for (;;) { if (ioat_get_ring_space(ioat) >= num_descs) goto out; order = ioat->ring_size_order; if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) { if ((mflags & M_WAITOK) != 0) { msleep(&ioat->tail, &ioat->submit_lock, 0, "ioat_rsz", 0); continue; } error = EAGAIN; break; } ioat->is_resize_pending = TRUE; for (;;) { mtx_unlock(&ioat->submit_lock); new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1), TRUE, mflags); mtx_lock(&ioat->submit_lock); KASSERT(ioat->ring_size_order == order, ("is_resize_pending should protect order")); if (new_ring == NULL) { KASSERT((mflags & M_WAITOK) == 0, ("allocation failed")); error = EAGAIN; break; } error = ring_grow(ioat, order, new_ring); if (error == 0) break; } ioat->is_resize_pending = FALSE; wakeup(&ioat->tail); if (error) break; } out: mtx_assert(&ioat->submit_lock, MA_OWNED); return (error); } static struct ioat_descriptor ** ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr, int mflags) { struct ioat_descriptor **ring; uint32_t i; int error; KASSERT(size > 0 && powerof2(size), ("bogus size")); ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags); if (ring == NULL) return (NULL); if (need_dscr) { error = ENOMEM; for (i = size / 2; i < size; i++) { ring[i] = ioat_alloc_ring_entry(ioat, mflags); if (ring[i] == NULL) goto out; ring[i]->id = i; } } error = 0; out: if (error != 0 && ring != NULL) { ioat_free_ring(ioat, size, ring); ring = NULL; } return (ring); } static void ioat_free_ring(struct ioat_softc *ioat, uint32_t size, struct ioat_descriptor **ring) { uint32_t i; for (i = 0; i < size; i++) { if (ring[i] != NULL) ioat_free_ring_entry(ioat, ring[i]); } free(ring, M_IOAT); } static struct ioat_descriptor * ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index) { return (ioat->ring[index % (1 << ioat->ring_size_order)]); } static int ring_grow(struct ioat_softc *ioat, uint32_t oldorder, struct ioat_descriptor **newring) { struct ioat_descriptor *tmp, *next;
struct ioat_dma_hw_descriptor *hw; uint32_t oldsize, newsize, head, tail, i, end; int error; CTR0(KTR_IOAT, __func__); mtx_assert(&ioat->submit_lock, MA_OWNED); if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) { error = EINVAL; goto out; } oldsize = (1 << oldorder); newsize = (1 << (oldorder + 1)); mtx_lock(&ioat->cleanup_lock); head = ioat->head & (oldsize - 1); tail = ioat->tail & (oldsize - 1); /* Copy old descriptors to new ring */ for (i = 0; i < oldsize; i++) newring[i] = ioat->ring[i]; /* * If head has wrapped but tail hasn't, we must swap some descriptors * around so that tail can increment directly to head. */ if (head < tail) { for (i = 0; i <= head; i++) { tmp = newring[oldsize + i]; newring[oldsize + i] = newring[i]; newring[oldsize + i]->id = oldsize + i; newring[i] = tmp; newring[i]->id = i; } head += oldsize; } KASSERT(head >= tail, ("invariants")); /* Head didn't wrap; we only need to link in oldsize..newsize */ if (head < oldsize) { i = oldsize - 1; end = newsize; } else { /* Head did wrap; link newhead..newsize and 0..oldhead */ i = head; end = newsize + (head - oldsize) + 1; } /* * Fix up hardware ring, being careful not to trample the active * section (tail -> head). */ for (; i < end; i++) { KASSERT((i & (newsize - 1)) < tail || (i & (newsize - 1)) >= head, ("trampling snake")); next = newring[(i + 1) & (newsize - 1)]; hw = newring[i & (newsize - 1)]->u.dma; hw->next = next->hw_desc_bus_addr; } free(ioat->ring, M_IOAT); ioat->ring = newring; ioat->ring_size_order = oldorder + 1; ioat->tail = tail; ioat->head = head; error = 0; mtx_unlock(&ioat->cleanup_lock); out: if (error) ioat_free_ring(ioat, (1 << (oldorder + 1)), newring); return (error); } static int ring_shrink(struct ioat_softc *ioat, uint32_t oldorder, struct ioat_descriptor **newring) { struct ioat_dma_hw_descriptor *hw; struct ioat_descriptor *ent, *next; uint32_t oldsize, newsize, current_idx, new_idx, i; int error; CTR0(KTR_IOAT, __func__); mtx_assert(&ioat->submit_lock, MA_OWNED); if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) { error = EINVAL; goto out_unlocked; } oldsize = (1 << oldorder); newsize = (1 << (oldorder - 1)); mtx_lock(&ioat->cleanup_lock); /* Can't shrink below current active set! */ if (ioat_get_active(ioat) >= newsize) { error = ENOMEM; goto out; } /* * Copy current descriptors to the new ring, dropping the removed * descriptors. */ for (i = 0; i < newsize; i++) { current_idx = (ioat->tail + i) & (oldsize - 1); new_idx = (ioat->tail + i) & (newsize - 1); newring[new_idx] = ioat->ring[current_idx]; newring[new_idx]->id = new_idx; } /* Free deleted descriptors */ for (i = newsize; i < oldsize; i++) { ent = ioat_get_ring_entry(ioat, ioat->tail + i); ioat_free_ring_entry(ioat, ent); } /* Fix up hardware ring. 
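 * Point the hardware 'next' field of the new last descriptor back at the descriptor at the tail, so the smaller ring remains a closed loop.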
*/ hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma; next = newring[(ioat->tail + newsize) & (newsize - 1)]; hw->next = next->hw_desc_bus_addr; free(ioat->ring, M_IOAT); ioat->ring = newring; ioat->ring_size_order = oldorder - 1; error = 0; out: mtx_unlock(&ioat->cleanup_lock); out_unlocked: if (error) ioat_free_ring(ioat, (1 << (oldorder - 1)), newring); return (error); } static void ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr) { struct ioat_descriptor *desc; ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr, IOAT_CHANERR_STR); if (chanerr == 0) return; mtx_assert(&ioat->cleanup_lock, MA_OWNED); desc = ioat_get_ring_entry(ioat, ioat->tail + 0); dump_descriptor(desc->u.raw); desc = ioat_get_ring_entry(ioat, ioat->tail + 1); dump_descriptor(desc->u.raw); } static void ioat_timer_callback(void *arg) { struct ioat_descriptor **newring; struct ioat_softc *ioat; uint32_t order; ioat = arg; ioat_log_message(1, "%s\n", __func__); if (ioat->is_completion_pending) { ioat_process_events(ioat); return; } /* Slowly scale the ring down if idle. */ mtx_lock(&ioat->submit_lock); order = ioat->ring_size_order; if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) { mtx_unlock(&ioat->submit_lock); goto out; } ioat->is_resize_pending = TRUE; mtx_unlock(&ioat->submit_lock); newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE, M_NOWAIT); mtx_lock(&ioat->submit_lock); KASSERT(ioat->ring_size_order == order, ("resize_pending protects order")); if (newring != NULL) ring_shrink(ioat, order, newring); ioat->is_resize_pending = FALSE; mtx_unlock(&ioat->submit_lock); out: if (ioat->ring_size_order > IOAT_MIN_ORDER) callout_reset(&ioat->timer, 10 * hz, ioat_timer_callback, ioat); } /* * Support Functions */ static void ioat_submit_single(struct ioat_softc *ioat) { ioat_get(ioat, IOAT_ACTIVE_DESCR_REF); atomic_add_rel_int(&ioat->head, 1); atomic_add_rel_int(&ioat->hw_head, 1); if (!ioat->is_completion_pending) { ioat->is_completion_pending = TRUE; callout_reset(&ioat->timer, IOAT_INTR_TIMO, ioat_timer_callback, ioat); } ioat->stats.descriptors_submitted++; } static int ioat_reset_hw(struct ioat_softc *ioat) { uint64_t status; uint32_t chanerr; unsigned timeout; int error; mtx_lock(IOAT_REFLK); ioat->quiescing = TRUE; ioat_drain_locked(ioat); mtx_unlock(IOAT_REFLK); status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(ioat); /* Wait at most 20 ms */ for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) && timeout < 20; timeout++) { DELAY(1000); status = ioat_get_chansts(ioat); } if (timeout == 20) { error = ETIMEDOUT; goto out; } KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce")); chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); /* * IOAT v3 workaround - write 3E07h to CHANERRMSK_INT to mask out errors * that can cause stability issues for IOAT v3. */ pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07, 4); chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4); pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4); /* * BDXDE and BWD models reset MSI-X registers on device reset. * Save/restore their contents manually.
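 * (pci_save_state() is called before ioat_reset() and pci_restore_state() afterwards, but only on the affected models.)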
*/ if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets MSI-X registers; saving\n"); pci_save_state(ioat->device); } ioat_reset(ioat); /* Wait at most 20 ms */ for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++) DELAY(1000); if (timeout == 20) { error = ETIMEDOUT; goto out; } if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets registers; restored\n"); pci_restore_state(ioat->device); } /* Reset attempts to return the hardware to "halted." */ status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) { /* So this really shouldn't happen... */ ioat_log_message(0, "Device is active after a reset?\n"); ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); error = 0; goto out; } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); if (chanerr != 0) { mtx_lock(&ioat->cleanup_lock); ioat_halted_debug(ioat, chanerr); mtx_unlock(&ioat->cleanup_lock); error = EIO; goto out; } /* * Bring device back online after reset. Writing CHAINADDR brings the * device back to active. * * The internal ring counter resets to zero, so we have to start over * at zero as well. */ ioat->tail = ioat->head = ioat->hw_head = 0; ioat->last_seen = 0; ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); ioat_write_chancmp(ioat, ioat->comp_update_bus_addr); ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr); error = 0; out: mtx_lock(IOAT_REFLK); ioat->quiescing = FALSE; wakeup(&ioat->quiescing); mtx_unlock(IOAT_REFLK); if (error == 0) error = ioat_start_channel(ioat); return (error); } static int sysctl_handle_chansts(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; struct sbuf sb; uint64_t status; int error; ioat = arg1; status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS; sbuf_new_for_sysctl(&sb, NULL, 256, req); switch (status) { case IOAT_CHANSTS_ACTIVE: sbuf_printf(&sb, "ACTIVE"); break; case IOAT_CHANSTS_IDLE: sbuf_printf(&sb, "IDLE"); break; case IOAT_CHANSTS_SUSPENDED: sbuf_printf(&sb, "SUSPENDED"); break; case IOAT_CHANSTS_HALTED: sbuf_printf(&sb, "HALTED"); break; case IOAT_CHANSTS_ARMED: sbuf_printf(&sb, "ARMED"); break; default: sbuf_printf(&sb, "UNKNOWN"); break; } error = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0 || req->newptr == NULL) return (error); return (EINVAL); } static int sysctl_handle_dpi(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; struct sbuf sb; #define PRECISION "1" const uintmax_t factor = 10; uintmax_t rate; int error; ioat = arg1; sbuf_new_for_sysctl(&sb, NULL, 16, req); if (ioat->stats.interrupts == 0) { sbuf_printf(&sb, "NaN"); goto out; } rate = ioat->stats.descriptors_processed * factor / ioat->stats.interrupts; sbuf_printf(&sb, "%ju.%." 
PRECISION "ju", rate / factor, rate % factor); #undef PRECISION out: error = sbuf_finish(&sb); sbuf_delete(&sb); if (error != 0 || req->newptr == NULL) return (error); return (EINVAL); } static int sysctl_handle_error(SYSCTL_HANDLER_ARGS) { struct ioat_descriptor *desc; struct ioat_softc *ioat; int error, arg; ioat = arg1; arg = 0; error = SYSCTL_OUT(req, &arg, sizeof(arg)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &arg, sizeof(arg)); if (error != 0) return (error); if (arg != 0) { ioat_acquire(&ioat->dmaengine); desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1, 0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL, 0); if (desc == NULL) error = ENOMEM; else ioat_submit_single(ioat); ioat_release(&ioat->dmaengine); } return (error); } static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; int error, arg; ioat = arg1; arg = 0; error = SYSCTL_OUT(req, &arg, sizeof(arg)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &arg, sizeof(arg)); if (error != 0) return (error); if (arg != 0) error = ioat_reset_hw(ioat); return (error); } static void dump_descriptor(void *hw_desc) { int i, j; for (i = 0; i < 2; i++) { for (j = 0; j < 8; j++) printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); printf("\n"); } } static void ioat_setup_sysctl(device_t device) { struct sysctl_oid_list *par, *statpar, *state, *hammer; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree, *tmp; struct ioat_softc *ioat; ioat = DEVICE2SOFTC(device); ctx = device_get_sysctl_ctx(device); tree = device_get_sysctl_tree(device); par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, &ioat->version, 0, "HW version (0xMM form)"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, &ioat->max_xfer_size, 0, "HW maximum transfer size"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD, &ioat->intrdelay_supported, 0, "Is INTRDELAY supported"); SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD, &ioat->intrdelay_max, 0, "Maximum configurable INTRDELAY on this channel (microseconds)"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL, "IOAT channel internal state"); state = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD, &ioat->ring_size_order, 0, "SW descriptor ring size order"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0, "SW descriptor head pointer index"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0, "SW descriptor tail pointer index"); SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD, &ioat->hw_head, 0, "HW DMACOUNT"); SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD, ioat->comp_update, "HW addr of last completion"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD, &ioat->is_resize_pending, 0, "resize pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending", CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD, &ioat->is_reset_pending, 0, "reset pending"); SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD, &ioat->is_channel_running, 0, "channel running"); SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts", CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A", "String of the channel status"); SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD, &ioat->cached_intrdelay, 
0, "Current INTRDELAY on this channel (cached, microseconds)"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL, "Big hammers (mostly for testing)"); hammer = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset", CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", "Set to non-zero to reset the hardware"); SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error", CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I", "Set to non-zero to inject a recoverable hardware error"); tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL, "IOAT channel statistics"); statpar = SYSCTL_CHILDREN(tmp); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW, &ioat->stats.interrupts, "Number of interrupts processed on this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW, &ioat->stats.descriptors_processed, "Number of descriptors processed on this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW, &ioat->stats.descriptors_submitted, "Number of descriptors submitted to this channel"); SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW, &ioat->stats.descriptors_error, "Number of descriptors failed by channel errors"); SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW, &ioat->stats.channel_halts, 0, "Number of times the channel has halted"); SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW, &ioat->stats.last_halt_chanerr, 0, "The raw CHANERR when the channel was last halted"); SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt", CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A", "Descriptors per interrupt"); } static inline struct ioat_softc * ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); old = atomic_fetchadd_32(&ioat->refcnt, 1); KASSERT(old < UINT32_MAX, ("refcnt overflow")); #ifdef INVARIANTS old = atomic_fetchadd_32(&ioat->refkinds[kind], 1); KASSERT(old < UINT32_MAX, ("refcnt kind overflow")); #endif return (ioat); } static inline void ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) { _ioat_putn(ioat, n, kind, FALSE); } static inline void ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) { _ioat_putn(ioat, n, kind, TRUE); } static inline void _ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind, boolean_t locked) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); if (n == 0) return; #ifdef INVARIANTS old = atomic_fetchadd_32(&ioat->refkinds[kind], -n); KASSERT(old >= n, ("refcnt kind underflow")); #endif /* Skip acquiring the lock if resulting refcnt > 0. 
*/ for (;;) { old = ioat->refcnt; if (old <= n) break; if (atomic_cmpset_32(&ioat->refcnt, old, old - n)) return; } if (locked) mtx_assert(IOAT_REFLK, MA_OWNED); else mtx_lock(IOAT_REFLK); old = atomic_fetchadd_32(&ioat->refcnt, -n); KASSERT(old >= n, ("refcnt error")); if (old == n) wakeup(IOAT_REFLK); if (!locked) mtx_unlock(IOAT_REFLK); } static inline void ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind) { ioat_putn(ioat, 1, kind); } static void ioat_drain_locked(struct ioat_softc *ioat) { mtx_assert(IOAT_REFLK, MA_OWNED); while (ioat->refcnt > 0) msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0); } Index: user/ngie/release-pkg-fix-tests/sys/dev/lmc/if_lmc.h =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/lmc/if_lmc.h (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/lmc/if_lmc.h (revision 299055) @@ -1,1325 +1,1325 @@ /* * $FreeBSD$ * * Copyright (c) 2002-2004 David Boggs. (boggs@boggs.palo-alto.ca.us) * All rights reserved. * * BSD License: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * GNU General Public License: * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #ifndef IF_LMC_H #define IF_LMC_H #define DEVICE_NAME "lmc" /* Linux RPM-style version information */ #define DRIVER_MAJOR_VERSION 2005 /* year */ #define DRIVER_MINOR_VERSION 9 /* month */ #define DRIVER_SUB_VERSION 29 /* day */ /* netgraph stuff */ #define NG_LMC_NODE_TYPE DEVICE_NAME #define NGM_LMC_COOKIE 1128054761 /* date -u +'%s' */ /* Tulip PCI configuration registers */ #define TLP_CFID 0x00 /* 0: CFg ID register */ #define TLP_CFCS 0x04 /* 1: CFg Command/Status */ #define TLP_CFRV 0x08 /* 2: CFg ReVision */ #define TLP_CFLT 0x0C /* 3: CFg Latency Timer */ #define TLP_CBIO 0x10 /* 4: Cfg Base IO address */ #define TLP_CBMA 0x14 /* 5: Cfg Base Mem Addr */ #define TLP_CSID 0x2C /* 11: Cfg Subsys ID reg */ #define TLP_CFIT 0x3C /* 15: CFg InTerrupt */ #define TLP_CFDD 0x40 /* 16: CFg Driver Data */ #define TLP_CFID_TULIP 0x00091011 /* DEC 21140A Ethernet chip */ #define TLP_CFCS_MSTR_ABORT 0x20000000 #define TLP_CFCS_TARG_ABORT 0x10000000 #define TLP_CFCS_SYS_ERROR 0x00000100 #define TLP_CFCS_PAR_ERROR 0x00000040 #define TLP_CFCS_MWI_ENABLE 0x00000010 #define TLP_CFCS_BUS_MASTER 0x00000004 #define TLP_CFCS_MEM_ENABLE 0x00000002 #define TLP_CFCS_IO_ENABLE 0x00000001 #define TLP_CFLT_LATENCY 0x0000FF00 #define TLP_CFLT_CACHE 0x000000FF #define TLP_CSID_HSSI 0x00031376 /* LMC 5200 HSSI card */ #define TLP_CSID_T3 0x00041376 /* LMC 5245 T3 card */ #define TLP_CSID_SSI 0x00051376 /* LMC 1000 SSI card */ #define TLP_CSID_T1E1 0x00061376 /* LMC 1200 T1E1 card */ #define TLP_CSID_HSSIc 0x00071376 /* LMC 5200 HSSI cPCI */ #define TLP_CSID_SDSL 0x00081376 /* LMC 1168 SDSL card */ #define TLP_CFIT_MAX_LAT 0xFF000000 #define TLP_CFDD_SLEEP 0x80000000 #define TLP_CFDD_SNOOZE 0x40000000 /* Tulip Control and Status Registers */ #define TLP_CSR_STRIDE 8 /* 64 bits */ #define TLP_BUS_MODE 0 * TLP_CSR_STRIDE #define TLP_TX_POLL 1 * TLP_CSR_STRIDE #define TLP_RX_POLL 2 * TLP_CSR_STRIDE #define TLP_RX_LIST 3 * TLP_CSR_STRIDE #define TLP_TX_LIST 4 * TLP_CSR_STRIDE #define TLP_STATUS 5 * TLP_CSR_STRIDE #define TLP_OP_MODE 6 * TLP_CSR_STRIDE #define TLP_INT_ENBL 7 * TLP_CSR_STRIDE #define TLP_MISSED 8 * TLP_CSR_STRIDE #define TLP_SROM_MII 9 * TLP_CSR_STRIDE #define TLP_BIOS_ROM 10 * TLP_CSR_STRIDE #define TLP_TIMER 11 * TLP_CSR_STRIDE #define TLP_GPIO 12 * TLP_CSR_STRIDE #define TLP_CSR13 13 * TLP_CSR_STRIDE #define TLP_CSR14 14 * TLP_CSR_STRIDE #define TLP_WDOG 15 * TLP_CSR_STRIDE #define TLP_CSR_SIZE 128 /* IO bus space size */ /* CSR 0 - PCI Bus Mode Register */ #define TLP_BUS_WRITE_INVAL 0x01000000 /* DONT USE! 
*/ #define TLP_BUS_READ_LINE 0x00800000 #define TLP_BUS_READ_MULT 0x00200000 #define TLP_BUS_DESC_BIGEND 0x00100000 #define TLP_BUS_TAP 0x000E0000 #define TLP_BUS_CAL 0x0000C000 #define TLP_BUS_PBL 0x00003F00 #define TLP_BUS_DATA_BIGEND 0x00000080 #define TLP_BUS_DSL 0x0000007C #define TLP_BUS_ARB 0x00000002 #define TLP_BUS_RESET 0x00000001 #define TLP_BUS_CAL_SHIFT 14 #define TLP_BUS_PBL_SHIFT 8 /* CSR 5 - Status Register */ #define TLP_STAT_FATAL_BITS 0x03800000 #define TLP_STAT_TX_FSM 0x00700000 #define TLP_STAT_RX_FSM 0x000E0000 #define TLP_STAT_FATAL_ERROR 0x00002000 #define TLP_STAT_TX_UNDERRUN 0x00000020 #define TLP_STAT_FATAL_SHIFT 23 /* CSR 6 - Operating Mode Register */ #define TLP_OP_RECEIVE_ALL 0x40000000 #define TLP_OP_MUST_BE_ONE 0x02000000 #define TLP_OP_NO_HEART_BEAT 0x00080000 #define TLP_OP_PORT_SELECT 0x00040000 #define TLP_OP_TX_THRESH 0x0000C000 #define TLP_OP_TX_RUN 0x00002000 #define TLP_OP_LOOP_MODE 0x00000C00 #define TLP_OP_EXT_LOOP 0x00000800 #define TLP_OP_INT_LOOP 0x00000400 #define TLP_OP_FULL_DUPLEX 0x00000200 #define TLP_OP_PROMISCUOUS 0x00000040 #define TLP_OP_PASS_BAD_PKT 0x00000008 #define TLP_OP_RX_RUN 0x00000002 #define TLP_OP_TR_SHIFT 14 #define TLP_OP_INIT (TLP_OP_PORT_SELECT | \ TLP_OP_FULL_DUPLEX | \ TLP_OP_MUST_BE_ONE | \ TLP_OP_NO_HEART_BEAT | \ TLP_OP_RECEIVE_ALL | \ TLP_OP_PROMISCUOUS | \ TLP_OP_PASS_BAD_PKT | \ TLP_OP_RX_RUN | \ TLP_OP_TX_RUN) /* CSR 7 - Interrupt Enable Register */ #define TLP_INT_NORMAL_INTR 0x00010000 #define TLP_INT_ABNRML_INTR 0x00008000 #define TLP_INT_FATAL_ERROR 0x00002000 #define TLP_INT_RX_NO_BUFS 0x00000080 #define TLP_INT_RX_INTR 0x00000040 #define TLP_INT_TX_UNDERRUN 0x00000020 #define TLP_INT_TX_INTR 0x00000001 #define TLP_INT_DISABLE 0 #define TLP_INT_TX (TLP_INT_NORMAL_INTR | \ TLP_INT_ABNRML_INTR | \ TLP_INT_FATAL_ERROR | \ TLP_INT_TX_UNDERRUN | \ TLP_INT_TX_INTR) #define TLP_INT_RX (TLP_INT_NORMAL_INTR | \ TLP_INT_ABNRML_INTR | \ TLP_INT_FATAL_ERROR | \ TLP_INT_RX_NO_BUFS | \ TLP_INT_RX_INTR) #define TLP_INT_TXRX (TLP_INT_TX | TLP_INT_RX) /* CSR 8 - RX Missed Frames & Overrun Register */ #define TLP_MISS_OCO 0x10000000 #define TLP_MISS_OVERRUN 0x0FFE0000 #define TLP_MISS_MFO 0x00010000 #define TLP_MISS_MISSED 0x0000FFFF #define TLP_OVERRUN_SHIFT 17 /* CSR 9 - SROM & MII & Boot ROM Register */ #define TLP_MII_MDIN 0x00080000 #define TLP_MII_MDOE 0x00040000 #define TLP_MII_MDOUT 0x00020000 #define TLP_MII_MDC 0x00010000 #define TLP_BIOS_RD 0x00004000 #define TLP_BIOS_WR 0x00002000 #define TLP_BIOS_SEL 0x00001000 #define TLP_SROM_RD 0x00004000 #define TLP_SROM_SEL 0x00000800 #define TLP_SROM_DOUT 0x00000008 #define TLP_SROM_DIN 0x00000004 #define TLP_SROM_CLK 0x00000002 #define TLP_SROM_CS 0x00000001 /* CSR 12 - General Purpose IO register */ #define TLP_GPIO_DIR 0x00000100 /* CSR 15 - Watchdog Timer Register */ #define TLP_WDOG_RX_OFF 0x00000010 #define TLP_WDOG_TX_OFF 0x00000001 #define TLP_WDOG_INIT (TLP_WDOG_TX_OFF | \ TLP_WDOG_RX_OFF) /* GPIO bits common to all cards */ #define GPIO_INIT 0x01 /* from Xilinx */ #define GPIO_RESET 0x02 /* to Xilinx */ /* bits 2 and 3 vary with board type -- see below */ #define GPIO_MODE 0x10 /* to Xilinx */ #define GPIO_DP 0x20 /* to/from Xilinx */ #define GPIO_DATA 0x40 /* serial data */ #define GPIO_CLK 0x80 /* serial clock */ /* HSSI GPIO bits */ #define GPIO_HSSI_ST 0x04 /* send timing sense (deprecated) */ #define GPIO_HSSI_TXCLK 0x08 /* clock source */ /* HSSIc GPIO bits */ #define GPIO_HSSI_SYNTH 0x04 /* Synth osc chip select */ #define GPIO_HSSI_DCE 0x08 /* provide 
clock on TXCLOCK output */ /* T3 GPIO bits */ #define GPIO_T3_DAC 0x04 /* DAC chip select */ #define GPIO_T3_INTEN 0x08 /* Framer Interrupt enable */ /* SSI GPIO bits */ #define GPIO_SSI_SYNTH 0x04 /* Synth osc chip select */ #define GPIO_SSI_DCE 0x08 /* provide clock on TXCLOCK output */ /* T1E1 GPIO bits */ #define GPIO_T1_INTEN 0x08 /* Framer Interrupt enable */ /* MII register 16 bits common to all cards */ /* NB: LEDs for HSSI & SSI are in DIFFERENT bits than for T1E1 & T3; oops */ /* NB: CRC32 for HSSI & SSI is in DIFFERENT bit than for T1E1 & T3; oops */ #define MII16_LED_ALL 0x0780 /* RW: LED bit mask */ #define MII16_FIFO 0x0800 /* RW: 1=reset, 0=not reset */ /* MII register 16 bits for HSSI */ #define MII16_HSSI_TA 0x0001 /* RW: host ready; host->modem */ #define MII16_HSSI_CA 0x0002 /* RO: modem ready; modem->host */ #define MII16_HSSI_LA 0x0004 /* RW: loopback A; host->modem */ #define MII16_HSSI_LB 0x0008 /* RW: loopback B; host->modem */ #define MII16_HSSI_LC 0x0010 /* RO: loopback C; modem->host */ #define MII16_HSSI_TM 0x0020 /* RO: test mode; modem->host */ #define MII16_HSSI_CRC32 0x0040 /* RW: CRC length 16/32 */ #define MII16_HSSI_LED_LL 0x0080 /* RW: lower left - green */ #define MII16_HSSI_LED_LR 0x0100 /* RW: lower right - green */ #define MII16_HSSI_LED_UL 0x0200 /* RW: upper left - green */ #define MII16_HSSI_LED_UR 0x0400 /* RW: upper right - red */ #define MII16_HSSI_FIFO 0x0800 /* RW: reset fifos */ #define MII16_HSSI_FORCECA 0x1000 /* RW: [cPCI] force CA on */ #define MII16_HSSI_CLKMUX 0x6000 /* RW: [cPCI] TX clock selection */ #define MII16_HSSI_LOOP 0x8000 /* RW: [cPCI] LOOP TX into RX */ #define MII16_HSSI_MODEM 0x003F /* TA+CA+LA+LB+LC+TM */ /* MII register 16 bits for DS3 */ #define MII16_DS3_ZERO 0x0001 /* RW: short/long cables */ #define MII16_DS3_TRLBK 0x0002 /* RW: loop towards host */ #define MII16_DS3_LNLBK 0x0004 /* RW: loop towards net */ #define MII16_DS3_RAIS 0x0008 /* RO: LIU receive AIS (depr) */ #define MII16_DS3_TAIS 0x0010 /* RW: LIU transmit AIS (depr) */ #define MII16_DS3_BIST 0x0020 /* RO: LIU QRSS patt match (depr) */ #define MII16_DS3_DLOS 0x0040 /* RO: LIU Digital LOS (depr) */ #define MII16_DS3_LED_BLU 0x0080 /* RW: lower right - blue */ #define MII16_DS3_LED_YEL 0x0100 /* RW: lower left - yellow */ #define MII16_DS3_LED_RED 0x0200 /* RW: upper right - red */ #define MII16_DS3_LED_GRN 0x0400 /* RW: upper left - green */ #define MII16_DS3_FIFO 0x0800 /* RW: reset fifos */ #define MII16_DS3_CRC32 0x1000 /* RW: CRC length 16/32 */ #define MII16_DS3_SCRAM 0x2000 /* RW: payload scrambler */ #define MII16_DS3_POLY 0x4000 /* RW: 1=Larse, 0=DigLink|Kentrox */ #define MII16_DS3_FRAME 0x8000 /* RW: 1=stop txframe pulses */ /* MII register 16 bits for SSI */ #define MII16_SSI_DTR 0x0001 /* RW: DTR host->modem */ #define MII16_SSI_DSR 0x0002 /* RO: DSR modem->host */ #define MII16_SSI_RTS 0x0004 /* RW: RTS host->modem */ #define MII16_SSI_CTS 0x0008 /* RO: CTS modem->host */ #define MII16_SSI_DCD 0x0010 /* RW: DCD modem<->host */ #define MII16_SSI_RI 0x0020 /* RO: RI modem->host */ #define MII16_SSI_CRC32 0x0040 /* RW: CRC length 16/32 */ #define MII16_SSI_LED_LL 0x0080 /* RW: lower left - green */ #define MII16_SSI_LED_LR 0x0100 /* RW: lower right - green */ #define MII16_SSI_LED_UL 0x0200 /* RW: upper left - green */ #define MII16_SSI_LED_UR 0x0400 /* RW: upper right - red */ #define MII16_SSI_FIFO 0x0800 /* RW: reset fifos */ #define MII16_SSI_LL 0x1000 /* RW: LL: host->modem */ #define MII16_SSI_RL 0x2000 /* RW: RL: host->modem */ #define 
MII16_SSI_TM 0x4000 /* RO: TM: modem->host */ #define MII16_SSI_LOOP 0x8000 /* RW: Loop at ext conn */ #define MII16_SSI_MODEM 0x703F /* DTR+DSR+RTS+CTS+DCD+RI+LL+RL+TM */ /* Mii register 17 has the SSI cable bits */ #define MII17_SSI_CABLE_SHIFT 3 /* shift to get cable type */ #define MII17_SSI_CABLE_MASK 0x0038 /* RO: mask to get cable type */ #define MII17_SSI_PRESCALE 0x0040 /* RW: divide by: 0=16; 1=512 */ #define MII17_SSI_ITF 0x0100 /* RW: fill with: 0=flags; 1=ones */ #define MII17_SSI_NRZI 0x0400 /* RW: coding: NRZ= 0; NRZI=1 */ /* MII register 16 bits for T1/E1 */ #define MII16_T1_UNUSED1 0x0001 #define MII16_T1_INVERT 0x0002 /* RW: invert data (for SF/AMI) */ #define MII16_T1_XOE 0x0004 /* RW: TX Output Enable; 0=disable */ #define MII16_T1_RST 0x0008 /* RW: Bt8370 chip reset */ #define MII16_T1_Z 0x0010 /* RW: output impedance T1=1 E1=0 */ #define MII16_T1_INTR 0x0020 /* RO: interrupt from Bt8370 */ #define MII16_T1_ONESEC 0x0040 /* RO: one second square wave */ #define MII16_T1_LED_BLU 0x0080 /* RW: lower right - blue */ #define MII16_T1_LED_YEL 0x0100 /* RW: lower left - yellow */ #define MII16_T1_LED_RED 0x0200 /* RW: upper right - red */ #define MII16_T1_LED_GRN 0x0400 /* RW: upper left - green */ #define MII16_T1_FIFO 0x0800 /* RW: reset fifos */ #define MII16_T1_CRC32 0x1000 /* RW: CRC length 16/32 */ #define MII16_T1_UNUSED2 0xE000 /* T3 framer: RW=Read/Write; RO=Read-Only; RC=Read/Clr; WO=Write-Only */ #define T3CSR_STAT0 0x00 /* RO: real-time status */ #define T3CSR_CTL1 0x01 /* RW: global control bits */ #define T3CSR_FEBE 0x02 /* RC: Far End Block Error Counter */ #define T3CSR_CERR 0x03 /* RC: C-bit Parity Error Counter */ #define T3CSR_PERR 0x04 /* RC: P-bit Parity Error Counter */ #define T3CSR_TX_FEAC 0x05 /* RW: Far End Alarm & Control */ #define T3CSR_RX_FEAC 0x06 /* RO: Far End Alarm & Control */ #define T3CSR_STAT7 0x07 /* RL: latched real-time status */ #define T3CSR_CTL8 0x08 /* RW: extended global ctl bits */ #define T3CSR_STAT9 0x09 /* RL: extended status bits */ #define T3CSR_FERR 0x0A /* RC: F-bit Error Counter */ #define T3CSR_MERR 0x0B /* RC: M-bit Error Counter */ #define T3CSR_CTL12 0x0C /* RW: more extended ctl bits */ #define T3CSR_DBL_FEAC 0x0D /* RW: TX double FEAC */ #define T3CSR_CTL14 0x0E /* RW: even more extended ctl bits */ #define T3CSR_FEAC_STK 0x0F /* RO: RX FEAC stack */ #define T3CSR_STAT16 0x10 /* RL: extended latched status */ #define T3CSR_INTEN 0x11 /* RW: interrupt enable */ #define T3CSR_CVLO 0x12 /* RC: coding violation cntr LSB */ #define T3CSR_CVHI 0x13 /* RC: coding violation cntr MSB */ #define T3CSR_CTL20 0x14 /* RW: yet more extended ctl bits */ #define CTL1_XTX 0x01 /* Transmit X-bit value */ #define CTL1_3LOOP 0x02 /* framer loop back */ #define CTL1_SER 0x04 /* SERial interface selected */ #define CTL1_M13MODE 0x08 /* M13 frame format */ #define CTL1_TXIDL 0x10 /* Transmit Idle signal */ #define CTL1_ENAIS 0x20 /* Enable AIS upon LOS */ #define CTL1_TXAIS 0x40 /* Transmit Alarm Indication Sig */ #define CTL1_NOFEBE 0x80 /* No Far End Block Errors */ #define CTL5_EMODE 0x80 /* rev B Extended features enabled */ #define CTL5_START 0x40 /* transmit the FEAC msg now */ #define CTL8_FBEC 0x80 /* F-Bit Error Count control */ #define CTL8_TBLU 0x20 /* Transmit Blue signal */ #define STAT9_SEF 0x80 /* Severely Errored Frame */ #define STAT9_RBLU 0x20 /* Receive Blue signal */ #define CTL12_RTPLLEN 0x80 /* Rx-to-Tx Payload Lpbk Lock ENbl */ #define CTL12_RTPLOOP 0x40 /* Rx-to-Tx Payload Loopback */ #define CTL12_DLCB1 0x08 /* 
Data Link C-Bits forced to 1 */ #define CTL12_C21 0x04 /* C2 forced to 1 */ #define CTL12_MCB1 0x02 /* Most C-Bits forced to 1 */ #define CTL13_DFEXEC 0x40 /* Execute Double FEAC */ #define CTL14_FEAC10 0x80 /* Transmit FEAC word 10 times */ #define CTL14_RGCEN 0x20 /* Receive Gapped Clock Out Enbl */ #define CTL14_TGCEN 0x10 /* Timing Gen Gapped Clk Out Enbl */ #define FEAC_STK_MORE 0x80 /* FEAC stack has more FEACs */ #define FEAC_STK_VALID 0x40 /* FEAC stack is valid */ #define FEAC_STK_FEAC 0x3F /* FEAC stack FEAC data */ #define STAT16_XERR 0x01 /* X-bit Error */ #define STAT16_SEF 0x02 /* Severely Errored Frame */ #define STAT16_RTLOC 0x04 /* Rx/Tx Loss Of Clock */ #define STAT16_FEAC 0x08 /* new FEAC msg */ #define STAT16_RIDL 0x10 /* channel IDLe signal */ #define STAT16_RAIS 0x20 /* Alarm Indication Signal */ #define STAT16_ROOF 0x40 /* Out Of Frame sync */ #define STAT16_RLOS 0x80 /* Loss Of Signal */ #define CTL20_CVEN 0x01 /* Coding Violation Counter Enbl */ /* T1.107 Bit Oriented C-Bit Parity Far End Alarm Control and Status codes */ #define T3BOP_OOF 0x00 /* Yellow alarm status */ #define T3BOP_LINE_UP 0x07 /* line loopback activate */ #define T3BOP_LINE_DOWN 0x1C /* line loopback deactivate */ #define T3BOP_LOOP_DS3 0x1B /* loopback full DS3 */ #define T3BOP_IDLE 0x1A /* IDLE alarm status */ #define T3BOP_AIS 0x16 /* AIS alarm status */ #define T3BOP_LOS 0x0E /* LOS alarm status */ /* T1E1 regs; RW=Read/Write; RO=Read-Only; RC=Read/Clr; WO=Write-Only */ #define Bt8370_DID 0x000 /* RO: Device ID */ #define Bt8370_CR0 0x001 /* RW; Primary Control Register */ #define Bt8370_JAT_CR 0x002 /* RW: Jitter Attenuator CR */ #define Bt8370_IRR 0x003 /* RO: Interrupt Request Reg */ #define Bt8370_ISR7 0x004 /* RC: Alarm 1 Interrupt Status */ #define Bt8370_ISR6 0x005 /* RC: Alarm 2 Interrupt Status */ #define Bt8370_ISR5 0x006 /* RC: Error Interrupt Status */ #define Bt8370_ISR4 0x007 /* RC; Cntr Ovfl Interrupt Status */ #define Bt8370_ISR3 0x008 /* RC: Timer Interrupt Status */ #define Bt8370_ISR2 0x009 /* RC: Data Link 1 Int Status */ #define Bt8370_ISR1 0x00A /* RC: Data Link 2 Int Status */ #define Bt8370_ISR0 0x00B /* RC: Pattrn Interrupt Status */ #define Bt8370_IER7 0x00C /* RW: Alarm 1 Interrupt Enable */ #define Bt8370_IER6 0x00D /* RW: Alarm 2 Interrupt Enable */ #define Bt8370_IER5 0x00E /* RW: Error Interrupt Enable */ #define Bt8370_IER4 0x00F /* RW: Cntr Ovfl Interrupt Enable */ #define Bt8370_IER3 0x010 /* RW: Timer Interrupt Enable */ #define Bt8370_IER2 0x011 /* RW: Data Link 1 Int Enable */ #define Bt8370_IER1 0x012 /* RW: Data Link 2 Int Enable */ #define Bt8370_IER0 0x013 /* RW: Pattern Interrupt Enable */ #define Bt8370_LOOP 0x014 /* RW: Loopback Config Reg */ #define Bt8370_DL3_TS 0x015 /* RW: External Data Link Channel */ #define Bt8370_DL3_BIT 0x016 /* RW: External Data Link Bit */ #define Bt8370_FSTAT 0x017 /* RO: Offline Framer Status */ #define Bt8370_PIO 0x018 /* RW: Programmable Input/Output */ #define Bt8370_POE 0x019 /* RW: Programmable Output Enable */ #define Bt8370_CMUX 0x01A /* RW: Clock Input Mux */ #define Bt8370_TMUX 0x01B /* RW: Test Mux Config */ #define Bt8370_TEST 0x01C /* RW: Test Config */ #define Bt8370_LIU_CR 0x020 /* RW: Line Intf Unit Config Reg */ #define Bt8370_RSTAT 0x021 /* RO; Receive LIU Status */ #define Bt8370_RLIU_CR 0x022 /* RW: Receive LIU Config */ #define Bt8370_LPF 0x023 /* RW: RPLL Low Pass Filter */ #define Bt8370_VGA_MAX 0x024 /* RW: Variable Gain Amplifier Max */ #define Bt8370_EQ_DAT 0x025 /* RW: Equalizer Coeff Data Reg 
*/ #define Bt8370_EQ_PTR 0x026 /* RW: Equzlizer Coeff Table Ptr */ #define Bt8370_DSLICE 0x027 /* RW: Data Slicer Threshold */ #define Bt8370_EQ_OUT 0x028 /* RW: Equalizer Output Levels */ #define Bt8370_VGA 0x029 /* RO: Variable Gain Ampl Status */ #define Bt8370_PRE_EQ 0x02A /* RW: Pre-Equalizer */ #define Bt8370_COEFF0 0x030 /* RO: LMS Adj Eq Coeff Status */ #define Bt8370_GAIN0 0x038 /* RW: Equalizer Gain Thresh */ #define Bt8370_GAIN1 0x039 /* RW: Equalizer Gain Thresh */ #define Bt8370_GAIN2 0x03A /* RW: Equalizer Gain Thresh */ #define Bt8370_GAIN3 0x03B /* RW: Equalizer Gain Thresh */ #define Bt8370_GAIN4 0x03C /* RW: Equalizer Gain Thresh */ #define Bt8370_RCR0 0x040 /* RW: Rx Configuration */ #define Bt8370_RPATT 0x041 /* RW: Rx Test Pattern Config */ #define Bt8370_RLB 0x042 /* RW: Rx Loopback Code Detr Conf */ #define Bt8370_LBA 0x043 /* RW: Loopback Activate Code Patt */ #define Bt8370_LBD 0x044 /* RW: Loopback Deact Code Patt */ #define Bt8370_RALM 0x045 /* RW: Rx Alarm Signal Config */ #define Bt8370_LATCH 0x046 /* RW: Alarm/Err/Cntr Latch Config */ #define Bt8370_ALM1 0x047 /* RO: Alarm 1 Status */ #define Bt8370_ALM2 0x048 /* RO: Alarm 2 Status */ #define Bt8370_ALM3 0x049 /* RO: Alarm 3 Status */ #define Bt8370_FERR_LO 0x050 /* RC: Framing Bit Error Cntr LSB */ #define Bt8370_FERR_HI 0x051 /* RC: Framing Bit Error Cntr MSB */ #define Bt8370_CRC_LO 0x052 /* RC: CRC Error Counter LSB */ #define Bt8370_CRC_HI 0x053 /* RC: CRC Error Counter MSB */ #define Bt8370_LCV_LO 0x054 /* RC: Line Code Viol Counter LSB */ #define Bt8370_LCV_HI 0x055 /* RC: Line Code Viol Counter MSB */ #define Bt8370_FEBE_LO 0x056 /* RC: Far End Block Err Cntr LSB */ #define Bt8370_FEBE_HI 0x057 /* RC: Far End Block Err Cntr MSB */ #define Bt8370_BERR_LO 0x058 /* RC: PRBS Bit Error Counter LSB */ #define Bt8370_BERR_HI 0x059 /* RC: PRBS Bit Error Counter MSB */ #define Bt8370_AERR 0x05A /* RC: SEF/LOF/COFA counter */ #define Bt8370_RSA4 0x05B /* RO: Rx Sa4 Byte Buffer */ #define Bt8370_RSA5 0x05C /* RO: Rx Sa5 Byte Buffer */ #define Bt8370_RSA6 0x05D /* RO: Rx Sa6 Byte Buffer */ #define Bt8370_RSA7 0x05E /* RO: Rx Sa7 Byte Buffer */ #define Bt8370_RSA8 0x05F /* RO: Rx Sa8 Byte Buffer */ #define Bt8370_SHAPE0 0x060 /* RW: Tx Pulse Shape Config */ #define Bt8370_TLIU_CR 0x068 /* RW: Tx LIU Config Reg */ #define Bt8370_TCR0 0x070 /* RW: Tx Framer Config */ #define Bt8370_TCR1 0x071 /* RW: Txter Configuration */ #define Bt8370_TFRM 0x072 /* RW: Tx Frame Format */ #define Bt8370_TERROR 0x073 /* RW: Tx Error Insert */ #define Bt8370_TMAN 0x074 /* RW: Tx Manual Sa/FEBE Config */ #define Bt8370_TALM 0x075 /* RW: Tx Alarm Signal Config */ #define Bt8370_TPATT 0x076 /* RW: Tx Test Pattern Config */ #define Bt8370_TLB 0x077 /* RW: Tx Inband Loopback Config */ #define Bt8370_LBP 0x078 /* RW: Tx Inband Loopback Patt */ #define Bt8370_TSA4 0x07B /* RW: Tx Sa4 Byte Buffer */ #define Bt8370_TSA5 0x07C /* RW: Tx Sa5 Byte Buffer */ #define Bt8370_TSA6 0x07D /* RW: Tx Sa6 Byte Buffer */ #define Bt8370_TSA7 0x07E /* RW: Tx Sa7 Byte Buffer */ #define Bt8370_TSA8 0x07F /* RW: Tx Sa8 Byte Buffer */ #define Bt8370_CLAD_CR 0x090 /* RW: Clock Rate Adapter Config */ #define Bt8370_CSEL 0x091 /* RW: CLAD Frequency Select */ #define Bt8370_CPHASE 0x092 /* RW: CLAD Phase Det Scale Factor */ #define Bt8370_CTEST 0x093 /* RW: CLAD Test */ #define Bt8370_BOP 0x0A0 /* RW: Bit Oriented Protocol Xcvr */ #define Bt8370_TBOP 0x0A1 /* RW: Tx BOP Codeword */ #define Bt8370_RBOP 0x0A2 /* RO; Rx BOP Codeword */ #define Bt8370_BOP_STAT 0x0A3 /* 
RO: BOP Status */ #define Bt8370_DL1_TS 0x0A4 /* RW: DL1 Time Slot Enable */ #define Bt8370_DL1_BIT 0x0A5 /* RW: DL1 Bit Enable */ #define Bt8370_DL1_CTL 0x0A6 /* RW: DL1 Control */ #define Bt8370_RDL1_FFC 0x0A7 /* RW: RDL1 FIFO Fill Control */ #define Bt8370_RDL1 0x0A8 /* RO: RDL1 FIFO */ #define Bt8370_RDL1_STAT 0x0A9 /* RO: RDL1 Status */ #define Bt8370_PRM 0x0AA /* RW: Performance Report Message */ #define Bt8370_TDL1_FEC 0x0AB /* RW: TDL1 FIFO Empty Control */ #define Bt8370_TDL1_EOM 0x0AC /* WO: TDL1 End Of Message Control */ #define Bt8370_TDL1 0x0AD /* RW: TDL1 FIFO */ #define Bt8370_TDL1_STAT 0x0AE /* RO: TDL1 Status */ #define Bt8370_DL2_TS 0x0AF /* RW: DL2 Time Slot Enable */ #define Bt8370_DL2_BIT 0x0B0 /* RW: DL2 Bit Enable */ #define Bt8370_DL2_CTL 0x0B1 /* RW: DL2 Control */ #define Bt8370_RDL2_FFC 0x0B2 /* RW: RDL2 FIFO Fill Control */ #define Bt8370_RDL2 0x0B3 /* RO: RDL2 FIFO */ #define Bt8370_RDL2_STAT 0x0B4 /* RO: RDL2 Status */ #define Bt8370_TDL2_FEC 0x0B6 /* RW: TDL2 FIFO Empty Control */ #define Bt8370_TDL2_EOM 0x0B7 /* WO: TDL2 End Of Message Control */ #define Bt8370_TDL2 0x0B8 /* RW: TDL2 FIFO */ #define Bt8370_TDL2_STAT 0x0B9 /* RO: TDL2 Status */ #define Bt8370_DL_TEST1 0x0BA /* RW: DLINK Test Config */ #define Bt8370_DL_TEST2 0x0BB /* RW: DLINK Test Status */ #define Bt8370_DL_TEST3 0x0BC /* RW: DLINK Test Status */ #define Bt8370_DL_TEST4 0x0BD /* RW: DLINK Test Control */ #define Bt8370_DL_TEST5 0x0BE /* RW: DLINK Test Control */ #define Bt8370_SBI_CR 0x0D0 /* RW: System Bus Interface Config */ #define Bt8370_RSB_CR 0x0D1 /* RW: Rx System Bus Config */ #define Bt8370_RSYNC_BIT 0x0D2 /* RW: Rx System Bus Sync Bit Offs */ #define Bt8370_RSYNC_TS 0x0D3 /* RW: Rx System Bus Sync TS Offs */ #define Bt8370_TSB_CR 0x0D4 /* RW: Tx System Bus Config */ #define Bt8370_TSYNC_BIT 0x0D5 /* RW: Tx System Bus Sync Bit Offs */ #define Bt8370_TSYNC_TS 0x0D6 /* RW: Tx System Bus Sync TS Offs */ #define Bt8370_RSIG_CR 0x0D7 /* RW: Rx Signalling Config */ #define Bt8370_RSYNC_FRM 0x0D8 /* RW: Sig Reinsertion Frame Offs */ #define Bt8370_SSTAT 0x0D9 /* RO: Slip Buffer Status */ #define Bt8370_STACK 0x0DA /* RO: Rx Signalling Stack */ #define Bt8370_RPHASE 0x0DB /* RO: RSLIP Phase Status */ #define Bt8370_TPHASE 0x0DC /* RO: TSLIP Phase Status */ #define Bt8370_PERR 0x0DD /* RO: RAM Parity Status */ #define Bt8370_SBCn 0x0E0 /* RW: System Bus Per-Channel Ctl */ #define Bt8370_TPCn 0x100 /* RW: Tx Per-Channel Control */ #define Bt8370_TSIGn 0x120 /* RW: Tx Signalling Buffer */ #define Bt8370_TSLIP_LOn 0x140 /* RW: Tx PCM Slip Buffer Lo */ #define Bt8370_TSLIP_HIn 0x160 /* RW: Tx PCM Slip Buffer Hi */ #define Bt8370_RPCn 0x180 /* RW: Rx Per-Channel Control */ #define Bt8370_RSIGn 0x1A0 /* RW: Rx Signalling Buffer */ #define Bt8370_RSLIP_LOn 0x1C0 /* RW: Rx PCM Slip Buffer Lo */ #define Bt8370_RSLIP_HIn 0x1E0 /* RW: Rx PCM Slip Buffer Hi */ /* Bt8370_LOOP (0x14) framer loopback control register bits */ #define LOOP_ANALOG 0x01 /* inward loop thru LIU */ #define LOOP_FRAMER 0x02 /* inward loop thru framer */ #define LOOP_LINE 0x04 /* outward loop thru LIU */ #define LOOP_PAYLOAD 0x08 /* outward loop of payload */ #define LOOP_DUAL 0x06 /* inward framer + outward line */ /* Bt8370_ALM1 (0x47) receiver alarm status register bits */ #define ALM1_SIGFRZ 0x01 /* Rx Signalling Freeze */ #define ALM1_RLOF 0x02 /* Rx loss of frame alignment */ #define ALM1_RLOS 0x04 /* Rx digital loss of signal */ #define ALM1_RALOS 0x08 /* Rx analog loss of signal */ #define ALM1_RAIS 0x10 /* Rx Alarm
Indication Signal */ #define ALM1_RYEL 0x40 /* Rx Yellow alarm indication */ #define ALM1_RMYEL 0x80 /* Rx multiframe YELLOW alarm */ /* Bt8370_ALM3 (0x49) receive framer status register bits */ #define ALM3_FRED 0x04 /* Rx Out Of T1/FAS alignment */ #define ALM3_MRED 0x08 /* Rx Out Of MFAS alignment */ #define ALM3_SRED 0x10 /* Rx Out Of CAS alignment */ #define ALM3_SEF 0x20 /* Rx Severely Errored Frame */ #define ALM3_RMAIS 0x40 /* Rx TS16 AIS (CAS) */ /* Bt8370_TALM (0x75) transmit alarm control register bits */ #define TALM_TAIS 0x01 /* Tx Alarm Indication Signal */ #define TALM_TYEL 0x02 /* Tx Yellow alarm */ #define TALM_TMYEL 0x04 /* Tx Multiframe Yellow alarm */ #define TALM_AUTO_AIS 0x08 /* auto send AIS on LOS */ #define TALM_AUTO_YEL 0x10 /* auto send YEL on LOF */ #define TALM_AUTO_MYEL 0x20 /* auto send E1-Y16 on loss-of-CAS */ /* 8370 BOP (Bit Oriented Protocol) command fragments */ #define RBOP_OFF 0x00 /* BOP Rx disabled */ #define RBOP_25 0xE0 /* BOP Rx requires 25 BOPs */ #define TBOP_OFF 0x00 /* BOP Tx disabled */ #define TBOP_25 0x0B /* BOP Tx sends 25 BOPs */ #define TBOP_CONT 0x0F /* BOP Tx sends continuously */ /* T1.403 Bit-Oriented ESF Data-Link Message codes */ #define T1BOP_OOF 0x00 /* Yellow alarm status */ #define T1BOP_LINE_UP 0x07 /* line loopback activate */ #define T1BOP_LINE_DOWN 0x1C /* line loopback deactivate */ #define T1BOP_PAY_UP 0x0A /* payload loopback activate */ #define T1BOP_PAY_DOWN 0x19 /* payload loopback deactivate */ #define T1BOP_NET_UP 0x09 /* network loopback activate */ #define T1BOP_NET_DOWN 0x12 /* network loopback deactivate */ /* Unix & Linux reserve 16 device-private IOCTLs */ #if BSD # define LMCIOCGSTAT _IOWR('i', 240, struct status) # define LMCIOCGCFG _IOWR('i', 241, struct config) # define LMCIOCSCFG _IOW('i', 242, struct config) # define LMCIOCREAD _IOWR('i', 243, struct ioctl) # define LMCIOCWRITE _IOW('i', 244, struct ioctl) # define LMCIOCTL _IOWR('i', 245, struct ioctl) #endif struct iohdr /* all LMCIOCs begin with this */ { char ifname[IFNAMSIZ]; /* interface name, e.g. 
"lmc0" */ u_int32_t cookie; /* interface version number */ u_int16_t direction; /* missing in Linux IOCTL */ u_int16_t length; /* missing in Linux IOCTL */ struct iohdr *iohdr; /* missing in Linux IOCTL */ u_int32_t spare; /* pad this struct to **32 bytes** */ }; #define DIR_IO 0 #define DIR_IOW 1 /* copy data user->kernel */ #define DIR_IOR 2 /* copy data kernel->user */ #define DIR_IOWR 3 /* copy data kernel<->user */ struct hssi_snmp { u_int16_t sigs; /* MII16_HSSI & MII16_HSSI_MODEM */ }; struct ssi_snmp { u_int16_t sigs; /* MII16_SSI & MII16_SSI_MODEM */ }; struct t3_snmp { u_int16_t febe; /* 8 bits - Far End Block err cnt */ u_int16_t lcv; /* 16 bits - BPV err cnt */ u_int16_t pcv; /* 8 bits - P-bit err cnt */ u_int16_t ccv; /* 8 bits - C-bit err cnt */ u_int16_t line; /* line status bit vector */ u_int16_t loop; /* loop status bit vector */ }; struct t1_snmp { u_int16_t prm[4]; /* T1.403 Performance Report Msg */ u_int16_t febe; /* 10 bits - E1 FAR CRC err cnt */ u_int16_t lcv; /* 16 bits - BPV + EXZ err cnt */ u_int16_t fe; /* 12 bits - Ft/Fs/FPS/FAS err cnt */ u_int16_t crc; /* 10 bits - CRC6/CRC4 err cnt */ u_int16_t line; /* line status bit vector */ u_int16_t loop; /* loop status bit vector */ }; /* SNMP trunk MIB Send codes */ #define TSEND_NORMAL 1 /* Send data (normal or looped) */ #define TSEND_LINE 2 /* Send 'line loopback activate' */ #define TSEND_PAYLOAD 3 /* Send 'payload loop activate' */ #define TSEND_RESET 4 /* Send 'loopback deactivate' */ #define TSEND_QRS 5 /* Send Quasi Random Signal */ /* ANSI T1.403 Performance Report Msg -- once a second from the far end */ #define T1PRM_FE 0x8000 /* Frame Sync Bit Error Event >= 1 */ #define T1PRM_SE 0x4000 /* Severely Err Framing Event >= 1 */ #define T1PRM_LB 0x2000 /* Payload Loopback Activated */ #define T1PRM_G1 0x1000 /* CRC Error Event = 1 */ #define T1PRM_R 0x0800 /* Reserved */ #define T1PRM_G2 0x0400 /* 1 < CRC Error Event <= 5 */ #define T1PRM_SEQ 0x0300 /* modulo 4 counter */ #define T1PRM_G3 0x0080 /* 5 < CRC Error Event <= 10 */ #define T1PRM_LV 0x0040 /* Line Code Violation Event >= 1 */ #define T1PRM_G4 0x0020 /* 10 < CRC Error Event <= 100 */ #define T1PRM_U 0x0018 /* Under study for synchronization */ #define T1PRM_G5 0x0004 /* 100 < CRC Error Event <= 319 */ #define T1PRM_SL 0x0002 /* Slip Event >= 1 */ #define T1PRM_G6 0x0001 /* CRC Error Event >= 320 */ /* SNMP Line Status */ #define TLINE_NORM 0x0001 /* no alarm present */ #define TLINE_RX_RAI 0x0002 /* receiving RAI = Yellow alarm */ #define TLINE_TX_RAI 0x0004 /* sending RAI = Yellow alarm */ #define TLINE_RX_AIS 0x0008 /* receiving AIS = blue alarm */ #define TLINE_TX_AIS 0x0010 /* sending AIS = blue alarm */ #define TLINE_LOF 0x0020 /* near end LOF = red alarm */ #define TLINE_LOS 0x0040 /* near end loss of Signal */ #define TLINE_LOOP 0x0080 /* near end is looped */ #define T1LINE_RX_TS16_AIS 0x0100 /* near end receiving TS16 AIS */ #define T1LINE_RX_TS16_LOMF 0x0200 /* near end sending TS16 LOMF */ #define T1LINE_TX_TS16_LOMF 0x0400 /* near end receiving TS16 LOMF */ #define T1LINE_RX_TEST 0x0800 /* near end receiving QRS Signal */ #define T1LINE_SEF 0x1000 /* near end severely errored frame */ #define T3LINE_RX_IDLE 0x0100 /* near end receiving IDLE signal */ #define T3LINE_SEF 0x0200 /* near end severely errored frame */ /* SNMP Loopback Status */ #define TLOOP_NONE 0x01 /* no loopback */ #define TLOOP_NEAR_PAYLOAD 0x02 /* near end payload loopback */ #define TLOOP_NEAR_LINE 0x04 /* near end line loopback */ #define TLOOP_NEAR_OTHER 0x08 /* near 
end looped somehow */ #define TLOOP_NEAR_INWARD 0x10 /* near end looped inward */ #define TLOOP_FAR_PAYLOAD 0x20 /* far end payload loopback */ #define TLOOP_FAR_LINE 0x40 /* far end line loopback */ /* event counters record interesting statistics */ struct event_cntrs { struct timeval reset_time; /* time when cntrs were reset */ u_int64_t ibytes; /* Rx bytes with good status */ u_int64_t obytes; /* Tx bytes */ u_int64_t ipackets; /* Rx packets with good status */ u_int64_t opackets; /* Tx packets */ u_int32_t ierrors; /* Rx packets with bad status */ u_int32_t oerrors; /* Tx packets with bad status */ u_int32_t idiscards; /* Rx packets discarded */ u_int32_t odiscards; /* Tx packets discarded */ u_int32_t fifo_over; /* Rx fifo overruns */ u_int32_t fifo_under; /* Tx fifo underruns */ u_int32_t missed; /* Rx pkts missed: no DMA descs */ u_int32_t overruns; /* Rx pkts missed: fifo overrun */ u_int32_t fdl_pkts; /* Rx T1 Facility Data Link pkts */ u_int32_t crc_errs; /* Rx T1 frame CRC errors */ u_int32_t lcv_errs; /* Rx T1 T3 Line Coding Violation */ u_int32_t frm_errs; /* Rx T1 T3 Frame bit errors */ u_int32_t febe_errs; /* Rx T1 T3 Far End Bit Errors */ u_int32_t par_errs; /* Rx T3 P-bit parity errors */ u_int32_t cpar_errs; /* Rx T3 C-bit parity errors */ u_int32_t mfrm_errs; /* Rx T3 Multi-frame bit errors */ u_int32_t rxdma; /* Rx out of kernel buffers */ u_int32_t txdma; /* Tx out of DMA descriptors */ u_int32_t lck_watch; /* try_lock conflict in watchdog */ u_int32_t lck_ioctl; /* try_lock conflict in ioctl */ u_int32_t lck_intr; /* try_lock conflict in interrupt */ }; /* sc->status is the READ ONLY status of the card. */ /* Accessed using socket IO control calls or netgraph control messages. */ struct status { struct iohdr iohdr; /* common ioctl header */ u_int32_t card_type; /* PCI device number */ u_int16_t ieee[3]; /* IEEE MAC-addr from Tulip SROM */ u_int16_t oper_status; /* actual state: up, down, test */ u_int32_t tx_speed; /* measured TX bits/sec */ u_int32_t cable_type; /* SSI only: cable type */ u_int32_t line_pkg; /* actual line pkg in use */ u_int32_t line_prot; /* actual line proto in use */ u_int32_t ticks; /* incremented by watchdog @ 1 Hz */ struct event_cntrs cntrs; /* event counters */ union { struct hssi_snmp hssi; /* data for RFC-????
HSSI MIB */ struct t3_snmp t3; /* data for RFC-2496 T3 MIB */ struct ssi_snmp ssi; /* data for RFC-1659 RS232 MIB */ struct t1_snmp t1; /* data for RFC-2495 T1 MIB */ } snmp; }; /* line protocol package codes fnobl */ #define PKG_RAWIP 1 /* driver yyyyy */ #define PKG_SPPP 2 /* fbsd, nbsd, obsd yyynn */ #define PKG_P2P 3 /* bsd/os nnnyn */ #define PKG_NG 4 /* fbsd ynnnn */ #define PKG_GEN_HDLC 5 /* linux nnnny */ /* line protocol codes fnobl */ #define PROT_PPP 1 /* Point-to-Point Protocol yyyyy */ #define PROT_C_HDLC 2 /* Cisco HDLC Protocol yyyyy */ #define PROT_FRM_RLY 3 /* Frame Relay Protocol ynnyy */ #define PROT_X25 4 /* X.25/LAPB Protocol nnnny */ #define PROT_ETH_HDLC 5 /* raw Ether pkts in HDLC nnnny */ #define PROT_IP_HDLC 6 /* raw IP4/6 pkts in HDLC yyyyy */ /* oper_status codes (same as SNMP status codes) */ #define STATUS_UP 1 /* may/will tx/rx pkts */ #define STATUS_DOWN 2 /* can't/won't tx/rx pkts */ #define STATUS_TEST 3 /* currently not used */ struct synth /* programmable oscillator params */ { unsigned n :7; /* numerator (3..127) */ unsigned m :7; /* denominator (3..127) */ unsigned v :1; /* mul by 1|8 */ unsigned x :2; /* div by 1|2|4|8 */ unsigned r :2; /* div by 1|2|4|8 */ unsigned prescale :13; /* log(final divisor): 2, 4 or 9 */ } __attribute__ ((packed)); #define SYNTH_FREF 20e6 /* reference xtal = 20 MHz */ #define SYNTH_FMIN 50e6 /* internal VCO min 50 MHz */ #define SYNTH_FMAX 250e6 /* internal VCO max 250 MHz */ /* sc->config is the READ/WRITE configuration of the card. */ /* Accessed using socket IO control calls or netgraph control messages. */ struct config { struct iohdr iohdr; /* common ioctl header */ u_int32_t crc_len; /* ALL: CRC-16 or CRC-32 or none */ u_int32_t loop_back; /* ALL: many kinds of loopbacks */ u_int32_t tx_clk_src; /* T1, HSSI: ST, RT, int, ext */ u_int32_t format; /* T3, T1: ckt framing format */ u_int32_t time_slots; /* T1: 64Kb time slot config */ u_int32_t cable_len; /* T3, T1: cable length in meters */ u_int32_t scrambler; /* T3: payload scrambler config */ u_int32_t dte_dce; /* SSI, HSSIc: drive TXCLK */ struct synth synth; /* SSI, HSSIc: synth oscil params */ u_int32_t rx_gain; /* T1: receiver gain limit 0-50 dB */ u_int32_t tx_pulse; /* T1: transmitter pulse shape */ u_int32_t tx_lbo; /* T1: transmitter atten 0-22.5 dB */ u_int32_t debug; /* ALL: extra printout */ u_int32_t line_pkg; /* ALL: use this line pkg */ u_int32_t line_prot; /* SPPP: use this line proto */ u_int32_t keep_alive; /* SPPP: use keep-alive packets */ }; #define CFG_CRC_0 0 /* no CRC */ #define CFG_CRC_16 2 /* X^16+X^12+X^5+1 (default) */ #define CFG_CRC_32 4 /* X^32+X^26+X^23+X^22+X^16+X^12+ */ /* X^11+X^10+X^8+X^7+X^5+X^4+X^2+X+1 */ #define CFG_LOOP_NONE 1 /* SNMP don't loop back anything */ #define CFG_LOOP_PAYLOAD 2 /* SNMP loop outward thru framer */ #define CFG_LOOP_LINE 3 /* SNMP loop outward thru LIU */ #define CFG_LOOP_OTHER 4 /* SNMP loop inward thru LIU */ #define CFG_LOOP_INWARD 5 /* SNMP loop inward thru framer */ #define CFG_LOOP_DUAL 6 /* SNMP loop inward & outward */ #define CFG_LOOP_TULIP 16 /* ALL: loop inward thru Tulip */ #define CFG_LOOP_PINS 17 /* HSSIc, SSI: loop inward-pins */ #define CFG_LOOP_LL 18 /* HSSI, SSI: assert LA/LL mdm pin */ #define CFG_LOOP_RL 19 /* HSSI, SSI: assert LB/RL mdm pin */ #define CFG_CLKMUX_ST 1 /* TX clk <- Send timing */ #define CFG_CLKMUX_INT 2 /* TX clk <- internal source */ #define CFG_CLKMUX_RT 3 /* TX clk <- Receive (loop) timing */ #define CFG_CLKMUX_EXT 4 /* TX clk <- ext connector */ /* values 0-31 are 
Bt8370 CR0 register values (LSB is zero if E1). */ /* values 32-99 are reserved for other T1E1 formats, (even number if E1) */ /* values 100 and up are used for T3 frame formats. */ #define CFG_FORMAT_T1SF 9 /* T1-SF AMI */ #define CFG_FORMAT_T1ESF 27 /* T1-ESF+CRC B8ZS X^6+X+1 */ #define CFG_FORMAT_E1FAS 0 /* E1-FAS HDB3 TS0 */ #define CFG_FORMAT_E1FASCRC 8 /* E1-FAS+CRC HDB3 TS0 X^4+X+1 */ #define CFG_FORMAT_E1FASCAS 16 /* E1-FAS +CAS HDB3 TS0 & TS16 */ #define CFG_FORMAT_E1FASCRCCAS 24 /* E1-FAS+CRC+CAS HDB3 TS0 & TS16 */ #define CFG_FORMAT_E1NONE 32 /* E1-NO framing HDB3 */ #define CFG_FORMAT_T3CPAR 100 /* T3-C-Bit par B3ZS */ #define CFG_FORMAT_T3M13 101 /* T3-M13 format B3ZS */ /* format aliases that improve code readability */ #define FORMAT_T1ANY ((sc->config.format & 1)==1) #define FORMAT_E1ANY ((sc->config.format & 1)==0) #define FORMAT_E1CAS ((sc->config.format & 0x11)==0x10) #define FORMAT_E1CRC ((sc->config.format & 0x09)==0x08) #define FORMAT_E1NONE (sc->config.format == CFG_FORMAT_E1NONE) #define FORMAT_T1ESF (sc->config.format == CFG_FORMAT_T1ESF) #define FORMAT_T1SF (sc->config.format == CFG_FORMAT_T1SF) #define FORMAT_T3CPAR (sc->config.format == CFG_FORMAT_T3CPAR) #define CFG_SCRAM_OFF 1 /* DS3 payload scrambler off */ #define CFG_SCRAM_DL_KEN 2 /* DS3 DigitalLink/Kentrox X^43+1 */ #define CFG_SCRAM_LARS 3 /* DS3 Larscom X^20+X^17+1 w/28ZS */ #define CFG_DTE 1 /* HSSIc, SSI: rcv TXCLK; rcv DCD */ #define CFG_DCE 2 /* HSSIc, SSI: drv TXCLK; drv DCD */ #define CFG_GAIN_SHORT 0x24 /* 0-20 dB of equalized gain */ #define CFG_GAIN_MEDIUM 0x2C /* 0-30 dB of equalized gain */ #define CFG_GAIN_LONG 0x34 /* 0-40 dB of equalized gain */ #define CFG_GAIN_EXTEND 0x3F /* 0-64 dB of equalized gain */ #define CFG_GAIN_AUTO 0xFF /* auto-set based on cable length */ #define CFG_PULSE_T1DSX0 0 /* T1 DSX 0- 40 meters */ #define CFG_PULSE_T1DSX1 2 /* T1 DSX 40- 80 meters */ #define CFG_PULSE_T1DSX2 4 /* T1 DSX 80-120 meters */ #define CFG_PULSE_T1DSX3 6 /* T1 DSX 120-160 meters */ #define CFG_PULSE_T1DSX4 8 /* T1 DSX 160-200 meters */ #define CFG_PULSE_E1COAX 10 /* E1 75 ohm coax pair */ #define CFG_PULSE_E1TWIST 12 /* E1 120 ohm twisted pairs */ #define CFG_PULSE_T1CSU 14 /* T1 CSU 200-2000 meters; set LBO */ #define CFG_PULSE_AUTO 0xFF /* auto-set based on cable length */ #define CFG_LBO_0DB 0 /* T1CSU LBO = 0.0 dB; FCC opt A */ #define CFG_LBO_7DB 16 /* T1CSU LBO = 7.5 dB; FCC opt B */ #define CFG_LBO_15DB 32 /* T1CSU LBO = 15.0 dB; FCC opt C */ #define CFG_LBO_22DB 48 /* T1CSU LBO = 22.5 dB; final span */ #define CFG_LBO_AUTO 0xFF /* auto-set based on cable length */ struct ioctl { struct iohdr iohdr; /* common ioctl header */ u_int32_t cmd; /* command */ u_int32_t address; /* command address */ u_int32_t data; /* command data */ char *ucode; /* user-land address of ucode */ }; #define IOCTL_RW_PCI 1 /* RW: Tulip PCI config registers */ #define IOCTL_RW_CSR 2 /* RW: Tulip Control & Status Regs */ #define IOCTL_RW_SROM 3 /* RW: Tulip Serial Rom */ #define IOCTL_RW_BIOS 4 /* RW: Tulip Boot rom */ #define IOCTL_RW_MII 5 /* RW: MII registers */ #define IOCTL_RW_FRAME 6 /* RW: Framer registers */ #define IOCTL_WO_SYNTH 7 /* WO: Synthesized oscillator */ #define IOCTL_WO_DAC 8 /* WO: Digital/Analog Converter */ #define IOCTL_XILINX_RESET 16 /* reset Xilinx: all FFs set to 0 */ #define IOCTL_XILINX_ROM 17 /* load Xilinx program from ROM */ #define IOCTL_XILINX_FILE 18 /* load Xilinx program from file */ #define IOCTL_SET_STATUS 50 /* set mdm ctrl bits (internal use)*/ #define IOCTL_SNMP_SEND 
51 /* trunk MIB send code */ #define IOCTL_SNMP_LOOP 52 /* trunk MIB loop configuration */ #define IOCTL_SNMP_SIGS 53 /* RS232-like modem control sigs */ #define IOCTL_RESET_CNTRS 54 /* reset event counters */ /* storage for these strings is allocated here! */ static const char *ssi_cables[] = { "V.10/EIA423", "V.11/EIA530A", "RESERVED", "X.21", "V.35", "V.36/EIA449", "V.28/EIA232", "NO CABLE", NULL, }; /***************************************************************************/ /* Declarations above here are shared with the user lmcconfig program. */ /* Declarations below here are private to the kernel device driver. */ /***************************************************************************/ #if (_KERNEL || KERNEL || __KERNEL__) #define SNDQ_MAXLEN 32 /* packets awaiting transmission */ #define DESCS_PER_PKT 4 /* DMA descriptors per TX pkt */ #define NUM_TX_DESCS (DESCS_PER_PKT * SNDQ_MAXLEN) /* Increase DESCS_PER_PKT if status.cntrs.txdma increments. */ /* A Tulip DMA descriptor can point to two chunks of memory. * Each chunk has a max length of 2047 bytes (ask the VMS guys...). * 2047 isn't a multiple of a cache line size (32 bytes typically). * So back off to 2048-32 = 2016 bytes per chunk (2 chunks per descr). */ #define MAX_CHUNK_LEN 2016 #define MAX_DESC_LEN (2 * MAX_CHUNK_LEN) /* Tulip DMA descriptor; THIS STRUCT MUST MATCH THE HARDWARE */ struct dma_desc { u_int32_t status; /* hardware->to->software */ #if (BYTE_ORDER == LITTLE_ENDIAN) /* left-to-right packing by compiler */ unsigned length1:11; /* buffer1 length */ unsigned length2:11; /* buffer2 length */ unsigned control:10; /* software->to->hardware */ #else /* right-to-left packing by compiler */ unsigned control:10; /* software->to->hardware */ unsigned length2:11; /* buffer2 length */ unsigned length1:11; /* buffer1 length */ #endif u_int32_t address1; /* buffer1 bus address */ u_int32_t address2; /* buffer2 bus address */ bus_dmamap_t map; /* bus dmamap for this descriptor */ # define TLP_BUS_DSL_VAL (sizeof(bus_dmamap_t) & TLP_BUS_DSL) } __attribute__ ((packed)); /* Tulip DMA descriptor status bits */ #define TLP_DSTS_OWNER 0x80000000 #define TLP_DSTS_RX_DESC_ERR 0x00004000 #define TLP_DSTS_RX_FIRST_DESC 0x00000200 #define TLP_DSTS_RX_LAST_DESC 0x00000100 #define TLP_DSTS_RX_MII_ERR 0x00000008 #define TLP_DSTS_RX_DRIBBLE 0x00000004 #define TLP_DSTS_TX_UNDERRUN 0x00000002 #define TLP_DSTS_RX_OVERRUN 0x00000001 /* not documented in rev AF */ #define TLP_DSTS_RX_BAD (TLP_DSTS_RX_MII_ERR | \ TLP_DSTS_RX_DRIBBLE | \ TLP_DSTS_RX_DESC_ERR | \ TLP_DSTS_RX_OVERRUN) /* Tulip DMA descriptor control bits */ #define TLP_DCTL_TX_INTERRUPT 0x0200 #define TLP_DCTL_TX_LAST_SEG 0x0100 #define TLP_DCTL_TX_FIRST_SEG 0x0080 #define TLP_DCTL_TX_NO_CRC 0x0010 #define TLP_DCTL_END_RING 0x0008 #define TLP_DCTL_TX_NO_PAD 0x0002 /* DMA descriptors are kept in a ring. * Ring is empty when (read == write). * Ring is full when (read == wrap(write+1)), * The ring also contains a tailq of data buffers. 
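The empty/full rules stated in the comment above can be illustrated with a minimal sketch. This is not code from the driver; ring_wrap(), ring_is_empty() and ring_is_full() are hypothetical helpers written against the struct desc_ring defined just below:

static struct dma_desc *
ring_wrap(struct desc_ring *ring, struct dma_desc *d)
{
	/* step past the last descriptor back to the first */
	return (d > ring->last) ? ring->first : d;
}

static int
ring_is_empty(struct desc_ring *ring)
{
	return (ring->read == ring->write);	/* nothing left to read */
}

static int
ring_is_full(struct desc_ring *ring)
{
	/* advancing the write pointer once more would collide with read */
	return (ring->read == ring_wrap(ring, ring->write + 1));
}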
*/ struct desc_ring { struct dma_desc *read; /* next descriptor to be read */ struct dma_desc *write; /* next descriptor to be written */ struct dma_desc *first; /* first descriptor in ring */ struct dma_desc *last; /* last descriptor in ring */ struct dma_desc *temp; /* temporary write pointer for tx */ u_int32_t dma_addr; /* bus address for desc array */ int size_descs; /* bus_dmamap_sync needs this */ int num_descs; /* used to set rx quota */ #if BSD struct mbuf *head; /* tail-queue of mbufs */ struct mbuf *tail; bus_dma_tag_t tag; /* bus_dma tag for desc array */ bus_dmamap_t map; /* bus_dma map for desc array */ bus_dma_segment_t segs[2]; /* bus_dmamap_load() or bus_dmamem_alloc() */ int nsegs; /* bus_dmamap_load() or bus_dmamem_alloc() */ #endif }; /* break circular definition */ typedef struct softc softc_t; /* card-dependent methods */ struct card { void (* config)(softc_t *); void (* ident)(softc_t *); int (* watchdog)(softc_t *); /* must not sleep */ int (* ioctl)(softc_t *, struct ioctl *); /* can sleep */ }; /* flag bits in sc->flags */ #define FLAG_IFNET 0x00000002 /* IFNET is attached */ #define FLAG_NETDEV 0x00000004 /* NETDEV is registered */ #define FLAG_NETGRAPH 0x00000008 /* NETGRAPH is attached */ /* Accessing Tulip CSRs: * There are two ways: IO instruction (default) and memory reference. * IO refs are used if IOREF_CSR is defined; otherwise memory refs are used. * MEMORY REFERENCES DO NOT WORK in BSD/OS: page faults happen. */ #define IOREF_CSR 1 /* access Tulip CSRs with IO cycles if 1 */ #if defined(DEVICE_POLLING) # define DEV_POLL 1 #else # define DEV_POLL 0 #endif #if defined(ALTQ) && ALTQ # define ALTQ_PRESENT 1 #else # define ALTQ_PRESENT 0 #endif /* This is the instance data, or "software context" for the device driver. */ /* NetBSD, OpenBSD and BSD/OS want struct device first in the softc. */ /* FreeBSD wants struct ifnet first in the softc. 
*/ struct softc { /* State for kernel-resident Line Protocols */ #if IFNET struct ifnet *ifp; struct ifmedia ifm; /* hooks for ifconfig(8) */ # if NSPPP struct sppp *sppp; # elif P2P struct p2pcom p2pcom; struct p2pcom *p2p; # endif #endif #if NETGRAPH node_p ng_node; /* pointer to our node struct */ hook_p ng_hook; /* non-zero means NETGRAPH owns device */ struct ifaltq ng_sndq; struct ifaltq ng_fastq; #endif struct callout callout; /* watchdog needs this */ - struct device *dev; /* base device pointer */ + device_t dev; /* base device pointer */ bus_space_tag_t csr_tag; /* bus_space needs this */ bus_space_handle_t csr_handle;/* bus_space_needs this */ void *irq_cookie; /* bus_teardown_intr needs this */ struct resource *irq_res; /* bus_release_resource needs this */ int irq_res_id; /* bus_release_resource needs this */ struct resource *csr_res; /* bus_release_resource needs this */ int csr_res_id; /* bus_release resource needs this */ int csr_res_type; /* bus_release resource needs this */ struct mbuf *tx_mbuf; /* hang mbuf here while building dma descs */ # ifdef DEVICE_POLLING int quota; /* used for incoming packet flow control */ # endif struct mtx top_mtx; /* lock card->watchdog vs core_ioctl */ struct mtx bottom_mtx; /* lock for buf queues & descriptor rings */ /* Top-half state used by all card types; lock with top_lock, */ const char *dev_desc; /* string describing type of board */ struct status status; /* driver status lmcconfig can read */ struct config config; /* driver config lmcconfig can read/write */ struct card *card; /* card methods: config, ioctl, watchdog */ u_int32_t gpio_dir; /* s/w copy of GPIO direction register */ u_int16_t led_state; /* last value written to mii16 */ u_int32_t flags; /* driver-global flags */ /* Top-half state used by card-specific watchdogs; lock with top_lock. */ u_int32_t last_mii16; /* SSI, HSSI: MII reg 16 one second ago */ u_int32_t last_stat16; /* T3: framer reg 16 one second ago */ u_int32_t last_alm1; /* T1E1: framer reg 47 one second ago */ u_int32_t last_FEAC; /* last FEAC msg code received */ u_int32_t loop_timer; /* seconds until loopback expires */ /* Bottom-half state used by the interrupt code; lock with bottom_lock. 
*/ struct desc_ring txring; /* tx descriptor ring state */ struct desc_ring rxring; /* rx descriptor ring state */ }; /* end of softc */ /* Hide the minor differences between OS versions */ typedef void intr_return_t; # define READ_PCI_CFG(sc, addr) pci_read_config ((sc)->dev, addr, 4) # define WRITE_PCI_CFG(sc, addr, data) pci_write_config((sc)->dev, addr, data, 4) # define READ_CSR(csr) bus_space_read_4 (sc->csr_tag, sc->csr_handle, csr) # define WRITE_CSR(csr, val) bus_space_write_4(sc->csr_tag, sc->csr_handle, csr, val) # define NAME_UNIT device_get_nameunit(sc->dev) # define DRIVER_DEBUG ((sc->config.debug) || (sc->ifp->if_flags & IFF_DEBUG)) # define TOP_TRYLOCK mtx_trylock(&sc->top_mtx) # define TOP_UNLOCK mtx_unlock (&sc->top_mtx) # define BOTTOM_TRYLOCK mtx_trylock(&sc->bottom_mtx) # define BOTTOM_UNLOCK mtx_unlock (&sc->bottom_mtx) # define CHECK_CAP priv_check(curthread, PRIV_DRIVER) # define DISABLE_INTR /* nothing */ # define ENABLE_INTR /* nothing */ # define IRQ_NONE /* nothing */ # define IRQ_HANDLED /* nothing */ # define IFP2SC(ifp) (ifp)->if_softc # define COPY_BREAK MHLEN # define SLEEP(usecs) tsleep(sc, PCATCH | PZERO, DEVICE_NAME, 1+(usecs/tick)) # define DMA_SYNC(map, size, flags) bus_dmamap_sync(ring->tag, map, flags) # define DMA_LOAD(map, addr, size) bus_dmamap_load(ring->tag, map, addr, size, fbsd_dmamap_load, ring, 0) # if (NBPFILTER != 0) # define LMC_BPF_MTAP(mbuf) BPF_MTAP(sc->ifp, mbuf) # define LMC_BPF_ATTACH(dlt, len) bpfattach(sc->ifp, dlt, len) # define LMC_BPF_DETACH bpfdetach(sc->ifp) # endif # define IF_DROP(ifq) _IF_DROP(ifq) # define IF_QFULL(ifq) _IF_QFULL(ifq) # define IFF_RUNNING IFF_DRV_RUNNING #if (NBPFILTER == 0) # define LMC_BPF_MTAP(mbuf) /* nothing */ # define LMC_BPF_ATTACH(dlt, len) /* nothing */ # define LMC_BPF_DETACH /* nothing */ #endif #define HSSI_DESC "SBE/LMC HSSI Card" #define T3_DESC "SBE/LMC T3 Card" #define SSI_DESC "SBE/LMC SSI Card" #define T1E1_DESC "SBE/LMC T1E1 Card" /* procedure prototypes */ static void shift_srom_bits(softc_t *, u_int32_t, u_int32_t); static u_int16_t read_srom(softc_t *, u_int8_t); static void write_srom(softc_t *, u_int8_t, u_int16_t); static u_int8_t read_bios(softc_t *, u_int32_t); static void write_bios_phys(softc_t *, u_int32_t, u_int8_t); static void write_bios(softc_t *, u_int32_t, u_int8_t); static void erase_bios(softc_t *); static void shift_mii_bits(softc_t *, u_int32_t, u_int32_t); static u_int16_t read_mii(softc_t *, u_int8_t); static void write_mii(softc_t *, u_int8_t, u_int16_t); static void set_mii16_bits(softc_t *, u_int16_t); static void clr_mii16_bits(softc_t *, u_int16_t); static void set_mii17_bits(softc_t *, u_int16_t); static void clr_mii17_bits(softc_t *, u_int16_t); static void led_off(softc_t *, u_int16_t); static void led_on(softc_t *, u_int16_t); static void led_inv(softc_t *, u_int16_t); static void write_framer(softc_t *, u_int16_t, u_int8_t); static u_int8_t read_framer(softc_t *, u_int16_t); static void make_gpio_input(softc_t *, u_int32_t); static void make_gpio_output(softc_t *, u_int32_t); static u_int32_t read_gpio(softc_t *); static void set_gpio_bits(softc_t *, u_int32_t); static void clr_gpio_bits(softc_t *, u_int32_t); static void reset_xilinx(softc_t *); static void load_xilinx_from_rom(softc_t *); static int load_xilinx_from_file(softc_t *, char *, u_int32_t); static void shift_synth_bits(softc_t *, u_int32_t, u_int32_t); static void write_synth(softc_t *, struct synth *); static void write_dac(softc_t *, u_int16_t); static void hssi_config(softc_t *); static 
void hssi_ident(softc_t *); static int hssi_watchdog(softc_t *); static int hssi_ioctl(softc_t *, struct ioctl *); static void t3_config(softc_t *); static void t3_ident(softc_t *); static int t3_watchdog(softc_t *); static void t3_send_dbl_feac(softc_t *, int, int); static int t3_ioctl(softc_t *, struct ioctl *); static void ssi_config(softc_t *); static void ssi_ident(softc_t *); static int ssi_watchdog(softc_t *); static int ssi_ioctl(softc_t *, struct ioctl *); static void t1_config(softc_t *); static void t1_ident(softc_t *); static int t1_watchdog(softc_t *); static void t1_send_bop(softc_t *, int); static int t1_ioctl(softc_t *, struct ioctl *); #if IFNET static void lmc_raw_input(struct ifnet *, struct mbuf *); #endif /* IFNET */ #if BSD static void mbuf_enqueue(struct desc_ring *, struct mbuf *); static struct mbuf* mbuf_dequeue(struct desc_ring *); static void fbsd_dmamap_load(void *, bus_dma_segment_t *, int, int); static int create_ring(softc_t *, struct desc_ring *, int); static void destroy_ring(softc_t *, struct desc_ring *); static int rxintr_cleanup(softc_t *); static int rxintr_setup(softc_t *); static int txintr_cleanup(softc_t *); static int txintr_setup_mbuf(softc_t *, struct mbuf *); static int txintr_setup(softc_t *); #endif /* BSD */ static void check_intr_status(softc_t *); static void core_interrupt(void *, int); static void user_interrupt(softc_t *, int); #if BSD # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) static int fbsd_poll(struct ifnet *, enum poll_cmd, int); # endif static intr_return_t bsd_interrupt(void *); #endif /* BSD */ static void set_status(softc_t *, int); #if P2P static int p2p_getmdm(struct p2pcom *, caddr_t); static int p2p_mdmctl(struct p2pcom *, int); #endif #if NSPPP static void sppp_tls(struct sppp *); static void sppp_tlf(struct sppp *); #endif static void config_proto(softc_t *, struct config *); static int core_ioctl(softc_t *, u_long, caddr_t); static void core_watchdog(softc_t *); #if IFNET static int lmc_raw_ioctl(struct ifnet *, u_long, caddr_t); static int lmc_ifnet_ioctl(struct ifnet *, u_long, caddr_t); static void lmc_ifnet_start(struct ifnet *); static int lmc_raw_output(struct ifnet *, struct mbuf *, const struct sockaddr *, struct route *); static void setup_ifnet(struct ifnet *); static int lmc_ifnet_attach(softc_t *); static void lmc_ifnet_detach(softc_t *); #endif /* IFNET */ #if NETGRAPH static int ng_constructor(node_p); static int ng_rcvmsg(node_p, item_p, hook_p); static int ng_shutdown(node_p); static int ng_newhook(node_p, hook_p, const char *); static int ng_connect(hook_p); static int ng_rcvdata(hook_p, item_p); static int ng_disconnect(hook_p); # if (IFNET == 0) static void ng_watchdog(void *); # endif static int ng_attach(softc_t *); static void ng_detach(softc_t *); #endif /* NETGRAPH */ static int startup_card(softc_t *); static void shutdown_card(void *); static int attach_card(softc_t *, const char *); static void detach_card(softc_t *); static int fbsd_probe(device_t); static int fbsd_detach(device_t); static int fbsd_shutdown(device_t); static int fbsd_attach(device_t); #endif /* KERNEL */ #endif /* IF_LMC_H */ Index: user/ngie/release-pkg-fix-tests/sys/dev/nvram2env/nvram2env.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/nvram2env/nvram2env.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/nvram2env/nvram2env.c (revision 299055) @@ -1,318 +1,318 @@ /*- * Copyright (c) 2010 Aleksandr Rybalko. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Pseudo driver to copy the NVRAM settings from various sources * into the kernel environment. * * Drivers (such as ethernet devices) can then use environment * variables to set default parameters. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define nvram2env_read_1(sc, reg) \ bus_space_read_1((sc)->sc_bt, (sc)->sc_bh,(reg)) #define nvram2env_read_2(sc, reg) \ bus_space_read_2((sc)->sc_bt, (sc)->sc_bh,(reg)) #define nvram2env_read_4(sc, reg) \ bus_space_read_4((sc)->sc_bt, (sc)->sc_bh,(reg)) #define nvram2env_write_1(sc, reg, val) \ bus_space_write_1((sc)->sc_bt, (sc)->sc_bh, \ (reg), (val)) #define nvram2env_write_2(sc, reg, val) \ bus_space_write_2((sc)->sc_bt, (sc)->sc_bh, \ (reg), (val)) #define nvram2env_write_4(sc, reg, val) \ bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, \ (reg), (val)) struct nvram2env_softc { bus_space_tag_t bst; bus_space_handle_t bsh; bus_addr_t addr; int need_swap; uint32_t sig; uint32_t flags; #define NVRAM_FLAGS_NOCHECK 0x0001 /* Do not check(CRC or somthing else)*/ #define NVRAM_FLAGS_GENERIC 0x0002 /* Format Generic, skip 4b and read */ #define NVRAM_FLAGS_BROADCOM 0x0004 /* Format Broadcom, use struct nvram */ #define NVRAM_FLAGS_UBOOT 0x0008 /* Format Generic, skip 4b of CRC and read */ uint32_t maxsize; uint32_t crc; }; static int nvram2env_attach(device_t); static int nvram2env_probe(device_t); #define NVRAM_MAX_SIZE 0x10000 static void nvram2env_identify(driver_t * drv, device_t parent) { int i, ivar; for (i = 0; !resource_int_value("nvram", i, "base", &ivar); i++) BUS_ADD_CHILD(parent, 0, "nvram2env", i); } static int nvram2env_probe(device_t dev) { uint32_t i, ivar, sig; struct nvram2env_softc * sc = device_get_softc(dev); sc->bst = mips_bus_space_generic; if (resource_int_value("nvram", device_get_unit(dev), "sig", &sc->sig) != 0 || sc->sig == 0) sc->sig = 0x48534c46; if (resource_int_value("nvram", device_get_unit(dev), "maxsize", &sc->maxsize) != 0 || sc->maxsize == 0) sc->maxsize = NVRAM_MAX_SIZE; if (resource_int_value("nvram", device_get_unit(dev), "flags", &sc->flags) != 0 || sc->flags == 0) sc->flags = NVRAM_FLAGS_GENERIC; for (i = 0; i < 2; i ++) { if (resource_int_value("nvram", device_get_unit(dev), 
(!i)?"base":"fallbackbase", &ivar) != 0 || ivar == 0) continue; - sc->addr = MIPS_PHYS_TO_KSEG1(ivar); + sc->addr = ivar; if (bootverbose) device_printf(dev, "base=0x%08x sig=0x%08x " "maxsize=0x%08x flags=0x%08x\n", sc->addr, sc->sig, sc->maxsize, sc->flags); if (bus_space_map(sc->bst, sc->addr, sc->maxsize, 0, &sc->bsh) != 0) continue; sig = bus_space_read_4(sc->bst, sc->bsh, 0); if ( sig == sc->sig /*FLSH*/) { device_printf(dev, "Found NVRAM at %#x\n", (uint32_t)ivar); sc->need_swap = 0; goto unmap_done; } else if ( htole32(sig) == sc->sig /*HSLF*/) { device_printf(dev, "Found NVRAM at %#x\n", (uint32_t)ivar); sc->need_swap = 1; goto unmap_done; } else if (sc->flags & NVRAM_FLAGS_UBOOT) { device_printf(dev, "Use NVRAM at %#x\n", (uint32_t)ivar); sc->crc = sig; goto unmap_done; } bus_space_unmap(sc->bst, sc->bsh, NVRAM_MAX_SIZE); } sc->bst = 0; sc->bsh = 0; sc->addr = 0; return (ENXIO); unmap_done: bus_space_unmap(sc->bst, sc->bsh, NVRAM_MAX_SIZE); device_set_desc(dev, "NVRAM to ENV pseudo-device"); return (BUS_PROBE_SPECIFIC); } struct nvram { u_int32_t sig; u_int32_t size; u_int32_t unknown1; u_int32_t unknown2; u_int32_t unknown3; char data[]; }; static uint32_t read_4(struct nvram2env_softc * sc, int offset) { if (sc->need_swap) return (bswap32(bus_space_read_4(sc->bst, sc->bsh, offset))); else return (bus_space_read_4(sc->bst, sc->bsh, offset)); } static int nvram2env_attach(device_t dev) { struct nvram2env_softc * sc = device_get_softc(dev); struct nvram * nv; char *pair, *value, *assign; uint32_t sig, size, i; if (sc->bst == 0 || sc->addr == 0) return (ENXIO); if (bus_space_map(sc->bst, sc->addr, NVRAM_MAX_SIZE, 0, &sc->bsh) != 0) return (ENXIO); sig = read_4(sc, 0); size = read_4(sc, 4); #if 1 if (bootverbose) device_printf(dev, " size=0x%05x maxsize=0x%05x\n", size, sc->maxsize); #endif size = (size > sc->maxsize)?sc->maxsize:size; if (sig == sc->sig || (sc->flags & NVRAM_FLAGS_UBOOT)) { /* align and shift size to 32bit size*/ size += 3; size >>= 2; nv = malloc(size<<2, M_DEVBUF, M_WAITOK | M_ZERO); if (!nv) return (ENOMEM); for (i = 0; i < size; i ++) ((uint32_t *)nv)[i] = read_4(sc, i<<2); if (sc->flags & NVRAM_FLAGS_BROADCOM) { device_printf(dev, "sig = %#x\n", nv->sig); device_printf(dev, "size = %#x\n", nv->size); } if (!(sc->flags & NVRAM_FLAGS_NOCHECK)) { /* TODO: need checksum verification */ } if (sc->flags & NVRAM_FLAGS_GENERIC) pair = (char*)nv+4; if (sc->flags & NVRAM_FLAGS_UBOOT) pair = (char*)nv+4; else if (sc->flags & NVRAM_FLAGS_BROADCOM) pair = (char*)nv+20; else pair = (char*)nv+4; for ( ; (u_int32_t)pair < ((u_int32_t)nv + size - 4); pair = pair + strlen(pair) + 1 + strlen(value) + 1 ) { if (pair && strlen(pair)) { #if 0 printf("ENV: %s\n", pair); #endif /* hint.nvram.0. 
*/ assign = strchr(pair,'='); assign[0] = '\0'; value = assign+1; #if 1 if (bootverbose) printf("ENV: %s=%s\n", pair, value); #endif kern_setenv(pair, value); if (strcasecmp(pair, "WAN_MAC_ADDR") == 0) { /* Alias for MAC address of eth0 */ if (bootverbose) printf("ENV: aliasing " "WAN_MAC_ADDR to ethaddr" " = %s\n", value); kern_setenv("ethaddr", value); } else if (strcasecmp(pair, "LAN_MAC_ADDR") == 0){ /* Alias for MAC address of eth1 */ if (bootverbose) printf("ENV: aliasing " "LAN_MAC_ADDR to eth1addr" " = %s\n", value); kern_setenv("eth1addr", value); } if (strcmp(pair, "bootverbose") == 0) bootverbose = strtoul(value, 0, 0); if (strcmp(pair, "boothowto" ) == 0) boothowto = strtoul(value, 0, 0); } else break; } free(nv, M_DEVBUF); } bus_space_unmap(sc->bst, sc->bsh, NVRAM_MAX_SIZE); return (0); } static device_method_t nvram2env_methods[] = { /* Device interface */ DEVMETHOD(device_identify, nvram2env_identify), DEVMETHOD(device_probe, nvram2env_probe), DEVMETHOD(device_attach, nvram2env_attach), DEVMETHOD_END }; static driver_t nvram2env_driver = { "nvram2env", nvram2env_methods, sizeof(struct nvram2env_softc), }; static devclass_t nvram2env_devclass; DRIVER_MODULE(nvram2env, nexus, nvram2env_driver, nvram2env_devclass, 0, 0); Index: user/ngie/release-pkg-fix-tests/sys/dev/pci/pci.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/dev/pci/pci.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/dev/pci/pci.c (revision 299055) @@ -1,5868 +1,5871 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) #include #endif #include #include #include #include #ifdef PCI_IOV #include #include #endif #include #include #include #include #include "pcib_if.h" #include "pci_if.h" #define PCIR_IS_BIOS(cfg, reg) \ (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \ ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1)) static int pci_has_quirk(uint32_t devid, int quirk); static pci_addr_t pci_mapbase(uint64_t mapreg); static const char *pci_maptype(uint64_t mapreg); static int pci_maprange(uint64_t mapreg); static pci_addr_t pci_rombase(uint64_t mapreg); static int pci_romsize(uint64_t testval); static void pci_fixancient(pcicfgregs *cfg); static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); static int pci_porten(device_t dev); static int pci_memen(device_t dev); static void pci_assign_interrupt(device_t bus, device_t dev, int force_route); static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch); static int pci_probe(device_t dev); static int pci_attach(device_t dev); static int pci_detach(device_t dev); static void pci_load_vendor_data(void); static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc); static char *pci_describe_device(device_t dev); static int pci_modevent(module_t mod, int what, void *arg); static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg); static void pci_read_cap(device_t pcib, pcicfgregs *cfg); static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data); #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data); #endif static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); static void pci_mask_msix(device_t dev, u_int index); static void pci_unmask_msix(device_t dev, u_int index); static int pci_msi_blacklisted(void); static int pci_msix_blacklisted(void); static void pci_resume_msi(device_t dev); static void pci_resume_msix(device_t dev); static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq); static uint16_t pci_get_rid_method(device_t dev, device_t child); static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did); static device_method_t pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pci_probe), DEVMETHOD(device_attach, pci_attach), DEVMETHOD(device_detach, pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, pci_resume), /* Bus interface */ DEVMETHOD(bus_print_child, pci_print_child), DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), DEVMETHOD(bus_read_ivar, pci_read_ivar), DEVMETHOD(bus_write_ivar, pci_write_ivar), DEVMETHOD(bus_driver_added, pci_driver_added), DEVMETHOD(bus_setup_intr, pci_setup_intr), DEVMETHOD(bus_teardown_intr, pci_teardown_intr), DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag), DEVMETHOD(bus_get_resource_list,pci_get_resource_list), DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), DEVMETHOD(bus_delete_resource, pci_delete_resource), DEVMETHOD(bus_alloc_resource, pci_alloc_resource), 
DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_release_resource, pci_release_resource), DEVMETHOD(bus_activate_resource, pci_activate_resource), DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource), DEVMETHOD(bus_child_deleted, pci_child_deleted), DEVMETHOD(bus_child_detached, pci_child_detached), DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method), DEVMETHOD(bus_child_location_str, pci_child_location_str_method), DEVMETHOD(bus_remap_intr, pci_remap_intr_method), DEVMETHOD(bus_suspend_child, pci_suspend_child), DEVMETHOD(bus_resume_child, pci_resume_child), DEVMETHOD(bus_rescan, pci_rescan_method), /* PCI interface */ DEVMETHOD(pci_read_config, pci_read_config_method), DEVMETHOD(pci_write_config, pci_write_config_method), DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), DEVMETHOD(pci_enable_io, pci_enable_io_method), DEVMETHOD(pci_disable_io, pci_disable_io_method), DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), DEVMETHOD(pci_find_cap, pci_find_cap_method), DEVMETHOD(pci_find_extcap, pci_find_extcap_method), DEVMETHOD(pci_find_htcap, pci_find_htcap_method), DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), DEVMETHOD(pci_enable_msi, pci_enable_msi_method), DEVMETHOD(pci_enable_msix, pci_enable_msix_method), DEVMETHOD(pci_disable_msi, pci_disable_msi_method), DEVMETHOD(pci_remap_msix, pci_remap_msix_method), DEVMETHOD(pci_release_msi, pci_release_msi_method), DEVMETHOD(pci_msi_count, pci_msi_count_method), DEVMETHOD(pci_msix_count, pci_msix_count_method), DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method), DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method), DEVMETHOD(pci_get_rid, pci_get_rid_method), DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method), DEVMETHOD(pci_child_added, pci_child_added_method), #ifdef PCI_IOV DEVMETHOD(pci_iov_attach, pci_iov_attach_method), DEVMETHOD(pci_iov_detach, pci_iov_detach_method), DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method), #endif DEVMETHOD_END }; DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc)); static devclass_t pci_devclass; DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL); MODULE_VERSION(pci, 1); static char *pci_vendordata; static size_t pci_vendordata_size; struct pci_quirk { uint32_t devid; /* Vendor/device of the card */ int type; #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */ #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */ #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */ #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */ int arg1; int arg2; }; static const struct pci_quirk pci_quirks[] = { /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */ { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* As does the Serverworks OSB4 (the SMBus mapping register) */ { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, /* * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge * or the CMIC-SL (AKA ServerWorks GC_LE). 
*/ { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work on earlier Intel chipsets including * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. */ { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI doesn't work with devices behind the AMD 8131 HT-PCIX * bridge. */ { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, /* * MSI-X allocation doesn't work properly for devices passed through * by VMware up to at least ESXi 5.1. */ { 0x079015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCI/PCI-X */ { 0x07a015ad, PCI_QUIRK_DISABLE_MSIX, 0, 0 }, /* PCIe */ /* * Some virtualization environments emulate an older chipset * but support MSI just fine. QEMU uses the Intel 82440. */ { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, /* * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus * controller depending on SoftPciRst register (PM_IO 0x55 [7]). * It prevents us from attaching hpet(4) when the bit is unset. * Note this quirk only affects SB600 revision A13 and earlier. * For SB600 A21 and later, firmware must set the bit to hide it. * For SB700 and later, it is unused and hardcoded to zero. */ { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 }, /* * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug that * MSI interrupt does not assert if PCIM_CMD_INTxDIS bit of the * command register is set. */ { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't * issue MSI interrupts with PCIM_CMD_INTxDIS set either. */ { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */ { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */ { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */ { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */ { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */ { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */ { 0 } }; /* map register information */ #define PCI_MAPMEM 0x01 /* memory map */ #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ #define PCI_MAPPORT 0x04 /* port map */ struct devlist pci_devq; uint32_t pci_generation; uint32_t pci_numdevs = 0; static int pcie_chipset, pcix_chipset; /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters"); static int pci_enable_io_modes = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not\n\ enable these bits correctly. We'd like to do this all the time, but there\n\ are some peripherals that this causes problems with."); static int pci_do_realloc_bars = 0; SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN, &pci_do_realloc_bars, 0, "Attempt to allocate a new range for any BARs whose original " "firmware-assigned ranges fail to allocate during the initial device scan."); static int pci_do_power_nodriver = 0; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN, &pci_do_power_nodriver, 0, "Place a function into D3 state when no driver attaches to it. 0 means\n\ disable. 1 means conservatively place devices into D3 state. 
2 means\n\ aggressively place devices into D3 state. 3 means put absolutely everything\n\ in D3 state."); int pci_do_power_resume = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN, &pci_do_power_resume, 1, "Transition from D3 -> D0 on resume."); int pci_do_power_suspend = 1; SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN, &pci_do_power_suspend, 1, "Transition from D0 -> D3 on suspend."); static int pci_do_msi = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1, "Enable support for MSI interrupts"); static int pci_do_msix = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1, "Enable support for MSI-X interrupts"); static int pci_honor_msi_blacklist = 1; SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN, &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X"); #if defined(__i386__) || defined(__amd64__) static int pci_usb_takeover = 1; #else static int pci_usb_takeover = 0; #endif SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN, &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\ Disable this if you depend on BIOS emulation of USB devices, that is\n\ you use USB devices (like keyboard or mouse) but do not load USB drivers"); static int pci_clear_bars; SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0, "Ignore firmware-assigned resources for BARs."); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static int pci_clear_buses; SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0, "Ignore firmware-assigned bus numbers."); #endif static int pci_enable_ari = 1; SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari, 0, "Enable support for PCIe Alternative RID Interpretation"); static int pci_has_quirk(uint32_t devid, int quirk) { const struct pci_quirk *q; for (q = &pci_quirks[0]; q->devid; q++) { if (q->devid == devid && q->type == quirk) return (1); } return (0); } /* Find a device_t by bus/slot/function in domain 0 */ device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) { return (pci_find_dbsf(0, bus, slot, func)); } /* Find a device_t by domain/bus/slot/function */ device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.domain == domain) && (dinfo->cfg.bus == bus) && (dinfo->cfg.slot == slot) && (dinfo->cfg.func == func)) { return (dinfo->cfg.dev); } } return (NULL); } /* Find a device_t by vendor/device ID */ device_t pci_find_device(uint16_t vendor, uint16_t device) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if ((dinfo->cfg.vendor == vendor) && (dinfo->cfg.device == device)) { return (dinfo->cfg.dev); } } return (NULL); } device_t pci_find_class(uint8_t class, uint8_t subclass) { struct pci_devinfo *dinfo; STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { if (dinfo->cfg.baseclass == class && dinfo->cfg.subclass == subclass) { return (dinfo->cfg.dev); } } return (NULL); } static int pci_printf(pcicfgregs *cfg, const char *fmt, ...) 
{ va_list ap; int retval; retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, cfg->func); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /* return base address of memory or port map */ static pci_addr_t pci_mapbase(uint64_t mapreg) { if (PCI_BAR_MEM(mapreg)) return (mapreg & PCIM_BAR_MEM_BASE); else return (mapreg & PCIM_BAR_IO_BASE); } /* return map type of memory or port map */ static const char * pci_maptype(uint64_t mapreg) { if (PCI_BAR_IO(mapreg)) return ("I/O Port"); if (mapreg & PCIM_BAR_MEM_PREFETCH) return ("Prefetchable Memory"); return ("Memory"); } /* return log2 of map size decoded for memory or port map */ int pci_mapsize(uint64_t testval) { int ln2size; testval = pci_mapbase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return base address of device ROM */ static pci_addr_t pci_rombase(uint64_t mapreg) { return (mapreg & PCIM_BIOS_ADDR_MASK); } /* return log2 of map size decided for device ROM */ static int pci_romsize(uint64_t testval) { int ln2size; testval = pci_rombase(testval); ln2size = 0; if (testval != 0) { while ((testval & 1) == 0) { ln2size++; testval >>= 1; } } return (ln2size); } /* return log2 of address range supported by map register */ static int pci_maprange(uint64_t mapreg) { int ln2range = 0; if (PCI_BAR_IO(mapreg)) ln2range = 32; else switch (mapreg & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_32: ln2range = 32; break; case PCIM_BAR_MEM_1MB: ln2range = 20; break; case PCIM_BAR_MEM_64: ln2range = 64; break; } return (ln2range); } /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ static void pci_fixancient(pcicfgregs *cfg) { if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) return; /* PCI to PCI bridges use header type 1 */ if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; } /* extract header type specific config data */ static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: cfg->subvendor = REG(PCIR_SUBVEND_0, 2); cfg->subdevice = REG(PCIR_SUBDEV_0, 2); cfg->mingnt = REG(PCIR_MINGNT, 1); cfg->maxlat = REG(PCIR_MAXLAT, 1); cfg->nummaps = PCI_MAXMAPS_0; break; case PCIM_HDRTYPE_BRIDGE: cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); cfg->nummaps = PCI_MAXMAPS_1; break; case PCIM_HDRTYPE_CARDBUS: cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); cfg->subvendor = REG(PCIR_SUBVEND_2, 2); cfg->subdevice = REG(PCIR_SUBDEV_2, 2); cfg->nummaps = PCI_MAXMAPS_2; break; } #undef REG } /* read configuration header into pcicfgregs structure */ struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f) { #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) uint16_t vid, did; vid = REG(PCIR_VENDOR, 2); did = REG(PCIR_DEVICE, 2); if (vid != 0xffff) return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did)); return (NULL); } struct pci_devinfo * pci_alloc_devinfo_method(device_t dev) { return 
(malloc(sizeof(struct pci_devinfo), M_DEVBUF, M_WAITOK | M_ZERO)); } static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did) { struct pci_devinfo *devlist_entry; pcicfgregs *cfg; devlist_entry = PCI_ALLOC_DEVINFO(bus); cfg = &devlist_entry->cfg; cfg->domain = d; cfg->bus = b; cfg->slot = s; cfg->func = f; cfg->vendor = vid; cfg->device = did; cfg->cmdreg = REG(PCIR_COMMAND, 2); cfg->statreg = REG(PCIR_STATUS, 2); cfg->baseclass = REG(PCIR_CLASS, 1); cfg->subclass = REG(PCIR_SUBCLASS, 1); cfg->progif = REG(PCIR_PROGIF, 1); cfg->revid = REG(PCIR_REVID, 1); cfg->hdrtype = REG(PCIR_HDRTYPE, 1); cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); cfg->lattimer = REG(PCIR_LATTIMER, 1); cfg->intpin = REG(PCIR_INTPIN, 1); cfg->intline = REG(PCIR_INTLINE, 1); cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; cfg->hdrtype &= ~PCIM_MFDEV; STAILQ_INIT(&cfg->maps); cfg->iov = NULL; pci_fixancient(cfg); pci_hdrtypedata(pcib, b, s, f, cfg); if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) pci_read_cap(pcib, cfg); STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links); devlist_entry->conf.pc_sel.pc_domain = cfg->domain; devlist_entry->conf.pc_sel.pc_bus = cfg->bus; devlist_entry->conf.pc_sel.pc_dev = cfg->slot; devlist_entry->conf.pc_sel.pc_func = cfg->func; devlist_entry->conf.pc_hdr = cfg->hdrtype; devlist_entry->conf.pc_subvendor = cfg->subvendor; devlist_entry->conf.pc_subdevice = cfg->subdevice; devlist_entry->conf.pc_vendor = cfg->vendor; devlist_entry->conf.pc_device = cfg->device; devlist_entry->conf.pc_class = cfg->baseclass; devlist_entry->conf.pc_subclass = cfg->subclass; devlist_entry->conf.pc_progif = cfg->progif; devlist_entry->conf.pc_revid = cfg->revid; pci_numdevs++; pci_generation++; return (devlist_entry); } #undef REG static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ cfg->ea.ea_location + (n), w) int num_ent; int ptr; int a, b; uint32_t val; int ent_size; uint32_t dw[4]; uint64_t base, max_offset; struct pci_ea_entry *eae; if (cfg->ea.ea_location == 0) return; STAILQ_INIT(&cfg->ea.ea_entries); /* Determine the number of entries */ num_ent = REG(PCIR_EA_NUM_ENT, 2); num_ent &= PCIM_EA_NUM_ENT_MASK; /* Find the first entry to care of */ ptr = PCIR_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) ptr += 4; for (a = 0; a < num_ent; a++) { eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO); eae->eae_cfg_offset = cfg->ea.ea_location + ptr; /* Read a number of dwords in the entry */ val = REG(ptr, 4); ptr += 4; ent_size = (val & PCIM_EA_ES); for (b = 0; b < ent_size; b++) { dw[b] = REG(ptr, 4); ptr += 4; } eae->eae_flags = val; eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; base = dw[0] & PCIM_EA_FIELD_MASK; max_offset = dw[1] | ~PCIM_EA_FIELD_MASK; b = 2; if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { base |= (uint64_t)dw[b] << 32UL; b++; } if (((dw[1] & PCIM_EA_IS_64) != 0) && (b < ent_size)) { max_offset |= (uint64_t)dw[b] << 32UL; b++; } eae->eae_base = base; eae->eae_max_offset = max_offset; STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); if (bootverbose) { printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n", cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); } } } #undef REG static void pci_read_cap(device_t pcib, pcicfgregs *cfg) { #define REG(n, w) 
PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) uint64_t addr; #endif uint32_t val; int ptr, nextptr, ptrptr; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptrptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ break; default: return; /* no extended capabilities support */ } nextptr = REG(ptrptr, 1); /* sanity check? */ /* * Read capability entries. */ while (nextptr != 0) { /* Sanity check */ if (nextptr > 255) { printf("illegal PCI extended capability offset %d\n", nextptr); return; } /* Find the next entry */ ptr = nextptr; nextptr = REG(ptr + PCICAP_NEXTPTR, 1); /* Process this entry */ switch (REG(ptr + PCICAP_ID, 1)) { case PCIY_PMG: /* PCI power management */ if (cfg->pp.pp_cap == 0) { cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; cfg->pp.pp_bse = ptr + PCIR_POWER_BSE; if ((nextptr - ptr) > PCIR_POWER_DATA) cfg->pp.pp_data = ptr + PCIR_POWER_DATA; } break; case PCIY_HT: /* HyperTransport */ /* Determine HT-specific capability type. */ val = REG(ptr + PCIR_HT_COMMAND, 2); if ((val & 0xe000) == PCIM_HTCAP_SLAVE) cfg->ht.ht_slave = ptr; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) switch (val & PCIM_HTCMD_CAP_MASK) { case PCIM_HTCAP_MSI_MAPPING: if (!(val & PCIM_HTCMD_MSI_FIXED)) { /* Sanity check the mapping window. */ addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, 4); addr <<= 32; addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, 4); if (addr != MSI_INTEL_ADDR_BASE) device_printf(pcib, "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", cfg->domain, cfg->bus, cfg->slot, cfg->func, (long long)addr); } else addr = MSI_INTEL_ADDR_BASE; cfg->ht.ht_msimap = ptr; cfg->ht.ht_msictrl = val; cfg->ht.ht_msiaddr = addr; break; } #endif break; case PCIY_MSI: /* PCI MSI */ cfg->msi.msi_location = ptr; cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & PCIM_MSICTRL_MMC_MASK)>>1); break; case PCIY_MSIX: /* PCI MSI-X */ cfg->msix.msix_location = ptr; cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1; val = REG(ptr + PCIR_MSIX_TABLE, 4); cfg->msix.msix_table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; val = REG(ptr + PCIR_MSIX_PBA, 4); cfg->msix.msix_pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK); cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; break; case PCIY_VPD: /* PCI Vital Product Data */ cfg->vpd.vpd_reg = ptr; break; case PCIY_SUBVENDOR: /* Should always be true. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) { val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); cfg->subvendor = val & 0xffff; cfg->subdevice = val >> 16; } break; case PCIY_PCIX: /* PCI-X */ /* * Assume we have a PCI-X chipset if we have * at least one PCI-PCI bridge with a PCI-X * capability. Note that some systems with * PCI-express or HT chipsets might match on * this check as well. */ if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) pcix_chipset = 1; cfg->pcix.pcix_location = ptr; break; case PCIY_EXPRESS: /* PCI-express */ /* * Assume we have a PCI-express chipset if we have * at least one PCI-express device. 
*/ pcie_chipset = 1; cfg->pcie.pcie_location = ptr; val = REG(ptr + PCIER_FLAGS, 2); cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; break; case PCIY_EA: /* Enhanced Allocation */ cfg->ea.ea_location = ptr; pci_ea_fill_info(pcib, cfg); break; default: break; } } #if defined(__powerpc__) /* * Enable the MSI mapping window for all HyperTransport * slaves. PCI-PCI bridges have their windows enabled via * PCIB_MAP_MSI(). */ if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { device_printf(pcib, "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, 2); } #endif /* REG and WREG use carry through to next functions */ } /* * PCI Vital Product Data */ #define PCI_VPD_TIMEOUT 1000000 static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); return (0); } #if 0 static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) { int count = PCI_VPD_TIMEOUT; KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { if (--count < 0) return (ENXIO); DELAY(1); /* limit looping */ } return (0); } #endif #undef PCI_VPD_TIMEOUT struct vpd_readstate { device_t pcib; pcicfgregs *cfg; uint32_t val; int bytesinval; int off; uint8_t cksum; }; static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) { uint32_t reg; uint8_t byte; if (vrs->bytesinval == 0) { if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®)) return (ENXIO); vrs->val = le32toh(reg); vrs->off += 4; byte = vrs->val & 0xff; vrs->bytesinval = 3; } else { vrs->val = vrs->val >> 8; byte = vrs->val & 0xff; vrs->bytesinval--; } vrs->cksum += byte; *data = byte; return (0); } static void pci_read_vpd(device_t pcib, pcicfgregs *cfg) { struct vpd_readstate vrs; int state; int name; int remain; int i; int alloc, off; /* alloc/off for RO/W arrays */ int cksumvalid; int dflen; uint8_t byte; uint8_t byte2; /* init vpd reader */ vrs.bytesinval = 0; vrs.off = 0; vrs.pcib = pcib; vrs.cfg = cfg; vrs.cksum = 0; state = 0; name = remain = i = 0; /* shut up stupid gcc */ alloc = off = 0; /* shut up stupid gcc */ dflen = 0; /* shut up stupid gcc */ cksumvalid = -1; while (state >= 0) { if (vpd_nextbyte(&vrs, &byte)) { state = -2; break; } #if 0 printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \ "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val, vrs.off, vrs.bytesinval, byte, state, remain, name, i); #endif switch (state) { case 0: /* item name */ if (byte & 0x80) { if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } remain |= byte2 << 8; if (remain > (0x7f*4 - vrs.off)) { state = -1; pci_printf(cfg, "invalid VPD data, remain %#x\n", remain); } name = byte & 0x7f; } else { remain = byte & 0x7; name = (byte >> 3) & 0xf; } switch (name) { case 0x2: /* String */ cfg->vpd.vpd_ident = malloc(remain + 1, 
M_DEVBUF, M_WAITOK); i = 0; state = 1; break; case 0xf: /* End */ state = -1; break; case 0x10: /* VPD-R */ alloc = 8; off = 0; cfg->vpd.vpd_ros = malloc(alloc * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 2; break; case 0x11: /* VPD-W */ alloc = 8; off = 0; cfg->vpd.vpd_w = malloc(alloc * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 5; break; default: /* Invalid data, abort */ state = -1; break; } break; case 1: /* Identifier String */ cfg->vpd.vpd_ident[i++] = byte; remain--; if (remain == 0) { cfg->vpd.vpd_ident[i] = '\0'; state = 0; } break; case 2: /* VPD-R Keyword Header */ if (off == alloc) { cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_ros[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_ros[off].len = dflen = byte2; if (dflen == 0 && strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0) { /* * if this happens, we can't trust the rest * of the VPD. */ pci_printf(cfg, "bad keyword length: %d\n", dflen); cksumvalid = 0; state = -1; break; } else if (dflen == 0) { cfg->vpd.vpd_ros[off].value = malloc(1 * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); cfg->vpd.vpd_ros[off].value[0] = '\x00'; } else cfg->vpd.vpd_ros[off].value = malloc( (dflen + 1) * sizeof(*cfg->vpd.vpd_ros[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 3's transistions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 2; else state = 3; break; case 3: /* VPD-R Keyword Value */ cfg->vpd.vpd_ros[off].value[i++] = byte; if (strncmp(cfg->vpd.vpd_ros[off].keyword, "RV", 2) == 0 && cksumvalid == -1) { if (vrs.cksum == 0) cksumvalid = 1; else { if (bootverbose) pci_printf(cfg, "bad VPD cksum, remain %hhu\n", vrs.cksum); cksumvalid = 0; state = -1; break; } } dflen--; remain--; /* keep in sync w/ state 2's transistions */ if (dflen == 0) cfg->vpd.vpd_ros[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_rocnt = off; cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros, off * sizeof(*cfg->vpd.vpd_ros), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 2; break; case 4: remain--; if (remain == 0) state = 0; break; case 5: /* VPD-W Keyword Header */ if (off == alloc) { cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, (alloc *= 2) * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); } cfg->vpd.vpd_w[off].keyword[0] = byte; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].keyword[1] = byte2; if (vpd_nextbyte(&vrs, &byte2)) { state = -2; break; } cfg->vpd.vpd_w[off].len = dflen = byte2; cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval; cfg->vpd.vpd_w[off].value = malloc((dflen + 1) * sizeof(*cfg->vpd.vpd_w[off].value), M_DEVBUF, M_WAITOK); remain -= 3; i = 0; /* keep in sync w/ state 6's transistions */ if (dflen == 0 && remain == 0) state = 0; else if (dflen == 0) state = 5; else state = 6; break; case 6: /* VPD-W Keyword Value */ cfg->vpd.vpd_w[off].value[i++] = byte; dflen--; remain--; /* keep in sync w/ state 5's transistions */ if (dflen == 0) cfg->vpd.vpd_w[off++].value[i++] = '\0'; if (dflen == 0 && remain == 0) { cfg->vpd.vpd_wcnt = off; cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w, off * sizeof(*cfg->vpd.vpd_w), M_DEVBUF, M_WAITOK | M_ZERO); state = 0; } else if (dflen == 0) state = 5; break; default: pci_printf(cfg, "invalid state: 
%d\n", state); state = -1; break; } } if (cksumvalid == 0 || state < -1) { /* read-only data bad, clean up */ if (cfg->vpd.vpd_ros != NULL) { for (off = 0; cfg->vpd.vpd_ros[off].value; off++) free(cfg->vpd.vpd_ros[off].value, M_DEVBUF); free(cfg->vpd.vpd_ros, M_DEVBUF); cfg->vpd.vpd_ros = NULL; } } if (state < -1) { /* I/O error, clean up */ pci_printf(cfg, "failed to read VPD data.\n"); if (cfg->vpd.vpd_ident != NULL) { free(cfg->vpd.vpd_ident, M_DEVBUF); cfg->vpd.vpd_ident = NULL; } if (cfg->vpd.vpd_w != NULL) { for (off = 0; cfg->vpd.vpd_w[off].value; off++) free(cfg->vpd.vpd_w[off].value, M_DEVBUF); free(cfg->vpd.vpd_w, M_DEVBUF); cfg->vpd.vpd_w = NULL; } } cfg->vpd.vpd_cached = 1; #undef REG #undef WREG } int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); *identptr = cfg->vpd.vpd_ident; if (*identptr == NULL) return (ENXIO); return (0); } int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; int i; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(dev), cfg); for (i = 0; i < cfg->vpd.vpd_rocnt; i++) if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { *vptr = cfg->vpd.vpd_ros[i].value; return (0); } *vptr = NULL; return (ENXIO); } struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg); return (&cfg->vpd); } /* * Find the requested HyperTransport capability and return the offset * in configuration space via the pointer provided. The function * returns 0 on success and an error code otherwise. */ int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg) { int ptr, error; uint16_t val; error = pci_find_cap(child, PCIY_HT, &ptr); if (error) return (error); /* * Traverse the capabilities list checking each HT capability * to see if it matches the requested HT capability. */ while (ptr != 0) { val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2); if (capability == PCIM_HTCAP_SLAVE || capability == PCIM_HTCAP_HOST) val &= 0xe000; else val &= PCIM_HTCMD_CAP_MASK; if (val == capability) { if (capreg != NULL) *capreg = ptr; return (0); } /* Skip to the next HT capability. */ while (ptr != 0) { ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); if (pci_read_config(child, ptr + PCICAP_ID, 1) == PCIY_HT) break; } } return (ENOENT); } /* * Find the requested capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; u_int32_t status; u_int8_t ptr; /* * Check the CAP_LIST bit of the PCI status register first. */ status = pci_read_config(child, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (ENXIO); /* * Determine the start pointer of the capabilities list. 
*/ switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: case PCIM_HDRTYPE_BRIDGE: ptr = PCIR_CAP_PTR; break; case PCIM_HDRTYPE_CARDBUS: ptr = PCIR_CAP_PTR_2; break; default: /* XXX: panic? */ return (ENXIO); /* no extended capabilities support */ } ptr = pci_read_config(child, ptr, 1); /* * Traverse the capabilities list. */ while (ptr != 0) { if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); } return (ENOENT); } /* * Find the requested extended capability and return the offset in * configuration space via the pointer provided. The function returns * 0 on success and an error code otherwise. */ int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint32_t ecap; uint16_t ptr; /* Only supported for PCI-express devices. */ if (cfg->pcie.pcie_location == 0) return (ENXIO); ptr = PCIR_EXTCAP; ecap = pci_read_config(child, ptr, 4); if (ecap == 0xffffffff || ecap == 0) return (ENOENT); for (;;) { if (PCI_EXTCAP_ID(ecap) == capability) { if (capreg != NULL) *capreg = ptr; return (0); } ptr = PCI_EXTCAP_NEXTPTR(ecap); if (ptr == 0) break; ecap = pci_read_config(child, ptr, 4); } return (ENOENT); } /* * Support for MSI-X message interrupts. */ void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16; bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); bus_write_4(msix->msix_table_res, offset + 4, address >> 32); bus_write_4(msix->msix_table_res, offset + 8, data); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_mask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_msgnum > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); if (!(val & PCIM_MSIX_VCTRL_MASK)) { val |= PCIM_MSIX_VCTRL_MASK; bus_write_4(msix->msix_table_res, offset, val); } } void pci_unmask_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, val; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_table_offset + index * 16 + 12; val = bus_read_4(msix->msix_table_res, offset); if (val & PCIM_MSIX_VCTRL_MASK) { val &= ~PCIM_MSIX_VCTRL_MASK; bus_write_4(msix->msix_table_res, offset, val); } } int pci_pending_msix(device_t dev, u_int index) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; uint32_t offset, bit; KASSERT(msix->msix_table_len > index, ("bogus index")); offset = msix->msix_pba_offset + (index / 32) * 4; bit = 1 << index % 32; return (bus_read_4(msix->msix_pba_res, offset) & bit); } /* * Restore MSI-X registers and table during resume. If MSI-X is * enabled then walk the virtual table to restore the actual MSI-X * table. 
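 * The restore below is deliberately ordered: every vector is masked first,
 * only the table entries that still have at least one handler get their
 * address/data re-programmed and unmasked, and the saved MSI-X control
 * register value is written back last.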
*/ static void pci_resume_msix(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct msix_table_entry *mte; struct msix_vector *mv; int i; if (msix->msix_alloc > 0) { /* First, mask all vectors. */ for (i = 0; i < msix->msix_msgnum; i++) pci_mask_msix(dev, i); /* Second, program any messages with at least one handler. */ for (i = 0; i < msix->msix_table_len; i++) { mte = &msix->msix_table[i]; if (mte->mte_vector == 0 || mte->mte_handlers == 0) continue; mv = &msix->msix_vectors[mte->mte_vector - 1]; pci_enable_msix(dev, i, mv->mv_address, mv->mv_data); pci_unmask_msix(dev, i); } } pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); } /* * Attempt to allocate *count MSI-X messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at rid 1. */ int pci_alloc_msix_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irq, max; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI-X is blacklisted for this system, fail. */ if (pci_msix_blacklisted()) return (ENXIO); /* MSI-X capability present? */ if (cfg->msix.msix_location == 0 || !pci_do_msix) return (ENODEV); /* Make sure the appropriate BARs are mapped. */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_table_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); cfg->msix.msix_table_res = rle->res; if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, cfg->msix.msix_pba_bar); if (rle == NULL || rle->res == NULL || !(rman_get_flags(rle->res) & RF_ACTIVE)) return (ENXIO); } cfg->msix.msix_pba_res = rle->res; if (bootverbose) device_printf(child, "attempting to allocate %d MSI-X vectors (%d supported)\n", *count, cfg->msix.msix_msgnum); max = min(*count, cfg->msix.msix_msgnum); for (i = 0; i < max; i++) { /* Allocate a message. */ error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq); if (error) { if (i == 0) return (error); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } actual = i; if (bootverbose) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); if (actual == 1) device_printf(child, "using IRQ %ju for MSI-X\n", rle->start); else { int run; /* * Be fancy and try to print contiguous runs of * IRQ values as ranges. 'irq' is the previous IRQ. * 'run' is true if we are in a range. */ device_printf(child, "using IRQs %ju", rle->start); irq = rle->start; run = 0; for (i = 1; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Still in a run? */ if (rle->start == irq + 1) { run = 1; irq++; continue; } /* Finish previous range. */ if (run) { printf("-%d", irq); run = 0; } /* Start new range. */ printf(",%ju", rle->start); irq = rle->start; } /* Unfinished range? */ if (run) printf("-%d", irq); printf(" for MSI-X\n"); } } /* Mask all vectors. 
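 * Nothing is unmasked at this point; a freshly allocated message stays
 * masked until it is programmed and unmasked individually later through
 * pci_enable_msix() and pci_unmask_msix().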
*/ for (i = 0; i < cfg->msix.msix_msgnum; i++) pci_mask_msix(child, i); /* Allocate and initialize vector data and virtual table. */ cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual, M_DEVBUF, M_WAITOK | M_ZERO); cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < actual; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); cfg->msix.msix_vectors[i].mv_irq = rle->start; cfg->msix.msix_table[i].mte_vector = i + 1; } /* Update control register to enable MSI-X. */ cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, cfg->msix.msix_ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msix.msix_alloc = actual; cfg->msix.msix_table_len = actual; *count = actual; return (0); } /* * By default, pci_alloc_msix() will assign the allocated IRQ * resources consecutively to the first N messages in the MSI-X table. * However, device drivers may want to use different layouts if they * either receive fewer messages than they asked for, or they wish to * populate the MSI-X table sparsely. This method allows the driver * to specify what layout it wants. It must be called after a * successful pci_alloc_msix() but before any of the associated * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). * * The 'vectors' array contains 'count' message vectors. The array * maps directly to the MSI-X table in that index 0 in the array * specifies the vector for the first message in the MSI-X table, etc. * The vector value in each array index can either be 0 to indicate * that no vector should be assigned to a message slot, or it can be a * number from 1 to N (where N is the count returned from a * succcessful call to pci_alloc_msix()) to indicate which message * vector (IRQ) to be used for the corresponding message. * * On successful return, each message with a non-zero vector will have * an associated SYS_RES_IRQ whose rid is equal to the array index + * 1. Additionally, if any of the IRQs allocated via the previous * call to pci_alloc_msix() are not used in the mapping, those IRQs * will be freed back to the system automatically. * * For example, suppose a driver has a MSI-X table with 6 messages and * asks for 6 messages, but pci_alloc_msix() only returns a count of * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and * C. After the call to pci_alloc_msix(), the device will be setup to * have an MSI-X table of ABC--- (where - means no vector assigned). * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, * then the MSI-X table will look like A-AB-B, and the 'C' vector will * be freed back to the system. This device will also have valid * SYS_RES_IRQ rids of 1, 3, 4, and 6. * * In any case, the SYS_RES_IRQ rid X will always map to the message * at MSI-X table index X - 1 and will only be valid if a vector is * assigned to that table entry. */ int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i, irq, j, *used; /* * Have to have at least one message in the table but the * table can't be bigger than the actual MSI-X table in the * device. */ if (count == 0 || count > msix->msix_msgnum) return (EINVAL); /* Sanity check the vectors. 
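 * For example, with msix_alloc == 3 a vectors[] entry of 4 is rejected
 * below with EINVAL: every non-zero entry must name one of the
 * 1..msix_alloc messages obtained from the earlier pci_alloc_msix() call.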
*/ for (i = 0; i < count; i++) if (vectors[i] > msix->msix_alloc) return (EINVAL); /* * Make sure there aren't any holes in the vectors to be used. * It's a big pain to support it, and it doesn't really make * sense anyway. Also, at least one vector must be used. */ used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) if (vectors[i] != 0) used[vectors[i] - 1] = 1; for (i = 0; i < msix->msix_alloc - 1; i++) if (used[i] == 0 && used[i + 1] == 1) { free(used, M_DEVBUF); return (EINVAL); } if (used[0] != 1) { free(used, M_DEVBUF); return (EINVAL); } /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) { free(used, M_DEVBUF); return (EBUSY); } rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) { free(used, M_DEVBUF); return (EBUSY); } } /* Free the existing resource list entries. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } /* * Build the new virtual table keeping track of which vectors are * used. */ free(msix->msix_table, M_DEVBUF); msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < count; i++) msix->msix_table[i].mte_vector = vectors[i]; msix->msix_table_len = count; /* Free any unused IRQs and resize the vectors array if necessary. */ j = msix->msix_alloc - 1; if (used[j] == 0) { struct msix_vector *vec; while (used[j] == 0) { PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[j].mv_irq); j--; } vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, M_WAITOK); bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * (j + 1)); free(msix->msix_vectors, M_DEVBUF); msix->msix_vectors = vec; msix->msix_alloc = j + 1; } free(used, M_DEVBUF); /* Map the IRQs onto the rids. */ for (i = 0; i < count; i++) { if (vectors[i] == 0) continue; irq = msix->msix_vectors[vectors[i] - 1].mv_irq; resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, irq, 1); } if (bootverbose) { device_printf(child, "Remapped MSI-X IRQs as: "); for (i = 0; i < count; i++) { if (i != 0) printf(", "); if (vectors[i] == 0) printf("---"); else printf("%d", msix->msix_vectors[vectors[i] - 1].mv_irq); } printf("\n"); } return (0); } static int pci_release_msix(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; struct resource_list_entry *rle; int i; /* Do we have any messages to release? */ if (msix->msix_alloc == 0) return (ENODEV); /* Make sure none of the resources are allocated. */ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; if (msix->msix_table[i].mte_handlers > 0) return (EBUSY); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing resource")); if (rle->res != NULL) return (EBUSY); } /* Update control register to disable MSI-X. */ msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, msix->msix_ctrl, 2); /* Free the resource list entries. 
*/ for (i = 0; i < msix->msix_table_len; i++) { if (msix->msix_table[i].mte_vector == 0) continue; resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); } free(msix->msix_table, M_DEVBUF); msix->msix_table_len = 0; /* Release the IRQs. */ for (i = 0; i < msix->msix_alloc; i++) PCIB_RELEASE_MSIX(device_get_parent(dev), child, msix->msix_vectors[i].mv_irq); free(msix->msix_vectors, M_DEVBUF); msix->msix_alloc = 0; return (0); } /* * Return the max supported MSI-X messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msix() can return. * Thus, it is subject to the tunables, etc. */ int pci_msix_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_msgnum); return (0); } int pci_msix_pba_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_pba_bar); return (-1); } int pci_msix_table_bar_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msix *msix = &dinfo->cfg.msix; if (pci_do_msix && msix->msix_location != 0) return (msix->msix_table_bar); return (-1); } /* * HyperTransport MSI mapping control */ void pci_ht_map_msi(device_t dev, uint64_t addr) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_ht *ht = &dinfo->cfg.ht; if (!ht->ht_msimap) return; if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && ht->ht_msiaddr >> 20 == addr >> 20) { /* Enable MSI -> HT mapping. */ ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { /* Disable MSI -> HT mapping. */ ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, ht->ht_msictrl, 2); } } int pci_get_max_read_req(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= PCIEM_CTL_MAX_READ_REQUEST; val >>= 12; return (1 << (val + 7)); } int pci_set_max_read_req(device_t dev, int size) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; uint16_t val; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return (0); if (size < 128) size = 128; if (size > 4096) size = 4096; size = (1 << (fls(size) - 1)); val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2); val &= ~PCIEM_CTL_MAX_READ_REQUEST; val |= (fls(size) - 8) << 12; pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2); return (size); } uint32_t pcie_read_config(device_t dev, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } return (pci_read_config(dev, cap + reg, width)); } void pcie_write_config(device_t dev, int reg, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) return; pci_write_config(dev, cap + reg, value, width); } /* * Adjusts a PCI-e capability register by clearing the bits in mask * and setting the bits in (value & mask). Bits not set in mask are * not adjusted. 
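 * As an illustrative (not normative) example, using register names from
 * pcireg.h:
 *
 *	pcie_adjust_config(dev, PCIER_LINK_CTL, PCIEM_LINK_CTL_ASPMC,
 *	    PCIEM_LINK_CTL_ASPMC_L1, 2);
 *
 * clears both ASPM control bits and then sets the L1 bit, leaving the rest
 * of the Link Control register untouched.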
* * Returns the old value on success or all ones on failure. */ uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width) { struct pci_devinfo *dinfo = device_get_ivars(dev); uint32_t old, new; int cap; cap = dinfo->cfg.pcie.pcie_location; if (cap == 0) { if (width == 2) return (0xffff); return (0xffffffff); } old = pci_read_config(dev, cap + reg, width); new = old & ~mask; new |= (value & mask); pci_write_config(dev, cap + reg, new, width); return (old); } /* * Support for MSI message signalled interrupts. */ void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Write data and address values. */ pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, 2); /* Enable MSI in the control register. */ msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Enable MSI -> HT mapping. */ pci_ht_map_msi(child, address); } void pci_disable_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; /* Disable MSI -> HT mapping. */ pci_ht_map_msi(child, 0); /* Disable MSI in the control register. */ msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } /* * Restore MSI registers during resume. If MSI is enabled then * restore the data and address registers in addition to the control * register. */ static void pci_resume_msi(device_t dev) { struct pci_devinfo *dinfo = device_get_ivars(dev); struct pcicfg_msi *msi = &dinfo->cfg.msi; uint64_t address; uint16_t data; if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { address = msi->msi_addr; data = msi->msi_data; pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, address & 0xffffffff, 4); if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH, address >> 32, 4); pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT, data, 2); } else pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data, 2); } pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); } static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; int error, i, j; /* * Handle MSI first. We try to find this IRQ among our list * of MSI IRQs. If we find it, we request updated address and * data registers and apply the results. */ if (cfg->msi.msi_alloc > 0) { /* If we don't have any active handlers, nothing to do. 
*/ if (cfg->msi.msi_handlers == 0) return (0); for (i = 0; i < cfg->msi.msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); if (rle->start == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); pci_disable_msi(dev); dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; pci_enable_msi(dev, addr, data); return (0); } } return (ENOENT); } /* * For MSI-X, we check to see if we have this IRQ. If we do, * we request the updated mapping info. If that works, we go * through all the slots that use this IRQ and update them. */ if (cfg->msix.msix_alloc > 0) { for (i = 0; i < cfg->msix.msix_alloc; i++) { mv = &cfg->msix.msix_vectors[i]; if (mv->mv_irq == irq) { error = PCIB_MAP_MSI(device_get_parent(bus), dev, irq, &addr, &data); if (error) return (error); mv->mv_address = addr; mv->mv_data = data; for (j = 0; j < cfg->msix.msix_table_len; j++) { mte = &cfg->msix.msix_table[j]; if (mte->mte_vector != i + 1) continue; if (mte->mte_handlers == 0) continue; pci_mask_msix(dev, j); pci_enable_msix(dev, j, addr, data); pci_unmask_msix(dev, j); } } } return (ENOENT); } return (ENOENT); } /* * Returns true if the specified device is blacklisted because MSI * doesn't work. */ int pci_msi_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); } /* * Determine if MSI is blacklisted globally on this system. Currently, * we just check for blacklisted chipsets as represented by the * host-PCI bridge at device 0:0:0. In the future, it may become * necessary to check other system attributes, such as the kenv values * that give the motherboard manufacturer and model number. */ static int pci_msi_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ if (!(pcie_chipset || pcix_chipset)) { if (vm_guest != VM_GUEST_NO) { /* * Whitelist older chipsets in virtual * machines known to support MSI. */ dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (!pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_ENABLE_MSI_VM)); } return (1); } dev = pci_find_bsf(0, 0, 0); if (dev != NULL) return (pci_msi_device_blacklisted(dev)); return (0); } /* * Returns true if the specified device is blacklisted because MSI-X * doesn't work. Note that this assumes that if MSI doesn't work, * MSI-X doesn't either. */ int pci_msix_device_blacklisted(device_t dev) { if (!pci_honor_msi_blacklist) return (0); if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_device_blacklisted(dev)); } /* * Determine if MSI-X is blacklisted globally on this system. If MSI * is blacklisted, assume that MSI-X is as well. Check for additional * chipsets where MSI works but MSI-X does not. */ static int pci_msix_blacklisted(void) { device_t dev; if (!pci_honor_msi_blacklist) return (0); dev = pci_find_bsf(0, 0, 0); if (dev != NULL && pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) return (1); return (pci_msi_blacklisted()); } /* * Attempt to allocate *count MSI messages. The actual number allocated is * returned in *count. After this function returns, each message will be * available to the driver as SYS_RES_IRQ resources starting at a rid 1. 
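 * A minimal driver-side sketch of the usual calling sequence (illustrative
 * only, using the pci_alloc_msi() wrapper):
 *
 *	int count = 1, rid = 1;
 *	if (pci_alloc_msi(dev, &count) == 0)
 *		res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *		    RF_ACTIVE);
 *
 * The message allocated here is then consumed through the ordinary
 * SYS_RES_IRQ interface at rid 1.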
*/ int pci_alloc_msi_method(device_t dev, device_t child, int *count) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; struct resource_list_entry *rle; int actual, error, i, irqs[32]; uint16_t ctrl; /* Don't let count == 0 get us into trouble. */ if (*count == 0) return (EINVAL); /* If rid 0 is allocated, then fail. */ rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); if (rle != NULL && rle->res != NULL) return (ENXIO); /* Already have allocated messages? */ if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) return (ENXIO); /* If MSI is blacklisted for this system, fail. */ if (pci_msi_blacklisted()) return (ENXIO); /* MSI capability present? */ if (cfg->msi.msi_location == 0 || !pci_do_msi) return (ENODEV); if (bootverbose) device_printf(child, "attempting to allocate %d MSI vectors (%d supported)\n", *count, cfg->msi.msi_msgnum); /* Don't ask for more than the device supports. */ actual = min(*count, cfg->msi.msi_msgnum); /* Don't ask for more than 32 messages. */ actual = min(actual, 32); /* MSI requires power of 2 number of messages. */ if (!powerof2(actual)) return (EINVAL); for (;;) { /* Try to allocate N messages. */ error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, actual, irqs); if (error == 0) break; if (actual == 1) return (error); /* Try N / 2. */ actual >>= 1; } /* * We now have N actual messages mapped onto SYS_RES_IRQ * resources in the irqs[] array, so add new resources * starting at rid 1. */ for (i = 0; i < actual; i++) resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irqs[i], irqs[i], 1); if (bootverbose) { if (actual == 1) device_printf(child, "using IRQ %d for MSI\n", irqs[0]); else { int run; /* * Be fancy and try to print contiguous runs * of IRQ values as ranges. 'run' is true if * we are in a range. */ device_printf(child, "using IRQs %d", irqs[0]); run = 0; for (i = 1; i < actual; i++) { /* Still in a run? */ if (irqs[i] == irqs[i - 1] + 1) { run = 1; continue; } /* Finish previous range. */ if (run) { printf("-%d", irqs[i - 1]); run = 0; } /* Start new range. */ printf(",%d", irqs[i]); } /* Unfinished range? */ if (run) printf("-%d", irqs[actual - 1]); printf(" for MSI\n"); } } /* Update control register with actual count. */ ctrl = cfg->msi.msi_ctrl; ctrl &= ~PCIM_MSICTRL_MME_MASK; ctrl |= (ffs(actual) - 1) << 4; cfg->msi.msi_ctrl = ctrl; pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); /* Update counts of alloc'd messages. */ cfg->msi.msi_alloc = actual; cfg->msi.msi_handlers = 0; *count = actual; return (0); } /* Release the MSI messages associated with this device. */ int pci_release_msi_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; struct resource_list_entry *rle; int error, i, irqs[32]; /* Try MSI-X first. */ error = pci_release_msix(dev, child); if (error != ENODEV) return (error); /* Do we have any messages to release? */ if (msi->msi_alloc == 0) return (ENODEV); KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); /* Make sure none of the resources are allocated. */ if (msi->msi_handlers > 0) return (EBUSY); for (i = 0; i < msi->msi_alloc; i++) { rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); KASSERT(rle != NULL, ("missing MSI resource")); if (rle->res != NULL) return (EBUSY); irqs[i] = rle->start; } /* Update control register with 0 count. 
*/ KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), ("%s: MSI still enabled", __func__)); msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 2); /* Release the messages. */ PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); for (i = 0; i < msi->msi_alloc; i++) resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); /* Update alloc count. */ msi->msi_alloc = 0; msi->msi_addr = 0; msi->msi_data = 0; return (0); } /* * Return the max supported MSI messages this device supports. * Basically, assuming the MD code can alloc messages, this function * should return the maximum value that pci_alloc_msi() can return. * Thus, it is subject to the tunables, etc. */ int pci_msi_count_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); struct pcicfg_msi *msi = &dinfo->cfg.msi; if (pci_do_msi && msi->msi_location != 0) return (msi->msi_msgnum); return (0); } /* free pcicfgregs structure and all depending data structures */ int pci_freecfg(struct pci_devinfo *dinfo) { struct devlist *devlist_head; struct pci_map *pm, *next; int i; devlist_head = &pci_devq; if (dinfo->cfg.vpd.vpd_reg) { free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++) free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF); for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++) free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF); free(dinfo->cfg.vpd.vpd_w, M_DEVBUF); } STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { free(pm, M_DEVBUF); } STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); free(dinfo, M_DEVBUF); /* increment the generation count */ pci_generation++; /* we're losing one device */ pci_numdevs--; return (0); } /* * PCI power manangement */ int pci_set_powerstate_method(device_t dev, device_t child, int state) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int oldstate, highest, delay; if (cfg->pp.pp_cap == 0) return (EOPNOTSUPP); /* * Optimize a no state change request away. While it would be OK to * write to the hardware in theory, some devices have shown odd * behavior when going from D3 -> D3. */ oldstate = pci_get_powerstate(child); if (oldstate == state) return (0); /* * The PCI power management specification states that after a state * transition between PCI power states, system software must * guarantee a minimal delay before the function accesses the device. * Compute the worst case delay that we need to guarantee before we * access the device. Many devices will be responsive much more * quickly than this delay, but there are some that don't respond * instantly to state changes. Transitions to/from D3 state require * 10ms, while D2 requires 200us, and D0/1 require none. The delay * is done below with DELAY rather than a sleeper function because * this function can be called from contexts where we cannot sleep. */ highest = (oldstate > state) ? 
oldstate : state; if (highest == PCI_POWERSTATE_D3) delay = 10000; else if (highest == PCI_POWERSTATE_D2) delay = 200; else delay = 0; status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) & ~PCIM_PSTAT_DMASK; switch (state) { case PCI_POWERSTATE_D0: status |= PCIM_PSTAT_D0; break; case PCI_POWERSTATE_D1: if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D1; break; case PCI_POWERSTATE_D2: if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) return (EOPNOTSUPP); status |= PCIM_PSTAT_D2; break; case PCI_POWERSTATE_D3: status |= PCIM_PSTAT_D3; break; default: return (EINVAL); } if (bootverbose) pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, state); PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); if (delay) DELAY(delay); return (0); } int pci_get_powerstate_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; uint16_t status; int result; if (cfg->pp.pp_cap != 0) { status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); switch (status & PCIM_PSTAT_DMASK) { case PCIM_PSTAT_D0: result = PCI_POWERSTATE_D0; break; case PCIM_PSTAT_D1: result = PCI_POWERSTATE_D1; break; case PCIM_PSTAT_D2: result = PCI_POWERSTATE_D2; break; case PCIM_PSTAT_D3: result = PCI_POWERSTATE_D3; break; default: result = PCI_POWERSTATE_UNKNOWN; break; } } else { /* No support, device is always at D0 */ result = PCI_POWERSTATE_D0; } return (result); } /* * Some convenience functions for PCI device drivers. */ static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command |= bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) { uint16_t command; command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); command &= ~bit; PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); } int pci_enable_busmaster_method(device_t dev, device_t child) { pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_disable_busmaster_method(device_t dev, device_t child) { pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); return (0); } int pci_enable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_set_command_bit(dev, child, bit); return (0); } int pci_disable_io_method(device_t dev, device_t child, int space) { uint16_t bit; switch(space) { case SYS_RES_IOPORT: bit = PCIM_CMD_PORTEN; break; case SYS_RES_MEMORY: bit = PCIM_CMD_MEMEN; break; default: return (EINVAL); } pci_clear_command_bit(dev, child, bit); return (0); } /* * New style pci driver. Parent device is either a pci-host-bridge or a * pci-pci-bridge. Both kinds are represented by instances of pcib. 
*/ void pci_print_verbose(struct pci_devinfo *dinfo) { if (bootverbose) { pcicfgregs *cfg = &dinfo->cfg; printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", cfg->vendor, cfg->device, cfg->revid); printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", cfg->domain, cfg->bus, cfg->slot, cfg->func); printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, cfg->mfdev); printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", cfg->cmdreg, cfg->statreg, cfg->cachelnsz); printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); if (cfg->intpin > 0) printf("\tintpin=%c, irq=%d\n", cfg->intpin +'a' -1, cfg->intline); if (cfg->pp.pp_cap) { uint16_t status; status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", cfg->pp.pp_cap & PCIM_PCAP_SPEC, cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", status & PCIM_PSTAT_DMASK); } if (cfg->msi.msi_location) { int ctrl; ctrl = cfg->msi.msi_ctrl; printf("\tMSI supports %d message%s%s%s\n", cfg->msi.msi_msgnum, (cfg->msi.msi_msgnum == 1) ? "" : "s", (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":""); } if (cfg->msix.msix_location) { printf("\tMSI-X supports %d message%s ", cfg->msix.msix_msgnum, (cfg->msix.msix_msgnum == 1) ? "" : "s"); if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) printf("in map 0x%x\n", cfg->msix.msix_table_bar); else printf("in maps 0x%x and 0x%x\n", cfg->msix.msix_table_bar, cfg->msix.msix_pba_bar); } } } static int pci_porten(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; } static int pci_memen(device_t dev) { return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; } void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64) { struct pci_devinfo *dinfo; pci_addr_t map, testval; int ln2range; uint16_t cmd; /* * The device ROM BAR is special. It is always a 32-bit * memory BAR. Bit 0 is special and should not be set when * sizing the BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { map = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, 0xfffffffe, 4); testval = pci_read_config(dev, reg, 4); pci_write_config(dev, reg, map, 4); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = 0; return; } map = pci_read_config(dev, reg, 4); ln2range = pci_maprange(map); if (ln2range == 64) map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; /* * Disable decoding via the command register before * determining the BAR's length since we will be placing it in * a weird state. */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); /* * Determine the BAR's length by writing all 1's. The bottom * log_2(size) bits of the BAR will stick as 0 when we read * the value back. */ pci_write_config(dev, reg, 0xffffffff, 4); testval = pci_read_config(dev, reg, 4); if (ln2range == 64) { pci_write_config(dev, reg + 4, 0xffffffff, 4); testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; } /* * Restore the original value of the BAR. We may have reprogrammed * the BAR of the low-level console device and when booting verbose, * we need the console device addressable. 
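 * As a worked example of the sizing write above: a 64 KB 32-bit memory BAR
 * reads back 0xffff0000 plus its low flag bits, so pci_mapsize() counts 16
 * trailing zero bits and the BAR spans 1 << 16 bytes.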
*/ pci_write_config(dev, reg, map, 4); if (ln2range == 64) pci_write_config(dev, reg + 4, map >> 32, 4); pci_write_config(dev, PCIR_COMMAND, cmd, 2); *mapp = map; *testvalp = testval; if (bar64 != NULL) *bar64 = (ln2range == 64); } static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) { struct pci_devinfo *dinfo; int ln2range; /* The device ROM BAR is always a 32-bit memory BAR. */ dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, base, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); if (ln2range == 64) pm->pm_value |= (pci_addr_t)pci_read_config(dev, pm->pm_reg + 4, 4) << 32; } struct pci_map * pci_find_bar(device_t dev, int reg) { struct pci_devinfo *dinfo; struct pci_map *pm; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (pm->pm_reg == reg) return (pm); } return (NULL); } int pci_bar_enabled(device_t dev, struct pci_map *pm) { struct pci_devinfo *dinfo; uint16_t cmd; dinfo = device_get_ivars(dev); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && !(pm->pm_value & PCIM_BIOS_ENABLE)) return (0); cmd = pci_read_config(dev, PCIR_COMMAND, 2); if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) return ((cmd & PCIM_CMD_MEMEN) != 0); else return ((cmd & PCIM_CMD_PORTEN) != 0); } struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) { struct pci_devinfo *dinfo; struct pci_map *pm, *prev; dinfo = device_get_ivars(dev); pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); pm->pm_reg = reg; pm->pm_value = value; pm->pm_size = size; STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", reg)); if (STAILQ_NEXT(prev, pm_link) == NULL || STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) break; } if (prev != NULL) STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); else STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); return (pm); } static void pci_restore_bars(device_t dev) { struct pci_devinfo *dinfo; struct pci_map *pm; int ln2range; dinfo = device_get_ivars(dev); STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) ln2range = 32; else ln2range = pci_maprange(pm->pm_value); pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); if (ln2range == 64) pci_write_config(dev, pm->pm_reg + 4, pm->pm_value >> 32, 4); } } /* * Add a resource based on a pci map register. Return 1 if the map * register is a 32bit map register or 2 if it is a 64bit register. */ static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch) { struct pci_map *pm; pci_addr_t base, map, testval; pci_addr_t start, end, count; int barlen, basezero, flags, maprange, mapsize, type; uint16_t cmd; struct resource *res; /* * The BAR may already exist if the device is a CardBus card * whose CIS is stored in this BAR. */ pm = pci_find_bar(dev, reg); if (pm != NULL) { maprange = pci_maprange(pm->pm_value); barlen = maprange == 64 ? 
2 : 1; return (barlen); } pci_read_bar(dev, reg, &map, &testval, NULL); if (PCI_BAR_MEM(map)) { type = SYS_RES_MEMORY; if (map & PCIM_BAR_MEM_PREFETCH) prefetch = 1; } else type = SYS_RES_IOPORT; mapsize = pci_mapsize(testval); base = pci_mapbase(map); #ifdef __PCI_BAR_ZERO_VALID basezero = 0; #else basezero = base == 0; #endif maprange = pci_maprange(map); barlen = maprange == 64 ? 2 : 1; /* * For I/O registers, if bottom bit is set, and the next bit up * isn't clear, we know we have a BAR that doesn't conform to the * spec, so ignore it. Also, sanity check the size of the data * areas to the type of memory involved. Memory must be at least * 16 bytes in size, while I/O ranges must be at least 4. */ if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) return (barlen); if ((type == SYS_RES_MEMORY && mapsize < 4) || (type == SYS_RES_IOPORT && mapsize < 2)) return (barlen); /* Save a record of this BAR. */ pm = pci_add_bar(dev, reg, map, mapsize); if (bootverbose) { printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); if (type == SYS_RES_IOPORT && !pci_porten(dev)) printf(", port disabled\n"); else if (type == SYS_RES_MEMORY && !pci_memen(dev)) printf(", memory disabled\n"); else printf(", enabled\n"); } /* * If base is 0, then we have problems if this architecture does * not allow that. It is best to ignore such entries for the * moment. These will be allocated later if the driver specifically * requests them. However, some removable busses look better when * all resources are allocated, so allow '0' to be overriden. * * Similarly treat maps whose values is the same as the test value * read back. These maps have had all f's written to them by the * BIOS in an attempt to disable the resources. */ if (!force && (basezero || map == testval)) return (barlen); if ((u_long)base != base) { device_printf(bus, "pci%d:%d:%d:%d bar %#x too many address bits", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); return (barlen); } /* * This code theoretically does the right thing, but has * undesirable side effects in some cases where peripherals * respond oddly to having these bits enabled. Let the user * be able to turn them off (since pci_enable_io_modes is 1 by * default). */ if (pci_enable_io_modes) { /* Turn on resources that have been left off by a lazy BIOS */ if (type == SYS_RES_IOPORT && !pci_porten(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_PORTEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } if (type == SYS_RES_MEMORY && !pci_memen(dev)) { cmd = pci_read_config(dev, PCIR_COMMAND, 2); cmd |= PCIM_CMD_MEMEN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); } } else { if (type == SYS_RES_IOPORT && !pci_porten(dev)) return (barlen); if (type == SYS_RES_MEMORY && !pci_memen(dev)) return (barlen); } count = (pci_addr_t)1 << mapsize; flags = RF_ALIGNMENT_LOG2(mapsize); if (prefetch) flags |= RF_PREFETCHABLE; if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { start = 0; /* Let the parent decide. */ end = ~0; } else { start = base; end = base + count - 1; } resource_list_add(rl, type, reg, start, end, count); /* * Try to allocate the resource for this BAR from our parent * so that this resource range is already reserved. The * driver for this device will later inherit this resource in * pci_alloc_resource(). 
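The window handed to resource_list_add() above follows directly from the decoded base and size. A standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t base = 0xfebf0000;     /* hypothetical programmed BAR base */
        int mapsize = 14;               /* log2 of the decoded size: 16 KB */
        uint64_t count = (uint64_t)1 << mapsize;
        uint64_t start = base;
        uint64_t end = base + count - 1;

        /* Same start/end/count arithmetic as the resource_list_add() above. */
        printf("window %#jx-%#jx, %ju bytes, alignment 2^%d\n",
            (uintmax_t)start, (uintmax_t)end, (uintmax_t)count, mapsize);
        return (0);
}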
*/ res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count, flags); if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0)) { /* * If the allocation fails, try to allocate a resource for * this BAR using any available range. The firmware felt * it was important enough to assign a resource, so don't * disable decoding if we can help it. */ resource_list_delete(rl, type, reg); resource_list_add(rl, type, reg, 0, ~0, count); res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0, count, flags); } if (res == NULL) { /* * If the allocation fails, delete the resource list entry * and disable decoding for this device. * * If the driver requests this resource in the future, * pci_reserve_map() will try to allocate a fresh * resource range. */ resource_list_delete(rl, type, reg); pci_disable_io(dev, type); if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d bar %#x failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), reg); } else { start = rman_get_start(res); pci_write_bar(dev, pm, start); } return (barlen); } /* * For ATA devices we need to decide early what addressing mode to use. * Legacy demands that the primary and secondary ATA ports sit on the * same addresses that old ISA hardware did. This dictates that we use * those addresses and ignore the BARs if we cannot set PCI native * addressing mode. */ static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask) { int rid, type, progif; #if 0 /* if this device supports PCI native addressing use it */ progif = pci_read_config(dev, PCIR_PROGIF, 1); if ((progif & 0x8a) == 0x8a) { if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { printf("Trying ATA native PCI addressing mode\n"); pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); } } #endif progif = pci_read_config(dev, PCIR_PROGIF, 1); type = SYS_RES_IOPORT; if (progif & PCIP_STORAGE_IDE_MODEPRIM) { pci_add_map(bus, dev, PCIR_BAR(0), rl, force, prefetchmask & (1 << 0)); pci_add_map(bus, dev, PCIR_BAR(1), rl, force, prefetchmask & (1 << 1)); } else { rid = PCIR_BAR(0); resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8, 0); rid = PCIR_BAR(1); resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1, 0); } if (progif & PCIP_STORAGE_IDE_MODESEC) { pci_add_map(bus, dev, PCIR_BAR(2), rl, force, prefetchmask & (1 << 2)); pci_add_map(bus, dev, PCIR_BAR(3), rl, force, prefetchmask & (1 << 3)); } else { rid = PCIR_BAR(2); resource_list_add(rl, type, rid, 0x170, 0x177, 8); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 0x177, 8, 0); rid = PCIR_BAR(3); resource_list_add(rl, type, rid, 0x376, 0x376, 1); (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 0x376, 1, 0); } pci_add_map(bus, dev, PCIR_BAR(4), rl, force, prefetchmask & (1 << 4)); pci_add_map(bus, dev, PCIR_BAR(5), rl, force, prefetchmask & (1 << 5)); } static void pci_assign_interrupt(device_t bus, device_t dev, int force_route) { struct pci_devinfo *dinfo = device_get_ivars(dev); pcicfgregs *cfg = &dinfo->cfg; char tunable_name[64]; int irq; /* Has to have an intpin to have an interrupt. */ if (cfg->intpin == 0) return; /* Let the user override the IRQ with a tunable.
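The selector built just below has the form hw.pci<domain>.<bus>.<slot>.INT<pin>.irq. A minimal userland sketch of the same construction, using made-up domain/bus/slot values:

#include <stdio.h>

int
main(void)
{
        char tunable_name[64];
        int domain = 0, bus = 2, slot = 3, intpin = 1;  /* INTA, made up */

        snprintf(tunable_name, sizeof(tunable_name),
            "hw.pci%d.%d.%d.INT%c.irq", domain, bus, slot, intpin + 'A' - 1);
        printf("%s\n", tunable_name);   /* prints hw.pci0.2.3.INTA.irq */
        return (0);
}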
*/ irq = PCI_INVALID_IRQ; snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.%d.INT%c.irq", cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) irq = PCI_INVALID_IRQ; /* * If we didn't get an IRQ via the tunable, then we either use the * IRQ value in the intline register or we ask the bus to route an * interrupt for us. If force_route is true, then we only use the * value in the intline register if the bus was unable to assign an * IRQ. */ if (!PCI_INTERRUPT_VALID(irq)) { if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) irq = PCI_ASSIGN_INTERRUPT(bus, dev); if (!PCI_INTERRUPT_VALID(irq)) irq = cfg->intline; } /* If after all that we don't have an IRQ, just bail. */ if (!PCI_INTERRUPT_VALID(irq)) return; /* Update the config register if it changed. */ if (irq != cfg->intline) { cfg->intline = irq; pci_write_config(dev, PCIR_INTLINE, irq, 1); } /* Add this IRQ as rid 0 interrupt resource. */ resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); } /* Perform early OHCI takeover from SMM. */ static void ohci_early_takeover(device_t self) { struct resource *res; uint32_t ctl; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; ctl = bus_read_4(res, OHCI_CONTROL); if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM active, request owner change\n"); bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { DELAY(1000); ctl = bus_read_4(res, OHCI_CONTROL); } if (ctl & OHCI_IR) { if (bootverbose) printf("ohci early: " "SMM does not respond, resetting\n"); bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); } /* Disable interrupts */ bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early UHCI takeover from SMM. */ static void uhci_early_takeover(device_t self) { struct resource *res; int rid; /* * Set the PIRQD enable bit and switch off all the others. We don't * want legacy support to interfere with us XXX Does this also mean * that the BIOS won't touch the keyboard anymore if it is connected * to the ports of the root hub? */ pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); /* Disable interrupts */ rid = PCI_UHCI_BASE_REG; res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res != NULL) { bus_write_2(res, UHCI_INTR, 0); bus_release_resource(self, SYS_RES_IOPORT, rid, res); } } /* Perform early EHCI takeover from SMM. */ static void ehci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. 
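The loop that follows walks the EHCI extended-capability chain looking for the USB legacy-support capability: capability ID at offset +0, pointer to the next capability at +1, and the BIOS/OS ownership semaphores at +2/+3. A standalone sketch over a fabricated config-space image, with local constants standing in for the kernel's EHCI_* macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_EC_LEGSUP          0x01    /* legacy-support capability ID */
#define DEMO_LEGSUP_BIOS_SEM    2       /* BIOS-owned semaphore offset */

int
main(void)
{
        uint8_t cfg[256];
        uint8_t eecp;

        memset(cfg, 0, sizeof(cfg));
        /* Fabricated capability at 0x68: legacy support, no next capability,
         * BIOS semaphore set (firmware still owns the controller). */
        cfg[0x68] = DEMO_EC_LEGSUP;
        cfg[0x69] = 0x00;
        cfg[0x68 + DEMO_LEGSUP_BIOS_SEM] = 1;

        for (eecp = 0x68; eecp != 0; eecp = cfg[eecp + 1]) {
                if (cfg[eecp] != DEMO_EC_LEGSUP)
                        continue;
                printf("legacy support at %#x, BIOS %s the controller\n",
                    eecp, cfg[eecp + DEMO_LEGSUP_BIOS_SEM] ? "owns" : "released");
                break;
        }
        return (0);
}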
*/ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_read_config(self, eecp, 4); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { continue; } bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); if (bios_sem == 0) { continue; } if (bootverbose) printf("ehci early: " "SMM active, request owner change\n"); pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); for (i = 0; (i < 100) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = pci_read_config(self, eecp + EHCI_LEGSUP_BIOS_SEM, 1); } if (bios_sem != 0) { if (bootverbose) printf("ehci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); bus_write_4(res, offs + EHCI_USBINTR, 0); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } /* Perform early XHCI takeover from SMM. */ static void xhci_early_takeover(device_t self) { struct resource *res; uint32_t cparams; uint32_t eec; uint8_t eecp; uint8_t bios_sem; uint8_t offs; int rid; int i; rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) return; cparams = bus_read_4(res, XHCI_HCSPARAMS0); eec = -1; /* Synchronise with the BIOS if it owns the controller. */ for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); eecp += XHCI_XECP_NEXT(eec) << 2) { eec = bus_read_4(res, eecp); if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) continue; bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); if (bios_sem == 0) continue; if (bootverbose) printf("xhci early: " "SMM active, request owner change\n"); bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); /* wait a maximum of 5 second */ for (i = 0; (i < 5000) && (bios_sem != 0); i++) { DELAY(1000); bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); } if (bios_sem != 0) { if (bootverbose) printf("xhci early: " "SMM does not respond\n"); } /* Disable interrupts */ offs = bus_read_1(res, XHCI_CAPLENGTH); bus_write_4(res, offs + XHCI_USBCMD, 0); bus_read_4(res, offs + XHCI_USBSTS); } bus_release_resource(self, SYS_RES_MEMORY, rid, res); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static void pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, struct resource_list *rl) { struct resource *res; char *cp; rman_res_t start, end, count; int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return; } /* * If the existing bus range is valid, attempt to reserve it * from our parent. If this fails for any reason, clear the * secbus and subbus registers. * * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? * This would at least preserve the existing sec_bus if it is * valid. */ sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); /* Quirk handling. */ switch (pci_get_devid(dev)) { case 0x12258086: /* Intel 82454KX/GX (Orion) */ sup_bus = pci_read_config(dev, 0x41, 1); if (sup_bus != 0xff) { sec_bus = sup_bus + 1; sub_bus = sup_bus + 1; PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; case 0x00dd10de: /* Compaq R3000 BIOS sets wrong subordinate bus number. 
*/ if ((cp = kern_getenv("smbios.planar.maker")) == NULL) break; if (strncmp(cp, "Compal", 6) != 0) { freeenv(cp); break; } freeenv(cp); if ((cp = kern_getenv("smbios.planar.product")) == NULL) break; if (strncmp(cp, "08A0", 4) != 0) { freeenv(cp); break; } freeenv(cp); if (sub_bus < 0xa) { sub_bus = 0xa; PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); } break; } if (bootverbose) printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); if (sec_bus > 0 && sub_bus >= sec_bus) { start = sec_bus; end = sub_bus; count = end - start + 1; resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count); /* * If requested, clear secondary bus registers in * bridge devices to force a complete renumbering * rather than reserving the existing range. However, * preserve the existing size. */ if (pci_clear_buses) goto clear; rid = 0; res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, start, end, count, 0); if (res != NULL) return; if (bootverbose) device_printf(bus, "pci%d:%d:%d:%d secbus failed to allocate\n", pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); } clear: PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); } static struct resource * pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; struct resource *res; int sec_reg, sub_reg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; rl = &dinfo->resources; switch (cfg->hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_BRIDGE: sec_reg = PCIR_SECBUS_1; sub_reg = PCIR_SUBBUS_1; break; case PCIM_HDRTYPE_CARDBUS: sec_reg = PCIR_SECBUS_2; sub_reg = PCIR_SUBBUS_2; break; default: return (NULL); } if (*rid != 0) return (NULL); if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, PCI_RES_BUS, *rid); device_printf(child, "allocating %ju bus%s failed\n", count, count == 1 ? "" : "es"); return (NULL); } if (bootverbose) device_printf(child, "Lazy allocation of %ju bus%s at %ju\n", count, count == 1 ? 
"" : "es", rman_get_start(res)); PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); } return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, end, count, flags)); } #endif static int pci_ea_bei_to_rid(device_t dev, int bei) { #ifdef PCI_IOV struct pci_devinfo *dinfo; int iov_pos; struct pcicfg_iov *iov; dinfo = device_get_ivars(dev); iov = dinfo->cfg.iov; if (iov != NULL) iov_pos = iov->iov_pos; else iov_pos = 0; #endif /* Check if matches BAR */ if ((bei >= PCIM_EA_BEI_BAR_0) && (bei <= PCIM_EA_BEI_BAR_5)) return (PCIR_BAR(bei)); /* Check ROM */ if (bei == PCIM_EA_BEI_ROM) return (PCIR_BIOS); #ifdef PCI_IOV /* Check if matches VF_BAR */ if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) && (bei <= PCIM_EA_BEI_VF_BAR_5)) return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + iov_pos); #endif return (-1); } int pci_ea_is_enabled(device_t dev, int rid) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); } return (0); } void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov) { struct pci_ea_entry *ea; struct pci_devinfo *dinfo; pci_addr_t start, end, count; struct resource_list *rl; int type, flags, rid; struct resource *res; uint32_t tmp; #ifdef PCI_IOV struct pcicfg_iov *iov; #endif dinfo = device_get_ivars(dev); rl = &dinfo->resources; flags = 0; #ifdef PCI_IOV iov = dinfo->cfg.iov; #endif if (dinfo->cfg.ea.ea_location == 0) return; STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { /* * TODO: Ignore EA-BAR if is not enabled. * Currently the EA implementation supports * only situation, where EA structure contains * predefined entries. In case they are not enabled * leave them unallocated and proceed with * a legacy-BAR mechanism. */ if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) continue; switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { case PCIM_EA_P_MEM_PREFETCH: case PCIM_EA_P_VF_MEM_PREFETCH: flags = RF_PREFETCHABLE; /* FALLTHROUGH */ case PCIM_EA_P_VF_MEM: case PCIM_EA_P_MEM: type = SYS_RES_MEMORY; break; case PCIM_EA_P_IO: type = SYS_RES_IOPORT; break; default: continue; } if (alloc_iov != 0) { #ifdef PCI_IOV /* Allocating IOV, confirm BEI matches */ if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) continue; #else continue; #endif } else { /* Allocating BAR, confirm BEI matches */ if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && (ea->eae_bei != PCIM_EA_BEI_ROM)) continue; } rid = pci_ea_bei_to_rid(dev, ea->eae_bei); if (rid < 0) continue; /* Skip resources already allocated by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL)) continue; start = ea->eae_base; count = ea->eae_max_offset + 1; #ifdef PCI_IOV if (iov != NULL) count = count * iov->iov_num_vfs; #endif end = start + count - 1; if (count == 0) continue; resource_list_add(rl, type, rid, start, end, count); res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count, flags); if (res == NULL) { resource_list_delete(rl, type, rid); /* * Failed to allocate using EA, disable entry. 
* Another attempt to allocation will be performed * further, but this time using legacy BAR registers */ tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); tmp &= ~PCIM_EA_ENABLE; pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); /* * Disabling entry might fail in case it is hardwired. * Read flags again to match current status. */ ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); continue; } /* As per specification, fill BAR with zeros */ pci_write_config(dev, rid, 0, 4); } } void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) { struct pci_devinfo *dinfo; pcicfgregs *cfg; struct resource_list *rl; const struct pci_quirk *q; uint32_t devid; int i; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; rl = &dinfo->resources; devid = (cfg->device << 16) | cfg->vendor; /* Allocate resources using Enhanced Allocation */ pci_add_resources_ea(bus, dev, 0); /* ATA devices needs special map treatment */ if ((pci_get_class(dev) == PCIC_STORAGE) && (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || (!pci_read_config(dev, PCIR_BAR(0), 4) && !pci_read_config(dev, PCIR_BAR(2), 4))) ) pci_ata_maps(bus, dev, rl, force, prefetchmask); else for (i = 0; i < cfg->nummaps;) { /* Skip resources already managed by EA */ if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) || (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) || pci_ea_is_enabled(dev, PCIR_BAR(i))) { i++; continue; } /* * Skip quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_UNMAP_REG && q->arg1 == PCIR_BAR(i)) break; if (q->devid != 0) { i++; continue; } i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, prefetchmask & (1 << i)); } /* * Add additional, quirked resources. */ for (q = &pci_quirks[0]; q->devid != 0; q++) if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) pci_add_map(bus, dev, q->arg1, rl, force, 0); if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { #ifdef __PCI_REROUTE_INTERRUPT /* * Try to re-route interrupts. Sometimes the BIOS or * firmware may leave bogus values in these registers. * If the re-route fails, then just stick with what we * have. */ pci_assign_interrupt(bus, dev, 1); #else pci_assign_interrupt(bus, dev, 0); #endif } if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) xhci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) ehci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) ohci_early_takeover(dev); else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) uhci_early_takeover(dev); } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) /* * Reserve resources for secondary bus ranges behind bridge * devices. */ pci_reserve_secbus(bus, dev, cfg, rl); #endif } static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func) { struct pci_devinfo *dinfo; dinfo = pci_read_device(pcib, dev, domain, busno, slot, func); if (dinfo != NULL) pci_add_child(dev, dinfo); return (dinfo); } void pci_add_children(device_t dev, int domain, int busno) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_devinfo *dinfo; int maxslots; int s, f, pcifunchigh; uint8_t hdrtype; int first_func; /* * Try to detect a device at slot 0, function 0. If it exists, try to * enable ARI. 
We must enable ARI before detecting the rest of the * functions on this bus as ARI changes the set of slots and functions * that are legal on this bus. */ dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0); if (dinfo != NULL && pci_enable_ari) PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); /* * Start looking for new devices on slot 0 at function 1 because we * just identified the device at slot 0, function 0. */ first_func = 1; maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++, first_func = 0) { pcifunchigh = 0; f = 0; DELAY(1); hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = first_func; f <= pcifunchigh; f++) pci_identify_function(pcib, dev, domain, busno, s, f); } #undef REG } int pci_rescan_method(device_t dev) { #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) device_t pcib = device_get_parent(dev); struct pci_softc *sc; device_t child, *devlist, *unchanged; int devcount, error, i, j, maxslots, oldcount; int busno, domain, s, f, pcifunchigh; uint8_t hdrtype; /* No need to check for ARI on a rescan. */ error = device_get_children(dev, &devlist, &devcount); if (error) return (error); if (devcount != 0) { unchanged = malloc(devcount * sizeof(device_t), M_TEMP, M_NOWAIT | M_ZERO); if (unchanged == NULL) { free(devlist, M_TEMP); return (ENOMEM); } } else unchanged = NULL; sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); maxslots = PCIB_MAXSLOTS(pcib); for (s = 0; s <= maxslots; s++) { /* If function 0 is not present, skip to the next slot. */ f = 0; if (REG(PCIR_VENDOR, 2) == 0xffff) continue; pcifunchigh = 0; hdrtype = REG(PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (hdrtype & PCIM_MFDEV) pcifunchigh = PCIB_MAXFUNCS(pcib); for (f = 0; f <= pcifunchigh; f++) { if (REG(PCIR_VENDOR, 2) == 0xffff) continue; /* * Found a valid function. Check if a * device_t for this device already exists. */ for (i = 0; i < devcount; i++) { child = devlist[i]; if (child == NULL) continue; if (pci_get_slot(child) == s && pci_get_function(child) == f) { unchanged[i] = child; goto next_func; } } pci_identify_function(pcib, dev, domain, busno, s, f); next_func:; } } /* Remove devices that are no longer present. */ for (i = 0; i < devcount; i++) { if (unchanged[i] != NULL) continue; device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); oldcount = devcount; /* Try to attach the devices just added.
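Both scan loops above size the per-slot function range from the header-type register: the low bits give the layout and bit 7 marks a multi-function device, in which case functions 0 through 7 are probed (absent ARI). A small sketch of that decoding with a fabricated register value; the mask names are local stand-ins for PCIM_HDRTYPE and PCIM_MFDEV:

#include <stdint.h>
#include <stdio.h>

#define DEMO_HDRTYPE_MASK       0x7f    /* layout: 0 device, 1 bridge, 2 CardBus */
#define DEMO_MFDEV              0x80    /* multi-function flag */

int
main(void)
{
        uint8_t hdrtype = 0x80;         /* made up: type 0, multi-function */
        int pcifunchigh;

        pcifunchigh = (hdrtype & DEMO_MFDEV) ? 7 : 0;
        printf("header type %d, probe functions 0..%d\n",
            hdrtype & DEMO_HDRTYPE_MASK, pcifunchigh);
        return (0);
}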
*/ error = device_get_children(dev, &devlist, &devcount); if (error) { free(unchanged, M_TEMP); return (error); } for (i = 0; i < devcount; i++) { for (j = 0; j < oldcount; j++) { if (devlist[i] == unchanged[j]) goto next_device; } device_probe_and_attach(devlist[i]); next_device:; } free(unchanged, M_TEMP); free(devlist, M_TEMP); return (0); #undef REG } #ifdef PCI_IOV device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { struct pci_devinfo *pf_dinfo, *vf_dinfo; device_t pcib; int busno, slot, func; pf_dinfo = device_get_ivars(pf); pcib = device_get_parent(bus); PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func); vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno, slot, func, vid, did); vf_dinfo->cfg.flags |= PCICFG_VF; pci_add_child(bus, vf_dinfo); return (vf_dinfo->cfg.dev); } device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did) { return (pci_add_iov_child(bus, pf, rid, vid, did)); } #endif void pci_add_child(device_t bus, struct pci_devinfo *dinfo) { dinfo->cfg.dev = device_add_child(bus, NULL, -1); device_set_ivars(dinfo->cfg.dev, dinfo); resource_list_init(&dinfo->resources); pci_cfg_save(dinfo->cfg.dev, dinfo, 0); pci_cfg_restore(dinfo->cfg.dev, dinfo); pci_print_verbose(dinfo); pci_add_resources(bus, dinfo->cfg.dev, 0, 0); pci_child_added(dinfo->cfg.dev); } void pci_child_added_method(device_t dev, device_t child) { } static int pci_probe(device_t dev) { device_set_desc(dev, "PCI bus"); /* Allow other subclasses to override this driver. */ return (BUS_PROBE_GENERIC); } int pci_attach_common(device_t dev) { struct pci_softc *sc; int busno, domain; #ifdef PCI_DMA_BOUNDARY int error, tag_valid; #endif #ifdef PCI_RES_BUS int rid; #endif sc = device_get_softc(dev); domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); #ifdef PCI_RES_BUS rid = 0; sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 1, 0); if (sc->sc_bus == NULL) { device_printf(dev, "failed to allocate bus number\n"); return (ENXIO); } #endif if (bootverbose) device_printf(dev, "domain=%d, physical bus=%d\n", domain, busno); #ifdef PCI_DMA_BOUNDARY tag_valid = 0; if (device_get_devclass(device_get_parent(device_get_parent(dev))) != devclass_find("pci")) { error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag); if (error) device_printf(dev, "Failed to create DMA tag: %d\n", error); else tag_valid = 1; } if (!tag_valid) #endif sc->sc_dma_tag = bus_get_dma_tag(dev); return (0); } static int pci_attach(device_t dev) { int busno, domain, error; error = pci_attach_common(dev); if (error) return (error); /* * Since there can be multiple independently numbered PCI * busses on systems with multiple PCI domains, we can't use * the unit number to decide which bus we are probing. We ask * the parent pcib what our domain and bus numbers are. 
*/ domain = pcib_get_domain(dev); busno = pcib_get_bus(dev); pci_add_children(dev, domain, busno); return (bus_generic_attach(dev)); } static int pci_detach(device_t dev) { #ifdef PCI_RES_BUS struct pci_softc *sc; #endif int error; error = bus_generic_detach(dev); if (error) return (error); #ifdef PCI_RES_BUS sc = device_get_softc(dev); error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); if (error) return (error); #endif return (device_delete_children(dev)); } static void pci_set_power_child(device_t dev, device_t child, int state) { device_t pcib; int dstate; /* * Set the device to the given state. If the firmware suggests * a different power state, use it instead. If power management * is not present, the firmware is responsible for managing * device power. Skip children who aren't attached since they * are handled separately. */ pcib = device_get_parent(dev); dstate = state; if (device_is_attached(child) && PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) pci_set_powerstate(child, dstate); } int pci_suspend_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; int error; dinfo = device_get_ivars(child); /* * Save the PCI configuration space for the child and set the * device in the appropriate power state for this sleep state. */ pci_cfg_save(child, dinfo, 0); /* Suspend devices before potentially powering them down. */ error = bus_generic_suspend_child(dev, child); if (error) return (error); if (pci_do_power_suspend) pci_set_power_child(dev, child, PCI_POWERSTATE_D3); return (0); } int pci_resume_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; if (pci_do_power_resume) pci_set_power_child(dev, child, PCI_POWERSTATE_D0); dinfo = device_get_ivars(child); pci_cfg_restore(child, dinfo); if (!device_is_attached(child)) pci_cfg_save(child, dinfo, 1); bus_generic_resume_child(dev, child); return (0); } int pci_resume(device_t dev) { device_t child, *devlist; int error, i, numdevs; if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) return (error); /* * Resume critical devices first, then everything else later. 
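The resume path below makes two passes over the children so that display, memory, bridge and base-peripheral devices come back up before anything that may depend on them. A minimal sketch of the same two-pass ordering over a made-up class list:

#include <stdio.h>

/* stand-ins for the PCIC_* class codes used below */
enum { DEMO_DISPLAY, DEMO_BRIDGE, DEMO_NETWORK, DEMO_STORAGE };

static int
demo_critical(int class)
{
        return (class == DEMO_DISPLAY || class == DEMO_BRIDGE);
}

int
main(void)
{
        int classes[] = { DEMO_NETWORK, DEMO_DISPLAY, DEMO_STORAGE, DEMO_BRIDGE };
        int i, n = sizeof(classes) / sizeof(classes[0]);

        for (i = 0; i < n; i++)         /* first pass: critical devices */
                if (demo_critical(classes[i]))
                        printf("resume (pass 1) class %d\n", classes[i]);
        for (i = 0; i < n; i++)         /* second pass: the rest */
                if (!demo_critical(classes[i]))
                        printf("resume (pass 2) class %d\n", classes[i]);
        return (0);
}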
*/ for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: BUS_RESUME_CHILD(dev, child); break; } } for (i = 0; i < numdevs; i++) { child = devlist[i]; switch (pci_get_class(child)) { case PCIC_DISPLAY: case PCIC_MEMORY: case PCIC_BRIDGE: case PCIC_BASEPERIPH: break; default: BUS_RESUME_CHILD(dev, child); } } free(devlist, M_TEMP); return (0); } static void pci_load_vendor_data(void) { caddr_t data; void *ptr; size_t sz; data = preload_search_by_type("pci_vendor_data"); if (data != NULL) { ptr = preload_fetch_addr(data); sz = preload_fetch_size(data); if (ptr != NULL && sz != 0) { pci_vendordata = ptr; pci_vendordata_size = sz; /* terminate the database */ pci_vendordata[pci_vendordata_size] = '\n'; } } } void pci_driver_added(device_t dev, driver_t *driver) { int numdevs; device_t *devlist; device_t child; struct pci_devinfo *dinfo; int i; if (bootverbose) device_printf(dev, "driver added\n"); DEVICE_IDENTIFY(driver, dev); if (device_get_children(dev, &devlist, &numdevs) != 0) return; for (i = 0; i < numdevs; i++) { child = devlist[i]; if (device_get_state(child) != DS_NOTPRESENT) continue; dinfo = device_get_ivars(child); pci_print_verbose(dinfo); if (bootverbose) pci_printf(&dinfo->cfg, "reprobing on driver added\n"); pci_cfg_restore(child, dinfo); if (device_probe_and_attach(child) != 0) pci_child_detached(dev, child); } free(devlist, M_TEMP); } int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { struct pci_devinfo *dinfo; struct msix_table_entry *mte; struct msix_vector *mv; uint64_t addr; uint32_t data; void *cookie; int error, rid; error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, arg, &cookie); if (error) return (error); /* If this is not a direct child, just bail out. */ if (device_get_parent(child) != dev) { *cookiep = cookie; return(0); } rid = rman_get_rid(irq); if (rid == 0) { /* Make sure that INTx is enabled */ pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. * Ask our parent to map the MSI and give * us the address and data register values. * If we fail for some reason, teardown the * interrupt handler. 
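Each MSI or MSI-X vector below keeps a handler count: the message is programmed and unmasked when the count goes from 0 to 1, and masked or disabled again once teardown drops it back to 0. A small sketch of that reference-count pattern with hypothetical enable/disable actions:

#include <stdio.h>

static int demo_handlers;               /* per-vector handler count */

static void
demo_setup(void)
{
        if (demo_handlers++ == 0)
                printf("first handler: program and unmask the message\n");
}

static void
demo_teardown(void)
{
        if (--demo_handlers == 0)
                printf("last handler gone: mask/disable the message\n");
}

int
main(void)
{
        demo_setup();
        demo_setup();                   /* second handler: nothing to reprogram */
        demo_teardown();
        demo_teardown();
        return (0);
}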
*/ dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if (dinfo->cfg.msi.msi_addr == 0) { KASSERT(dinfo->cfg.msi.msi_handlers == 0, ("MSI has handlers, but vectors not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; dinfo->cfg.msi.msi_addr = addr; dinfo->cfg.msi.msi_data = data; } if (dinfo->cfg.msi.msi_handlers == 0) pci_enable_msi(child, dinfo->cfg.msi.msi_addr, dinfo->cfg.msi.msi_data); dinfo->cfg.msi.msi_handlers++; } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; KASSERT(mte->mte_vector != 0, ("no message vector")); mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; KASSERT(mv->mv_irq == rman_get_start(irq), ("IRQ mismatch")); if (mv->mv_address == 0) { KASSERT(mte->mte_handlers == 0, ("MSI-X table entry has handlers, but vector not mapped")); error = PCIB_MAP_MSI(device_get_parent(dev), child, rman_get_start(irq), &addr, &data); if (error) goto bad; mv->mv_address = addr; mv->mv_data = data; } if (mte->mte_handlers == 0) { pci_enable_msix(child, rid - 1, mv->mv_address, mv->mv_data); pci_unmask_msix(child, rid - 1); } mte->mte_handlers++; } /* * Make sure that INTx is disabled if we are using MSI/MSI-X, * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, * in which case we "enable" INTx so MSI/MSI-X actually works. */ if (!pci_has_quirk(pci_get_devid(child), PCI_QUIRK_MSI_INTX_BUG)) pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); else pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); bad: if (error) { (void)bus_generic_teardown_intr(dev, child, irq, cookie); return (error); } } *cookiep = cookie; return (0); } int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct msix_table_entry *mte; struct resource_list_entry *rle; struct pci_devinfo *dinfo; int error, rid; if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) return (EINVAL); /* If this isn't a direct child, just bail out */ if (device_get_parent(child) != dev) return(bus_generic_teardown_intr(dev, child, irq, cookie)); rid = rman_get_rid(irq); if (rid == 0) { /* Mask INTx */ pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); } else { /* * Check to see if the interrupt is MSI or MSI-X. If so, * decrement the appropriate handlers count and mask the * MSI-X message, or disable MSI messages if the count * drops to 0. 
*/ dinfo = device_get_ivars(child); rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); if (rle->res != irq) return (EINVAL); if (dinfo->cfg.msi.msi_alloc > 0) { KASSERT(rid <= dinfo->cfg.msi.msi_alloc, ("MSI-X index too high")); if (dinfo->cfg.msi.msi_handlers == 0) return (EINVAL); dinfo->cfg.msi.msi_handlers--; if (dinfo->cfg.msi.msi_handlers == 0) pci_disable_msi(child); } else { KASSERT(dinfo->cfg.msix.msix_alloc > 0, ("No MSI or MSI-X interrupts allocated")); KASSERT(rid <= dinfo->cfg.msix.msix_table_len, ("MSI-X index too high")); mte = &dinfo->cfg.msix.msix_table[rid - 1]; if (mte->mte_handlers == 0) return (EINVAL); mte->mte_handlers--; if (mte->mte_handlers == 0) pci_mask_msix(child, rid - 1); } } error = bus_generic_teardown_intr(dev, child, irq, cookie); if (rid > 0) KASSERT(error == 0, ("%s: generic teardown failed for MSI/MSI-X", __func__)); return (error); } int pci_print_child(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd"); if (device_get_flags(dev)) retval += printf(" flags %#x", device_get_flags(dev)); retval += printf(" at device %d.%d", pci_get_slot(child), pci_get_function(child)); retval += bus_print_child_domain(dev, child); retval += bus_print_child_footer(dev, child); return (retval); } static const struct { int class; int subclass; int report; /* 0 = bootverbose, 1 = always */ const char *desc; } pci_nomatch_tab[] = { {PCIC_OLD, -1, 1, "old"}, {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"}, {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"}, {PCIC_STORAGE, -1, 1, "mass storage"}, {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"}, {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"}, {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"}, {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"}, {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"}, {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"}, {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"}, {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"}, {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"}, {PCIC_NETWORK, -1, 1, "network"}, {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"}, {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"}, {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"}, {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"}, {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"}, {PCIC_DISPLAY, -1, 1, "display"}, {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"}, {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"}, {PCIC_MULTIMEDIA, -1, 1, "multimedia"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"}, {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"}, {PCIC_MEMORY, -1, 1, "memory"}, {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, {PCIC_BRIDGE, -1, 1, "bridge"}, {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, {PCIC_BRIDGE, 
PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, {PCIC_INPUTDEV, -1, 1, "input device"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, {PCIC_DOCKING, -1, 1, "docking station"}, {PCIC_PROCESSOR, -1, 1, "processor"}, {PCIC_SERIALBUS, -1, 1, "serial bus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, {PCIC_WIRELESS, -1, 1, "wireless controller"}, {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, {PCIC_SATCOM, -1, 1, "satellite communication"}, {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, {PCIC_DASP, -1, 0, "dasp"}, {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, {0, 0, 0, NULL} }; void pci_probe_nomatch(device_t dev, device_t child) { int i, report; const char *cp, *scp; char *device; /* * Look for a listing for this device in a loaded device database. */ report = 1; if ((device = pci_describe_device(child)) != NULL) { device_printf(dev, "<%s>", device); free(device, M_DEVBUF); } else { /* * Scan the class/subclass descriptions for a general * description. */ cp = "unknown"; scp = NULL; for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { if (pci_nomatch_tab[i].class == pci_get_class(child)) { if (pci_nomatch_tab[i].subclass == -1) { cp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } else if (pci_nomatch_tab[i].subclass == pci_get_subclass(child)) { scp = pci_nomatch_tab[i].desc; report = pci_nomatch_tab[i].report; } } } if (report || bootverbose) { device_printf(dev, "<%s%s%s>", cp ? cp : "", ((cp != NULL) && (scp != NULL)) ? ", " : "", scp ? 
scp : ""); } } if (report || bootverbose) { printf(" at device %d.%d (no driver attached)\n", pci_get_slot(child), pci_get_function(child)); } pci_cfg_save(child, device_get_ivars(child), 1); } void pci_child_detached(device_t dev, device_t child) { struct pci_devinfo *dinfo; struct resource_list *rl; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* * Have to deallocate IRQs before releasing any MSI messages and * have to release MSI messages before deallocating any memory * BARs. */ if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0) pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n"); (void)pci_release_msi(child); } if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0) pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0) pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); #ifdef PCI_RES_BUS if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0) pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); #endif pci_cfg_save(child, dinfo, 1); } /* * Parse the PCI device database, if loaded, and return a pointer to a * description of the device. * * The database is flat text formatted as follows: * * Any line not in a valid format is ignored. * Lines are terminated with newline '\n' characters. * * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then * the vendor name. * * A DEVICE line is entered immediately below the corresponding VENDOR ID. * - devices cannot be listed without a corresponding VENDOR line. * A DEVICE line consists of a TAB, the 4 digit (hex) device code, * another TAB, then the device name. */ /* * Assuming (ptr) points to the beginning of a line in the database, * return the vendor or device and description of the next entry. * The value of (vendor) or (device) inappropriate for the entry type * is set to -1. Returns nonzero at the end of the database. * * Note that this is slightly unrobust in the face of corrupt data; * we attempt to safeguard against this by spamming the end of the * database with a newline when we initialise. */ static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) { char *cp = *ptr; int left; *device = -1; *vendor = -1; **desc = '\0'; for (;;) { left = pci_vendordata_size - (cp - pci_vendordata); if (left <= 0) { *ptr = cp; return(1); } /* vendor entry? */ if (*cp != '\t' && sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) break; /* device entry? */ if (*cp == '\t' && sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) break; /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n') { cp++; left--; } } /* skip to next line */ while (*cp != '\n' && left > 0) { cp++; left--; } if (*cp == '\n' && left > 0) cp++; *ptr = cp; return(0); } static char * pci_describe_device(device_t dev) { int vendor, device; char *desc, *vp, *dp, *line; desc = vp = dp = NULL; /* * If we have no vendor data, we can't do anything. 
*/ if (pci_vendordata == NULL) goto out; /* * Scan the vendor data looking for this device */ line = pci_vendordata; if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &vp)) goto out; if (vendor == pci_get_vendor(dev)) break; } if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) goto out; for (;;) { if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { *dp = 0; break; } if (vendor != -1) { *dp = 0; break; } if (device == pci_get_device(dev)) break; } if (dp[0] == '\0') snprintf(dp, 80, "0x%x", pci_get_device(dev)); if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != NULL) sprintf(desc, "%s, %s", vp, dp); out: if (vp != NULL) free(vp, M_DEVBUF); if (dp != NULL) free(dp, M_DEVBUF); return(desc); } int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; switch (which) { case PCI_IVAR_ETHADDR: /* * The generic accessor doesn't deal with failure, so * we set the return value, then return an error. */ *((uint8_t **) result) = NULL; return (EINVAL); case PCI_IVAR_SUBVENDOR: *result = cfg->subvendor; break; case PCI_IVAR_SUBDEVICE: *result = cfg->subdevice; break; case PCI_IVAR_VENDOR: *result = cfg->vendor; break; case PCI_IVAR_DEVICE: *result = cfg->device; break; case PCI_IVAR_DEVID: *result = (cfg->device << 16) | cfg->vendor; break; case PCI_IVAR_CLASS: *result = cfg->baseclass; break; case PCI_IVAR_SUBCLASS: *result = cfg->subclass; break; case PCI_IVAR_PROGIF: *result = cfg->progif; break; case PCI_IVAR_REVID: *result = cfg->revid; break; case PCI_IVAR_INTPIN: *result = cfg->intpin; break; case PCI_IVAR_IRQ: *result = cfg->intline; break; case PCI_IVAR_DOMAIN: *result = cfg->domain; break; case PCI_IVAR_BUS: *result = cfg->bus; break; case PCI_IVAR_SLOT: *result = cfg->slot; break; case PCI_IVAR_FUNCTION: *result = cfg->func; break; case PCI_IVAR_CMDREG: *result = cfg->cmdreg; break; case PCI_IVAR_CACHELNSZ: *result = cfg->cachelnsz; break; case PCI_IVAR_MINGNT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->mingnt; break; case PCI_IVAR_MAXLAT: if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { *result = -1; return (EINVAL); } *result = cfg->maxlat; break; case PCI_IVAR_LATTIMER: *result = cfg->lattimer; break; default: return (ENOENT); } return (0); } int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); switch (which) { case PCI_IVAR_INTPIN: dinfo->cfg.intpin = value; return (0); case PCI_IVAR_ETHADDR: case PCI_IVAR_SUBVENDOR: case PCI_IVAR_SUBDEVICE: case PCI_IVAR_VENDOR: case PCI_IVAR_DEVICE: case PCI_IVAR_DEVID: case PCI_IVAR_CLASS: case PCI_IVAR_SUBCLASS: case PCI_IVAR_PROGIF: case PCI_IVAR_REVID: case PCI_IVAR_IRQ: case PCI_IVAR_DOMAIN: case PCI_IVAR_BUS: case PCI_IVAR_SLOT: case PCI_IVAR_FUNCTION: return (EINVAL); /* disallow for now */ default: return (ENOENT); } } #include "opt_ddb.h" #ifdef DDB #include #include /* * List resources based on pci map registers, used for within ddb */ DB_SHOW_COMMAND(pciregs, db_pci_dump) { struct pci_devinfo *dinfo; struct devlist *devlist_head; struct pci_conf *p; const char *name; int i, error, none_count; none_count = 0; /* get the head of the device queue */ devlist_head = &pci_devq; /* * Go through the list of devices and print out devices */ for (error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); (dinfo 
!= NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); p = &dinfo->conf; db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " "chip=0x%08x rev=0x%02x hdr=0x%02x\n", (name && *name) ? name : "none", (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : none_count++, p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, p->pc_sel.pc_func, (p->pc_class << 16) | (p->pc_subclass << 8) | p->pc_progif, (p->pc_subdevice << 16) | p->pc_subvendor, (p->pc_device << 16) | p->pc_vendor, p->pc_revid, p->pc_hdr); } } #endif /* DDB */ static struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags) { struct pci_devinfo *dinfo = device_get_ivars(child); struct resource_list *rl = &dinfo->resources; struct resource *res; struct pci_map *pm; pci_addr_t map, testval; int mapsize; res = NULL; /* If rid is managed by EA, ignore it */ if (pci_ea_is_enabled(child, *rid)) goto out; pm = pci_find_bar(child, *rid); if (pm != NULL) { /* This is a BAR that we failed to allocate earlier. */ mapsize = pm->pm_size; map = pm->pm_value; } else { /* * Weed out the bogons, and figure out how large the * BAR/map is. BARs that read back 0 here are bogus * and unimplemented. Note: atapci in legacy mode are * special and handled elsewhere in the code. If you * have a atapci device in legacy mode and it fails * here, that other code is broken. */ pci_read_bar(child, *rid, &map, &testval, NULL); /* * Determine the size of the BAR and ignore BARs with a size * of 0. Device ROM BARs use a different mask value. */ if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) mapsize = pci_romsize(testval); else mapsize = pci_mapsize(testval); if (mapsize == 0) goto out; pm = pci_add_bar(child, *rid, map, mapsize); } if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { if (type != SYS_RES_MEMORY) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an memio\n", device_get_nameunit(child), type, *rid); goto out; } } else { if (type != SYS_RES_IOPORT) { if (bootverbose) device_printf(dev, "child %s requested type %d for rid %#x," " but the BAR says it is an ioport\n", device_get_nameunit(child), type, *rid); goto out; } } /* * For real BARs, we need to override the size that * the driver requests, because that's what the BAR * actually uses and we would otherwise have a * situation where we might allocate the excess to * another driver, which won't work. */ count = ((pci_addr_t)1 << mapsize) * num; if (RF_ALIGNMENT(flags) < mapsize) flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize); if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) flags |= RF_PREFETCHABLE; /* * Allocate enough resource, and then write back the * appropriate BAR for that resource. 
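The override described above amounts to recomputing the count and the minimum alignment from the BAR's own decoded size rather than from the caller's request. A sketch with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t requested = 0x100;     /* driver asked for 256 bytes */
        int mapsize = 12;               /* but the BAR decodes 4 KB */
        int num = 1;                    /* number of contiguous windows wanted */
        uint64_t count = ((uint64_t)1 << mapsize) * num;
        int align_log2 = 8;             /* alignment the caller requested: 2^8 */

        if (align_log2 < mapsize)
                align_log2 = mapsize;   /* never allocate under-aligned */
        printf("requested %#jx, allocating %#jx aligned to 2^%d\n",
            (uintmax_t)requested, (uintmax_t)count, align_log2);
        return (0);
}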
*/ resource_list_add(rl, type, *rid, start, end, count); res = resource_list_reserve(rl, dev, child, type, rid, start, end, count, flags & ~RF_ACTIVE); if (res == NULL) { resource_list_delete(rl, type, *rid); device_printf(child, "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n", count, *rid, type, start, end); goto out; } if (bootverbose) device_printf(child, "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n", count, *rid, type, rman_get_start(res)); map = rman_get_start(res); pci_write_bar(child, pm, map); out: return (res); } struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; struct resource *res; pcicfgregs *cfg; /* * Perform lazy resource allocation */ dinfo = device_get_ivars(child); rl = &dinfo->resources; cfg = &dinfo->cfg; switch (type) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) case PCI_RES_BUS: return (pci_alloc_secbus(dev, child, rid, start, end, count, flags)); #endif case SYS_RES_IRQ: /* * Can't alloc legacy interrupt once MSI messages have * been allocated. */ if (*rid == 0 && (cfg->msi.msi_alloc > 0 || cfg->msix.msix_alloc > 0)) return (NULL); /* * If the child device doesn't have an interrupt * routed and is deserving of an interrupt, try to * assign it one. */ if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && (cfg->intpin != 0)) pci_assign_interrupt(dev, child, 0); break; case SYS_RES_IOPORT: case SYS_RES_MEMORY: #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. * For those allocations just pass the request up the * tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { switch (*rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: /* * XXX: Should we bother creating a resource * list entry? */ return (bus_generic_alloc_resource(dev, child, type, rid, start, end, count, flags)); } } #endif /* Reserve resources for this BAR if needed. */ rle = resource_list_find(rl, type, *rid); if (rle == NULL) { res = pci_reserve_map(dev, child, type, rid, start, end, count, num, flags); if (res == NULL) return (NULL); } } return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #ifdef PCI_IOV struct pci_devinfo *dinfo; #endif if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); #ifdef PCI_IOV dinfo = device_get_ivars(child); if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. */ case SYS_RES_IOPORT: return (NULL); case SYS_RES_MEMORY: return (pci_vf_alloc_mem_resource(dev, child, rid, start, end, count, flags)); } /* Fall through for other types of resource allocations. */ } #endif return (pci_alloc_multi_resource(dev, child, type, rid, start, end, count, 1, flags)); } int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; struct resource_list *rl; pcicfgregs *cfg; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); dinfo = device_get_ivars(child); cfg = &dinfo->cfg; #ifdef PCI_IOV if (dinfo->cfg.flags & PCICFG_VF) { switch (type) { /* VFs can't have I/O BARs. 
*/ case SYS_RES_IOPORT: return (EDOOFUS); case SYS_RES_MEMORY: return (pci_vf_release_mem_resource(dev, child, rid, r)); } /* Fall through for other types of resource allocations. */ } #endif #ifdef NEW_PCIB /* * PCI-PCI bridge I/O window resources are not BARs. For * those allocations just pass the request up the tree. */ if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) { switch (rid) { case PCIR_IOBASEL_1: case PCIR_MEMBASE_1: case PCIR_PMBASEL_1: return (bus_generic_release_resource(dev, child, type, rid, r)); } } #endif rl = &dinfo->resources; return (resource_list_release(rl, dev, child, type, rid, r)); } int pci_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_activate_resource(dev, child, type, rid, r); if (error) return (error); /* Enable decoding in the command register when activating BARs. */ if (device_get_parent(child) == dev) { /* Device ROMs need their decoding explicitly enabled. */ dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r) | PCIM_BIOS_ENABLE); switch (type) { case SYS_RES_IOPORT: case SYS_RES_MEMORY: error = PCI_ENABLE_IO(dev, child, type); break; } } return (error); } int pci_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct pci_devinfo *dinfo; int error; error = bus_generic_deactivate_resource(dev, child, type, rid, r); if (error) return (error); /* Disable decoding for device ROMs. */ if (device_get_parent(child) == dev) { dinfo = device_get_ivars(child); if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) pci_write_bar(child, pci_find_bar(child, rid), rman_get_start(r)); } return (0); } void pci_child_deleted(device_t dev, device_t child) { struct resource_list_entry *rle; struct resource_list *rl; struct pci_devinfo *dinfo; dinfo = device_get_ivars(child); rl = &dinfo->resources; /* Turn off access to resources we're about to free */ if (bus_child_present(child) != 0) { pci_write_config(child, PCIR_COMMAND, pci_read_config(child, PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2); pci_disable_busmaster(child); } /* Free all allocated resources */ STAILQ_FOREACH(rle, rl, link) { if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, rle->type, rle->rid)) { pci_printf(&dinfo->cfg, "Resource still owned, oops. " "(type=%d, rid=%d, addr=%lx)\n", rle->type, rle->rid, rman_get_start(rle->res)); bus_release_resource(child, rle->type, rle->rid, rle->res); } resource_list_unreserve(rl, dev, child, rle->type, rle->rid); } } resource_list_free(rl); pci_freecfg(dinfo); } void pci_delete_resource(device_t dev, device_t child, int type, int rid) { struct pci_devinfo *dinfo; struct resource_list *rl; struct resource_list_entry *rle; if (device_get_parent(child) != dev) return; dinfo = device_get_ivars(child); rl = &dinfo->resources; rle = resource_list_find(rl, type, rid); if (rle == NULL) return; if (rle->res) { if (rman_get_flags(rle->res) & RF_ACTIVE || resource_list_busy(rl, type, rid)) { device_printf(dev, "delete_resource: " "Resource still owned by child, oops. 
" "(type=%d, rid=%d, addr=%jx)\n", type, rid, rman_get_start(rle->res)); return; } resource_list_unreserve(rl, dev, child, type, rid); } resource_list_delete(rl, type, rid); } struct resource_list * pci_get_resource_list (device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); return (&dinfo->resources); } bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev) { struct pci_softc *sc = device_get_softc(bus); return (sc->sc_dma_tag); } uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; #ifdef PCI_IOV /* * SR-IOV VFs don't implement the VID or DID registers, so we have to * emulate them here. */ if (cfg->flags & PCICFG_VF) { if (reg == PCIR_VENDOR) { switch (width) { case 4: return (cfg->device << 16 | cfg->vendor); case 2: return (cfg->vendor); case 1: return (cfg->vendor & 0xff); default: return (0xffffffff); } } else if (reg == PCIR_DEVICE) { switch (width) { /* Note that an unaligned 4-byte read is an error. */ case 2: return (cfg->device); case 1: return (cfg->device & 0xff); default: return (0xffffffff); } } } #endif return (PCIB_READ_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, width)); } void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; PCIB_WRITE_CONFIG(device_get_parent(dev), cfg->bus, cfg->slot, cfg->func, reg, val, width); } int pci_child_location_str_method(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child), pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); return (0); } int pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, size_t buflen) { struct pci_devinfo *dinfo; pcicfgregs *cfg; dinfo = device_get_ivars(child); cfg = &dinfo->cfg; snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x " "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, cfg->progif); return (0); } int pci_assign_interrupt_method(device_t dev, device_t child) { struct pci_devinfo *dinfo = device_get_ivars(child); pcicfgregs *cfg = &dinfo->cfg; return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, cfg->intpin)); } static void pci_lookup(void *arg, const char *name, device_t *dev) { long val; char *end; int domain, bus, slot, func; if (*dev != NULL) return; /* * Accept pciconf-style selectors of either pciD:B:S:F or * pciB:S:F. In the latter case, the domain is assumed to * be zero. 
*/ if (strncmp(name, "pci", 3) != 0) return; val = strtol(name + 3, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; domain = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != ':') return; bus = val; val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX) return; slot = val; if (*end == ':') { val = strtol(end + 1, &end, 10); if (val < 0 || val > INT_MAX || *end != '\0') return; func = val; } else if (*end == '\0') { func = slot; slot = bus; bus = domain; domain = 0; } else return; if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX || func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX)) return; *dev = pci_find_dbsf(domain, bus, slot, func); } static int pci_modevent(module_t mod, int what, void *arg) { static struct cdev *pci_cdev; static eventhandler_tag tag; switch (what) { case MOD_LOAD: STAILQ_INIT(&pci_devq); pci_generation = 0; pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, "pci"); pci_load_vendor_data(); tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL, 1000); break; case MOD_UNLOAD: if (tag != NULL) EVENTHANDLER_DEREGISTER(dev_lookup, tag); destroy_dev(pci_cdev); break; } return (0); } static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo) { #define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); if (version > 1) { WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); } #undef WREG } static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo) { pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, dinfo->cfg.pcix.pcix_command, 2); } void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo) { /* * Restore the device to full power mode. We must do this * before we restore the registers because moving from D3 to * D0 will cause the chip's BARs and some other registers to * be reset to some unknown power on reset values. Cut down * the noise on boot by doing nothing if we are already in * state D0. 
*/ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); break; case PCIM_HDRTYPE_BRIDGE: pci_write_config(dev, PCIR_SECLAT_1, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_1, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_1, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_1, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_1, dinfo->cfg.bridge.br_control, 2); break; case PCIM_HDRTYPE_CARDBUS: pci_write_config(dev, PCIR_SECLAT_2, dinfo->cfg.bridge.br_seclat, 1); pci_write_config(dev, PCIR_SUBBUS_2, dinfo->cfg.bridge.br_subbus, 1); pci_write_config(dev, PCIR_SECBUS_2, dinfo->cfg.bridge.br_secbus, 1); pci_write_config(dev, PCIR_PRIBUS_2, dinfo->cfg.bridge.br_pribus, 1); pci_write_config(dev, PCIR_BRIDGECTL_2, dinfo->cfg.bridge.br_control, 2); break; } pci_restore_bars(dev); /* * Restore extended capabilities for PCI-Express and PCI-X */ if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_restore_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_restore_pcix(dev, dinfo); /* Restore MSI and MSI-X configurations if they are present. */ if (dinfo->cfg.msi.msi_location != 0) pci_resume_msi(dev); if (dinfo->cfg.msix.msix_location != 0) pci_resume_msix(dev); +#ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_restore(dev, dinfo); - +#endif } static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo) { #define RREG(n) pci_read_config(dev, pos + (n), 2) struct pcicfg_pcie *cfg; int version, pos; cfg = &dinfo->cfg.pcie; pos = cfg->pcie_location; cfg->pcie_flags = RREG(PCIER_FLAGS); version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ENDPOINT || cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || cfg->pcie_type == PCIEM_TYPE_ROOT_EC) cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); if (version > 1) { cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); } #undef RREG } static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo) { dinfo->cfg.pcix.pcix_command = pci_read_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); } void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate) { uint32_t cls; int ps; /* * Some drivers apparently write to these registers w/o updating our * cached copy. No harm happens if we update the copy, so do so here * so we can restore them. The COMMAND register is modified by the * bus w/o updating the cache. 
This should represent the normally * writable portion of the 'defined' part of type 0/1/2 headers. */ dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); break; case PCIM_HDRTYPE_BRIDGE: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_1, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_1, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_1, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_1, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_1, 2); break; case PCIM_HDRTYPE_CARDBUS: dinfo->cfg.bridge.br_seclat = pci_read_config(dev, PCIR_SECLAT_2, 1); dinfo->cfg.bridge.br_subbus = pci_read_config(dev, PCIR_SUBBUS_2, 1); dinfo->cfg.bridge.br_secbus = pci_read_config(dev, PCIR_SECBUS_2, 1); dinfo->cfg.bridge.br_pribus = pci_read_config(dev, PCIR_PRIBUS_2, 1); dinfo->cfg.bridge.br_control = pci_read_config(dev, PCIR_BRIDGECTL_2, 2); dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); break; } if (dinfo->cfg.pcie.pcie_location != 0) pci_cfg_save_pcie(dev, dinfo); if (dinfo->cfg.pcix.pcix_location != 0) pci_cfg_save_pcix(dev, dinfo); +#ifdef PCI_IOV if (dinfo->cfg.iov != NULL) pci_iov_cfg_save(dev, dinfo); +#endif /* * don't set the state for display devices, base peripherals and * memory devices since bad things happen when they are powered down. * We should (a) have drivers that can easily detach and (b) use * generic drivers for these devices so that some device actually * attaches. We need to make sure that when we implement (a) we don't * power the device down on a reattach. */ cls = pci_get_class(dev); if (!setstate) return; switch (pci_do_power_nodriver) { case 0: /* NO powerdown at all */ return; case 1: /* Conservative about what to power down */ if (cls == PCIC_STORAGE) return; /*FALLTHROUGH*/ case 2: /* Aggressive about what to power down */ if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY || cls == PCIC_BASEPERIPH) return; /*FALLTHROUGH*/ case 3: /* Power down everything */ break; } /* * PCI spec says we can only go into D3 state from D0 state. * Transition from D[12] into D0 before going to D3 state. */ ps = pci_get_powerstate(dev); if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3) pci_set_powerstate(dev, PCI_POWERSTATE_D3); } /* Wrapper APIs suitable for device driver use. 
*/ void pci_save_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); } void pci_restore_state(device_t dev) { struct pci_devinfo *dinfo; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); } static uint16_t pci_get_rid_method(device_t dev, device_t child) { return (PCIB_GET_RID(device_get_parent(dev), child)); } /* Find the upstream port of a given PCI device in a root complex. */ device_t pci_find_pcie_root_port(device_t dev) { struct pci_devinfo *dinfo; devclass_t pci_class; device_t pcib, bus; pci_class = devclass_find("pci"); KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class, ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); /* * Walk the bridge hierarchy until we find a PCI-e root * port or a non-PCI device. */ for (;;) { bus = device_get_parent(dev); KASSERT(bus != NULL, ("%s: null parent of %s", __func__, device_get_nameunit(dev))); pcib = device_get_parent(bus); KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__, device_get_nameunit(bus))); /* * pcib's parent must be a PCI bus for this to be a * PCI-PCI bridge. */ if (device_get_devclass(device_get_parent(pcib)) != pci_class) return (NULL); dinfo = device_get_ivars(pcib); if (dinfo->cfg.pcie.pcie_location != 0 && dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) return (pcib); dev = pcib; } } Index: user/ngie/release-pkg-fix-tests/sys/kern/subr_hash.c =================================================================== --- user/ngie/release-pkg-fix-tests/sys/kern/subr_hash.c (revision 299054) +++ user/ngie/release-pkg-fix-tests/sys/kern/subr_hash.c (revision 299055) @@ -1,150 +1,152 @@ /*- * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94 */ #include __FBSDID("$FreeBSD$"); #include #include #include +static __inline int +hash_mflags(int flags) +{ + + return ((flags & HASH_NOWAIT) ? M_NOWAIT : M_WAITOK); +} + /* * General routine to allocate a hash table with control of memory flags. */ void * hashinit_flags(int elements, struct malloc_type *type, u_long *hashmask, int flags) { long hashsize; LIST_HEAD(generic, generic) *hashtbl; int i; KASSERT(elements > 0, ("%s: bad elements", __func__)); /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */ KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT), ("Bad flags (0x%x) passed to hashinit_flags", flags)); for (hashsize = 1; hashsize <= elements; hashsize <<= 1) continue; hashsize >>= 1; - if (flags & HASH_NOWAIT) - hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), - type, M_NOWAIT); - else - hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), - type, M_WAITOK); - + hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, + hash_mflags(flags)); if (hashtbl != NULL) { for (i = 0; i < hashsize; i++) LIST_INIT(&hashtbl[i]); *hashmask = hashsize - 1; } return (hashtbl); } /* * Allocate and initialize a hash table with default flag: may sleep. */ void * hashinit(int elements, struct malloc_type *type, u_long *hashmask) { return (hashinit_flags(elements, type, hashmask, HASH_WAITOK)); } void hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask) { LIST_HEAD(generic, generic) *hashtbl, *hp; hashtbl = vhashtbl; for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++) KASSERT(LIST_EMPTY(hp), ("%s: hashtbl %p not empty " "(malloc type %s)", __func__, hashtbl, type->ks_shortdesc)); free(hashtbl, type); } static const int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039, 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653, 7159, 7673, 8191, 12281, 16381, 24571, 32749 }; #define NPRIMES nitems(primes) /* * General routine to allocate a prime number sized hash table with control of * memory flags. */ void * phashinit_flags(int elements, struct malloc_type *type, u_long *nentries, int flags) { long hashsize; LIST_HEAD(generic, generic) *hashtbl; - int i, m_flags; + int i; KASSERT(elements > 0, ("%s: bad elements", __func__)); /* Exactly one of HASH_WAITOK and HASH_NOWAIT must be set. */ KASSERT((flags & HASH_WAITOK) ^ (flags & HASH_NOWAIT), ("Bad flags (0x%x) passed to phashinit_flags", flags)); for (i = 1, hashsize = primes[1]; hashsize <= elements;) { i++; if (i == NPRIMES) break; hashsize = primes[i]; } hashsize = primes[i - 1]; - m_flags = (flags & HASH_NOWAIT) ? M_NOWAIT : M_WAITOK; - hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, m_flags); + hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, + hash_mflags(flags)); if (hashtbl == NULL) return (NULL); for (i = 0; i < hashsize; i++) LIST_INIT(&hashtbl[i]); *nentries = hashsize; return (hashtbl); } /* * Allocate and initialize a prime number sized hash table with default flag: * may sleep. */ void * phashinit(int elements, struct malloc_type *type, u_long *nentries) { return (phashinit_flags(elements, type, nentries, HASH_WAITOK)); } Index: user/ngie/release-pkg-fix-tests/usr.bin/mkuzip/Makefile.depend =================================================================== --- user/ngie/release-pkg-fix-tests/usr.bin/mkuzip/Makefile.depend (revision 299054) +++ user/ngie/release-pkg-fix-tests/usr.bin/mkuzip/Makefile.depend (revision 299055) @@ -1,22 +1,23 @@ # $FreeBSD$ # Autogenerated - do NOT edit! 
DIRDEPS = \ gnu/lib/csu \ gnu/lib/libgcc \ include \ include/xlocale \ lib/${CSU_DIR} \ lib/libc \ lib/libcompiler_rt \ lib/liblzma \ lib/libmd \ lib/libthr \ lib/libz \ + lib/msun \ .include <dirdeps.mk> .if ${DEP_RELDIR} == ${_DEP_RELDIR} # local dependencies - needed for -jN in clean tree .endif Index: user/ngie/release-pkg-fix-tests =================================================================== --- user/ngie/release-pkg-fix-tests (revision 299054) +++ user/ngie/release-pkg-fix-tests (revision 299055) Property changes on: user/ngie/release-pkg-fix-tests ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r299013-299054
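
As a usage illustration for the hashinit_flags()/hashdestroy() KPI touched by the subr_hash.c hunk above: hash_mflags() only centralizes the HASH_NOWAIT/HASH_WAITOK to M_NOWAIT/M_WAITOK translation, and the sizing logic is unchanged, so a request for 100 elements still yields the largest power of two not exceeding 100, i.e. 64 buckets and a hashmask of 63. The sketch below is illustrative only: the foo_* identifiers, M_FOO, and the element count are hypothetical; only the hash KPI itself comes from the code above.

/*
 * Minimal caller sketch for hashinit_flags()/hashdestroy().
 * The foo_* names and M_FOO are hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>

static MALLOC_DEFINE(M_FOO, "foohash", "hypothetical foo hash table");

struct foo_entry {
	LIST_ENTRY(foo_entry)	fe_link;
	uint32_t		fe_key;
};

static LIST_HEAD(foo_hashhead, foo_entry) *foo_hashtbl;
static u_long foo_hashmask;

static int
foo_hash_init(void)
{

	/*
	 * Asking for 100 elements yields the largest power of two that
	 * fits, i.e. 64 buckets, so foo_hashmask ends up as 63.
	 * HASH_NOWAIT means the underlying malloc() may fail.
	 */
	foo_hashtbl = hashinit_flags(100, M_FOO, &foo_hashmask, HASH_NOWAIT);
	if (foo_hashtbl == NULL)
		return (ENOMEM);
	return (0);
}

static void
foo_hash_fini(void)
{

	/* Every bucket must already be empty or hashdestroy() asserts. */
	hashdestroy(foo_hashtbl, M_FOO, foo_hashmask);
	foo_hashtbl = NULL;
}

phashinit_flags() is used the same way when a prime-sized table is preferred; per the code above it returns the bucket count through *nentries instead of a mask, and on failure with HASH_NOWAIT it likewise returns NULL.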