Index: projects/clang360-import/contrib/amd/amd/amfs_program.c =================================================================== --- projects/clang360-import/contrib/amd/amd/amfs_program.c (revision 277895) +++ projects/clang360-import/contrib/amd/amd/amfs_program.c (revision 277896) @@ -1,205 +1,207 @@ /* * Copyright (c) 1997-2006 Erez Zadok * Copyright (c) 1989 Jan-Simon Pendry * Copyright (c) 1989 Imperial College of Science, Technology & Medicine * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry at Imperial College, London. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * File: am-utils/amd/amfs_program.c * */ /* * Program file system */ #ifdef HAVE_CONFIG_H # include #endif /* HAVE_CONFIG_H */ #include #include /* forward definitions */ static char *amfs_program_match(am_opts *fo); static int amfs_program_mount(am_node *am, mntfs *mf); static int amfs_program_umount(am_node *am, mntfs *mf); static int amfs_program_init(mntfs *mf); /* * Ops structure */ am_ops amfs_program_ops = { "program", amfs_program_match, amfs_program_init, amfs_program_mount, amfs_program_umount, amfs_error_lookup_child, amfs_error_mount_child, amfs_error_readdir, 0, /* amfs_program_readlink */ 0, /* amfs_program_mounted */ 0, /* amfs_program_umounted */ amfs_generic_find_srvr, 0, /* amfs_program_get_wchan */ FS_MKMNT | FS_BACKGROUND | FS_AMQINFO, /* nfs_fs_flags */ #ifdef HAVE_FS_AUTOFS AUTOFS_PROGRAM_FS_FLAGS, #endif /* HAVE_FS_AUTOFS */ }; /* * Execute needs a mount and unmount command. 
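 * A typical map entry looks like this (program paths are illustrative
 * only):
 *
 *   type:=program;\
 *       mount:="/sbin/mount_cd9660 mount_cd9660 -r /dev/cd0 ${fs}";\
 *       unmount:="/sbin/umount umount ${fs}"
 *
 * The first token of each command is the program to execv(); the
 * remaining tokens become its argv[] starting with argv[0], which is
 * why amfs_program_exec() below requires at least two tokens.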
*/ static char * amfs_program_match(am_opts *fo) { char *prog; if (fo->opt_unmount && fo->opt_umount) { plog(XLOG_ERROR, "program: cannot specify both unmount and umount options"); return 0; } if (!fo->opt_mount) { plog(XLOG_ERROR, "program: must specify mount command"); return 0; } if (!fo->opt_unmount && !fo->opt_umount) { fo->opt_unmount = str3cat(NULL, UNMOUNT_PROGRAM, " umount ", fo->opt_fs); plog(XLOG_INFO, "program: un/umount not specified; using default \"%s\"", fo->opt_unmount); } prog = strchr(fo->opt_mount, ' '); return strdup(prog ? prog + 1 : fo->opt_mount); } static int amfs_program_init(mntfs *mf) { /* check if already saved value */ if (mf->mf_private != NULL) return 0; /* save unmount (or umount) command */ if (mf->mf_fo->opt_unmount != NULL) mf->mf_private = (opaque_t) strdup(mf->mf_fo->opt_unmount); else mf->mf_private = (opaque_t) strdup(mf->mf_fo->opt_umount); mf->mf_prfree = (void (*)(opaque_t)) free; return 0; } static int amfs_program_exec(char *info) { char **xivec; int error; /* * Split copy of command info string */ info = strdup(info); if (info == 0) return ENOBUFS; xivec = strsplit(info, ' ', '\''); /* * Redirect stdout to stderr */ (void) fclose(stdout); if (!logfp) logfp = stderr; /* initialize before possible first use */ - (void) dup(fileno(logfp)); + if (dup(fileno(logfp)) == -1) + return errno; if (fileno(logfp) != fileno(stderr)) { (void) fclose(stderr); - (void) dup(fileno(logfp)); + if (dup(fileno(logfp)) == -1) + return errno; } /* * Try the exec */ if (amuDebug(D_FULL)) { char **cp = xivec; plog(XLOG_DEBUG, "executing (un)mount command..."); while (*cp) { plog(XLOG_DEBUG, "arg[%ld] = '%s'", (long) (cp - xivec), *cp); cp++; } } if (xivec[0] == 0 || xivec[1] == 0) { errno = EINVAL; plog(XLOG_USER, "1st/2nd args missing to (un)mount program"); } else { (void) execv(xivec[0], xivec + 1); } /* * Save error number */ error = errno; plog(XLOG_ERROR, "exec failed: %m"); /* * Free allocated memory */ XFREE(info); XFREE(xivec); /* * Return error */ return error; } static int amfs_program_mount(am_node *am, mntfs *mf) { return amfs_program_exec(mf->mf_fo->opt_mount); } static int amfs_program_umount(am_node *am, mntfs *mf) { return amfs_program_exec((char *) mf->mf_private); } Index: projects/clang360-import/contrib/amd/amd/readdir.c =================================================================== --- projects/clang360-import/contrib/amd/amd/readdir.c (revision 277895) +++ projects/clang360-import/contrib/amd/amd/readdir.c (revision 277896) @@ -1,498 +1,509 @@ /* * Copyright (c) 1997-2006 Erez Zadok * Copyright (c) 1990 Jan-Simon Pendry * Copyright (c) 1990 Imperial College of Science, Technology & Medicine * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry at Imperial College, London. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * File: am-utils/amd/readdir.c * */ #ifdef HAVE_CONFIG_H # include #endif /* HAVE_CONFIG_H */ #include #include /**************************************************************************** *** MACROS *** ****************************************************************************/ #define DOT_DOT_COOKIE (u_int) 1 #define MAX_CHAIN 2048 /**************************************************************************** *** FORWARD DEFINITIONS *** ****************************************************************************/ static int key_already_in_chain(char *keyname, const nfsentry *chain); static nfsentry *make_entry_chain(am_node *mp, const nfsentry *current_chain, int fully_browsable); static int amfs_readdir_browsable(am_node *mp, nfscookie cookie, nfsdirlist *dp, nfsentry *ep, u_int count, int fully_browsable); +static const u_int dotdotcookie = DOT_DOT_COOKIE; /**************************************************************************** *** FUNCTIONS *** ****************************************************************************/ /* * Was: NEW_TOPLVL_READDIR * Search a chain for an entry with some name. * -Erez Zadok */ static int key_already_in_chain(char *keyname, const nfsentry *chain) { const nfsentry *tmpchain = chain; while (tmpchain) { if (keyname && tmpchain->ne_name && STREQ(keyname, tmpchain->ne_name)) return 1; tmpchain = tmpchain->ne_nextentry; } return 0; } /* * Create a chain of entries which are not linked. 
* -Erez Zadok */ static nfsentry * make_entry_chain(am_node *mp, const nfsentry *current_chain, int fully_browsable) { static u_int last_cookie = (u_int) 2; /* monotonically increasing */ static nfsentry chain[MAX_CHAIN]; static int max_entries = MAX_CHAIN; char *key; int num_entries = 0, i; u_int preflen = 0; nfsentry *retval = (nfsentry *) NULL; mntfs *mf; mnt_map *mmp; if (!mp) { plog(XLOG_DEBUG, "make_entry_chain: mp is (NULL)"); return retval; } mf = mp->am_mnt; if (!mf) { plog(XLOG_DEBUG, "make_entry_chain: mp->am_mnt is (NULL)"); return retval; } mmp = (mnt_map *) mf->mf_private; if (!mmp) { plog(XLOG_DEBUG, "make_entry_chain: mp->am_mnt->mf_private is (NULL)"); return retval; } if (mp->am_pref) preflen = strlen(mp->am_pref); /* iterate over keys */ for (i = 0; i < NKVHASH; i++) { kv *k; for (k = mmp->kvhash[i]; k ; k = k->next) { /* * Skip unwanted entries which are either not real entries or * very difficult to interpret (wildcards...) This test needs * lots of improvement. Any takers? */ key = k->key; if (!key) continue; /* Skip '/defaults' */ if (STREQ(key, "/defaults")) continue; /* Skip '*' */ if (!fully_browsable && strchr(key, '*')) continue; /* * If the map has a prefix-string then check if the key starts with * this string, and if it does, skip over this prefix. If it has a * prefix and it doesn't match the start of the key, skip it. */ if (preflen) { if (preflen > strlen(key)) continue; if (!NSTREQ(key, mp->am_pref, preflen)) continue; key += preflen; } /* no more '/' are allowed, unless browsable_dirs=full was used */ if (!fully_browsable && strchr(key, '/')) continue; /* no duplicates allowed */ if (key_already_in_chain(key, current_chain)) continue; /* fill in a cell and link the entry */ if (num_entries >= max_entries) { /* out of space */ plog(XLOG_DEBUG, "make_entry_chain: no more space in chain"); if (num_entries > 0) { chain[num_entries - 1].ne_nextentry = 0; retval = &chain[0]; } return retval; } /* we have space. put entry in next cell */ ++last_cookie; - chain[num_entries].ne_fileid = (u_int) last_cookie; - *(u_int *) chain[num_entries].ne_cookie = (u_int) last_cookie; + chain[num_entries].ne_fileid = last_cookie; + (void)memcpy(chain[num_entries].ne_cookie, &last_cookie, + sizeof(last_cookie)); chain[num_entries].ne_name = key; if (num_entries < max_entries - 1) { /* link to next one */ chain[num_entries].ne_nextentry = &chain[num_entries + 1]; } ++num_entries; } /* end of "while (k)" */ } /* end of "for (i ... NKVHASH ..." */ /* terminate chain */ if (num_entries > 0) { chain[num_entries - 1].ne_nextentry = 0; retval = &chain[0]; } return retval; } /* This one is called only if map is browsable */ static int amfs_readdir_browsable(am_node *mp, nfscookie cookie, nfsdirlist *dp, nfsentry *ep, u_int count, int fully_browsable) { u_int gen = *(u_int *) cookie; int chain_length, i; static nfsentry *te, *te_next; static int j; dp->dl_eof = FALSE; /* assume readdir not done */ if (amuDebug(D_READDIR)) plog(XLOG_DEBUG, "amfs_readdir_browsable gen=%u, count=%d", gen, count); if (gen == 0) { /* * In the default instance (which is used to start a search) we return * "." and "..". * * This assumes that the count is big enough to allow both "." and ".." * to be returned in a single packet. If it isn't (which would be * fairly unbelievable) then tough. */ dlog("amfs_readdir_browsable: default search"); /* * Check for enough room. This is extremely approximate but is more * than enough space. 
Really need 2 times: * 4byte fileid * 4byte cookie * 4byte name length * 4byte name * plus the dirlist structure */ if (count < (2 * (2 * (sizeof(*ep) + sizeof("..") + 4) + sizeof(*dp)))) return EINVAL; /* * compute # of entries to send in this chain. * heuristics: 128 bytes per entry. * This is too much probably, but it seems to work better because * of the re-entrant nature of nfs_readdir, and esp. on systems * like OpenBSD 2.2. */ chain_length = count / 128; /* reset static state counters */ te = te_next = NULL; dp->dl_entries = ep; /* construct "." */ ep[0].ne_fileid = mp->am_gen; ep[0].ne_name = "."; ep[0].ne_nextentry = &ep[1]; - *(u_int *) ep[0].ne_cookie = 0; + (void)memset(ep[0].ne_cookie, 0, sizeof(u_int)); /* construct ".." */ if (mp->am_parent) ep[1].ne_fileid = mp->am_parent->am_gen; else ep[1].ne_fileid = mp->am_gen; ep[1].ne_name = ".."; ep[1].ne_nextentry = 0; *(u_int *) ep[1].ne_cookie = DOT_DOT_COOKIE; /* * If map is browsable, call a function make_entry_chain() to construct * a linked list of unmounted keys, and return it. Then link the chain * to the regular list. Get the chain only once, but return * chunks of it each time. */ te = make_entry_chain(mp, dp->dl_entries, fully_browsable); if (!te) return 0; if (amuDebug(D_READDIR)) { nfsentry *ne; for (j = 0, ne = te; ne; ne = ne->ne_nextentry) plog(XLOG_DEBUG, "gen1 key %4d \"%s\"", j++, ne->ne_name); } /* return only "chain_length" entries */ te_next = te; for (i = 1; i < chain_length; ++i) { te_next = te_next->ne_nextentry; if (!te_next) break; } if (te_next) { nfsentry *te_saved = te_next->ne_nextentry; te_next->ne_nextentry = NULL; /* terminate "te" chain */ te_next = te_saved; /* save rest of "te" for next iteration */ dp->dl_eof = FALSE; /* tell readdir there's more */ } else { dp->dl_eof = TRUE; /* tell readdir that's it */ } ep[1].ne_nextentry = te; /* append this chunk of "te" chain */ if (amuDebug(D_READDIR)) { nfsentry *ne; for (j = 0, ne = te; ne; ne = ne->ne_nextentry) plog(XLOG_DEBUG, "gen2 key %4d \"%s\"", j++, ne->ne_name); - for (j = 0, ne = ep; ne; ne = ne->ne_nextentry) + for (j = 0, ne = ep; ne; ne = ne->ne_nextentry) { + u_int cookie; + (void)memcpy(&cookie, ne->ne_cookie, sizeof(cookie)); plog(XLOG_DEBUG, "gen2+ key %4d \"%s\" fi=%d ck=%d", - j++, ne->ne_name, ne->ne_fileid, *(u_int *)ne->ne_cookie); + j++, ne->ne_name, ne->ne_fileid, cookie); + } plog(XLOG_DEBUG, "EOF is %d", dp->dl_eof); } return 0; } /* end of "if (gen == 0)" statement */ dlog("amfs_readdir_browsable: real child"); if (gen == DOT_DOT_COOKIE) { dlog("amfs_readdir_browsable: End of readdir in %s", mp->am_path); dp->dl_eof = TRUE; dp->dl_entries = 0; return 0; } /* * If browsable directories, then continue serving readdir() with another * chunk of entries, starting from where we left off (when gen was equal * to 0). Once again, assume last chunk served to readdir. */ dp->dl_eof = TRUE; dp->dl_entries = ep; te = te_next; /* reset 'te' from last saved te_next */ if (!te) { /* another indicator of end of readdir */ dp->dl_entries = 0; return 0; } /* * compute # of entries to send in this chain. * heuristics: 128 bytes per entry.
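 * For example, an NFS readdir request with an 8 KB buffer
 * (count == 8192) yields chain_length == 64 entries per reply.
 *
 * Note also that cookie values are written with memcpy()/memset()
 * rather than through a (u_int *) cast of ne_cookie: the cookie field
 * is a plain byte array with no alignment guarantee, so the old
 * *(u_int *) stores were undefined behavior on strict-alignment CPUs.
 * The pattern, assuming a 4-byte cookie:
 *
 *   u_int c = 42;
 *   memcpy(ep->ne_cookie, &c, sizeof(c));   (store)
 *   memcpy(&c, ep->ne_cookie, sizeof(c));   (load)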
*/ chain_length = count / 128; /* return only "chain_length" entries */ for (i = 1; i < chain_length; ++i) { te_next = te_next->ne_nextentry; if (!te_next) break; } if (te_next) { nfsentry *te_saved = te_next->ne_nextentry; te_next->ne_nextentry = NULL; /* terminate "te" chain */ te_next = te_saved; /* save rest of "te" for next iteration */ dp->dl_eof = FALSE; /* tell readdir there's more */ } ep = te; /* send next chunk of "te" chain */ dp->dl_entries = ep; if (amuDebug(D_READDIR)) { nfsentry *ne; plog(XLOG_DEBUG, "dl_entries=%p, te_next=%p, dl_eof=%d", dp->dl_entries, te_next, dp->dl_eof); for (ne = te; ne; ne = ne->ne_nextentry) plog(XLOG_DEBUG, "gen3 key %4d \"%s\"", j++, ne->ne_name); } return 0; } /* * This readdir function calls a special version of itself that allows * browsing if browsable_dirs=yes was set on the map. */ int amfs_generic_readdir(am_node *mp, nfscookie cookie, nfsdirlist *dp, nfsentry *ep, u_int count) { u_int gen = *(u_int *) cookie; am_node *xp; mntent_t mnt; dp->dl_eof = FALSE; /* assume readdir not done */ /* check if map is browsable */ if (mp->am_mnt && mp->am_mnt->mf_mopts) { mnt.mnt_opts = mp->am_mnt->mf_mopts; if (amu_hasmntopt(&mnt, "fullybrowsable")) return amfs_readdir_browsable(mp, cookie, dp, ep, count, TRUE); if (amu_hasmntopt(&mnt, "browsable")) return amfs_readdir_browsable(mp, cookie, dp, ep, count, FALSE); } /* when gen is 0, we start reading from the beginning of the directory */ if (gen == 0) { /* * In the default instance (which is used to start a search) we return * "." and "..". * * This assumes that the count is big enough to allow both "." and ".." * to be returned in a single packet. If it isn't (which would be * fairly unbelievable) then tough. */ dlog("amfs_generic_readdir: default search"); /* * Check for enough room. This is extremely approximate but is more * than enough space. Really need 2 times: * 4byte fileid * 4byte cookie * 4byte name length * 4byte name * plus the dirlist structure */ if (count < (2 * (2 * (sizeof(*ep) + sizeof("..") + 4) + sizeof(*dp)))) return EINVAL; xp = next_nonerror_node(mp->am_child); dp->dl_entries = ep; /* construct "." */ ep[0].ne_fileid = mp->am_gen; ep[0].ne_name = "."; ep[0].ne_nextentry = &ep[1]; - *(u_int *) ep[0].ne_cookie = 0; + (void)memset(ep[0].ne_cookie, 0, sizeof(u_int)); /* construct ".." */ if (mp->am_parent) ep[1].ne_fileid = mp->am_parent->am_gen; else ep[1].ne_fileid = mp->am_gen; ep[1].ne_name = ".."; ep[1].ne_nextentry = 0; *(u_int *) ep[1].ne_cookie = (xp ?
xp->am_gen : DOT_DOT_COOKIE); if (!xp) dp->dl_eof = TRUE; /* by default assume readdir done */ if (amuDebug(D_READDIR)) { nfsentry *ne; int j; - for (j = 0, ne = ep; ne; ne = ne->ne_nextentry) + for (j = 0, ne = ep; ne; ne = ne->ne_nextentry) { + u_int cookie; + (void)memcpy(&cookie, ne->ne_cookie, sizeof(cookie)); plog(XLOG_DEBUG, "gen1 key %4d \"%s\" fi=%d ck=%d", - j++, ne->ne_name, ne->ne_fileid, *(u_int *)ne->ne_cookie); + j++, ne->ne_name, ne->ne_fileid, cookie); + } } return 0; } dlog("amfs_generic_readdir: real child"); if (gen == DOT_DOT_COOKIE) { dlog("amfs_generic_readdir: End of readdir in %s", mp->am_path); dp->dl_eof = TRUE; dp->dl_entries = 0; if (amuDebug(D_READDIR)) plog(XLOG_DEBUG, "end of readdir eof=TRUE, dl_entries=0\n"); return 0; } /* non-browsable directories code */ xp = mp->am_child; while (xp && xp->am_gen != gen) xp = xp->am_osib; if (xp) { int nbytes = count / 2; /* conservative */ int todo = MAX_READDIR_ENTRIES; dp->dl_entries = ep; do { am_node *xp_next = next_nonerror_node(xp->am_osib); if (xp_next) { - *(u_int *) ep->ne_cookie = xp_next->am_gen; + (void)memcpy(ep->ne_cookie, &xp_next->am_gen, sizeof(xp_next->am_gen)); } else { - *(u_int *) ep->ne_cookie = DOT_DOT_COOKIE; + (void)memcpy(ep->ne_cookie, &dotdotcookie, sizeof(dotdotcookie)); dp->dl_eof = TRUE; } ep->ne_fileid = xp->am_gen; ep->ne_name = xp->am_name; nbytes -= sizeof(*ep) + 1; if (xp->am_name) nbytes -= strlen(xp->am_name); xp = xp_next; if (nbytes > 0 && !dp->dl_eof && todo > 1) { ep->ne_nextentry = ep + 1; ep++; --todo; } else { todo = 0; } } while (todo > 0); ep->ne_nextentry = 0; if (amuDebug(D_READDIR)) { nfsentry *ne; int j; - for (j=0,ne=ep; ne; ne=ne->ne_nextentry) + for (j=0,ne=ep; ne; ne=ne->ne_nextentry) { + u_int cookie; + (void)memcpy(&cookie, ne->ne_cookie, sizeof(cookie)); plog(XLOG_DEBUG, "gen2 key %4d \"%s\" fi=%d ck=%d", - j++, ne->ne_name, ne->ne_fileid, *(u_int *)ne->ne_cookie); + j++, ne->ne_name, ne->ne_fileid, cookie); + } } return 0; } return ESTALE; } Index: projects/clang360-import/contrib/amd/hlfsd/homedir.c =================================================================== --- projects/clang360-import/contrib/amd/hlfsd/homedir.c (revision 277895) +++ projects/clang360-import/contrib/amd/hlfsd/homedir.c (revision 277896) @@ -1,807 +1,808 @@ /* * Copyright (c) 1997-2006 Erez Zadok * Copyright (c) 1989 Jan-Simon Pendry * Copyright (c) 1989 Imperial College of Science, Technology & Medicine * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry at Imperial College, London. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * File: am-utils/hlfsd/homedir.c * * HLFSD was written at Columbia University Computer Science Department, by * Erez Zadok and Alexander Dupuy * It is being distributed under the same terms and conditions as amd does. */ #ifdef HAVE_CONFIG_H # include #endif /* HAVE_CONFIG_H */ #include #include /* * STATIC VARIABLES AND FUNCTIONS: */ static FILE *passwd_fp = NULL; static char pw_name[16], pw_dir[128]; static int cur_pwtab_num = 0, max_pwtab_num = 0; static int hlfsd_diskspace(char *); static int hlfsd_stat(char *, struct stat *); static int passwd_line = 0; static int plt_reset(void); static struct passwd passwd_ent; static uid2home_t *lastchild; static uid2home_t *pwtab; static void delay(uid2home_t *, int); static void table_add(u_int, const char *, const char *); static char mboxfile[MAXPATHLEN]; static char *root_home; /* root's home directory */ /* GLOBAL FUNCTIONS */ char *homeof(char *username); int uidof(char *username); /* GLOBALS VARIABLES */ username2uid_t *untab; /* user name table */ /* * Return the home directory pathname for the user with uid "userid". */ char * homedir(int userid, int groupid) { static char linkval[MAXPATHLEN + 1]; static struct timeval tp; uid2home_t *found; char *homename; struct stat homestat; int old_groupid, old_userid; if ((found = plt_search(userid)) == (uid2home_t *) NULL) { return alt_spooldir; /* use alt spool for unknown uid */ } homename = found->home; if (homename[0] != '/' || homename[1] == '\0') { found->last_status = 1; return alt_spooldir; /* use alt spool for / or rel. home */ } if ((int) userid == 0) /* force all uid 0 to use root's home */ xsnprintf(linkval, sizeof(linkval), "%s/%s", root_home, home_subdir); else xsnprintf(linkval, sizeof(linkval), "%s/%s", homename, home_subdir); if (noverify) { found->last_status = 0; return linkval; } /* * To optimize hlfsd, we don't actually check the validity of the * symlink if it has been checked in the last N seconds. It is * very likely that the link, machine, and filesystem are still * valid, as long as N is small. But if N is large, that may not be * true. That's why the default N is 5 minutes, but we allow the * user to override this value via a command line option. Note that * we do not update the last_access_time each time it is accessed, * but only once every N seconds. 
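 * For example, with the default 300-second interval, a user who
 * traverses the link once a second triggers at most one full
 * validation (and possible fork) every five minutes; all other
 * requests are answered from the cached last_status.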
*/ if (gettimeofday(&tp, (struct timezone *) NULL) < 0) { tp.tv_sec = 0; } else { if ((tp.tv_sec - found->last_access_time) < cache_interval) { if (found->last_status == 0) { return linkval; } else { return alt_spooldir; } } else { found->last_access_time = tp.tv_sec; } } /* * only run this forking code if did not ask for -D fork */ if (!amuDebug(D_FORK)) { /* fork child to process request if none in progress */ if (found->child && kill(found->child, 0)) found->child = 0; if (found->child) delay(found, 5); /* wait a bit if in progress */ if (found->child) { /* better safe than sorry - maybe */ found->last_status = 1; return alt_spooldir; } if ((found->child = fork()) < 0) { found->last_status = 1; return alt_spooldir; } if (found->child) { /* PARENT */ if (lastchild) dlog("cache spill uid = %ld, pid = %ld, home = %s", (long) lastchild->uid, (long) lastchild->child, lastchild->home); lastchild = found; return (char *) NULL; /* return NULL to parent, so it can continue */ } } /* * CHILD: (or parent if -D fork) * * Check and create dir if needed. * Check disk space and/or quotas too. * * We don't need to set the _last_status field of found after the fork * in the child, b/c that information would be later determined in * nfsproc_readlink_2() and the correct exit status would be returned * to the parent upon SIGCHLD in interlock(). * */ am_set_mypid(); /* for logging routines */ if ((old_groupid = setgid(groupid)) < 0) { plog(XLOG_WARNING, "could not setgid to %d: %m", groupid); return linkval; } if ((old_userid = seteuid(userid)) < 0) { plog(XLOG_WARNING, "could not seteuid to %d: %m", userid); setgid(old_groupid); return linkval; } if (hlfsd_stat(linkval, &homestat) < 0) { if (errno == ENOENT) { /* make the spool dir if possible */ /* don't use recursive mkdirs here */ if (mkdir(linkval, PERS_SPOOLMODE) < 0) { seteuid(old_userid); setgid(old_groupid); plog(XLOG_WARNING, "can't make directory %s: %m", linkval); return alt_spooldir; } /* fall through to testing the disk space / quota */ } else { /* the home dir itself must not exist then */ seteuid(old_userid); setgid(old_groupid); plog(XLOG_WARNING, "bad link to %s: %m", linkval); return alt_spooldir; } } /* * If gets here, then either the spool dir in the home dir exists, * or it was just created. In either case, we now need to * test if we can create a small file and write at least one * byte into it. This will test that we have both enough inodes * and disk blocks to spare, or they fall within the user's quotas too. * We are still seteuid to the user at this point. 
*/ if (hlfsd_diskspace(linkval) < 0) { seteuid(old_userid); setgid(old_groupid); plog(XLOG_WARNING, "no more space in %s: %m", linkval); return alt_spooldir; } else { seteuid(old_userid); setgid(old_groupid); return linkval; } } static int hlfsd_diskspace(char *path) { char buf[MAXPATHLEN]; int fd, len; xsnprintf(buf, sizeof(buf), "%s/._hlfstmp_%lu", path, (long) getpid()); if ((fd = open(buf, O_RDWR | O_CREAT, 0600)) < 0) { plog(XLOG_ERROR, "cannot open %s: %m", buf); return -1; } len = strlen(buf); if (write(fd, buf, len) < len) { plog(XLOG_ERROR, "cannot write \"%s\" (%d bytes) to %s : %m", buf, len, buf); close(fd); unlink(buf); /* cleanup just in case */ return -1; } if (unlink(buf) < 0) { plog(XLOG_ERROR, "cannot unlink %s : %m", buf); } close(fd); return 0; } static int hlfsd_stat(char *path, struct stat *statp) { if (stat(path, statp) < 0) return -1; else if (!S_ISDIR(statp->st_mode)) { errno = ENOTDIR; return -1; } return 0; } static void delay(uid2home_t *found, int secs) { struct timeval tv; dlog("delaying on child %ld for %d seconds", (long) found->child, secs); tv.tv_usec = 0; do { tv.tv_sec = secs; if (select(0, 0, 0, 0, &tv) == 0) break; } while (--secs && found->child); } /* * This function is called when a child has terminated after * servicing an nfs request. We need to check the exit status and * update the last_status field of the requesting user. */ RETSIGTYPE interlock(int signum) { int child; uid2home_t *lostchild; int status; #ifdef HAVE_WAITPID while ((child = waitpid((pid_t) -1, &status, WNOHANG)) > 0) { #else /* not HAVE_WAITPID */ while ((child = wait3(&status, WNOHANG, (struct rusage *) 0)) > 0) { #endif /* not HAVE_WAITPID */ /* high chances this was the last child forked */ if (lastchild && lastchild->child == child) { lastchild->child = 0; if (WIFEXITED(status)) lastchild->last_status = WEXITSTATUS(status); lastchild = (uid2home_t *) NULL; } else { /* and if not, we have to search for it... */ for (lostchild = pwtab; lostchild < &pwtab[cur_pwtab_num]; lostchild++) { if (lostchild->child == child) { if (WIFEXITED(status)) lostchild->last_status = WEXITSTATUS(status); lostchild->child = 0; break; } } } } } /* * PASSWORD AND USERNAME LOOKUP TABLES FUNCTIONS */ /* * get index of UserName table entry which matches username. * must not return uid_t because we want to return a negative number. */ int untab_index(char *username) { int max, min, mid, cmp; max = cur_pwtab_num - 1; min = 0; do { mid = (max + min) / 2; cmp = strcmp(untab[mid].username, username); if (cmp == 0) /* record found! */ return mid; if (cmp > 0) max = mid; else min = mid; } while (max > min + 1); if (STREQ(untab[max].username, username)) return max; if (STREQ(untab[min].username, username)) return min; /* if gets here then record was not found */ return -1; } /* * Don't make this return a uid_t, because we need to return negative * numbers as well (error codes.) */ int uidof(char *username) { int idx; if ((idx = untab_index(username)) < 0) /* not found */ return INVALIDID; /* an invalid user id */ return untab[idx].uid; } /* * Don't make this return a uid_t, because we need to return negative * numbers as well (error codes.) 
*/ char * homeof(char *username) { int idx; if ((idx = untab_index(username)) < 0) /* not found */ return (char *) NULL; /* an invalid user id */ return untab[idx].home; } char * mailbox(int uid, char *username) { char *home; if (uid < 0) return (char *) NULL; /* not found */ if ((home = homeof(username)) == (char *) NULL) return (char *) NULL; if (STREQ(home, "/")) xsnprintf(mboxfile, sizeof(mboxfile), "/%s/%s", home_subdir, username); else xsnprintf(mboxfile, sizeof(mboxfile), "%s/%s/%s", home, home_subdir, username); return mboxfile; } static int plt_compare_fxn(const voidp x, const voidp y) { uid2home_t *i = (uid2home_t *) x; uid2home_t *j = (uid2home_t *) y; return i->uid - j->uid; } static int unt_compare_fxn(const voidp x, const voidp y) { username2uid_t *i = (username2uid_t *) x; username2uid_t *j = (username2uid_t *) y; return strcmp(i->username, j->username); } /* perform initialization of user passwd database */ static void hlfsd_setpwent(void) { if (!passwdfile) { setpwent(); return; } passwd_fp = fopen(passwdfile, "r"); if (!passwd_fp) { plog(XLOG_ERROR, "unable to read passwd file %s: %m", passwdfile); return; } plog(XLOG_INFO, "reading password entries from file %s", passwdfile); passwd_line = 0; memset((char *) &passwd_ent, 0, sizeof(struct passwd)); passwd_ent.pw_name = (char *) &pw_name; passwd_ent.pw_dir = (char *) &pw_dir; } /* perform de-initialization of user passwd database */ static void hlfsd_endpwent(void) { if (!passwdfile) { /* * Don't actually run this because we will be making more passwd calls * afterwards. On Solaris 2.5.1, making getpwent() calls after calling * endpwent() results in a memory leak! (and no, even Purify didn't * detect it...) * endpwent(); */ return; } if (passwd_fp) { fclose(passwd_fp); } } /* perform record reading/parsing of individual passwd database records */ static struct passwd * hlfsd_getpwent(void) { char buf[256], *cp; /* check if to perform standard unix function */ if (!passwdfile) { return getpwent(); } /* return here to read another entry */ readent: /* return NULL if reached end of file */ if (feof(passwd_fp)) return NULL; pw_name[0] = pw_dir[0] = '\0'; /* read records */ buf[0] = '\0'; - fgets(buf, 256, passwd_fp); + if (fgets(buf, 256, passwd_fp) == NULL) + return NULL; passwd_line++; if (buf[0] == '\0') goto readent; /* read user name */ cp = strtok(buf, ":"); if (!cp || cp[0] == '\0') { plog(XLOG_ERROR, "no user name on line %d of %s", passwd_line, passwdfile); goto readent; } /* pw_name will show up in passwd_ent.pw_name */ xstrlcpy(pw_name, cp, sizeof(pw_name)); /* skip passwd */ strtok(NULL, ":"); /* read uid */ cp = strtok(NULL, ":"); if (!cp || cp[0] == '\0') { plog(XLOG_ERROR, "no uid on line %d of %s", passwd_line, passwdfile); goto readent; } passwd_ent.pw_uid = atoi(cp); /* skip gid and gcos */ strtok(NULL, ":"); strtok(NULL, ":"); /* read home dir */ cp = strtok(NULL, ":"); if (!cp || cp[0] == '\0') { plog(XLOG_ERROR, "no home dir on line %d of %s", passwd_line, passwdfile); goto readent; } /* pw_dir will show up in passwd_ent.pw_dir */ xstrlcpy(pw_dir, cp, sizeof(pw_dir)); /* the rest of the fields are unimportant and not being considered */ plog(XLOG_USER, "hlfsd_getpwent: name=%s, uid=%ld, dir=%s", passwd_ent.pw_name, (long) passwd_ent.pw_uid, passwd_ent.pw_dir); return &passwd_ent; } /* * read and hash the passwd file or NIS map */ void plt_init(void) { struct passwd *pent_p; if (plt_reset() < 0) /* could not reset table. skip. 
*/ return; plog(XLOG_INFO, "reading password map"); hlfsd_setpwent(); /* prepare to read passwd entries */ while ((pent_p = hlfsd_getpwent()) != (struct passwd *) NULL) { table_add(pent_p->pw_uid, pent_p->pw_dir, pent_p->pw_name); if (STREQ("root", pent_p->pw_name)) { int len; if (root_home) XFREE(root_home); root_home = strdup(pent_p->pw_dir); len = strlen(root_home); /* remove any trailing '/' chars from root's home (even if just one) */ while (len > 0 && root_home[len - 1] == '/') { len--; root_home[len] = '\0'; } } } hlfsd_endpwent(); qsort((char *) pwtab, cur_pwtab_num, sizeof(uid2home_t), plt_compare_fxn); qsort((char *) untab, cur_pwtab_num, sizeof(username2uid_t), unt_compare_fxn); if (!root_home) root_home = strdup(""); plog(XLOG_INFO, "password map read and sorted"); } /* * This is essentially so that we don't reset known good lookup tables when a * YP server goes down. */ static int plt_reset(void) { int i; hlfsd_setpwent(); if (hlfsd_getpwent() == (struct passwd *) NULL) { hlfsd_endpwent(); return -1; /* did not reset table */ } hlfsd_endpwent(); lastchild = (uid2home_t *) NULL; if (max_pwtab_num > 0) /* was used already. cleanup old table */ for (i = 0; i < cur_pwtab_num; ++i) { if (pwtab[i].home) { XFREE(pwtab[i].home); pwtab[i].home = (char *) NULL; } pwtab[i].uid = INVALIDID; /* not a valid uid (yet...) */ pwtab[i].child = (pid_t) 0; pwtab[i].uname = (char *) NULL; /* only a ptr to untab[i].username */ if (untab[i].username) { XFREE(untab[i].username); untab[i].username = (char *) NULL; } untab[i].uid = INVALIDID; /* invalid uid */ untab[i].home = (char *) NULL; /* only a ptr to pwtab[i].home */ } cur_pwtab_num = 0; /* zero current size */ if (root_home) XFREE(root_home); return 0; /* resetting ok */ } /* * u: uid number * h: home directory * n: user ID name */ static void table_add(u_int u, const char *h, const char *n) { int i; if (max_pwtab_num <= 0) { /* was never initialized */ max_pwtab_num = 1; pwtab = (uid2home_t *) xmalloc(max_pwtab_num * sizeof(uid2home_t)); memset((char *) &pwtab[0], 0, max_pwtab_num * sizeof(uid2home_t)); untab = (username2uid_t *) xmalloc(max_pwtab_num * sizeof(username2uid_t)); memset((char *) &untab[0], 0, max_pwtab_num * sizeof(username2uid_t)); } /* check if need more space. 
*/ if (cur_pwtab_num + 1 > max_pwtab_num) { /* need more space in table */ max_pwtab_num *= 2; plog(XLOG_INFO, "reallocating table spaces to %d entries", max_pwtab_num); pwtab = (uid2home_t *) xrealloc(pwtab, sizeof(uid2home_t) * max_pwtab_num); untab = (username2uid_t *) xrealloc(untab, sizeof(username2uid_t) * max_pwtab_num); /* zero out newly added entries */ for (i=cur_pwtab_num; i u) max = mid; else min = mid; } while (max > min + 1); if (pwtab[max].uid == u) return &pwtab[max]; if (pwtab[min].uid == u) return &pwtab[min]; /* if gets here then record was not found */ return (uid2home_t *) NULL; } #if defined(DEBUG) || defined(DEBUG_PRINT) void plt_print(int signum) { FILE *dumpfile; int dumpfd; char dumptmp[] = "/usr/tmp/hlfsd.dump.XXXXXX"; int i; #ifdef HAVE_MKSTEMP dumpfd = mkstemp(dumptmp); #else /* not HAVE_MKSTEMP */ mktemp(dumptmp); if (!dumptmp) { plog(XLOG_ERROR, "cannot create temporary dump file"); return; } dumpfd = open(dumptmp, O_RDONLY); #endif /* not HAVE_MKSTEMP */ if (dumpfd < 0) { plog(XLOG_ERROR, "cannot open temporary dump file"); return; } if ((dumpfile = fdopen(dumpfd, "a")) != NULL) { plog(XLOG_INFO, "dumping internal state to file %s", dumptmp); fprintf(dumpfile, "\n\nNew plt_dump():\n"); for (i = 0; i < cur_pwtab_num; ++i) fprintf(dumpfile, "%4d %5lu %10lu %1d %4lu \"%s\" uname=\"%s\"\n", i, (long) pwtab[i].child, pwtab[i].last_access_time, pwtab[i].last_status, (long) pwtab[i].uid, pwtab[i].home, pwtab[i].uname); fprintf(dumpfile, "\nUserName table by plt_print():\n"); for (i = 0; i < cur_pwtab_num; ++i) fprintf(dumpfile, "%4d : \"%s\" %4lu \"%s\"\n", i, untab[i].username, (long) untab[i].uid, untab[i].home); close(dumpfd); fclose(dumpfile); } } void plt_dump(uid2home_t *lastc, pid_t this) { FILE *dumpfile; int i; if ((dumpfile = fopen("/var/tmp/hlfsdump", "a")) != NULL) { fprintf(dumpfile, "\n\nNEW PLT_DUMP -- "); fprintf(dumpfile, "lastchild->child=%d ", (int) (lastc ? lastc->child : -999)); fprintf(dumpfile, ", child from wait3=%lu:\n", (long) this); for (i = 0; i < cur_pwtab_num; ++i) fprintf(dumpfile, "%4d %5lu: %4lu \"%s\" uname=\"%s\"\n", i, (long) pwtab[i].child, (long) pwtab[i].uid, pwtab[i].home, pwtab[i].uname); fprintf(dumpfile, "\nUserName table by plt_dump():\n"); for (i = 0; i < cur_pwtab_num; ++i) fprintf(dumpfile, "%4d : \"%s\" %4lu \"%s\"\n", i, untab[i].username, (long) untab[i].uid, untab[i].home); fprintf(dumpfile, "ezk: ent=%d, uid=%lu, home=\"%s\"\n", untab_index("ezk"), (long) untab[untab_index("ezk")].uid, pwtab[untab[untab_index("ezk")].uid].home); fprintf(dumpfile, "rezk: ent=%d, uid=%lu, home=\"%s\"\n", untab_index("rezk"), (long) untab[untab_index("rezk")].uid, pwtab[untab[untab_index("rezk")].uid].home); fclose(dumpfile); } } #endif /* defined(DEBUG) || defined(DEBUG_PRINT) */ Index: projects/clang360-import/contrib/amd/hlfsd/stubs.c =================================================================== --- projects/clang360-import/contrib/amd/hlfsd/stubs.c (revision 277895) +++ projects/clang360-import/contrib/amd/hlfsd/stubs.c (revision 277896) @@ -1,534 +1,532 @@ /* * Copyright (c) 1997-2006 Erez Zadok * Copyright (c) 1989 Jan-Simon Pendry * Copyright (c) 1989 Imperial College of Science, Technology & Medicine * Copyright (c) 1989 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Jan-Simon Pendry at Imperial College, London. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgment: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * File: am-utils/hlfsd/stubs.c * * HLFSD was written at Columbia University Computer Science Department, by * Erez Zadok and Alexander Dupuy * It is being distributed under the same terms and conditions as amd does. 
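 * hlfsd file handles encode their object directly in fh_data: the root
 * and symlink handles hold only a magic id (ROOTID/SLINKID), while
 * per-user handles hold the uid in the first bytes followed by the
 * user name. That layout is why the code below moves the uid in and
 * out with memcpy() instead of the old *(u_int *) casts, roughly
 * (field sizes assumed):
 *
 *   memset(&fh, 0, sizeof(fh));
 *   memcpy(fh.fh_data, &uid, sizeof(uid));
 *   xstrlcpy((char *) &fh.fh_data[sizeof(int)], username,
 *            sizeof(am_nfs_fh) - sizeof(int));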
*/ #ifdef HAVE_CONFIG_H # include #endif /* HAVE_CONFIG_H */ #include #include /* * STATIC VARIABLES: */ static nfsfattr rootfattr = {NFDIR, 0040555, 2, 0, 0, 512, 512, 0, 1, 0, ROOTID}; static nfsfattr slinkfattr = {NFLNK, 0120777, 1, 0, 0, NFS_MAXPATHLEN, 512, 0, (NFS_MAXPATHLEN + 1) / 512, 0, SLINKID}; /* user name file attributes */ static nfsfattr un_fattr = {NFLNK, 0120777, 1, 0, 0, NFS_MAXPATHLEN, 512, 0, (NFS_MAXPATHLEN + 1) / 512, 0, INVALIDID}; static int started; static am_nfs_fh slink; static am_nfs_fh un_fhandle; /* * GLOBALS: */ am_nfs_fh root; am_nfs_fh *root_fhp = &root; /* initialize NFS file handles for hlfsd */ void hlfsd_init_filehandles(void) { u_int ui; ui = ROOTID; memcpy(root.fh_data, &ui, sizeof(ui)); ui = SLINKID; memcpy(slink.fh_data, &ui, sizeof(ui)); ui = INVALIDID; memcpy(un_fhandle.fh_data, &ui, sizeof(ui)); } voidp nfsproc_null_2_svc(voidp argp, struct svc_req *rqstp) { static char res; return (voidp) &res; } /* compare if two filehandles are equal */ static int eq_fh(const am_nfs_fh *fh1, const am_nfs_fh *fh2) { return (!memcmp((char *) fh1, (char *) fh2, sizeof(am_nfs_fh))); } nfsattrstat * nfsproc_getattr_2_svc(am_nfs_fh *argp, struct svc_req *rqstp) { static nfsattrstat res; uid_t uid = (uid_t) INVALIDID; gid_t gid = (gid_t) INVALIDID; if (!started) { started++; rootfattr.na_ctime = startup; rootfattr.na_mtime = startup; slinkfattr.na_ctime = startup; slinkfattr.na_mtime = startup; un_fattr.na_ctime = startup; un_fattr.na_mtime = startup; } if (getcreds(rqstp, &uid, &gid, nfsxprt) < 0) { res.ns_status = NFSERR_STALE; return &res; } if (eq_fh(argp, &root)) { #if 0 /* * XXX: increment mtime of parent directory, causes NFS clients to * invalidate their cache for that directory. * Some NFS clients may need this code. */ if (uid != rootfattr.na_uid) { clocktime(&rootfattr.na_mtime); rootfattr.na_uid = uid; } #endif res.ns_status = NFS_OK; res.ns_u.ns_attr_u = rootfattr; } else if (eq_fh(argp, &slink)) { #ifndef MNT2_NFS_OPT_SYMTTL /* * This code is needed to defeat Solaris 2.4's (and newer) symlink * values cache. It forces the last-modified time of the symlink to be * current. It is not needed if the O/S has an nfs flag to turn off the * symlink-cache at mount time (such as Irix 5.x and 6.x). -Erez. * * Additionally, Linux currently ignores the nt_useconds field, * so we must update the nt_seconds field every time. 
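 * (In effect, a getattr from a new uid bumps the symlink's mtime, so
 * that client re-reads the link target instead of reusing a value it
 * cached while acting for a different user.)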
*/ if (uid != slinkfattr.na_uid) { clocktime(&slinkfattr.na_mtime); slinkfattr.na_uid = uid; } #endif /* not MNT2_NFS_OPT_SYMTTL */ res.ns_status = NFS_OK; res.ns_u.ns_attr_u = slinkfattr; } else { if (gid != hlfs_gid) { res.ns_status = NFSERR_STALE; } else { - memset((char *) &uid, 0, sizeof(int)); - uid = *(u_int *) argp->fh_data; + (void)memcpy(&uid, argp->fh_data, sizeof(uid)); if (plt_search(uid) != (uid2home_t *) NULL) { res.ns_status = NFS_OK; un_fattr.na_fileid = uid; res.ns_u.ns_attr_u = un_fattr; dlog("nfs_getattr: successful search for uid=%ld, gid=%ld", (long) uid, (long) gid); } else { /* not found */ res.ns_status = NFSERR_STALE; } } } return &res; } nfsattrstat * nfsproc_setattr_2_svc(nfssattrargs *argp, struct svc_req *rqstp) { static nfsattrstat res = {NFSERR_ROFS}; return &res; } voidp nfsproc_root_2_svc(voidp argp, struct svc_req *rqstp) { static char res; return (voidp) &res; } nfsdiropres * nfsproc_lookup_2_svc(nfsdiropargs *argp, struct svc_req *rqstp) { static nfsdiropres res; int idx; uid_t uid = (uid_t) INVALIDID; gid_t gid = (gid_t) INVALIDID; if (!started) { started++; rootfattr.na_ctime = startup; rootfattr.na_mtime = startup; slinkfattr.na_ctime = startup; slinkfattr.na_mtime = startup; un_fattr.na_ctime = startup; un_fattr.na_mtime = startup; } if (eq_fh(&argp->da_fhandle, &slink)) { res.dr_status = NFSERR_NOTDIR; return &res; } if (getcreds(rqstp, &uid, &gid, nfsxprt) < 0) { res.dr_status = NFSERR_NOENT; return &res; } if (eq_fh(&argp->da_fhandle, &root)) { if (argp->da_name[0] == '.' && (argp->da_name[1] == '\0' || (argp->da_name[1] == '.' && argp->da_name[2] == '\0'))) { #if 0 /* * XXX: increment mtime of parent directory, causes NFS clients to * invalidate their cache for that directory. * Some NFS clients may need this code. */ if (uid != rootfattr.na_uid) { clocktime(&rootfattr.na_mtime); rootfattr.na_uid = uid; } #endif res.dr_u.dr_drok_u.drok_fhandle = root; res.dr_u.dr_drok_u.drok_attributes = rootfattr; res.dr_status = NFS_OK; return &res; } if (STREQ(argp->da_name, slinkname)) { #ifndef MNT2_NFS_OPT_SYMTTL /* * This code is needed to defeat Solaris 2.4's (and newer) symlink * values cache. It forces the last-modified time of the symlink to be * current. It is not needed if the O/S has an nfs flag to turn off the * symlink-cache at mount time (such as Irix 5.x and 6.x). -Erez. * * Additionally, Linux currently ignores the nt_useconds field, * so we must update the nt_seconds field every time. 
*/ if (uid != slinkfattr.na_uid) { clocktime(&slinkfattr.na_mtime); slinkfattr.na_uid = uid; } #endif /* not MNT2_NFS_OPT_SYMTTL */ res.dr_u.dr_drok_u.drok_fhandle = slink; res.dr_u.dr_drok_u.drok_attributes = slinkfattr; res.dr_status = NFS_OK; return &res; } if (gid != hlfs_gid) { res.dr_status = NFSERR_NOENT; return &res; } /* if gets here, gid == hlfs_gid */ if ((idx = untab_index(argp->da_name)) < 0) { res.dr_status = NFSERR_NOENT; return &res; } else { /* entry found and gid is permitted */ un_fattr.na_fileid = untab[idx].uid; res.dr_u.dr_drok_u.drok_attributes = un_fattr; - memset((char *) &un_fhandle, 0, sizeof(am_nfs_fh)); - *(u_int *) un_fhandle.fh_data = (u_int) untab[idx].uid; + memset(&un_fhandle, 0, sizeof(un_fhandle)); + memcpy(un_fhandle.fh_data, &untab[idx].uid, sizeof(untab[idx].uid)); xstrlcpy((char *) &un_fhandle.fh_data[sizeof(int)], untab[idx].username, sizeof(am_nfs_fh) - sizeof(int)); res.dr_u.dr_drok_u.drok_fhandle = un_fhandle; res.dr_status = NFS_OK; dlog("nfs_lookup: successful lookup for uid=%ld, gid=%ld: username=%s", (long) uid, (long) gid, untab[idx].username); return &res; } } /* end of "if (eq_fh(argp->dir.data, root.data)) {" */ res.dr_status = NFSERR_STALE; return &res; } nfsreadlinkres * nfsproc_readlink_2_svc(am_nfs_fh *argp, struct svc_req *rqstp) { static nfsreadlinkres res; uid_t userid = (uid_t) INVALIDID; gid_t groupid = hlfs_gid + 1; /* anything not hlfs_gid */ int retval = 0; char *path_val = (char *) NULL; char *username; static uid_t last_uid = (uid_t) INVALIDID; if (eq_fh(argp, &root)) { res.rlr_status = NFSERR_ISDIR; } else if (eq_fh(argp, &slink)) { if (getcreds(rqstp, &userid, &groupid, nfsxprt) < 0) return (nfsreadlinkres *) NULL; clocktime(&slinkfattr.na_atime); res.rlr_status = NFS_OK; if (groupid == hlfs_gid) { res.rlr_u.rlr_data_u = DOTSTRING; } else if (!(res.rlr_u.rlr_data_u = path_val = homedir(userid, groupid))) { /* * parent process (fork in homedir()) continues * processing, by getting a NULL returned as a * "special". Child returns result. */ return (nfsreadlinkres *) NULL; } } else { /* check if asked for user mailbox */ if (getcreds(rqstp, &userid, &groupid, nfsxprt) < 0) { return (nfsreadlinkres *) NULL; } if (groupid == hlfs_gid) { - memset((char *) &userid, 0, sizeof(int)); - userid = *(u_int *) argp->fh_data; + memcpy(&userid, argp->fh_data, sizeof(userid)); username = (char *) &argp->fh_data[sizeof(int)]; if (!(res.rlr_u.rlr_data_u = mailbox(userid, username))) return (nfsreadlinkres *) NULL; } else { res.rlr_status = NFSERR_STALE; } } /* print info, but try to avoid repetitions */ if (userid != last_uid) { plog(XLOG_USER, "mailbox for uid=%ld, gid=%ld is %s", (long) userid, (long) groupid, (char *) res.rlr_u.rlr_data_u); last_uid = userid; } /* I don't think it will pass this if -D fork */ if (serverpid == getpid()) return &res; if (!svc_sendreply(nfsxprt, (XDRPROC_T_TYPE) xdr_readlinkres, (SVC_IN_ARG_TYPE) &res)) svcerr_systemerr(nfsxprt); /* * Child exists here. We need to determine which * exist status to return. The exit status * is gathered using wait() and determines * if we returned $HOME/.hlfsspool or $ALTDIR. The parent * needs this info so it can update the lookup table. */ if (path_val && alt_spooldir && STREQ(path_val, alt_spooldir)) retval = 1; /* could not get real home dir (or uid 0 user) */ else retval = 0; /* * If asked for -D fork, then must return the value, * NOT exit, or else the main hlfsd server exits. * Bug: where is that status information being collected? 
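 * (In the normal case the status is collected by interlock(), the
 * child-reaping signal handler in homedir.c, which records
 * WEXITSTATUS() in the user's last_status slot; under -D fork no
 * child is created, hence the open question above.)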
*/ if (amuDebug(D_FORK)) return &res; exit(retval); } nfsreadres * nfsproc_read_2_svc(nfsreadargs *argp, struct svc_req *rqstp) { static nfsreadres res = {NFSERR_ACCES}; return &res; } voidp nfsproc_writecache_2_svc(voidp argp, struct svc_req *rqstp) { static char res; return (voidp) &res; } nfsattrstat * nfsproc_write_2_svc(nfswriteargs *argp, struct svc_req *rqstp) { static nfsattrstat res = {NFSERR_ROFS}; return &res; } nfsdiropres * nfsproc_create_2_svc(nfscreateargs *argp, struct svc_req *rqstp) { static nfsdiropres res = {NFSERR_ROFS}; return &res; } nfsstat * nfsproc_remove_2_svc(nfsdiropargs *argp, struct svc_req *rqstp) { static nfsstat res = {NFSERR_ROFS}; return &res; } nfsstat * nfsproc_rename_2_svc(nfsrenameargs *argp, struct svc_req *rqstp) { static nfsstat res = {NFSERR_ROFS}; return &res; } nfsstat * nfsproc_link_2_svc(nfslinkargs *argp, struct svc_req *rqstp) { static nfsstat res = {NFSERR_ROFS}; return &res; } nfsstat * nfsproc_symlink_2_svc(nfssymlinkargs *argp, struct svc_req *rqstp) { static nfsstat res = {NFSERR_ROFS}; return &res; } nfsdiropres * nfsproc_mkdir_2_svc(nfscreateargs *argp, struct svc_req *rqstp) { static nfsdiropres res = {NFSERR_ROFS}; return &res; } nfsstat * nfsproc_rmdir_2_svc(nfsdiropargs *argp, struct svc_req *rqstp) { static nfsstat res = {NFSERR_ROFS}; return &res; } nfsreaddirres * nfsproc_readdir_2_svc(nfsreaddirargs *argp, struct svc_req *rqstp) { static nfsreaddirres res; static nfsentry slinkent = {SLINKID, 0, {SLINKCOOKIE}}; static nfsentry dotdotent = {ROOTID, "..", {DOTDOTCOOKIE}, &slinkent}; static nfsentry dotent = {ROOTID, ".", {DOTCOOKIE}, &dotdotent}; slinkent.ne_name = slinkname; if (eq_fh(&argp->rda_fhandle, &slink)) { res.rdr_status = NFSERR_NOTDIR; } else if (eq_fh(&argp->rda_fhandle, &root)) { clocktime(&rootfattr.na_atime); res.rdr_status = NFS_OK; switch (argp->rda_cookie[0]) { case 0: res.rdr_u.rdr_reply_u.dl_entries = &dotent; break; case DOTCOOKIE: res.rdr_u.rdr_reply_u.dl_entries = &dotdotent; break; case DOTDOTCOOKIE: res.rdr_u.rdr_reply_u.dl_entries = &slinkent; break; case SLINKCOOKIE: res.rdr_u.rdr_reply_u.dl_entries = (nfsentry *) 0; break; } res.rdr_u.rdr_reply_u.dl_eof = TRUE; } else { res.rdr_status = NFSERR_STALE; } return &res; } nfsstatfsres * nfsproc_statfs_2_svc(am_nfs_fh *argp, struct svc_req *rqstp) { static nfsstatfsres res = {NFS_OK}; res.sfr_u.sfr_reply_u.sfrok_tsize = 1024; res.sfr_u.sfr_reply_u.sfrok_bsize = 1024; /* * Some "df" programs automatically assume that file systems * with zero blocks are meta-filesystems served by automounters. 
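 * An illustrative df(1) view of such a mount (the device column is
 * made up; the numbers are what this procedure reports):
 *
 *   Filesystem        1K-blocks   Used   Avail   Mounted on
 *   hlfsd:(pid1234)           0      0       0   /mail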
*/ res.sfr_u.sfr_reply_u.sfrok_blocks = 0; res.sfr_u.sfr_reply_u.sfrok_bfree = 0; res.sfr_u.sfr_reply_u.sfrok_bavail = 0; return &res; } Index: projects/clang360-import/contrib/amd =================================================================== --- projects/clang360-import/contrib/amd (revision 277895) +++ projects/clang360-import/contrib/amd (revision 277896) Property changes on: projects/clang360-import/contrib/amd ___________________________________________________________________ Added: svn:mergeinfo ## -0,0 +0,18 ## Merged /projects/ipfw/contrib/amd:r267383-272837 Merged /projects/pf/head/contrib/amd:r263908 Merged /projects/zfsd/head/contrib/amd:r266519,269993 Merged /projects/building-blocks/contrib/amd:r275198,275297,275306-275307,275309,275311,275556,275558,275600,277445,277670,277673 Merged /projects/clang-sparc64/contrib/amd:r262258-262612 Merged /vendor/resolver/dist/contrib/amd:r1540-186085 Merged /projects/clang350-import/contrib/amd:r274961-276476 Merged /projects/release-embedded/contrib/amd:r262314,262504,262510-262511,262580,262660,262662,262700,262713,262774,262786-262788,262790-262792,262798,262802,262808 Merged /vendor/amd/dist:r277866,277870 Merged /projects/largeSMP/contrib/amd:r221273-222812,222815-223757 Merged /projects/quota64/contrib/amd:r184125-207707 Merged /projects/head_mfi/contrib/amd:r233621 Merged /projects/random_number_generator/contrib/amd:r254613-256243 Merged /head/contrib/amd:r277327-277895 Merged /projects/lldb-r201577/contrib/amd:r262185-262527 Merged /projects/elftoolchain-update-r3130/contrib/amd:r276164,276167,276170-276172 Merged /projects/multi-fibv6/head/contrib/amd:r230929-231848 Merged /projects/elftoolchain/contrib/amd:r260687-261245 Index: projects/clang360-import/lib/libc/gen/sem_new.c =================================================================== --- projects/clang360-import/lib/libc/gen/sem_new.c (revision 277895) +++ projects/clang360-import/lib/libc/gen/sem_new.c (revision 277896) @@ -1,448 +1,450 @@ /* * Copyright (C) 2010 David Xu . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice(s), this list of conditions and the following disclaimer as * the first lines of this file unmodified other than the possible * addition of one or more copyright notices. * 2. Redistributions in binary form must reproduce the above copyright * notice(s), this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include "namespace.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "un-namespace.h" #include "libc_private.h" __weak_reference(_sem_close, sem_close); __weak_reference(_sem_destroy, sem_destroy); __weak_reference(_sem_getvalue, sem_getvalue); __weak_reference(_sem_init, sem_init); __weak_reference(_sem_open, sem_open); __weak_reference(_sem_post, sem_post); __weak_reference(_sem_timedwait, sem_timedwait); __weak_reference(_sem_trywait, sem_trywait); __weak_reference(_sem_unlink, sem_unlink); __weak_reference(_sem_wait, sem_wait); #define SEM_PREFIX "/tmp/SEMD" #define SEM_MAGIC ((u_int32_t)0x73656d32) _Static_assert(SEM_VALUE_MAX <= USEM_MAX_COUNT, "SEM_VALUE_MAX too large"); struct sem_nameinfo { int open_count; char *name; dev_t dev; ino_t ino; sem_t *sem; LIST_ENTRY(sem_nameinfo) next; }; static pthread_once_t once = PTHREAD_ONCE_INIT; static pthread_mutex_t sem_llock; static LIST_HEAD(,sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list); static void sem_prefork() { _pthread_mutex_lock(&sem_llock); } static void sem_postfork() { _pthread_mutex_unlock(&sem_llock); } static void sem_child_postfork() { _pthread_mutex_unlock(&sem_llock); } static void sem_module_init(void) { pthread_mutexattr_t ma; _pthread_mutexattr_init(&ma); _pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE); _pthread_mutex_init(&sem_llock, &ma); _pthread_mutexattr_destroy(&ma); _pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork); } static inline int sem_check_validity(sem_t *sem) { if (sem->_magic == SEM_MAGIC) return (0); else { errno = EINVAL; return (-1); } } int _sem_init(sem_t *sem, int pshared, unsigned int value) { if (value > SEM_VALUE_MAX) { errno = EINVAL; return (-1); } bzero(sem, sizeof(sem_t)); sem->_magic = SEM_MAGIC; sem->_kern._count = (u_int32_t)value; sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0; return (0); } sem_t * _sem_open(const char *name, int flags, ...) 
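/*
 * Named semaphores are backed by files: "/name" is mapped to SEM_PREFIX
 * with "name" appended, so (illustratively) sem_open("/mysem", ...)
 * operates on /tmp/SEMDmysem, and the sem_t itself is the first
 * sizeof(sem_t) bytes of that file, mmap()ed into every process that
 * opens it.
 */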
{ char path[PATH_MAX]; struct stat sb; va_list ap; struct sem_nameinfo *ni = NULL; sem_t *sem = NULL; int fd = -1, mode, len, errsave; int value = 0; if (name[0] != '/') { errno = EINVAL; return (SEM_FAILED); } name++; strcpy(path, SEM_PREFIX); if (strlcat(path, name, sizeof(path)) >= sizeof(path)) { errno = ENAMETOOLONG; return (SEM_FAILED); } if (flags & ~(O_CREAT|O_EXCL)) { errno = EINVAL; return (SEM_FAILED); } if ((flags & O_CREAT) != 0) { va_start(ap, flags); mode = va_arg(ap, int); value = va_arg(ap, int); va_end(ap); } fd = -1; _pthread_once(&once, sem_module_init); _pthread_mutex_lock(&sem_llock); LIST_FOREACH(ni, &sem_list, next) { if (ni->name != NULL && strcmp(name, ni->name) == 0) { fd = _open(path, flags | O_RDWR | O_CLOEXEC | O_EXLOCK, mode); if (fd == -1 || _fstat(fd, &sb) == -1) goto error; if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL) || ni->dev != sb.st_dev || ni->ino != sb.st_ino) { ni->name = NULL; ni = NULL; break; } ni->open_count++; sem = ni->sem; _pthread_mutex_unlock(&sem_llock); _close(fd); return (sem); } } len = sizeof(*ni) + strlen(name) + 1; ni = (struct sem_nameinfo *)malloc(len); if (ni == NULL) { errno = ENOSPC; goto error; } ni->name = (char *)(ni+1); strcpy(ni->name, name); if (fd == -1) { fd = _open(path, flags | O_RDWR | O_CLOEXEC | O_EXLOCK, mode); if (fd == -1 || _fstat(fd, &sb) == -1) goto error; } if (sb.st_size < sizeof(sem_t)) { sem_t tmp; tmp._magic = SEM_MAGIC; tmp._kern._count = value; tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED; if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) goto error; } flock(fd, LOCK_UN); sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_NOSYNC, fd, 0); if (sem == MAP_FAILED) { sem = NULL; if (errno == ENOMEM) errno = ENOSPC; goto error; } if (sem->_magic != SEM_MAGIC) { errno = EINVAL; goto error; } ni->open_count = 1; ni->sem = sem; ni->dev = sb.st_dev; ni->ino = sb.st_ino; LIST_INSERT_HEAD(&sem_list, ni, next); _close(fd); _pthread_mutex_unlock(&sem_llock); return (sem); error: errsave = errno; if (fd != -1) _close(fd); if (sem != NULL) munmap(sem, sizeof(sem_t)); free(ni); _pthread_mutex_unlock(&sem_llock); errno = errsave; return (SEM_FAILED); } int _sem_close(sem_t *sem) { struct sem_nameinfo *ni; if (sem_check_validity(sem) != 0) return (-1); if (!(sem->_kern._flags & SEM_NAMED)) { errno = EINVAL; return (-1); } _pthread_once(&once, sem_module_init); _pthread_mutex_lock(&sem_llock); LIST_FOREACH(ni, &sem_list, next) { if (sem == ni->sem) { if (--ni->open_count > 0) { _pthread_mutex_unlock(&sem_llock); return (0); } else break; } } if (ni) { LIST_REMOVE(ni, next); _pthread_mutex_unlock(&sem_llock); munmap(sem, sizeof(*sem)); free(ni); return (0); } _pthread_mutex_unlock(&sem_llock); errno = EINVAL; return (-1); } int _sem_unlink(const char *name) { char path[PATH_MAX]; if (name[0] != '/') { errno = ENOENT; return -1; } name++; strcpy(path, SEM_PREFIX); if (strlcat(path, name, sizeof(path)) >= sizeof(path)) { errno = ENAMETOOLONG; return (-1); } return (unlink(path)); } int _sem_destroy(sem_t *sem) { if (sem_check_validity(sem) != 0) return (-1); if (sem->_kern._flags & SEM_NAMED) { errno = EINVAL; return (-1); } sem->_magic = 0; return (0); } int _sem_getvalue(sem_t * __restrict sem, int * __restrict sval) { if (sem_check_validity(sem) != 0) return (-1); *sval = (int)USEM_COUNT(sem->_kern._count); return (0); } static __inline int usem_wake(struct _usem2 *sem) { return _umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL); } static __inline int usem_wait(struct _usem2 *sem, 
const struct timespec *abstime) { struct _umtx_time *tm_p, timeout; size_t tm_size; if (abstime == NULL) { tm_p = NULL; tm_size = 0; } else { timeout._clockid = CLOCK_REALTIME; timeout._flags = UMTX_ABSTIME; timeout._timeout = *abstime; tm_p = &timeout; tm_size = sizeof(timeout); } return _umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, (void *)tm_size, __DECONST(void*, tm_p)); } int _sem_trywait(sem_t *sem) { int val; if (sem_check_validity(sem) != 0) return (-1); while (USEM_COUNT(val = sem->_kern._count) > 0) { if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1)) return (0); } errno = EAGAIN; return (-1); } int _sem_timedwait(sem_t * __restrict sem, const struct timespec * __restrict abstime) { int val, retval; if (sem_check_validity(sem) != 0) return (-1); retval = 0; _pthread_testcancel(); for (;;) { while (USEM_COUNT(val = sem->_kern._count) > 0) { if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1)) return (0); } if (retval) { _pthread_testcancel(); break; } /* * The timeout argument is only supposed to * be checked if the thread would have blocked. */ if (abstime != NULL) { if (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0) { errno = EINVAL; return (-1); } } _pthread_cancel_enter(1); retval = usem_wait(&sem->_kern, abstime); _pthread_cancel_leave(0); } return (retval); } int _sem_wait(sem_t *sem) { return _sem_timedwait(sem, NULL); } /* * POSIX: * The sem_post() interface is reentrant with respect to signals and may be * invoked from a signal-catching function. * The implementation does not use a lock, so it should be safe. */ int _sem_post(sem_t *sem) { unsigned int count; if (sem_check_validity(sem) != 0) return (-1); do { count = sem->_kern._count; - if (USEM_COUNT(count) + 1 > SEM_VALUE_MAX) - return (EOVERFLOW); + if (USEM_COUNT(count) + 1 > SEM_VALUE_MAX) { + errno = EOVERFLOW; + return (-1); + } } while (!atomic_cmpset_rel_int(&sem->_kern._count, count, count + 1)); if (count & USEM_HAS_WAITERS) usem_wake(&sem->_kern); return (0); } Index: projects/clang360-import/lib/libc/gen/sem_post.3 =================================================================== --- projects/clang360-import/lib/libc/gen/sem_post.3 (revision 277895) +++ projects/clang360-import/lib/libc/gen/sem_post.3 (revision 277896) @@ -1,77 +1,80 @@ .\" Copyright (C) 2000 Jason Evans . .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice(s), this list of conditions and the following disclaimer as .\" the first lines of this file unmodified other than the possible .\" addition of one or more copyright notices. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice(s), this list of conditions and the following disclaimer in .\" the documentation and/or other materials provided with the .\" distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY .\" EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR .\" PURPOSE ARE DISCLAIMED.
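A note on the _sem_post() hunk above: the overflow case previously leaked a positive EOVERFLOW return value; it now follows the POSIX errno convention, which the sem_post.3 change below documents. A minimal caller sketch under the new contract (the wrapper name is invented for illustration):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>

/* Post to a semaphore, reporting failures per the updated sem_post(3). */
static int
post_checked(sem_t *sem)
{
	if (sem_post(sem) == -1) {
		if (errno == EOVERFLOW)
			fprintf(stderr, "count is already at SEM_VALUE_MAX\n");
		else
			perror("sem_post");	/* e.g. EINVAL */
		return (-1);
	}
	return (0);
}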
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE .\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR .\" BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, .\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE .\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, .\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. .\" .\" $FreeBSD$ .\" -.Dd February 15, 2000 +.Dd January 28, 2015 .Dt SEM_POST 3 .Os .Sh NAME .Nm sem_post .Nd increment (unlock) a semaphore .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In semaphore.h .Ft int .Fn sem_post "sem_t *sem" .Sh DESCRIPTION The .Fn sem_post function increments (unlocks) the semaphore pointed to by .Fa sem . If there are threads blocked on the semaphore when .Fn sem_post is called, then the highest priority thread that has been blocked the longest on the semaphore will be allowed to return from .Fn sem_wait . .Pp The .Fn sem_post function is signal-reentrant and may be called within signal handlers. .Sh RETURN VALUES .Rv -std sem_post .Sh ERRORS The .Fn sem_post function will fail if: .Bl -tag -width Er .It Bq Er EINVAL The .Fa sem argument points to an invalid semaphore. +.It Bq Er EOVERFLOW +The semaphore value would exceed +.Dv SEM_VALUE_MAX . .El .Sh SEE ALSO .Xr sem_getvalue 3 , .Xr sem_trywait 3 , .Xr sem_wait 3 .Sh STANDARDS The .Fn sem_post function conforms to .St -p1003.1-96 . Index: projects/clang360-import/lib/libc/mips/gen/sigsetjmp.S =================================================================== --- projects/clang360-import/lib/libc/mips/gen/sigsetjmp.S (revision 277895) +++ projects/clang360-import/lib/libc/mips/gen/sigsetjmp.S (revision 277896) @@ -1,77 +1,77 @@ /* $NetBSD: sigsetjmp.S,v 1.8 2005/09/17 11:49:39 tsutsui Exp $ */ /*- * Copyright (c) 1991, 1993, 1995, * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Havard Eidnes. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #if defined(LIBC_SCCS) && !defined(lint) ASMSTR("from: @(#)setjmp.s 8.1 (Berkeley) 6/4/93") ASMSTR("$NetBSD: sigsetjmp.S,v 1.8 2005/09/17 11:49:39 tsutsui Exp $") #endif /* LIBC_SCCS and not lint */ #include "SYS.h" #ifdef __ABICALLS__ .abicalls #endif /* * C library -- sigsetjmp, siglongjmp * * siglongjmp(a,v) * will generate a "return(v)" from * the last call to * sigsetjmp(a, savemask) * by restoring registers from the stack, * and, depending on savemask, restoring the * signal mask. */ LEAF(sigsetjmp) PIC_PROLOGUE(sigsetjmp) - bne a1, 0x0, 1f # do saving of signal mask? + bne a1, zero, 1f # do saving of signal mask? PIC_TAILCALL(_setjmp) 1: PIC_TAILCALL(setjmp) END(sigsetjmp) LEAF(siglongjmp) PIC_PROLOGUE(siglongjmp) REG_L t0, (_JB_MAGIC * SZREG)(a0) REG_LI t1, _JB_MAGIC__SETJMP bne t0, t1, 1f # setjmp or _setjmp magic? PIC_TAILCALL(_longjmp) 1: PIC_TAILCALL(longjmp) END(siglongjmp) Index: projects/clang360-import/lib/libc =================================================================== --- projects/clang360-import/lib/libc (revision 277895) +++ projects/clang360-import/lib/libc (revision 277896) Property changes on: projects/clang360-import/lib/libc ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/lib/libc:r277719-277895 Index: projects/clang360-import/share/man/man4/sfxge.4 =================================================================== --- projects/clang360-import/share/man/man4/sfxge.4 (revision 277895) +++ projects/clang360-import/share/man/man4/sfxge.4 (revision 277896) @@ -1,129 +1,141 @@ .\" Copyright (c) 2011 Solarflare Communications, Inc. .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE.
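For context on the routine being patched: sigsetjmp(3) tail-calls setjmp or _setjmp depending on whether its second argument (savemask) is non-zero, which is exactly what the corrected bne test implements. A minimal C usage sketch (signal choice and message are illustrative):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static sigjmp_buf env;

static void
on_sigint(int sig)
{
	siglongjmp(env, 1);	/* makes sigsetjmp() return 1 */
}

int
main(void)
{
	signal(SIGINT, on_sigint);
	/* savemask != 0: the signal mask is saved and later restored. */
	if (sigsetjmp(env, 1) != 0) {
		printf("unwound via siglongjmp\n");
		return (0);
	}
	for (;;)
		pause();	/* wait for SIGINT */
}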
.\" .\" $FreeBSD$ .\" .Dd September 30, 2014 .Dt SFXGE 4 .Os .Sh NAME .Nm sfxge .Nd "Solarflare 10Gb Ethernet adapter driver" .Sh SYNOPSIS To compile this driver into the kernel, place the following lines in your kernel configuration file: .Bd -ragged -offset indent .Cd "device sfxge" .Ed .Pp To load the driver as a module at boot time, place the following line in .Xr loader.conf 5 : .Bd -literal -offset indent sfxge_load="YES" .Ed .Sh DESCRIPTION The .Nm driver provides support for 10Gb Ethernet adapters based on Solarflare SFC9000 family controllers. The driver supports jumbo frames, transmit/receive checksum offload, TCP Segmentation Offload (TSO), Large Receive Offload (LRO), VLAN checksum offload, VLAN TSO, and Receive Side Scaling (RSS) using MSI-X interrupts. .Pp The driver allocates 1 receive queue, transmit queue, event queue and IRQ per CPU up to a maximum of 64. IRQ affinities should be spread out using .Xr cpuset 1 . Interrupt moderation may be controlled through the sysctl .Va dev.sfxge.%d.int_mod (units are microseconds). .Pp For more information on configuring this device, see .Xr ifconfig 8 . .Pp A large number of MAC, PHY and data path statistics are available under the sysctl .Va dev.sfxge.%d.stats . The adapter's VPD fields including its serial number are available under the sysctl .Va dev.sfxge.%d.vpd . .Sh HARDWARE The .Nm driver supports all 10Gb Ethernet adapters based on Solarflare SFC9000 family controllers. .Sh LOADER TUNABLES Tunables can be set at the .Xr loader 8 prompt before booting the kernel or stored in .Xr loader.conf 5 . Actual values can be obtained using .Xr sysctl 8 . .Bl -tag -width indent .It Va hw.sfxge.rx_ring The maximum number of descriptors in a receive queue ring. Supported values are: 512, 1024, 2048 and 4096. .It Va hw.sfxge.tx_ring The maximum number of descriptors in a transmit queue ring. Supported values are: 512, 1024, 2048 and 4096. .It Va hw.sfxge.tx_dpl_get_max The maximum length of the deferred packet .Dq get-list -for queued transmit -packets, used only if the transmit queue lock can be acquired. +for queued transmit packets (TCP and non-TCP), used only if the transmit +queue lock can be acquired. If a packet is dropped, the -.Va tx_early_drops +.Va tx_get_overflow counter is incremented and the local sender receives ENOBUFS. The value must be greater than 0. +.It Va hw.sfxge.tx_dpl_get_non_tcp_max +The maximum number of non-TCP packets in the deferred packet +.Dq get-list +, used only if the transmit queue lock can be acquired. +If packet is dropped, the +.Va tx_get_non_tcp_overflow +counter is incremented and the local sender receives ENOBUFS. +The value must be greater than 0. .It Va hw.sfxge.tx_dpl_put_max The maximum length of the deferred packet .Dq put-list for queued transmit packets, used if the transmit queue lock cannot be acquired. If a packet is dropped, the -.Va tx_early_drops +.Va tx_put_overflow counter is incremented and the local sender receives ENOBUFS. The value must be greater than or equal to 0. +.It Va hw.sfxge.N.max_rss_channels +The maximum number of allocated RSS channels for the Nth adapter. +If set to 0 or unset, the number of channels is determined by the number +of CPU cores. .El .Sh SUPPORT For general information and support, go to the Solarflare support website at: .Pa https://support.solarflare.com . .Sh SEE ALSO .Xr cpuset 1 , .Xr arp 4 , .Xr netintro 4 , .Xr ng_ether 4 , .Xr vlan 4 , .Xr ifconfig 8 .Sh AUTHORS The .Nm driver was written by .An Philip Paeps and .An Solarflare Communications, Inc. 
Index: projects/clang360-import/share/man/man4 =================================================================== --- projects/clang360-import/share/man/man4 (revision 277895) +++ projects/clang360-import/share/man/man4 (revision 277896) Property changes on: projects/clang360-import/share/man/man4 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/share/man/man4:r277777-277895 Index: projects/clang360-import/share =================================================================== --- projects/clang360-import/share (revision 277895) +++ projects/clang360-import/share (revision 277896) Property changes on: projects/clang360-import/share ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/share:r277844-277895 Index: projects/clang360-import/sys/arm/ti/am335x/am335x_prcm.c =================================================================== --- projects/clang360-import/sys/arm/ti/am335x/am335x_prcm.c (revision 277895) +++ projects/clang360-import/sys/arm/ti/am335x/am335x_prcm.c (revision 277896) @@ -1,798 +1,798 @@ /*- * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define CM_PER 0 #define CM_PER_L4LS_CLKSTCTRL (CM_PER + 0x000) #define CM_PER_L3S_CLKSTCTRL (CM_PER + 0x004) #define CM_PER_L3_CLKSTCTRL (CM_PER + 0x00C) #define CM_PER_CPGMAC0_CLKCTRL (CM_PER + 0x014) #define CM_PER_LCDC_CLKCTRL (CM_PER + 0x018) #define CM_PER_USB0_CLKCTRL (CM_PER + 0x01C) #define CM_PER_TPTC0_CLKCTRL (CM_PER + 0x024) #define CM_PER_UART5_CLKCTRL (CM_PER + 0x038) #define CM_PER_MMC0_CLKCTRL (CM_PER + 0x03C) #define CM_PER_I2C2_CLKCTRL (CM_PER + 0x044) #define CM_PER_I2C1_CLKCTRL (CM_PER + 0x048) #define CM_PER_UART1_CLKCTRL (CM_PER + 0x06C) #define CM_PER_UART2_CLKCTRL (CM_PER + 0x070) #define CM_PER_UART3_CLKCTRL (CM_PER + 0x074) #define CM_PER_UART4_CLKCTRL (CM_PER + 0x078) #define CM_PER_TIMER7_CLKCTRL (CM_PER + 0x07C) #define CM_PER_TIMER2_CLKCTRL (CM_PER + 0x080) #define CM_PER_TIMER3_CLKCTRL (CM_PER + 0x084) #define CM_PER_TIMER4_CLKCTRL (CM_PER + 0x088) #define CM_PER_GPIO1_CLKCTRL (CM_PER + 0x0AC) #define CM_PER_GPIO2_CLKCTRL (CM_PER + 0x0B0) #define CM_PER_GPIO3_CLKCTRL (CM_PER + 0x0B4) #define CM_PER_TPCC_CLKCTRL (CM_PER + 0x0BC) #define CM_PER_EPWMSS1_CLKCTRL (CM_PER + 0x0CC) #define CM_PER_EPWMSS0_CLKCTRL (CM_PER + 0x0D4) #define CM_PER_EPWMSS2_CLKCTRL (CM_PER + 0x0D8) #define CM_PER_L3_INSTR_CLKCTRL (CM_PER + 0x0DC) #define CM_PER_L3_CLKCTRL (CM_PER + 0x0E0) #define CM_PER_PRUSS_CLKCTRL (CM_PER + 0x0E8) #define CM_PER_TIMER5_CLKCTRL (CM_PER + 0x0EC) #define CM_PER_TIMER6_CLKCTRL (CM_PER + 0x0F0) #define CM_PER_MMC1_CLKCTRL (CM_PER + 0x0F4) #define CM_PER_MMC2_CLKCTRL (CM_PER + 0x0F8) #define CM_PER_TPTC1_CLKCTRL (CM_PER + 0x0FC) #define CM_PER_TPTC2_CLKCTRL (CM_PER + 0x100) #define CM_PER_SPINLOCK0_CLKCTRL (CM_PER + 0x10C) #define CM_PER_MAILBOX0_CLKCTRL (CM_PER + 0x110) #define CM_PER_OCPWP_L3_CLKSTCTRL (CM_PER + 0x12C) #define CM_PER_OCPWP_CLKCTRL (CM_PER + 0x130) #define CM_PER_CPSW_CLKSTCTRL (CM_PER + 0x144) #define CM_PER_PRUSS_CLKSTCTRL (CM_PER + 0x140) #define CM_WKUP 0x400 #define CM_WKUP_CLKSTCTRL (CM_WKUP + 0x000) #define CM_WKUP_CONTROL_CLKCTRL (CM_WKUP + 0x004) #define CM_WKUP_GPIO0_CLKCTRL (CM_WKUP + 0x008) #define CM_WKUP_CM_L3_AON_CLKSTCTRL (CM_WKUP + 0x01C) #define CM_WKUP_CM_CLKSEL_DPLL_MPU (CM_WKUP + 0x02C) #define CM_WKUP_CM_IDLEST_DPLL_DISP (CM_WKUP + 0x048) #define CM_WKUP_CM_CLKSEL_DPLL_DISP (CM_WKUP + 0x054) #define CM_WKUP_CM_CLKDCOLDO_DPLL_PER (CM_WKUP + 0x07C) #define CM_WKUP_CM_CLKMODE_DPLL_DISP (CM_WKUP + 0x098) #define CM_WKUP_I2C0_CLKCTRL (CM_WKUP + 0x0B8) #define CM_WKUP_ADC_TSC_CLKCTRL (CM_WKUP + 0x0BC) #define CM_DPLL 0x500 #define CLKSEL_TIMER7_CLK (CM_DPLL + 0x004) #define CLKSEL_TIMER2_CLK (CM_DPLL + 0x008) #define CLKSEL_TIMER3_CLK (CM_DPLL + 0x00C) #define CLKSEL_TIMER4_CLK (CM_DPLL + 0x010) #define CLKSEL_TIMER5_CLK (CM_DPLL + 0x018) #define CLKSEL_TIMER6_CLK (CM_DPLL + 0x01C) #define CLKSEL_PRUSS_OCP_CLK (CM_DPLL + 0x030) #define CM_RTC 0x800 #define CM_RTC_RTC_CLKCTRL (CM_RTC + 0x000) #define CM_RTC_CLKSTCTRL (CM_RTC + 0x004) #define PRM_PER 0xC00 #define PRM_PER_RSTCTRL (PRM_PER + 0x00) #define PRM_DEVICE_OFFSET 0xF00 #define PRM_RSTCTRL (PRM_DEVICE_OFFSET + 0x00) struct am335x_prcm_softc { struct resource * res[2]; bus_space_tag_t bst; bus_space_handle_t bsh; }; static struct resource_spec am335x_prcm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static struct 
am335x_prcm_softc *am335x_prcm_sc = NULL; static int am335x_clk_noop_activate(struct ti_clock_dev *clkdev); static int am335x_clk_generic_activate(struct ti_clock_dev *clkdev); static int am335x_clk_gpio_activate(struct ti_clock_dev *clkdev); static int am335x_clk_noop_deactivate(struct ti_clock_dev *clkdev); static int am335x_clk_generic_deactivate(struct ti_clock_dev *clkdev); static int am335x_clk_noop_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int am335x_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int am335x_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static void am335x_prcm_reset(void); static int am335x_clk_cpsw_activate(struct ti_clock_dev *clkdev); static int am335x_clk_musb0_activate(struct ti_clock_dev *clkdev); static int am335x_clk_lcdc_activate(struct ti_clock_dev *clkdev); static int am335x_clk_pruss_activate(struct ti_clock_dev *clkdev); #define AM335X_NOOP_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_noop_activate, \ .clk_deactivate = am335x_clk_noop_deactivate, \ .clk_set_source = am335x_clk_noop_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL \ } #define AM335X_GENERIC_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_generic_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL \ } #define AM335X_GPIO_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_gpio_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL \ } #define AM335X_MMCHS_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_generic_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = am335x_clk_hsmmc_get_source_freq \ } struct ti_clock_dev ti_am335x_clk_devmap[] = { /* System clocks */ { .id = SYS_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_sysclk_freq, }, /* MPU (ARM) core clocks */ { .id = MPU_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_arm_fclk_freq, }, /* CPSW Ethernet Switch core clocks */ { .id = CPSW_CLK, .clk_activate = am335x_clk_cpsw_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = NULL, }, /* Mentor USB HS controller core clocks */ { .id = MUSB0_CLK, .clk_activate = am335x_clk_musb0_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = NULL, }, /* LCD controller clocks */ { .id = LCDC_CLK, .clk_activate = am335x_clk_lcdc_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_arm_disp_freq, }, /* UART. Uart0 clock cannot be controlled. 
*/ AM335X_NOOP_CLOCK_DEV(UART0_CLK), AM335X_GENERIC_CLOCK_DEV(UART1_CLK), AM335X_GENERIC_CLOCK_DEV(UART2_CLK), AM335X_GENERIC_CLOCK_DEV(UART3_CLK), AM335X_GENERIC_CLOCK_DEV(UART4_CLK), AM335X_GENERIC_CLOCK_DEV(UART5_CLK), /* DMTimer */ AM335X_GENERIC_CLOCK_DEV(DMTIMER2_CLK), AM335X_GENERIC_CLOCK_DEV(DMTIMER3_CLK), AM335X_GENERIC_CLOCK_DEV(DMTIMER4_CLK), AM335X_GENERIC_CLOCK_DEV(DMTIMER5_CLK), AM335X_GENERIC_CLOCK_DEV(DMTIMER6_CLK), AM335X_GENERIC_CLOCK_DEV(DMTIMER7_CLK), /* GPIO */ AM335X_GPIO_CLOCK_DEV(GPIO0_CLK), AM335X_GPIO_CLOCK_DEV(GPIO1_CLK), AM335X_GPIO_CLOCK_DEV(GPIO2_CLK), AM335X_GPIO_CLOCK_DEV(GPIO3_CLK), /* I2C */ AM335X_GENERIC_CLOCK_DEV(I2C0_CLK), AM335X_GENERIC_CLOCK_DEV(I2C1_CLK), AM335X_GENERIC_CLOCK_DEV(I2C2_CLK), /* TSC_ADC */ AM335X_GENERIC_CLOCK_DEV(TSC_ADC_CLK), /* EDMA */ AM335X_GENERIC_CLOCK_DEV(EDMA_TPCC_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC0_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC1_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC2_CLK), /* MMCHS */ AM335X_MMCHS_CLOCK_DEV(MMC0_CLK), AM335X_MMCHS_CLOCK_DEV(MMC1_CLK), AM335X_MMCHS_CLOCK_DEV(MMC2_CLK), /* PWMSS */ AM335X_GENERIC_CLOCK_DEV(PWMSS0_CLK), AM335X_GENERIC_CLOCK_DEV(PWMSS1_CLK), AM335X_GENERIC_CLOCK_DEV(PWMSS2_CLK), /* System Mailbox clock */ AM335X_GENERIC_CLOCK_DEV(MAILBOX0_CLK), /* SPINLOCK */ AM335X_GENERIC_CLOCK_DEV(SPINLOCK0_CLK), /* PRU-ICSS */ { .id = PRUSS_CLK, .clk_activate = am335x_clk_pruss_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = NULL, }, /* RTC */ AM335X_GENERIC_CLOCK_DEV(RTC_CLK), { INVALID_CLK_IDENT, NULL, NULL, NULL, NULL } }; struct am335x_clk_details { clk_ident_t id; uint32_t clkctrl_reg; uint32_t clksel_reg; }; #define _CLK_DETAIL(i, c, s) \ { .id = (i), \ .clkctrl_reg = (c), \ .clksel_reg = (s), \ } static struct am335x_clk_details g_am335x_clk_details[] = { /* UART. UART0 clock not controllable. 
*/ _CLK_DETAIL(UART0_CLK, 0, 0), _CLK_DETAIL(UART1_CLK, CM_PER_UART1_CLKCTRL, 0), _CLK_DETAIL(UART2_CLK, CM_PER_UART2_CLKCTRL, 0), _CLK_DETAIL(UART3_CLK, CM_PER_UART3_CLKCTRL, 0), _CLK_DETAIL(UART4_CLK, CM_PER_UART4_CLKCTRL, 0), _CLK_DETAIL(UART5_CLK, CM_PER_UART5_CLKCTRL, 0), /* DMTimer modules */ _CLK_DETAIL(DMTIMER2_CLK, CM_PER_TIMER2_CLKCTRL, CLKSEL_TIMER2_CLK), _CLK_DETAIL(DMTIMER3_CLK, CM_PER_TIMER3_CLKCTRL, CLKSEL_TIMER3_CLK), _CLK_DETAIL(DMTIMER4_CLK, CM_PER_TIMER4_CLKCTRL, CLKSEL_TIMER4_CLK), _CLK_DETAIL(DMTIMER5_CLK, CM_PER_TIMER5_CLKCTRL, CLKSEL_TIMER5_CLK), _CLK_DETAIL(DMTIMER6_CLK, CM_PER_TIMER6_CLKCTRL, CLKSEL_TIMER6_CLK), _CLK_DETAIL(DMTIMER7_CLK, CM_PER_TIMER7_CLKCTRL, CLKSEL_TIMER7_CLK), /* GPIO modules */ _CLK_DETAIL(GPIO0_CLK, CM_WKUP_GPIO0_CLKCTRL, 0), _CLK_DETAIL(GPIO1_CLK, CM_PER_GPIO1_CLKCTRL, 0), _CLK_DETAIL(GPIO2_CLK, CM_PER_GPIO2_CLKCTRL, 0), _CLK_DETAIL(GPIO3_CLK, CM_PER_GPIO3_CLKCTRL, 0), /* I2C modules */ _CLK_DETAIL(I2C0_CLK, CM_WKUP_I2C0_CLKCTRL, 0), _CLK_DETAIL(I2C1_CLK, CM_PER_I2C1_CLKCTRL, 0), _CLK_DETAIL(I2C2_CLK, CM_PER_I2C2_CLKCTRL, 0), /* TSC_ADC module */ _CLK_DETAIL(TSC_ADC_CLK, CM_WKUP_ADC_TSC_CLKCTRL, 0), /* EDMA modules */ _CLK_DETAIL(EDMA_TPCC_CLK, CM_PER_TPCC_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC0_CLK, CM_PER_TPTC0_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC1_CLK, CM_PER_TPTC1_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC2_CLK, CM_PER_TPTC2_CLKCTRL, 0), /* MMCHS modules*/ _CLK_DETAIL(MMC0_CLK, CM_PER_MMC0_CLKCTRL, 0), _CLK_DETAIL(MMC1_CLK, CM_PER_MMC1_CLKCTRL, 0), _CLK_DETAIL(MMC2_CLK, CM_PER_MMC1_CLKCTRL, 0), /* PWMSS modules */ _CLK_DETAIL(PWMSS0_CLK, CM_PER_EPWMSS0_CLKCTRL, 0), _CLK_DETAIL(PWMSS1_CLK, CM_PER_EPWMSS1_CLKCTRL, 0), _CLK_DETAIL(PWMSS2_CLK, CM_PER_EPWMSS2_CLKCTRL, 0), _CLK_DETAIL(MAILBOX0_CLK, CM_PER_MAILBOX0_CLKCTRL, 0), _CLK_DETAIL(SPINLOCK0_CLK, CM_PER_SPINLOCK0_CLKCTRL, 0), /* RTC module */ _CLK_DETAIL(RTC_CLK, CM_RTC_RTC_CLKCTRL, 0), { INVALID_CLK_IDENT, 0}, }; /* Read/Write macros */ #define prcm_read_4(reg) \ bus_space_read_4(am335x_prcm_sc->bst, am335x_prcm_sc->bsh, reg) #define prcm_write_4(reg, val) \ bus_space_write_4(am335x_prcm_sc->bst, am335x_prcm_sc->bsh, reg, val) void am335x_prcm_setup_dmtimer(int); static int am335x_prcm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "am335x,prcm")) { device_set_desc(dev, "AM335x Power and Clock Management"); return(BUS_PROBE_DEFAULT); } return (ENXIO); } static int am335x_prcm_attach(device_t dev) { struct am335x_prcm_softc *sc = device_get_softc(dev); unsigned int sysclk, fclk; if (am335x_prcm_sc) return (ENXIO); if (bus_alloc_resources(dev, am335x_prcm_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); am335x_prcm_sc = sc; ti_cpu_reset = am335x_prcm_reset; am335x_clk_get_sysclk_freq(NULL, &sysclk); am335x_clk_get_arm_fclk_freq(NULL, &fclk); device_printf(dev, "Clocks: System %u.%01u MHz, CPU %u MHz\n", sysclk/1000000, (sysclk % 1000000)/100000, fclk/1000000); return (0); } static device_method_t am335x_prcm_methods[] = { DEVMETHOD(device_probe, am335x_prcm_probe), DEVMETHOD(device_attach, am335x_prcm_attach), { 0, 0 } }; static driver_t am335x_prcm_driver = { "am335x_prcm", am335x_prcm_methods, sizeof(struct am335x_prcm_softc), }; static devclass_t am335x_prcm_devclass; DRIVER_MODULE(am335x_prcm, simplebus, am335x_prcm_driver, am335x_prcm_devclass, 0, 0); MODULE_DEPEND(am335x_prcm, ti_scm, 1, 1, 1); static struct 
am335x_clk_details* am335x_clk_details(clk_ident_t id) { struct am335x_clk_details *walker; for (walker = g_am335x_clk_details; walker->id != INVALID_CLK_IDENT; walker++) { if (id == walker->id) return (walker); } return NULL; } static int am335x_clk_noop_activate(struct ti_clock_dev *clkdev) { return (0); } static int am335x_clk_generic_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to enable(2) */ prcm_write_4(clk_details->clkctrl_reg, 2); while ((prcm_read_4(clk_details->clkctrl_reg) & 0x3) != 2) DELAY(10); return (0); } static int am335x_clk_gpio_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to enable(2) */ /* set *_CLKCTRL register OPTFCLKEN_GPIO_1_G DBCLK[18] to FCLK_EN(1) */ prcm_write_4(clk_details->clkctrl_reg, 2 | (1 << 18)); while ((prcm_read_4(clk_details->clkctrl_reg) & (3 | (1 << 18) )) != (2 | (1 << 18))) DELAY(10); return (0); } static int am335x_clk_noop_deactivate(struct ti_clock_dev *clkdev) { return(0); } static int am335x_clk_generic_deactivate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to disable(0) */ prcm_write_4(clk_details->clkctrl_reg, 0); while ((prcm_read_4(clk_details->clkctrl_reg) & 0x3) != 0) DELAY(10); return (0); } static int am335x_clk_noop_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { return (0); } static int am335x_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; uint32_t reg; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); switch (clksrc) { case EXT_CLK: reg = 0; /* SEL2: TCLKIN clock */ break; case SYSCLK_CLK: reg = 1; /* SEL1: CLK_M_OSC clock */ break; case F32KHZ_CLK: reg = 2; /* SEL3: CLK_32KHZ clock */ break; default: return (ENXIO); } prcm_write_4(clk_details->clksel_reg, reg); while ((prcm_read_4(clk_details->clksel_reg) & 0x3) != reg) DELAY(10); return (0); } static int am335x_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { *freq = 96000000; return (0); } static int am335x_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { uint32_t ctrl_status; /* Read the input clock freq from the control module */ /* control_status reg (0x40) */ if (ti_scm_reg_read_4(0x40, &ctrl_status)) return ENXIO; switch ((ctrl_status>>22) & 0x3) { case 0x0: /* 19.2Mhz */ *freq = 19200000; break; case 0x1: /* 24Mhz */ *freq = 24000000; break; case 0x2: /* 25Mhz */ *freq = 25000000; break; case 0x3: /* 26Mhz */ *freq = 26000000; break; } return (0); } #define DPLL_BYP_CLKSEL(reg) ((reg>>23) & 1) #define DPLL_DIV(reg) ((reg & 0x7f)+1) #define DPLL_MULT(reg) ((reg>>8) & 0x7FF) static int am335x_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { uint32_t reg; uint32_t sysclk; reg = 
prcm_read_4(CM_WKUP_CM_CLKSEL_DPLL_MPU); /* Check if we are running in bypass */ if (DPLL_BYP_CLKSEL(reg)) return ENXIO; am335x_clk_get_sysclk_freq(NULL, &sysclk); *freq = DPLL_MULT(reg) * (sysclk / DPLL_DIV(reg)); return(0); } static int am335x_clk_get_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { uint32_t reg; uint32_t sysclk; reg = prcm_read_4(CM_WKUP_CM_CLKSEL_DPLL_DISP); /* Check if we are running in bypass */ if (DPLL_BYP_CLKSEL(reg)) return ENXIO; am335x_clk_get_sysclk_freq(NULL, &sysclk); *freq = DPLL_MULT(reg) * (sysclk / DPLL_DIV(reg)); return(0); } static void am335x_prcm_reset(void) { prcm_write_4(PRM_RSTCTRL, (1<<1)); } static int am335x_clk_cpsw_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return ENXIO; /* set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_CPGMAC0_CLKCTRL, 2); /* wait for IDLEST to become Func(0) */ while(prcm_read_4(CM_PER_CPGMAC0_CLKCTRL) & (3<<16)); /* set CLKTRCTRL to SW_WKUP(2) */ prcm_write_4(CM_PER_CPSW_CLKSTCTRL, 2); /* wait for 125 MHz OCP clock to become active */ while((prcm_read_4(CM_PER_CPSW_CLKSTCTRL) & (1<<4)) == 0); return(0); } static int am335x_clk_musb0_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return ENXIO; /* set ST_DPLL_CLKDCOLDO(9) to CLK_GATED(1) */ /* set DPLL_CLKDCOLDO_GATE_CTRL(8) to CLK_ENABLE(1) */ prcm_write_4(CM_WKUP_CM_CLKDCOLDO_DPLL_PER, 0x300); /* set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_USB0_CLKCTRL, 2); /* wait for MODULEMODE to become ENABLE(2) */ while ((prcm_read_4(CM_PER_USB0_CLKCTRL) & 0x3) != 2) DELAY(10); /* wait for IDLEST to become Func(0) */ while(prcm_read_4(CM_PER_USB0_CLKCTRL) & (3<<16)) DELAY(10); return(0); } static int am335x_clk_lcdc_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return (ENXIO); /* Bypass mode */ prcm_write_4(CM_WKUP_CM_CLKMODE_DPLL_DISP, 0x4); /* Make sure it's in bypass mode */ while (!(prcm_read_4(CM_WKUP_CM_IDLEST_DPLL_DISP) & (1 << 8))) DELAY(10); /* - * For now set frequency to 5xSYSFREQ - * More flexible control might be required + * For now set frequency to 99*SYSFREQ/8, which is twice the + * HDMI 1080p pixel clock (minimum LCDC freq divisor is 2) */ - prcm_write_4(CM_WKUP_CM_CLKSEL_DPLL_DISP, (5 << 8) | 0); + prcm_write_4(CM_WKUP_CM_CLKSEL_DPLL_DISP, (99 << 8) | 8); /* Locked mode */ prcm_write_4(CM_WKUP_CM_CLKMODE_DPLL_DISP, 0x7); int timeout = 10000; while ((!(prcm_read_4(CM_WKUP_CM_IDLEST_DPLL_DISP) & (1 << 0))) && timeout--) DELAY(10); /* set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_LCDC_CLKCTRL, 2); /* wait for MODULEMODE to become ENABLE(2) */ while ((prcm_read_4(CM_PER_LCDC_CLKCTRL) & 0x3) != 2) DELAY(10); /* wait for IDLEST to become Func(0) */ while(prcm_read_4(CM_PER_LCDC_CLKCTRL) & (3<<16)) DELAY(10); return (0); } static int am335x_clk_pruss_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return (ENXIO); /* Set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_PRUSS_CLKCTRL, 2); /* Wait for MODULEMODE to become ENABLE(2) */ while ((prcm_read_4(CM_PER_PRUSS_CLKCTRL) & 0x3) != 2) DELAY(10); /* Set CLKTRCTRL to SW_WKUP(2) */ prcm_write_4(CM_PER_PRUSS_CLKSTCTRL, 2); /* Wait for the 200 MHz OCP clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<4)) == 0) DELAY(10); /* Wait for the 200 MHz IEP clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<5)) == 0) DELAY(10); /* Wait for the
192 MHz UART clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<6)) == 0) DELAY(10); /* Select DISP DPLL as OCP clock */ prcm_write_4(CLKSEL_PRUSS_OCP_CLK, 1); while ((prcm_read_4(CLKSEL_PRUSS_OCP_CLK) & 0x3) != 1) DELAY(10); /* Clear the RESET bit */ prcm_write_4(PRM_PER_RSTCTRL, prcm_read_4(PRM_PER_RSTCTRL) & ~2); return (0); } Index: projects/clang360-import/sys/arm/xilinx/zy7_gpio.c =================================================================== --- projects/clang360-import/sys/arm/xilinx/zy7_gpio.c (revision 277895) +++ projects/clang360-import/sys/arm/xilinx/zy7_gpio.c (revision 277896) @@ -1,388 +1,383 @@ /*- * Copyright (c) 2013 Thomas Skibo * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * A GPIO driver for Xilinx Zynq-7000. * * The GPIO peripheral on Zynq allows controlling 114 general purpose I/Os. * * Pins 53-0 are sent to the MIO. Any MIO pins not used by a PS peripheral are * available as a GPIO pin. Pins 64-127 are sent to the PL (FPGA) section of * Zynq as EMIO signals. * * The hardware provides a way to use IOs as interrupt sources but the * gpio framework doesn't seem to have hooks for this. * * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. * (v1.4) November 16, 2012. Xilinx doc UG585. GPIO is covered in * chapter 14. Register definitions are in appendix B.19.
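A worked example of the DPLL read-back arithmetic in am335x_clk_get_arm_fclk_freq() above, using the driver's own DPLL_MULT and DPLL_DIV macros; the register contents are hypothetical (M = 550, N = 23, a plausible 550 MHz MPU setting), and the 24 MHz system clock is one of the four rates decoded by am335x_clk_get_sysclk_freq():

#include <stdio.h>
#include <stdint.h>

#define DPLL_DIV(reg)	(((reg) & 0x7f) + 1)	/* N field plus one */
#define DPLL_MULT(reg)	(((reg) >> 8) & 0x7FF)	/* M field */

int
main(void)
{
	/* Hypothetical CM_WKUP_CM_CLKSEL_DPLL_MPU contents: M=550, N=23. */
	uint32_t reg = (550 << 8) | 23;
	unsigned int sysclk = 24000000;	/* 24 MHz CLK_M_OSC */

	/* Same formula the driver applies: M * (SYSCLK / (N + 1)). */
	unsigned int freq = DPLL_MULT(reg) * (sysclk / DPLL_DIV(reg));
	printf("MPU DPLL output: %u MHz\n", freq / 1000000);	/* 550 */
	return (0);
}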
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #define NUMBANKS 4 #define MAXPIN (32*NUMBANKS) #define MIO_PIN 0 /* pins 0-53 go to MIO */ #define NUM_MIO_PINS 54 #define EMIO_PIN 64 /* pins 64-127 go to PL */ #define NUM_EMIO_PINS 64 #define VALID_PIN(u) (((u) >= MIO_PIN && (u) < MIO_PIN + NUM_MIO_PINS) || \ ((u) >= EMIO_PIN && (u) < EMIO_PIN + NUM_EMIO_PINS)) #define ZGPIO_LOCK(sc) mtx_lock(&(sc)->sc_mtx) #define ZGPIO_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx) #define ZGPIO_LOCK_INIT(sc) \ mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \ "gpio", MTX_DEF) #define ZGPIO_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); struct zy7_gpio_softc { device_t dev; struct mtx sc_mtx; struct resource *mem_res; /* Memory resource */ }; #define WR4(sc, off, val) bus_write_4((sc)->mem_res, (off), (val)) #define RD4(sc, off) bus_read_4((sc)->mem_res, (off)) /* Xilinx Zynq-7000 GPIO register definitions: */ #define ZY7_GPIO_MASK_DATA_LSW(b) (0x0000+8*(b)) /* maskable wr lo */ #define ZY7_GPIO_MASK_DATA_MSW(b) (0x0004+8*(b)) /* maskable wr hi */ #define ZY7_GPIO_DATA(b) (0x0040+4*(b)) /* in/out data */ #define ZY7_GPIO_DATA_RO(b) (0x0060+4*(b)) /* input data */ #define ZY7_GPIO_DIRM(b) (0x0204+0x40*(b)) /* direction mode */ #define ZY7_GPIO_OEN(b) (0x0208+0x40*(b)) /* output enable */ #define ZY7_GPIO_INT_MASK(b) (0x020c+0x40*(b)) /* int mask */ #define ZY7_GPIO_INT_EN(b) (0x0210+0x40*(b)) /* int enable */ #define ZY7_GPIO_INT_DIS(b) (0x0214+0x40*(b)) /* int disable */ #define ZY7_GPIO_INT_STAT(b) (0x0218+0x40*(b)) /* int status */ #define ZY7_GPIO_INT_TYPE(b) (0x021c+0x40*(b)) /* int type */ #define ZY7_GPIO_INT_POLARITY(b) (0x0220+0x40*(b)) /* int polarity */ #define ZY7_GPIO_INT_ANY(b) (0x0224+0x40*(b)) /* any edge */ static int zy7_gpio_pin_max(device_t dev, int *maxpin) { *maxpin = MAXPIN; return (0); } /* Get a specific pin's capabilities. */ static int zy7_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { if (!VALID_PIN(pin)) return (EINVAL); *caps = (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | GPIO_PIN_TRISTATE); return (0); } /* Get a specific pin's name. */ static int zy7_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { if (!VALID_PIN(pin)) return (EINVAL); if (pin < NUM_MIO_PINS) { snprintf(name, GPIOMAXNAME, "MIO_%d", pin); name[GPIOMAXNAME - 1] = '\0'; } else { snprintf(name, GPIOMAXNAME, "EMIO_%d", pin - EMIO_PIN); name[GPIOMAXNAME - 1] = '\0'; } return (0); } /* Get a specific pin's current in/out/tri state. */ static int zy7_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct zy7_gpio_softc *sc = device_get_softc(dev); if (!VALID_PIN(pin)) return (EINVAL); ZGPIO_LOCK(sc); if ((RD4(sc, ZY7_GPIO_DIRM(pin >> 5)) & (1 << (pin & 31))) != 0) { /* output */ if ((RD4(sc, ZY7_GPIO_OEN(pin >> 5)) & (1 << (pin & 31))) == 0) *flags = (GPIO_PIN_OUTPUT | GPIO_PIN_TRISTATE); else *flags = GPIO_PIN_OUTPUT; } else /* input */ *flags = GPIO_PIN_INPUT; ZGPIO_UNLOCK(sc); return (0); } /* Set a specific pin's in/out/tri state. */ static int zy7_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct zy7_gpio_softc *sc = device_get_softc(dev); if (!VALID_PIN(pin)) return (EINVAL); ZGPIO_LOCK(sc); if ((flags & GPIO_PIN_OUTPUT) != 0) { /* Output. Set or reset OEN too. 
*/ WR4(sc, ZY7_GPIO_DIRM(pin >> 5), RD4(sc, ZY7_GPIO_DIRM(pin >> 5)) | (1 << (pin & 31))); if ((flags & GPIO_PIN_TRISTATE) != 0) WR4(sc, ZY7_GPIO_OEN(pin >> 5), RD4(sc, ZY7_GPIO_OEN(pin >> 5)) & ~(1 << (pin & 31))); else WR4(sc, ZY7_GPIO_OEN(pin >> 5), RD4(sc, ZY7_GPIO_OEN(pin >> 5)) | (1 << (pin & 31))); } else { /* Input. Turn off OEN. */ WR4(sc, ZY7_GPIO_DIRM(pin >> 5), RD4(sc, ZY7_GPIO_DIRM(pin >> 5)) & ~(1 << (pin & 31))); WR4(sc, ZY7_GPIO_OEN(pin >> 5), RD4(sc, ZY7_GPIO_OEN(pin >> 5)) & ~(1 << (pin & 31))); } ZGPIO_UNLOCK(sc); return (0); } /* Set a specific output pin's value. */ static int zy7_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct zy7_gpio_softc *sc = device_get_softc(dev); if (!VALID_PIN(pin) || value > 1) return (EINVAL); /* Fancy register tricks allow atomic set or reset. */ if ((pin & 16) != 0) WR4(sc, ZY7_GPIO_MASK_DATA_MSW(pin >> 5), (0xffff0000 ^ (0x10000 << (pin & 15))) | (value << (pin & 15))); else WR4(sc, ZY7_GPIO_MASK_DATA_LSW(pin >> 5), (0xffff0000 ^ (0x10000 << (pin & 15))) | (value << (pin & 15))); return (0); } /* Get a specific pin's input value. */ static int zy7_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *value) { struct zy7_gpio_softc *sc = device_get_softc(dev); if (!VALID_PIN(pin)) return (EINVAL); *value = (RD4(sc, ZY7_GPIO_DATA_RO(pin >> 5)) >> (pin & 31)) & 1; return (0); } /* Toggle a pin's output value. */ static int zy7_gpio_pin_toggle(device_t dev, uint32_t pin) { struct zy7_gpio_softc *sc = device_get_softc(dev); if (!VALID_PIN(pin)) return (EINVAL); ZGPIO_LOCK(sc); WR4(sc, ZY7_GPIO_DATA(pin >> 5), RD4(sc, ZY7_GPIO_DATA(pin >> 5)) ^ (1 << (pin & 31))); ZGPIO_UNLOCK(sc); return (0); } static int zy7_gpio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "xlnx,zy7_gpio")) return (ENXIO); device_set_desc(dev, "Zynq-7000 GPIO driver"); return (0); } static void zy7_gpio_hw_reset(struct zy7_gpio_softc *sc) { int i; for (i = 0; i < NUMBANKS; i++) { WR4(sc, ZY7_GPIO_DATA(i), 0); WR4(sc, ZY7_GPIO_DIRM(i), 0); WR4(sc, ZY7_GPIO_OEN(i), 0); WR4(sc, ZY7_GPIO_INT_DIS(i), 0xffffffff); WR4(sc, ZY7_GPIO_INT_POLARITY(i), 0); WR4(sc, ZY7_GPIO_INT_TYPE(i), i == 1 ? 0x003fffff : 0xffffffff); WR4(sc, ZY7_GPIO_INT_ANY(i), 0); WR4(sc, ZY7_GPIO_INT_STAT(i), 0xffffffff); } } static int zy7_gpio_detach(device_t dev); static int zy7_gpio_attach(device_t dev) { struct zy7_gpio_softc *sc = device_get_softc(dev); int rid; sc->dev = dev; ZGPIO_LOCK_INIT(sc); /* Allocate memory. */ rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "Can't allocate memory for device"); zy7_gpio_detach(dev); return (ENOMEM); } /* Completely reset. */ zy7_gpio_hw_reset(sc); device_add_child(dev, "gpioc", -1); device_add_child(dev, "gpiobus", -1); return (bus_generic_attach(dev)); } static int zy7_gpio_detach(device_t dev) { struct zy7_gpio_softc *sc = device_get_softc(dev); bus_generic_detach(dev); if (sc->mem_res != NULL) { /* Release memory resource. 
*/ bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem_res), sc->mem_res); } ZGPIO_LOCK_DESTROY(sc); return (0); } static device_method_t zy7_gpio_methods[] = { /* device_if */ DEVMETHOD(device_probe, zy7_gpio_probe), DEVMETHOD(device_attach, zy7_gpio_attach), DEVMETHOD(device_detach, zy7_gpio_detach), /* GPIO protocol */ DEVMETHOD(gpio_pin_max, zy7_gpio_pin_max), DEVMETHOD(gpio_pin_getname, zy7_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, zy7_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, zy7_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, zy7_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, zy7_gpio_pin_get), DEVMETHOD(gpio_pin_set, zy7_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, zy7_gpio_pin_toggle), DEVMETHOD_END }; static driver_t zy7_gpio_driver = { - "zy7_gpio", + "gpio", zy7_gpio_methods, sizeof(struct zy7_gpio_softc), }; static devclass_t zy7_gpio_devclass; -extern devclass_t gpiobus_devclass, gpioc_devclass; -extern driver_t gpiobus_driver, gpioc_driver; - DRIVER_MODULE(zy7_gpio, simplebus, zy7_gpio_driver, zy7_gpio_devclass, \ NULL, NULL); -DRIVER_MODULE(gpiobus, zy7_gpio, gpiobus_driver, gpiobus_devclass, 0, 0); -DRIVER_MODULE(gpioc, zy7_gpio, gpioc_driver, gpioc_devclass, 0, 0); Index: projects/clang360-import/sys/arm/xscale/ixp425/avila_gpio.c =================================================================== --- projects/clang360-import/sys/arm/xscale/ixp425/avila_gpio.c (revision 277895) +++ projects/clang360-import/sys/arm/xscale/ixp425/avila_gpio.c (revision 277896) @@ -1,358 +1,354 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2009, Luiz Otavio O Souza. * Copyright (c) 2010, Andrew Thompson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
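The write in zy7_gpio_pin_set() in the Zynq hunk above relies on the MASK_DATA registers: the upper halfword of the written word is a per-pin mask (a 1 bit leaves that pin untouched) and the lower halfword carries data, so clearing exactly one mask bit updates a single pin atomically. A standalone sketch of that word computation, with an illustrative pin and value:

#include <stdio.h>
#include <stdint.h>

/* Build a Zynq MASK_DATA word that touches only one pin of a halfword. */
static uint32_t
mask_data_word(unsigned int pin, unsigned int value)
{
	/* All mask bits set, except the target pin's (cleared by XOR). */
	return ((0xffff0000 ^ (0x10000 << (pin & 15))) |
	    (value << (pin & 15)));
}

int
main(void)
{
	/* Drive pin 7 of a bank's low halfword high: prints 0xff7f0080. */
	printf("0x%08x\n", mask_data_word(7, 1));
	return (0);
}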
*/ /* * GPIO driver for Gateworks Avila */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "gpio_if.h" #define GPIO_SET_BITS(sc, reg, bits) \ GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, (reg)) | (bits)) #define GPIO_CLEAR_BITS(sc, reg, bits) \ GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, (reg)) & ~(bits)) struct avila_gpio_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_gpio_ioh; uint32_t sc_valid; struct gpio_pin sc_pins[IXP4XX_GPIO_PINS]; }; struct avila_gpio_pin { const char *name; int pin; int caps; }; #define GPIO_PIN_IO (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT) static struct avila_gpio_pin avila_gpio_pins[] = { { "GPIO0", 0, GPIO_PIN_IO }, { "GPIO1", 1, GPIO_PIN_IO }, { "GPIO2", 2, GPIO_PIN_IO }, { "GPIO3", 3, GPIO_PIN_IO }, { "GPIO4", 4, GPIO_PIN_IO }, /* * The following pins are connected to system devices and should not * really be frobbed. */ #if 0 { "SER_ENA", 5, GPIO_PIN_IO }, { "I2C_SCL", 6, GPIO_PIN_IO }, { "I2C_SDA", 7, GPIO_PIN_IO }, { "PCI_INTD", 8, GPIO_PIN_IO }, { "PCI_INTC", 9, GPIO_PIN_IO }, { "PCI_INTB", 10, GPIO_PIN_IO }, { "PCI_INTA", 11, GPIO_PIN_IO }, { "ATA_INT", 12, GPIO_PIN_IO }, { "PCI_RST", 13, GPIO_PIN_IO }, { "PCI_CLK", 14, GPIO_PIN_OUTPUT }, { "EX_CLK", 15, GPIO_PIN_OUTPUT }, #endif }; #undef GPIO_PIN_IO /* * Helpers */ static void avila_gpio_pin_configure(struct avila_gpio_softc *sc, struct gpio_pin *pin, uint32_t flags); static int avila_gpio_pin_flags(struct avila_gpio_softc *sc, uint32_t pin); /* * Driver stuff */ static int avila_gpio_probe(device_t dev); static int avila_gpio_attach(device_t dev); static int avila_gpio_detach(device_t dev); /* * GPIO interface */ static int avila_gpio_pin_max(device_t dev, int *maxpin); static int avila_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps); static int avila_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags); static int avila_gpio_pin_getname(device_t dev, uint32_t pin, char *name); static int avila_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags); static int avila_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value); static int avila_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val); static int avila_gpio_pin_toggle(device_t dev, uint32_t pin); static int avila_gpio_pin_flags(struct avila_gpio_softc *sc, uint32_t pin) { uint32_t v; v = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR) & (1 << pin); return (v ?
GPIO_PIN_INPUT : GPIO_PIN_OUTPUT); } static void avila_gpio_pin_configure(struct avila_gpio_softc *sc, struct gpio_pin *pin, unsigned int flags) { uint32_t mask; mask = 1 << pin->gp_pin; /* * Manage input/output */ if (flags & (GPIO_PIN_INPUT|GPIO_PIN_OUTPUT)) { IXP4XX_GPIO_LOCK(); pin->gp_flags &= ~(GPIO_PIN_INPUT|GPIO_PIN_OUTPUT); if (flags & GPIO_PIN_OUTPUT) { pin->gp_flags |= GPIO_PIN_OUTPUT; GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOER, mask); } else { pin->gp_flags |= GPIO_PIN_INPUT; GPIO_SET_BITS(sc, IXP425_GPIO_GPOER, mask); } IXP4XX_GPIO_UNLOCK(); } } static int avila_gpio_pin_max(device_t dev, int *maxpin) { *maxpin = IXP4XX_GPIO_PINS - 1; return (0); } static int avila_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct avila_gpio_softc *sc = device_get_softc(dev); if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin))) return (EINVAL); *caps = sc->sc_pins[pin].gp_caps; return (0); } static int avila_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct avila_gpio_softc *sc = device_get_softc(dev); if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin))) return (EINVAL); IXP4XX_GPIO_LOCK(); /* refresh since we do not own all the pins */ sc->sc_pins[pin].gp_flags = avila_gpio_pin_flags(sc, pin); *flags = sc->sc_pins[pin].gp_flags; IXP4XX_GPIO_UNLOCK(); return (0); } static int avila_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct avila_gpio_softc *sc = device_get_softc(dev); if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin))) return (EINVAL); memcpy(name, sc->sc_pins[pin].gp_name, GPIOMAXNAME); return (0); } static int avila_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct avila_gpio_softc *sc = device_get_softc(dev); uint32_t mask = 1 << pin; if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & mask)) return (EINVAL); avila_gpio_pin_configure(sc, &sc->sc_pins[pin], flags); return (0); } static int avila_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct avila_gpio_softc *sc = device_get_softc(dev); uint32_t mask = 1 << pin; if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & mask)) return (EINVAL); IXP4XX_GPIO_LOCK(); if (value) GPIO_SET_BITS(sc, IXP425_GPIO_GPOUTR, mask); else GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOUTR, mask); IXP4XX_GPIO_UNLOCK(); return (0); } static int avila_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct avila_gpio_softc *sc = device_get_softc(dev); if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & (1 << pin))) return (EINVAL); IXP4XX_GPIO_LOCK(); *val = (GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR) & (1 << pin)) ? 
1 : 0; IXP4XX_GPIO_UNLOCK(); return (0); } static int avila_gpio_pin_toggle(device_t dev, uint32_t pin) { struct avila_gpio_softc *sc = device_get_softc(dev); uint32_t mask = 1 << pin; int res; if (pin >= IXP4XX_GPIO_PINS || !(sc->sc_valid & mask)) return (EINVAL); IXP4XX_GPIO_LOCK(); res = GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR) & mask; if (res) GPIO_CLEAR_BITS(sc, IXP425_GPIO_GPOUTR, mask); else GPIO_SET_BITS(sc, IXP425_GPIO_GPOUTR, mask); IXP4XX_GPIO_UNLOCK(); return (0); } static int avila_gpio_probe(device_t dev) { device_set_desc(dev, "Gateworks Avila GPIO driver"); return (0); } static int avila_gpio_attach(device_t dev) { #define N(a) (sizeof(a) / sizeof(a[0])) struct avila_gpio_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); int i; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; sc->sc_gpio_ioh = sa->sc_gpio_ioh; for (i = 0; i < N(avila_gpio_pins); i++) { struct avila_gpio_pin *p = &avila_gpio_pins[i]; strncpy(sc->sc_pins[p->pin].gp_name, p->name, GPIOMAXNAME); sc->sc_pins[p->pin].gp_pin = p->pin; sc->sc_pins[p->pin].gp_caps = p->caps; sc->sc_pins[p->pin].gp_flags = avila_gpio_pin_flags(sc, p->pin); sc->sc_valid |= 1 << p->pin; } device_add_child(dev, "gpioc", -1); device_add_child(dev, "gpiobus", -1); return (bus_generic_attach(dev)); #undef N } static int avila_gpio_detach(device_t dev) { bus_generic_detach(dev); return(0); } static device_method_t gpio_avila_methods[] = { DEVMETHOD(device_probe, avila_gpio_probe), DEVMETHOD(device_attach, avila_gpio_attach), DEVMETHOD(device_detach, avila_gpio_detach), /* GPIO protocol */ DEVMETHOD(gpio_pin_max, avila_gpio_pin_max), DEVMETHOD(gpio_pin_getname, avila_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, avila_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, avila_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, avila_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, avila_gpio_pin_get), DEVMETHOD(gpio_pin_set, avila_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, avila_gpio_pin_toggle), {0, 0}, }; static driver_t gpio_avila_driver = { - "gpio_avila", + "gpio", gpio_avila_methods, sizeof(struct avila_gpio_softc), }; static devclass_t gpio_avila_devclass; -extern devclass_t gpiobus_devclass, gpioc_devclass; -extern driver_t gpiobus_driver, gpioc_driver; DRIVER_MODULE(gpio_avila, ixp, gpio_avila_driver, gpio_avila_devclass, 0, 0); -DRIVER_MODULE(gpiobus, gpio_avila, gpiobus_driver, gpiobus_devclass, 0, 0); -DRIVER_MODULE(gpioc, gpio_avila, gpioc_driver, gpioc_devclass, 0, 0); MODULE_VERSION(gpio_avila, 1); Index: projects/clang360-import/sys/arm/xscale/ixp425/cambria_gpio.c =================================================================== --- projects/clang360-import/sys/arm/xscale/ixp425/cambria_gpio.c (revision 277895) +++ projects/clang360-import/sys/arm/xscale/ixp425/cambria_gpio.c (revision 277896) @@ -1,491 +1,487 @@ /*- * Copyright (c) 2010, Andrew Thompson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */
/* * GPIO driver for Gateworks Cambria * * Note: * The Cambria PLD does not set the i2c ack bit after each write; if we used the * regular iicbus interface it would abort the xfer after the address byte * times out and not write our latch. To get around this we grab the iicbus and * then do our own bit banging. This is a compromise to avoid changing all the iicbb * device methods to allow a flag to be passed down, and is similar to how Linux * does it. * */
#include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iicbb_if.h" #include "gpio_if.h" #define IIC_M_WR 0 /* write operation */ #define PLD_ADDR 0xac /* slave address */ #define I2C_DELAY 10 #define GPIO_CONF_CLR(sc, reg, mask) \ GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) &~ (mask)) #define GPIO_CONF_SET(sc, reg, mask) \ GPIO_CONF_WRITE_4(sc, reg, GPIO_CONF_READ_4(sc, reg) | (mask)) #define GPIO_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define GPIO_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define GPIO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED) #define GPIO_PINS 5 struct cambria_gpio_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_gpio_ioh; struct mtx sc_mtx; struct gpio_pin sc_pins[GPIO_PINS]; uint8_t sc_latch; uint8_t sc_val; }; struct cambria_gpio_pin { const char *name; int pin; int flags; }; extern struct ixp425_softc *ixp425_softc; static struct cambria_gpio_pin cambria_gpio_pins[GPIO_PINS] = { { "PLD0", 0, GPIO_PIN_OUTPUT }, { "PLD1", 1, GPIO_PIN_OUTPUT }, { "PLD2", 2, GPIO_PIN_OUTPUT }, { "PLD3", 3, GPIO_PIN_OUTPUT }, { "PLD4", 4, GPIO_PIN_OUTPUT }, }; /* * Helpers */ static int cambria_gpio_read(struct cambria_gpio_softc *, uint32_t, unsigned int *); static int cambria_gpio_write(struct cambria_gpio_softc *); /* * Driver stuff */ static int cambria_gpio_probe(device_t dev); static int cambria_gpio_attach(device_t dev); static int cambria_gpio_detach(device_t dev); /* * GPIO interface */ static int cambria_gpio_pin_max(device_t dev, int *maxpin); static int cambria_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps); static int cambria_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags); static int cambria_gpio_pin_getname(device_t dev, uint32_t pin, char *name); static int cambria_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags); static int cambria_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value); static int cambria_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val); static int cambria_gpio_pin_toggle(device_t dev, uint32_t pin); static int i2c_getsda(struct cambria_gpio_softc *sc) { uint32_t reg; IXP4XX_GPIO_LOCK(); GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); reg =
GPIO_CONF_READ_4(sc, IXP425_GPIO_GPINR); IXP4XX_GPIO_UNLOCK(); return (reg & GPIO_I2C_SDA_BIT); }
static void i2c_setsda(struct cambria_gpio_softc *sc, int val) { IXP4XX_GPIO_LOCK(); GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SDA_BIT); if (val) GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); else GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SDA_BIT); IXP4XX_GPIO_UNLOCK(); DELAY(I2C_DELAY); }
static void i2c_setscl(struct cambria_gpio_softc *sc, int val) { IXP4XX_GPIO_LOCK(); GPIO_CONF_CLR(sc, IXP425_GPIO_GPOUTR, GPIO_I2C_SCL_BIT); if (val) GPIO_CONF_SET(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT); else GPIO_CONF_CLR(sc, IXP425_GPIO_GPOER, GPIO_I2C_SCL_BIT); IXP4XX_GPIO_UNLOCK(); DELAY(I2C_DELAY); }
static void i2c_sendstart(struct cambria_gpio_softc *sc) { i2c_setsda(sc, 1); i2c_setscl(sc, 1); i2c_setsda(sc, 0); i2c_setscl(sc, 0); }
static void i2c_sendstop(struct cambria_gpio_softc *sc) { i2c_setscl(sc, 1); i2c_setsda(sc, 1); i2c_setscl(sc, 0); i2c_setsda(sc, 0); }
static void i2c_sendbyte(struct cambria_gpio_softc *sc, u_char data) { int i; for (i=7; i>=0; i--) { i2c_setsda(sc, data & (1<<i)); i2c_setscl(sc, 1); i2c_setscl(sc, 0); } i2c_setsda(sc, 1); i2c_setscl(sc, 1); i2c_setscl(sc, 0); }
static u_char i2c_readbyte(struct cambria_gpio_softc *sc) { int i; u_char data = 0; for (i=7; i>=0; i--) { i2c_setscl(sc, 1); if (i2c_getsda(sc)) data |= (1<<i); i2c_setscl(sc, 0); } return (data); }
static int cambria_gpio_read(struct cambria_gpio_softc *sc, uint32_t pin, unsigned int *val) { device_t dev = sc->sc_dev; int error; error = iicbus_request_bus(device_get_parent(dev), dev, IIC_DONTWAIT); if (error) return (error); i2c_sendstart(sc); i2c_sendbyte(sc, PLD_ADDR | LSB); *val = (i2c_readbyte(sc) & (1 << pin)) != 0; i2c_sendstop(sc); iicbus_release_bus(device_get_parent(dev), dev); return (0); }
static int cambria_gpio_write(struct cambria_gpio_softc *sc) { device_t dev = sc->sc_dev; int error; error = iicbus_request_bus(device_get_parent(dev), dev, IIC_DONTWAIT); if (error) return (error); i2c_sendstart(sc); i2c_sendbyte(sc, PLD_ADDR & ~LSB); i2c_sendbyte(sc, sc->sc_latch); i2c_sendstop(sc); iicbus_release_bus(device_get_parent(dev), dev); return (0); }
static int cambria_gpio_pin_max(device_t dev, int *maxpin) { *maxpin = GPIO_PINS - 1; return (0); } static int cambria_gpio_pin_getcaps(device_t dev, uint32_t pin, uint32_t *caps) { struct cambria_gpio_softc *sc = device_get_softc(dev); if (pin >= GPIO_PINS) return (EINVAL); *caps = sc->sc_pins[pin].gp_caps; return (0); } static int cambria_gpio_pin_getflags(device_t dev, uint32_t pin, uint32_t *flags) { struct cambria_gpio_softc *sc = device_get_softc(dev); if (pin >= GPIO_PINS) return (EINVAL); *flags = sc->sc_pins[pin].gp_flags; return (0); } static int cambria_gpio_pin_getname(device_t dev, uint32_t pin, char *name) { struct cambria_gpio_softc *sc = device_get_softc(dev); if (pin >= GPIO_PINS) return (EINVAL); memcpy(name, sc->sc_pins[pin].gp_name, GPIOMAXNAME); return (0); } static int cambria_gpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags) { struct cambria_gpio_softc *sc = device_get_softc(dev); int error; uint8_t mask; mask = 1 << pin; if (pin >= GPIO_PINS) return (EINVAL); GPIO_LOCK(sc); sc->sc_pins[pin].gp_flags = flags; /* * Writing a logical one sets the signal high and writing a logical * zero sets the signal low. To configure a digital I/O signal as an * input, a logical one must first be written to the data bit to * three-state the associated output.
*/ if (flags & GPIO_PIN_INPUT || sc->sc_val & mask) sc->sc_latch |= mask; /* input or output & high */ else sc->sc_latch &= ~mask; error = cambria_gpio_write(sc); GPIO_UNLOCK(sc); return (error); } static int cambria_gpio_pin_set(device_t dev, uint32_t pin, unsigned int value) { struct cambria_gpio_softc *sc = device_get_softc(dev); int error; uint8_t mask; mask = 1 << pin; if (pin >= GPIO_PINS) return (EINVAL); GPIO_LOCK(sc); if (value) sc->sc_val |= mask; else sc->sc_val &= ~mask; if (sc->sc_pins[pin].gp_flags != GPIO_PIN_OUTPUT) { /* just save, altering the latch will disable input */ GPIO_UNLOCK(sc); return (0); } if (value) sc->sc_latch |= mask; else sc->sc_latch &= ~mask; error = cambria_gpio_write(sc); GPIO_UNLOCK(sc); return (error); } static int cambria_gpio_pin_get(device_t dev, uint32_t pin, unsigned int *val) { struct cambria_gpio_softc *sc = device_get_softc(dev); int error = 0; if (pin >= GPIO_PINS) return (EINVAL); GPIO_LOCK(sc); if (sc->sc_pins[pin].gp_flags == GPIO_PIN_OUTPUT) *val = (sc->sc_latch & (1 << pin)) ? 1 : 0; else error = cambria_gpio_read(sc, pin, val); GPIO_UNLOCK(sc); return (error); } static int cambria_gpio_pin_toggle(device_t dev, uint32_t pin) { struct cambria_gpio_softc *sc = device_get_softc(dev); int error = 0; if (pin >= GPIO_PINS) return (EINVAL); GPIO_LOCK(sc); sc->sc_val ^= (1 << pin); if (sc->sc_pins[pin].gp_flags == GPIO_PIN_OUTPUT) { sc->sc_latch ^= (1 << pin); error = cambria_gpio_write(sc); } GPIO_UNLOCK(sc); return (error); } static int cambria_gpio_probe(device_t dev) { device_set_desc(dev, "Gateworks Cambria GPIO driver"); return (0); } static int cambria_gpio_attach(device_t dev) { struct cambria_gpio_softc *sc = device_get_softc(dev); int pin; sc->sc_dev = dev; sc->sc_iot = ixp425_softc->sc_iot; sc->sc_gpio_ioh = ixp425_softc->sc_gpio_ioh; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); for (pin = 0; pin < GPIO_PINS; pin++) { struct cambria_gpio_pin *p = &cambria_gpio_pins[pin]; strncpy(sc->sc_pins[pin].gp_name, p->name, GPIOMAXNAME); sc->sc_pins[pin].gp_pin = pin; sc->sc_pins[pin].gp_caps = GPIO_PIN_INPUT|GPIO_PIN_OUTPUT; sc->sc_pins[pin].gp_flags = 0; cambria_gpio_pin_setflags(dev, pin, p->flags); } device_add_child(dev, "gpioc", -1); device_add_child(dev, "gpiobus", -1); return (bus_generic_attach(dev)); } static int cambria_gpio_detach(device_t dev) { struct cambria_gpio_softc *sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sc_mtx), ("gpio mutex not initialized")); bus_generic_detach(dev); mtx_destroy(&sc->sc_mtx); return(0); } static device_method_t cambria_gpio_methods[] = { DEVMETHOD(device_probe, cambria_gpio_probe), DEVMETHOD(device_attach, cambria_gpio_attach), DEVMETHOD(device_detach, cambria_gpio_detach), /* GPIO protocol */ DEVMETHOD(gpio_pin_max, cambria_gpio_pin_max), DEVMETHOD(gpio_pin_getname, cambria_gpio_pin_getname), DEVMETHOD(gpio_pin_getflags, cambria_gpio_pin_getflags), DEVMETHOD(gpio_pin_getcaps, cambria_gpio_pin_getcaps), DEVMETHOD(gpio_pin_setflags, cambria_gpio_pin_setflags), DEVMETHOD(gpio_pin_get, cambria_gpio_pin_get), DEVMETHOD(gpio_pin_set, cambria_gpio_pin_set), DEVMETHOD(gpio_pin_toggle, cambria_gpio_pin_toggle), {0, 0}, }; static driver_t cambria_gpio_driver = { - "gpio_cambria", + "gpio", cambria_gpio_methods, sizeof(struct cambria_gpio_softc), }; static devclass_t cambria_gpio_devclass; -extern devclass_t gpiobus_devclass, gpioc_devclass; -extern driver_t gpiobus_driver, gpioc_driver; DRIVER_MODULE(gpio_cambria, iicbus, cambria_gpio_driver, cambria_gpio_devclass, 0, 0); 
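The latch manipulation in cambria_gpio_pin_setflags(), cambria_gpio_pin_set() and cambria_gpio_pin_toggle() above reduces to one rule: an input pin must have its latch bit written as 1 (three-stating the PLD's output driver, per the comment in pin_setflags), while an output pin's latch bit mirrors the requested level cached in sc_val. A minimal self-contained sketch of that rule follows; pld_shadow and pld_update_latch are hypothetical names invented for illustration, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define GPIO_PIN_INPUT	0x0001
#define GPIO_PIN_OUTPUT	0x0002

/* Hypothetical stand-in for the sc_latch/sc_val pair kept in the softc. */
struct pld_shadow {
	uint8_t latch;	/* byte last written to the PLD over i2c */
	uint8_t val;	/* output levels requested by callers */
};

/*
 * Recompute one latch bit after a flags or value change: inputs are
 * three-stated (bit forced to 1); outputs mirror the requested value.
 */
static void
pld_update_latch(struct pld_shadow *sh, int pin, int flags)
{
	uint8_t mask = 1 << pin;

	if ((flags & GPIO_PIN_INPUT) || (sh->val & mask))
		sh->latch |= mask;	/* input, or output driven high */
	else
		sh->latch &= ~mask;	/* output driven low */
}

int
main(void)
{
	struct pld_shadow sh = { 0, 0 };

	sh.val |= 1 << 2;			/* request PLD2 high */
	pld_update_latch(&sh, 2, GPIO_PIN_OUTPUT);
	pld_update_latch(&sh, 3, GPIO_PIN_INPUT);	/* PLD3 as input */
	printf("latch = 0x%02x\n", sh.latch);		/* prints latch = 0x0c */
	return (0);
}

The same rule explains why cambria_gpio_pin_get() answers from sc_latch for output pins but performs an i2c read for inputs: an output's level is already known from the shadow byte, while an input's level exists only on the wire.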
-DRIVER_MODULE(gpiobus, gpio_cambria, gpiobus_driver, gpiobus_devclass, 0, 0); -DRIVER_MODULE(gpioc, gpio_cambria, gpioc_driver, gpioc_devclass, 0, 0); MODULE_VERSION(gpio_cambria, 1); MODULE_DEPEND(gpio_cambria, iicbus, 1, 1, 1); Index: projects/clang360-import/sys/dev/sfxge/common/efsys.h =================================================================== --- projects/clang360-import/sys/dev/sfxge/common/efsys.h (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/common/efsys.h (revision 277896) @@ -1,833 +1,832 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _SYS_EFSYS_H #define _SYS_EFSYS_H #ifdef __cplusplus extern "C" { #endif #include #include #include #include #include #include #include #include #include #include #include #include #define EFSYS_HAS_UINT64 1 #define EFSYS_USE_UINT64 0 #if _BYTE_ORDER == _BIG_ENDIAN #define EFSYS_IS_BIG_ENDIAN 1 #define EFSYS_IS_LITTLE_ENDIAN 0 #elif _BYTE_ORDER == _LITTLE_ENDIAN #define EFSYS_IS_BIG_ENDIAN 0 #define EFSYS_IS_LITTLE_ENDIAN 1 #endif #include "efx_types.h" /* Common code requires this */ #if __FreeBSD_version < 800068 #define memmove(d, s, l) bcopy(s, d, l) #endif /* FreeBSD equivalents of Solaris things */ #ifndef _NOTE #define _NOTE(s) #endif #ifndef B_FALSE #define B_FALSE FALSE #endif #ifndef B_TRUE #define B_TRUE TRUE #endif #ifndef IS_P2ALIGNED #define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0) #endif #ifndef P2ROUNDUP #define P2ROUNDUP(x, align) (-(-(x) & -(align))) #endif #ifndef ISP2 #define ISP2(x) (((x) & ((x) - 1)) == 0) #endif #define ENOTACTIVE EINVAL /* Memory type to use on FreeBSD */ MALLOC_DECLARE(M_SFXGE); /* Machine dependent prefetch wrappers */ #if defined(__i386__) || defined(__amd64__) static __inline void prefetch_read_many(void *addr) { __asm__( "prefetcht0 (%0)" : : "r" (addr)); } static __inline void prefetch_read_once(void *addr) { __asm__( "prefetchnta (%0)" : : "r" (addr)); } #elif defined(__sparc64__) static __inline void prefetch_read_many(void *addr) { __asm__( "prefetch [%0], 0" : : "r" (addr)); } static __inline void prefetch_read_once(void *addr) { __asm__( "prefetch [%0], 1" : : "r" (addr)); } #else static __inline void prefetch_read_many(void *addr) { } static __inline void prefetch_read_once(void *addr) { } #endif #if defined(__i386__) || defined(__amd64__) #include #include #endif static __inline void sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m, bus_dma_segment_t *seg) { #if defined(__i386__) || defined(__amd64__) seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t)); seg->ds_len = m->m_len; #else int nsegstmp; bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0); #endif } /* Modifiers used for DOS builds */ #define __cs #define __far /* Modifiers used for Windows builds */ #define __in #define __in_opt #define __in_ecount(_n) #define __in_ecount_opt(_n) #define __in_bcount(_n) #define __in_bcount_opt(_n) #define __out #define __out_opt #define __out_ecount(_n) #define __out_ecount_opt(_n) #define __out_bcount(_n) #define __out_bcount_opt(_n) #define __deref_out #define __inout #define __inout_opt #define __inout_ecount(_n) #define __inout_ecount_opt(_n) #define __inout_bcount(_n) #define __inout_bcount_opt(_n) #define __inout_bcount_full_opt(_n) #define __deref_out_bcount_opt(n) #define __checkReturn #define __drv_when(_p, _c) /* Code inclusion options */ #define EFSYS_OPT_NAMES 1 #define EFSYS_OPT_FALCON 0 #define EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0 #define EFSYS_OPT_SIENA 1 #ifdef DEBUG #define EFSYS_OPT_CHECK_REG 1 #else #define EFSYS_OPT_CHECK_REG 0 #endif #define EFSYS_OPT_MCDI 1 #define EFSYS_OPT_MAC_FALCON_GMAC 0 #define EFSYS_OPT_MAC_FALCON_XMAC 0 #define EFSYS_OPT_MAC_STATS 1 #define EFSYS_OPT_LOOPBACK 0 #define EFSYS_OPT_MON_NULL 0 #define EFSYS_OPT_MON_LM87 0 #define EFSYS_OPT_MON_MAX6647 0 #define EFSYS_OPT_MON_SIENA 0 #define EFSYS_OPT_MON_STATS 0 #define EFSYS_OPT_PHY_NULL 0 #define EFSYS_OPT_PHY_QT2022C2 0 #define EFSYS_OPT_PHY_SFX7101 0 #define EFSYS_OPT_PHY_TXC43128 0 #define EFSYS_OPT_PHY_PM8358 0 #define EFSYS_OPT_PHY_SFT9001 0 #define
EFSYS_OPT_PHY_QT2025C 0 #define EFSYS_OPT_PHY_STATS 1 #define EFSYS_OPT_PHY_PROPS 0 #define EFSYS_OPT_PHY_BIST 1 #define EFSYS_OPT_PHY_LED_CONTROL 1 #define EFSYS_OPT_PHY_FLAGS 0 #define EFSYS_OPT_VPD 1 #define EFSYS_OPT_NVRAM 1 #define EFSYS_OPT_NVRAM_FALCON_BOOTROM 0 #define EFSYS_OPT_NVRAM_SFT9001 0 #define EFSYS_OPT_NVRAM_SFX7101 0 #define EFSYS_OPT_BOOTCFG 0 #define EFSYS_OPT_PCIE_TUNE 0 #define EFSYS_OPT_DIAG 0 #define EFSYS_OPT_WOL 1 #define EFSYS_OPT_RX_SCALE 1 #define EFSYS_OPT_QSTATS 1 #define EFSYS_OPT_FILTER 0 #define EFSYS_OPT_RX_SCATTER 0 #define EFSYS_OPT_RX_HDR_SPLIT 0 #define EFSYS_OPT_EV_PREFETCH 0 #define EFSYS_OPT_DECODE_INTR_FATAL 1 /* ID */ typedef struct __efsys_identifier_s efsys_identifier_t; /* PROBE */ #ifndef DTRACE_PROBE #define EFSYS_PROBE(_name) #define EFSYS_PROBE1(_name, _type1, _arg1) #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) #else /* DTRACE_PROBE */ #define EFSYS_PROBE(_name) \ DTRACE_PROBE(_name) #define EFSYS_PROBE1(_name, _type1, _arg1) \ DTRACE_PROBE1(_name, _type1, _arg1) #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \ DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2) #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) \ DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3) #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) \ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #ifdef DTRACE_PROBE5 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) \ DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #else #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) \ DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4) #endif #ifdef DTRACE_PROBE6 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) \ DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #else #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) \ EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5) #endif #ifdef DTRACE_PROBE7 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) \ DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) #else #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6, _type7, _arg7) \ EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \ _type3, _arg3, _type4, _arg4, _type5, _arg5, \ _type6, _arg6) #endif #endif /* DTRACE_PROBE */ /* DMA */ typedef uint64_t efsys_dma_addr_t; 
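The efsys_mem_t accessors defined just below (EFSYS_MEM_READQ, EFSYS_MEM_WRITEQ and their dword/oword siblings) move descriptors to and from DMA memory as sequences of aligned 32-bit loads and stores, wrapped in the do { ... } while (B_FALSE) idiom so each macro expands to a single C statement. The following stand-alone sketch shows the same access pattern; qword_t and mem_t are simplified stand-ins for efx_qword_t and efsys_mem_t, so treat it as an illustration of the shape of EFSYS_MEM_READQ rather than the real macro.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for efx_qword_t and efsys_mem_t. */
typedef union { uint32_t eq_u32[2]; } qword_t;
typedef struct { char *esm_base; } mem_t;

/*
 * Read one 64-bit descriptor as two aligned 32-bit loads, mirroring
 * EFSYS_MEM_READQ; the offset must be qword aligned, as the KASSERT
 * in the real macro enforces.
 */
static void
mem_readq(const mem_t *mp, size_t offset, qword_t *qp)
{
	uint32_t *addr = (uint32_t *)(mp->esm_base + offset);

	qp->eq_u32[0] = *addr++;
	qp->eq_u32[1] = *addr;
}

int
main(void)
{
	uint32_t ring[4] = { 0x55667788, 0x11223344, 0, 0 };
	mem_t m = { (char *)ring };
	qword_t q;

	mem_readq(&m, 0, &q);
	printf("dw1=%08x dw0=%08x\n", q.eq_u32[1], q.eq_u32[0]);
	return (0);
}

Working in 32-bit halves keeps the common code independent of native 64-bit support (note EFSYS_USE_UINT64 is 0 above) and lets the EFSYS_PROBE* hooks trace each half as it is read or written.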
typedef struct efsys_mem_s { bus_dma_tag_t esm_tag; bus_dmamap_t esm_map; caddr_t esm_base; efsys_dma_addr_t esm_addr; - size_t esm_size; } efsys_mem_t; #define EFSYS_MEM_ZERO(_esmp, _size) \ do { \ (void) memset((_esmp)->esm_base, 0, (_size)); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_READD(_esmp, _offset, _edp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_edp)->ed_u32[0] = *addr; \ \ EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eqp)->eq_u32[0] = *addr++; \ (_eqp)->eq_u32[1] = *addr; \ \ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_READO(_esmp, _offset, _eop) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ (_eop)->eo_u32[0] = *addr++; \ (_eop)->eo_u32[1] = *addr++; \ (_eop)->eo_u32[2] = *addr++; \ (_eop)->eo_u32[3] = *addr; \ \ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr = (_edp)->ed_u32[0]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr++ = (_eqp)->eq_u32[0]; \ *addr = (_eqp)->eq_u32[1]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \ do { \ uint32_t *addr; \ \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ addr = (void *)((_esmp)->esm_base + (_offset)); \ \ *addr++ = (_eop)->eo_u32[0]; \ *addr++ = (_eop)->eo_u32[1]; \ *addr++ = (_eop)->eo_u32[2]; \ *addr = (_eop)->eo_u32[3]; \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_MEM_ADDR(_esmp) \ ((_esmp)->esm_addr) /* BAR */ typedef struct efsys_bar_s { struct mtx esb_lock; bus_space_tag_t esb_tag; bus_space_handle_t esb_handle; int esb_rid; struct resource *esb_res; } efsys_bar_t; #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, 
sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_lock(&((_esbp)->esb_lock)); \ \ (_edp)->ed_u32[0] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset)); \ \ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ mtx_lock(&((_esbp)->esb_lock)); \ \ (_eqp)->eq_u32[0] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset)); \ (_eqp)->eq_u32[1] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset+4)); \ \ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_lock(&((_esbp)->esb_lock)); \ \ (_eop)->eo_u32[0] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset)); \ (_eop)->eo_u32[1] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset+4)); \ (_eop)->eo_u32[2] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset+8)); \ (_eop)->eo_u32[3] = bus_space_read_4((_esbp)->esb_tag, \ (_esbp)->esb_handle, (_offset+12)); \ \ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_lock(&((_esbp)->esb_lock)); \ \ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \ uint32_t, (_edp)->ed_u32[0]); \ \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), (_edp)->ed_u32[0]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \ ("not power of 2 aligned")); \ \ mtx_lock(&((_esbp)->esb_lock)); \ \ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \ uint32_t, (_eqp)->eq_u32[1], \ uint32_t, (_eqp)->eq_u32[0]); \ \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), (_eqp)->eq_u32[0]); \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset+4), (_eqp)->eq_u32[1]); \ \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \ do { \ _NOTE(CONSTANTCONDITION) \ KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \ ("not power of 2 aligned")); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_lock(&((_esbp)->esb_lock)); \ \ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \ uint32_t, (_eop)->eo_u32[3], \ uint32_t, (_eop)->eo_u32[2], \ uint32_t, (_eop)->eo_u32[1], \ uint32_t, (_eop)->eo_u32[0]); \ \ 
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset), (_eop)->eo_u32[0]); \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset+4), (_eop)->eo_u32[1]); \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset+8), (_eop)->eo_u32[2]); \ bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\ (_offset+12), (_eop)->eo_u32[3]); \ \ _NOTE(CONSTANTCONDITION) \ if (_lock) \ mtx_unlock(&((_esbp)->esb_lock)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* SPIN */ #define EFSYS_SPIN(_us) \ do { \ DELAY(_us); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_SLEEP EFSYS_SPIN /* BARRIERS */ /* Strict ordering guaranteed by devacc.devacc_attr_dataorder */ #define EFSYS_MEM_READ_BARRIER() #define EFSYS_PIO_WRITE_BARRIER() /* TIMESTAMP */ typedef clock_t efsys_timestamp_t; #define EFSYS_TIMESTAMP(_usp) \ do { \ clock_t now; \ \ now = ticks; \ *(_usp) = now * hz / 1000000; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* KMEM */ #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \ do { \ (_esip) = (_esip); \ (_p) = malloc((_size), M_SFXGE, M_WAITOK|M_ZERO); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_KMEM_FREE(_esip, _size, _p) \ do { \ (void) (_esip); \ (void) (_size); \ free((_p), M_SFXGE); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* LOCK */ typedef struct mtx efsys_lock_t; #define EFSYS_LOCK_MAGIC 0x000010c4 #define EFSYS_LOCK(_lockp, _state) \ do { \ mtx_lock(_lockp); \ (_state) = EFSYS_LOCK_MAGIC; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_UNLOCK(_lockp, _state) \ do { \ if ((_state) != EFSYS_LOCK_MAGIC) \ KASSERT(B_FALSE, ("not locked")); \ mtx_unlock(_lockp); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* PREEMPT */ #define EFSYS_PREEMPT_DISABLE(_state) \ do { \ (_state) = (_state); \ critical_enter(); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_PREEMPT_ENABLE(_state) \ do { \ (_state) = (_state); \ critical_exit(_state); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* STAT */ typedef uint64_t efsys_stat_t; #define EFSYS_STAT_INCR(_knp, _delta) \ do { \ *(_knp) += (_delta); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_DECR(_knp, _delta) \ do { \ *(_knp) -= (_delta); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET(_knp, _val) \ do { \ *(_knp) = (_val); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET_QWORD(_knp, _valp) \ do { \ *(_knp) = le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SET_DWORD(_knp, _valp) \ do { \ *(_knp) = le32toh((_valp)->ed_u32[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \ do { \ *(_knp) += le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \ do { \ *(_knp) -= le64toh((_valp)->eq_u64[0]); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) /* ERR */ extern void sfxge_err(efsys_identifier_t *, unsigned int, uint32_t, uint32_t); #if EFSYS_OPT_DECODE_INTR_FATAL #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \ do { \ sfxge_err((_esip), (_code), (_dword0), (_dword1)); \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #endif /* ASSERT */ #define EFSYS_ASSERT(_exp) do { \ if (!(_exp)) \ panic(#_exp); \ } while (0) #define EFSYS_ASSERT3(_x, _op, _y, _t) do { \ const _t __x = (_t)(_x); \ const _t __y = (_t)(_y); \ if (!(__x _op __y)) \ panic("assertion failed at %s:%u", __FILE__, __LINE__); \ } while(0) #define EFSYS_ASSERT3U(_x, _op, _y) 
EFSYS_ASSERT3(_x, _op, _y, uint64_t) #define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t) #define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t) #ifdef __cplusplus } #endif #endif /* _SYS_EFSYS_H */ Index: projects/clang360-import/sys/dev/sfxge/common/efx_ev.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/common/efx_ev.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/common/efx_ev.c (revision 277896) @@ -1,1115 +1,1117 @@ /*- * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_QSTATS #define EFX_EV_QSTAT_INCR(_eep, _stat) \ do { \ (_eep)->ee_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_EV_QSTAT_INCR(_eep, _stat) #endif __checkReturn int efx_ev_init( __in efx_nic_t *enp) { efx_oword_t oword; int rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); if (enp->en_mod_flags & EFX_MOD_EV) { rc = EINVAL; goto fail1; } EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); /* * Program the event queue for receive and transmit queue * flush events. */ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); enp->en_mod_flags |= EFX_MOD_EV; return (0); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } static __checkReturn boolean_t efx_ev_rx_not_ok( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in uint32_t label, __in uint32_t id, __inout uint16_t *flagsp) { boolean_t ignore = B_FALSE; if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); EFSYS_PROBE(tobe_disc); /* Assume this is a unicast address mismatch, unless below * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or * EV_RX_PAUSE_FRM_ERR is set. 
*/ (*flagsp) |= EFX_ADDR_MISMATCH; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); (*flagsp) |= EFX_DISCARD; #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) /* Lookout for payload queue ran dry errors and ignore them. * * Sadly for the header/data split cases, the descriptor * pointer in this event refers to the header queue and * therefore cannot be easily detected as duplicate. * So we drop these and rely on the receive processing seeing * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard * the partially received packet. */ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) ignore = B_TRUE; #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); EFSYS_PROBE(crc_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); EFSYS_PROBE(pause_frm_err); (*flagsp) &= ~EFX_ADDR_MISMATCH; (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); EFSYS_PROBE(owner_id_err); (*flagsp) |= EFX_DISCARD; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); EFSYS_PROBE(ipv4_err); (*flagsp) &= ~EFX_CKSUM_IPV4; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); EFSYS_PROBE(udp_chk_err); (*flagsp) &= ~EFX_CKSUM_TCPUDP; } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); /* * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error * condition. */ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); } return (ignore); } static __checkReturn boolean_t efx_ev_rx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; uint32_t id; uint32_t size; uint32_t label; boolean_t ok; #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) boolean_t sop; boolean_t jumbo_cont; #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ uint32_t hdr_type; boolean_t is_v6; uint16_t flags; boolean_t ignore; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_RX); /* Basic packet information */ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); #if (EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER) sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); #endif /* EFSYS_OPT_RX_HDR_SPLIT || EFSYS_OPT_RX_SCATTER */ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); is_v6 = (enp->en_family != EFX_FAMILY_FALCON && EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); /* * If packet is marked as OK and packet type is TCP/IP or * UDP/IP or other IP, then we can rely on the hardware checksums. 
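 * For example, a good TCP/IPv4 frame arrives with hdr_type
 * FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP and is_v6 clear, so the switch below
 * yields EFX_PKT_TCP | EFX_PKT_IPV4 | EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP
 * and the caller can skip software checksum validation for it.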
*/ switch (hdr_type) { case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); flags |= EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: if (is_v6) { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); flags = EFX_PKT_IPV6; } else { EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; } break; case FSE_AZ_RX_EV_HDR_TYPE_OTHER: EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); flags = 0; break; default: EFSYS_ASSERT(B_FALSE); flags = 0; break; } #if EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT /* Report scatter and header/lookahead split buffer flags */ if (sop) flags |= EFX_PKT_START; if (jumbo_cont) flags |= EFX_PKT_CONT; #endif /* EFSYS_OPT_RX_SCATTER || EFSYS_OPT_RX_HDR_SPLIT */ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ if (!ok) { ignore = efx_ev_rx_not_ok(eep, eqp, label, id, &flags); if (ignore) { EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); return (B_FALSE); } } /* If we're not discarding the packet then it is ok */ if (~flags & EFX_DISCARD) EFX_EV_QSTAT_INCR(eep, EV_RX_OK); /* Detect multicast packets that didn't match the filter */ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); } else { EFSYS_PROBE(mcast_mismatch); flags |= EFX_ADDR_MISMATCH; } } else { flags |= EFX_PKT_UNICAST; } /* * The packet parser in Siena can abort parsing packets under * certain error conditions, setting the PKT_NOT_PARSED bit * (which clears PKT_OK). If this is set, then don't trust * the PKT_TYPE field. 
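 * In that case the code below sets EFX_CHECK_VLAN, the PKT_TYPE based
 * VLAN detection is skipped, and the caller is left to determine the
 * VLAN tagging itself.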
*/ if (enp->en_family != EFX_FAMILY_FALCON && !ok) { uint32_t parse_err; parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); if (parse_err != 0) flags |= EFX_CHECK_VLAN; } if (~flags & EFX_CHECK_VLAN) { uint32_t pkt_type; pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) flags |= EFX_PKT_VLAN_TAGGED; } EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, uint32_t, size, uint16_t, flags); EFSYS_ASSERT(eecp->eec_rx != NULL); should_abort = eecp->eec_rx(arg, label, id, size, flags); return (should_abort); } static __checkReturn boolean_t efx_ev_tx( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t id; uint32_t label; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_TX); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); EFSYS_ASSERT(eecp->eec_tx != NULL); should_abort = eecp->eec_tx(arg, label, id); return (should_abort); } if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); return (B_FALSE); } static __checkReturn boolean_t efx_ev_global( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; efx_port_t *epp = &(enp->en_port); boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); should_abort = B_FALSE; /* Check for a link management event */ if (EFX_QWORD_FIELD(*eqp, FSF_BZ_GLB_EV_XG_MNT_INTR) != 0) { EFX_EV_QSTAT_INCR(eep, EV_GLOBAL_MNT); EFSYS_PROBE(xg_mgt); epp->ep_mac_poll_needed = B_TRUE; } return (should_abort); } static __checkReturn boolean_t efx_ev_driver( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRIVER); should_abort = B_FALSE; switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { uint32_t txq_index; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); should_abort = eecp->eec_txq_flush_done(arg, txq_index); break; } case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { uint32_t rxq_index; uint32_t failed; rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); if (failed) { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_failed(arg, rxq_index); } else { EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); 
EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); } break; } case FSE_AZ_EVQ_INIT_DONE_EV: EFSYS_ASSERT(eecp->eec_initialized != NULL); should_abort = eecp->eec_initialized(arg); break; case FSE_AZ_EVQ_NOT_EN_EV: EFSYS_PROBE(evq_not_en); break; case FSE_AZ_SRM_UPD_DONE_EV: { uint32_t code; EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_sram != NULL); should_abort = eecp->eec_sram(arg, code); break; } case FSE_AZ_WAKE_UP_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_wake_up != NULL); should_abort = eecp->eec_wake_up(arg, id); break; } case FSE_AZ_TX_PKT_NON_TCP_UDP: EFSYS_PROBE(tx_pkt_non_tcp_udp); break; case FSE_AZ_TIMER_EV: { uint32_t id; id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); EFSYS_ASSERT(eecp->eec_timer != NULL); should_abort = eecp->eec_timer(arg, id); break; } case FSE_AZ_RX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); EFSYS_PROBE(rx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_RX_DSC_ERROR, 0); break; case FSE_AZ_TX_DSC_ERROR_EV: EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); EFSYS_PROBE(tx_dsc_error); EFSYS_ASSERT(eecp->eec_exception != NULL); should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_TX_DSC_ERROR, 0); break; default: break; } return (should_abort); } static __checkReturn boolean_t efx_ev_drv_gen( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { uint32_t data; boolean_t should_abort; EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); if (data >= ((uint32_t)1 << 16)) { EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); return (B_TRUE); } EFSYS_ASSERT(eecp->eec_software != NULL); should_abort = eecp->eec_software(arg, (uint16_t)data); return (should_abort); } #if EFSYS_OPT_MCDI static __checkReturn boolean_t efx_ev_mcdi( __in efx_evq_t *eep, __in efx_qword_t *eqp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_nic_t *enp = eep->ee_enp; unsigned code; boolean_t should_abort = B_FALSE; EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); if (enp->en_family != EFX_FAMILY_SIENA) goto out; EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); switch (code) { case MCDI_EVENT_CODE_BADSSERT: efx_mcdi_ev_death(enp, EINTR); break; case MCDI_EVENT_CODE_CMDDONE: efx_mcdi_ev_cpl(enp, MCDI_EV_FIELD(*eqp, CMDDONE_SEQ), MCDI_EV_FIELD(*eqp, CMDDONE_DATALEN), MCDI_EV_FIELD(*eqp, CMDDONE_ERRNO)); break; case MCDI_EVENT_CODE_LINKCHANGE: { efx_link_mode_t link_mode; siena_phy_link_ev(enp, eqp, &link_mode); should_abort = eecp->eec_link_change(arg, link_mode); break; } case MCDI_EVENT_CODE_SENSOREVT: { #if EFSYS_OPT_MON_STATS efx_mon_stat_t id; efx_mon_stat_value_t value; int rc; if ((rc = siena_mon_ev(enp, eqp, &id, &value)) == 0) should_abort = eecp->eec_monitor(arg, id, value); else if (rc == ENOTSUP) { should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_SENSOREVT, MCDI_EV_FIELD(eqp, DATA)); } else EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ #else should_abort = B_FALSE; #endif break; } case MCDI_EVENT_CODE_SCHEDERR: /* Informational only */ break; case MCDI_EVENT_CODE_REBOOT: efx_mcdi_ev_death(enp, EIO); break; case MCDI_EVENT_CODE_MAC_STATS_DMA: #if 
EFSYS_OPT_MAC_STATS if (eecp->eec_mac_stats != NULL) { eecp->eec_mac_stats(arg, MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); } #endif break; case MCDI_EVENT_CODE_FWALERT: { uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_FWALERT_SRAM, MCDI_EV_FIELD(eqp, FWALERT_DATA)); else should_abort = eecp->eec_exception(arg, EFX_EXCEPTION_UNKNOWN_FWALERT, MCDI_EV_FIELD(eqp, DATA)); break; } default: EFSYS_PROBE1(mc_pcol_error, int, code); break; } out: return (should_abort); } #endif /* EFSYS_OPT_SIENA */ __checkReturn int efx_ev_qprime( __in efx_evq_t *eep, __in unsigned int count) { efx_nic_t *enp = eep->ee_enp; uint32_t rptr; efx_dword_t dword; int rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if (!(enp->en_mod_flags & EFX_MOD_INTR)) { rc = EINVAL; goto fail1; } rptr = count & eep->ee_mask; EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, &dword, B_FALSE); return (0); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } __checkReturn boolean_t efx_ev_qpending( __in efx_evq_t *eep, __in unsigned int count) { size_t offset; efx_qword_t qword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); return (EFX_QWORD_FIELD(qword, EFX_DWORD_0) != 0xffffffff && EFX_QWORD_FIELD(qword, EFX_DWORD_1) != 0xffffffff); } #if EFSYS_OPT_EV_PREFETCH void efx_ev_qprefetch( __in efx_evq_t *eep, __in unsigned int count) { unsigned int offset; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ #define EFX_EV_BATCH 8 #define EFX_EV_PRESENT(_qword) \ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) void efx_ev_qpoll( __in efx_evq_t *eep, __inout unsigned int *countp, __in const efx_ev_callbacks_t *eecp, __in_opt void *arg) { efx_qword_t ev[EFX_EV_BATCH]; unsigned int batch; unsigned int total; unsigned int count; unsigned int index; size_t offset; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(countp != NULL); EFSYS_ASSERT(eecp != NULL); count = *countp; do { /* Read up until the end of the batch period */ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (total = 0; total < batch; ++total) { EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); if (!EFX_EV_PRESENT(ev[total])) break; EFSYS_PROBE3(event, unsigned int, eep->ee_index, uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); offset += sizeof (efx_qword_t); } #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) /* * Prefetch the next batch when we get within PREFETCH_PERIOD * of a completed batch. If the batch is smaller, then prefetch * immediately. 
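 * For example, with EFX_EV_BATCH of 8 and a prefetch period of 2, a
 * full batch issues its prefetch while the 7th event is processed,
 * two events before the end, hiding the latency of the next cache
 * line. (The period value here is only illustrative; it is set by
 * the EFSYS_OPT_EV_PREFETCH_PERIOD build option.)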
*/ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); #endif /* EFSYS_OPT_EV_PREFETCH */ /* Process the batch of events */ for (index = 0; index < total; ++index) { boolean_t should_abort; uint32_t code; efx_ev_handler_t handler; #if EFSYS_OPT_EV_PREFETCH /* Prefetch if we've now reached the batch period */ if (total == batch && index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { offset = (count + batch) & eep->ee_mask; offset *= sizeof (efx_qword_t); EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); } #endif /* EFSYS_OPT_EV_PREFETCH */ EFX_EV_QSTAT_INCR(eep, EV_ALL); code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); handler = eep->ee_handler[code]; EFSYS_ASSERT(handler != NULL); should_abort = handler(eep, &(ev[index]), eecp, arg); if (should_abort) { /* Ignore subsequent events */ total = index + 1; break; } } /* * Now that the hardware has most likely moved onto dma'ing * into the next cache line, clear the processed events. Take * care to only clear out events that we've processed */ EFX_SET_QWORD(ev[0]); offset = (count & eep->ee_mask) * sizeof (efx_qword_t); for (index = 0; index < total; ++index) { EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); offset += sizeof (efx_qword_t); } count += total; } while (total == batch); *countp = count; } void efx_ev_qpost( __in efx_evq_t *eep, __in uint16_t data) { efx_nic_t *enp = eep->ee_enp; efx_qword_t ev; efx_oword_t oword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, FSF_AZ_EV_DATA_DW0, (uint32_t)data); EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); } __checkReturn int efx_ev_qmoderate( __in efx_evq_t *eep, __in unsigned int us) { efx_nic_t *enp = eep->ee_enp; unsigned int locked; efx_dword_t dword; int rc; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); if (us > enp->en_nic_cfg.enc_evq_moderation_max) { rc = EINVAL; goto fail1; } /* If the value is zero then disable the timer */ if (us == 0) { if (enp->en_family == EFX_FAMILY_FALCON) EFX_POPULATE_DWORD_2(dword, FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_DIS, FRF_AB_TC_TIMER_VAL, 0); else EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, FRF_CZ_TC_TIMER_VAL, 0); } else { uint32_t timer_val; /* Calculate the timer value in quanta */ us -= (us % EFX_EV_TIMER_QUANTUM); if (us < EFX_EV_TIMER_QUANTUM) us = EFX_EV_TIMER_QUANTUM; timer_val = us / EFX_EV_TIMER_QUANTUM; /* Moderation value is base 0 so we need to deduct 1 */ if (enp->en_family == EFX_FAMILY_FALCON) EFX_POPULATE_DWORD_2(dword, FRF_AB_TC_TIMER_MODE, FFE_AB_TIMER_MODE_INT_HLDOFF, FRF_AB_TIMER_VAL, timer_val - 1); else EFX_POPULATE_DWORD_2(dword, FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, FRF_CZ_TC_TIMER_VAL, timer_val - 1); } locked = (eep->ee_index == 0) ? 
1 : 0; EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, eep->ee_index, &dword, locked); return (0); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } __checkReturn int efx_ev_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __deref_out efx_evq_t **eepp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); uint32_t size; efx_evq_t *eep; efx_oword_t oword; int rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit); if (!ISP2(n) || !(n & EFX_EVQ_NEVS_MASK)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_evq_limit) { rc = EINVAL; goto fail2; } #if EFSYS_OPT_RX_SCALE if (enp->en_intr.ei_type == EFX_INTR_LINE && index >= EFX_MAXRSS_LEGACY) { rc = EINVAL; goto fail3; } #endif for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS); size++) if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail4; } /* Allocate an EVQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); if (eep == NULL) { rc = ENOMEM; goto fail5; } eep->ee_magic = EFX_EVQ_MAGIC; eep->ee_enp = enp; eep->ee_index = index; eep->ee_mask = n - 1; eep->ee_esmp = esmp; /* Set up the handler table */ eep->ee_handler[FSE_AZ_EV_CODE_RX_EV] = efx_ev_rx; eep->ee_handler[FSE_AZ_EV_CODE_TX_EV] = efx_ev_tx; eep->ee_handler[FSE_AZ_EV_CODE_DRIVER_EV] = efx_ev_driver; eep->ee_handler[FSE_AZ_EV_CODE_GLOBAL_EV] = efx_ev_global; eep->ee_handler[FSE_AZ_EV_CODE_DRV_GEN_EV] = efx_ev_drv_gen; #if EFSYS_OPT_MCDI eep->ee_handler[FSE_AZ_EV_CODE_MCDI_EVRESPONSE] = efx_ev_mcdi; #endif /* EFSYS_OPT_SIENA */ /* Set up the new event queue */ if (enp->en_family != EFX_FAMILY_FALCON) { EFX_POPULATE_OWORD_1(oword, FRF_CZ_TIMER_Q_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword); } EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, FRF_AZ_EVQ_BUF_BASE_ID, id); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword); enp->en_ev_qcount++; *eepp = eep; return (0); fail5: EFSYS_PROBE(fail5); fail4: EFSYS_PROBE(fail4); #if EFSYS_OPT_RX_SCALE fail3: EFSYS_PROBE(fail3); #endif fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } +#if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock 67e9bdcd920059bd */ static const char __cs * __cs __efx_ev_qstat_name[] = { "all", "rx", "rx_ok", "rx_recovery", "rx_frm_trunc", "rx_tobe_disc", "rx_pause_frm_err", "rx_buf_owner_id_err", "rx_ipv4_hdr_chksum_err", "rx_tcp_udp_chksum_err", "rx_eth_crc_err", "rx_ip_frag_err", "rx_mcast_pkt", "rx_mcast_hash_match", "rx_tcp_ipv4", "rx_tcp_ipv6", "rx_udp_ipv4", "rx_udp_ipv6", "rx_other_ipv4", "rx_other_ipv6", "rx_non_ip", "rx_overrun", "tx", "tx_wq_ff_full", "tx_pkt_err", "tx_pkt_too_big", "tx_unexpected", "global", "global_phy", "global_mnt", "global_rx_recovery", "driver", "driver_srm_upd_done", "driver_tx_descq_fls_done", "driver_rx_descq_fls_done", "driver_rx_descq_fls_failed", "driver_rx_dsc_error", "driver_tx_dsc_error", "drv_gen", "mcdi_response", }; /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ const char __cs * efx_ev_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, EV_NQSTATS); return (__efx_ev_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ +#endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_QSTATS void efx_ev_qstats_update( __in efx_evq_t 
*eep, __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) { unsigned int id; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); for (id = 0; id < EV_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, eep->ee_stat[id]); eep->ee_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ void efx_ev_qdestroy( __in efx_evq_t *eep) { efx_nic_t *enp = eep->ee_enp; efx_oword_t oword; EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); EFSYS_ASSERT(enp->en_ev_qcount != 0); --enp->en_ev_qcount; /* Purge event queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, eep->ee_index, &oword); if (enp->en_family != EFX_FAMILY_FALCON) { EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword); } /* Free the EVQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); } void efx_ev_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); enp->en_mod_flags &= ~EFX_MOD_EV; } Index: projects/clang360-import/sys/dev/sfxge/common/efx_tx.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/common/efx_tx.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/common/efx_tx.c (revision 277896) @@ -1,433 +1,435 @@ /*- * Copyright 2007-2009 Solarflare Communications Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "efsys.h" #include "efx.h" #include "efx_types.h" #include "efx_regs.h" #include "efx_impl.h" #if EFSYS_OPT_QSTATS #define EFX_TX_QSTAT_INCR(_etp, _stat) \ do { \ (_etp)->et_stat[_stat]++; \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) #else #define EFX_TX_QSTAT_INCR(_etp, _stat) #endif __checkReturn int efx_tx_init( __in efx_nic_t *enp) { efx_oword_t oword; int rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); if (!(enp->en_mod_flags & EFX_MOD_EV)) { rc = EINVAL; goto fail1; } if (enp->en_mod_flags & EFX_MOD_TX) { rc = EINVAL; goto fail2; } EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); /* * Disable the timer-based TX DMA backoff and allow TX DMA to be * controlled by the RX FIFO fill level (although always allow a * minimal trickle). */ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); /* * Filter all packets less than 14 bytes to avoid parsing * errors. */ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword); /* * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16 * descriptors (which is bad). */ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0); EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword); enp->en_mod_flags |= EFX_MOD_TX; return (0); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } #if EFSYS_OPT_FILTER extern __checkReturn int efx_tx_filter_insert( __in efx_txq_t *etp, __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_dmaq_id = (uint16_t)etp->et_index; return efx_filter_insert_filter(etp->et_enp, spec, B_FALSE); } #endif #if EFSYS_OPT_FILTER extern __checkReturn int efx_tx_filter_remove( __in efx_txq_t *etp, __inout efx_filter_spec_t *spec) { EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT3P(spec, !=, NULL); spec->efs_dmaq_id = (uint16_t)etp->et_index; return efx_filter_remove_filter(etp->et_enp, spec); } #endif #define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \ do { \ unsigned int id; \ size_t offset; \ efx_qword_t qword; \ \ id = (_added)++ & (_etp)->et_mask; \ offset = id * sizeof (efx_qword_t); \ \ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \ unsigned int, id, efsys_dma_addr_t, (_addr), \ size_t, (_size), boolean_t, (_eop)); \ \ EFX_POPULATE_QWORD_4(qword, \ FSF_AZ_TX_KER_CONT, (_eop) ? 
0 : 1, \ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \ FSF_AZ_TX_KER_BUF_ADDR_DW0, \ (uint32_t)((_addr) & 0xffffffff), \ FSF_AZ_TX_KER_BUF_ADDR_DW1, \ (uint32_t)((_addr) >> 32)); \ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \ \ _NOTE(CONSTANTCONDITION) \ } while (B_FALSE) __checkReturn int efx_tx_qpost( __in efx_txq_t *etp, __in_ecount(n) efx_buffer_t *eb, __in unsigned int n, __in unsigned int completed, __inout unsigned int *addedp) { unsigned int added = *addedp; unsigned int i; int rc = ENOSPC; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) goto fail1; for (i = 0; i < n; i++) { efx_buffer_t *ebp = &eb[i]; efsys_dma_addr_t start = ebp->eb_addr; size_t size = ebp->eb_size; efsys_dma_addr_t end = start + size; /* Fragments must not span 4k boundaries. */ EFSYS_ASSERT(P2ROUNDUP(start + 1, 4096) >= end); EFX_TX_DESC(etp, start, size, ebp->eb_eop, added); } EFX_TX_QSTAT_INCR(etp, TX_POST); *addedp = added; return (0); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } void efx_tx_qpush( __in efx_txq_t *etp, __in unsigned int added) { efx_nic_t *enp = etp->et_enp; uint32_t wptr; efx_dword_t dword; efx_oword_t oword; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */ EFSYS_PIO_WRITE_BARRIER(); /* Push the populated descriptors out */ wptr = added & etp->et_mask; EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr); /* Only write the third DWORD */ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3)); EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0, etp->et_index, &dword, B_FALSE); } void efx_tx_qflush( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; uint32_t label; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); label = etp->et_index; /* Flush the queue */ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, FRF_AZ_TX_FLUSH_DESCQ, label); EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword); } void efx_tx_qenable( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword); EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index, uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1), uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0)); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0); EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword); } __checkReturn int efx_tx_qcreate( __in efx_nic_t *enp, __in unsigned int index, __in unsigned int label, __in efsys_mem_t *esmp, __in size_t n, __in uint32_t id, __in uint16_t flags, __in efx_evq_t *eep, __deref_out efx_txq_t **etpp) { efx_nic_cfg_t *encp = &(enp->en_nic_cfg); efx_txq_t *etp; efx_oword_t oword; uint32_t size; int rc; EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH)); /* EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);*/ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit); if (!ISP2(n) || !(n & EFX_TXQ_NDESCS_MASK)) { rc = EINVAL; goto fail1; } if (index >= encp->enc_txq_limit) { rc = EINVAL; goto fail2; } for (size = 0; (1 << size) <= (EFX_TXQ_MAXNDESCS / EFX_TXQ_MINNDESCS); size++) 
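/* Select the FRF_AZ_TX_DESCQ_SIZE encoding: the exponent for which (1 << size) == n / EFX_TXQ_MINNDESCS; e.g. n == 4 * EFX_TXQ_MINNDESCS encodes as size == 2. */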
if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS)) break; if (id + (1 << size) >= encp->enc_buftbl_limit) { rc = EINVAL; goto fail3; } /* Allocate a TXQ object */ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp); if (etp == NULL) { rc = ENOMEM; goto fail4; } etp->et_magic = EFX_TXQ_MAGIC; etp->et_enp = enp; etp->et_index = index; etp->et_mask = n - 1; etp->et_esmp = esmp; /* Set up the new descriptor queue */ EFX_POPULATE_OWORD_6(oword, FRF_AZ_TX_DESCQ_BUF_BASE_ID, id, FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index, FRF_AZ_TX_DESCQ_OWNER_ID, 0, FRF_AZ_TX_DESCQ_LABEL, label, FRF_AZ_TX_DESCQ_SIZE, size, FRF_AZ_TX_DESCQ_TYPE, 0); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS, (flags & EFX_CKSUM_IPV4) ? 0 : 1); EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS, (flags & EFX_CKSUM_TCPUDP) ? 0 : 1); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword); enp->en_tx_qcount++; *etpp = etp; return (0); fail4: EFSYS_PROBE(fail4); fail3: EFSYS_PROBE(fail3); fail2: EFSYS_PROBE(fail2); fail1: EFSYS_PROBE1(fail1, int, rc); return (rc); } +#if EFSYS_OPT_QSTATS #if EFSYS_OPT_NAMES /* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 78ca9ab00287fffb */ static const char __cs * __cs __efx_tx_qstat_name[] = { "post", "unaligned_split", }; /* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */ const char __cs * efx_tx_qstat_name( __in efx_nic_t *enp, __in unsigned int id) { _NOTE(ARGUNUSED(enp)) EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(id, <, TX_NQSTATS); return (__efx_tx_qstat_name[id]); } #endif /* EFSYS_OPT_NAMES */ +#endif /* EFSYS_OPT_QSTATS */ #if EFSYS_OPT_QSTATS void efx_tx_qstats_update( __in efx_txq_t *etp, __inout_ecount(TX_NQSTATS) efsys_stat_t *stat) { unsigned int id; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); for (id = 0; id < TX_NQSTATS; id++) { efsys_stat_t *essp = &stat[id]; EFSYS_STAT_INCR(essp, etp->et_stat[id]); etp->et_stat[id] = 0; } } #endif /* EFSYS_OPT_QSTATS */ void efx_tx_qdestroy( __in efx_txq_t *etp) { efx_nic_t *enp = etp->et_enp; efx_oword_t oword; EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC); EFSYS_ASSERT(enp->en_tx_qcount != 0); --enp->en_tx_qcount; /* Purge descriptor queue */ EFX_ZERO_OWORD(oword); EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL, etp->et_index, &oword); /* Free the TXQ object */ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp); } void efx_tx_fini( __in efx_nic_t *enp) { EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC); EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX); EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0); enp->en_mod_flags &= ~EFX_MOD_TX; } Index: projects/clang360-import/sys/dev/sfxge/sfxge.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge.c (revision 277896) @@ -1,814 +1,821 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" #include "sfxge_rx.h" #define SFXGE_CAP (IFCAP_VLAN_MTU | \ IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | \ IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE) #define SFXGE_CAP_ENABLE SFXGE_CAP #define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \ IFCAP_JUMBO_MTU | IFCAP_LINKSTATE) MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver"); SYSCTL_NODE(_hw, OID_AUTO, sfxge, CTLFLAG_RD, 0, "SFXGE driver parameters"); #define SFXGE_PARAM_RX_RING SFXGE_PARAM(rx_ring) static int sfxge_rx_ring_entries = SFXGE_NDESCS; TUNABLE_INT(SFXGE_PARAM_RX_RING, &sfxge_rx_ring_entries); SYSCTL_INT(_hw_sfxge, OID_AUTO, rx_ring, CTLFLAG_RDTUN, &sfxge_rx_ring_entries, 0, "Maximum number of descriptors in a receive ring"); #define SFXGE_PARAM_TX_RING SFXGE_PARAM(tx_ring) static int sfxge_tx_ring_entries = SFXGE_NDESCS; TUNABLE_INT(SFXGE_PARAM_TX_RING, &sfxge_tx_ring_entries); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_ring, CTLFLAG_RDTUN, &sfxge_tx_ring_entries, 0, "Maximum number of descriptors in a transmit ring"); static void sfxge_reset(void *arg, int npending); static int sfxge_start(struct sfxge_softc *sc) { int rc; sx_assert(&sc->softc_lock, LA_XLOCKED); if (sc->init_state == SFXGE_STARTED) return (0); if (sc->init_state != SFXGE_REGISTERED) { rc = EINVAL; goto fail; } if ((rc = efx_nic_init(sc->enp)) != 0) goto fail; /* Start processing interrupts. */ if ((rc = sfxge_intr_start(sc)) != 0) goto fail2; /* Start processing events. */ if ((rc = sfxge_ev_start(sc)) != 0) goto fail3; /* Start the receiver side. */ if ((rc = sfxge_rx_start(sc)) != 0) goto fail4; /* Start the transmitter side. */ if ((rc = sfxge_tx_start(sc)) != 0) goto fail5; /* Fire up the port. */ if ((rc = sfxge_port_start(sc)) != 0) goto fail6; sc->init_state = SFXGE_STARTED; /* Tell the stack we're running. 
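* Setting IFF_DRV_RUNNING marks the hardware as up, and clearing IFF_DRV_OACTIVE lets the stack resume handing us packets to transmit.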
*/ sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING; sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE; return (0); fail6: sfxge_tx_stop(sc); fail5: sfxge_rx_stop(sc); fail4: sfxge_ev_stop(sc); fail3: sfxge_intr_stop(sc); fail2: efx_nic_fini(sc->enp); fail: device_printf(sc->dev, "sfxge_start: %d\n", rc); return (rc); } static void sfxge_if_init(void *arg) { struct sfxge_softc *sc; sc = (struct sfxge_softc *)arg; sx_xlock(&sc->softc_lock); (void)sfxge_start(sc); sx_xunlock(&sc->softc_lock); } static void sfxge_stop(struct sfxge_softc *sc) { sx_assert(&sc->softc_lock, LA_XLOCKED); if (sc->init_state != SFXGE_STARTED) return; sc->init_state = SFXGE_REGISTERED; /* Stop the port. */ sfxge_port_stop(sc); /* Stop the transmitter. */ sfxge_tx_stop(sc); /* Stop the receiver. */ sfxge_rx_stop(sc); /* Stop processing events. */ sfxge_ev_stop(sc); /* Stop processing interrupts. */ sfxge_intr_stop(sc); efx_nic_fini(sc->enp); sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING; } static int sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) { struct sfxge_softc *sc; struct ifreq *ifr; int error; ifr = (struct ifreq *)data; sc = ifp->if_softc; error = 0; switch (command) { case SIOCSIFFLAGS: sx_xlock(&sc->softc_lock); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { sfxge_mac_filter_set(sc); } } else sfxge_start(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) sfxge_stop(sc); sc->if_flags = ifp->if_flags; sx_xunlock(&sc->softc_lock); break; case SIOCSIFMTU: if (ifr->ifr_mtu == ifp->if_mtu) { /* Nothing to do */ error = 0; } else if (ifr->ifr_mtu > SFXGE_MAX_MTU) { error = EINVAL; } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_mtu = ifr->ifr_mtu; error = 0; } else { /* Restart required */ sx_xlock(&sc->softc_lock); sfxge_stop(sc); ifp->if_mtu = ifr->ifr_mtu; error = sfxge_start(sc); sx_xunlock(&sc->softc_lock); if (error != 0) { ifp->if_flags &= ~IFF_UP; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; if_down(ifp); } } break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) sfxge_mac_filter_set(sc); break; case SIOCSIFCAP: sx_xlock(&sc->softc_lock); /* * The networking core already rejects attempts to * enable capabilities we don't have. We still have * to reject attempts to disable capabilities that we * can't (yet) disable. 
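* For example, a request that clears IFCAP_VLAN_MTU leaves that bit set in ~ifr_reqcap & SFXGE_CAP_FIXED, so it is rejected with EINVAL below.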
*/ if (~ifr->ifr_reqcap & SFXGE_CAP_FIXED) { error = EINVAL; sx_xunlock(&sc->softc_lock); break; } ifp->if_capenable = ifr->ifr_reqcap; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP); else ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP); if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; sx_xunlock(&sc->softc_lock); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static void sfxge_ifnet_fini(struct ifnet *ifp) { struct sfxge_softc *sc = ifp->if_softc; sx_xlock(&sc->softc_lock); sfxge_stop(sc); sx_xunlock(&sc->softc_lock); ifmedia_removeall(&sc->media); ether_ifdetach(ifp); if_free(ifp); } static int sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp); device_t dev; int rc; dev = sc->dev; sc->ifnet = ifp; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = sfxge_if_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sfxge_if_ioctl; ifp->if_capabilities = SFXGE_CAP; ifp->if_capenable = SFXGE_CAP_ENABLE; ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO; ether_ifattach(ifp, encp->enc_mac_addr); #ifdef SFXGE_HAVE_MQ ifp->if_transmit = sfxge_if_transmit; ifp->if_qflush = sfxge_if_qflush; #else ifp->if_start = sfxge_if_start; IFQ_SET_MAXLEN(&ifp->if_snd, sc->txq_entries - 1); ifp->if_snd.ifq_drv_maxlen = sc->txq_entries - 1; IFQ_SET_READY(&ifp->if_snd); mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF); #endif if ((rc = sfxge_port_ifmedia_init(sc)) != 0) goto fail; return (0); fail: ether_ifdetach(sc->ifnet); return (rc); } void sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp) { KASSERT(sc->buffer_table_next + n <= efx_nic_cfg_get(sc->enp)->enc_buftbl_limit, ("buffer table full")); *idp = sc->buffer_table_next; sc->buffer_table_next += n; } static int sfxge_bar_init(struct sfxge_softc *sc) { efsys_bar_t *esbp = &sc->bar; esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR); if ((esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &esbp->esb_rid, RF_ACTIVE)) == NULL) { device_printf(sc->dev, "Cannot allocate BAR region %d\n", EFX_MEM_BAR); return (ENXIO); } esbp->esb_tag = rman_get_bustag(esbp->esb_res); esbp->esb_handle = rman_get_bushandle(esbp->esb_res); mtx_init(&esbp->esb_lock, "sfxge_efsys_bar", NULL, MTX_DEF); return (0); } static void sfxge_bar_fini(struct sfxge_softc *sc) { efsys_bar_t *esbp = &sc->bar; bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid, esbp->esb_res); mtx_destroy(&esbp->esb_lock); } static int sfxge_create(struct sfxge_softc *sc) { device_t dev; efx_nic_t *enp; int error; + char rss_param_name[sizeof(SFXGE_PARAM(%d.max_rss_channels))]; dev = sc->dev; sx_init(&sc->softc_lock, "sfxge_softc"); + + sc->max_rss_channels = 0; + snprintf(rss_param_name, sizeof(rss_param_name), + SFXGE_PARAM(%d.max_rss_channels), + (int)device_get_unit(dev)); + TUNABLE_INT_FETCH(rss_param_name, &sc->max_rss_channels); sc->stats_node = SYSCTL_ADD_NODE( device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics"); if (sc->stats_node == NULL) { error = ENOMEM; goto fail; } TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc); (void) pci_enable_busmaster(dev); /* Initialize DMA mappings. 
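* sfxge_dma_init() creates the parent DMA tag from which the per-queue descriptor rings and shared buffers are later carved (see sfxge_dma.c below).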
*/ if ((error = sfxge_dma_init(sc)) != 0) goto fail; /* Map the device registers. */ if ((error = sfxge_bar_init(sc)) != 0) goto fail; error = efx_family(pci_get_vendor(dev), pci_get_device(dev), &sc->family); KASSERT(error == 0, ("Family should be filtered by sfxge_probe()")); /* Create the common code nic object. */ mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF); if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc, &sc->bar, &sc->enp_lock, &enp)) != 0) goto fail3; sc->enp = enp; if (!ISP2(sfxge_rx_ring_entries) || !(sfxge_rx_ring_entries & EFX_RXQ_NDESCS_MASK)) { log(LOG_ERR, "%s=%d must be power of 2 from %u to %u", SFXGE_PARAM_RX_RING, sfxge_rx_ring_entries, EFX_RXQ_MINNDESCS, EFX_RXQ_MAXNDESCS); error = EINVAL; goto fail_rx_ring_entries; } sc->rxq_entries = sfxge_rx_ring_entries; if (!ISP2(sfxge_tx_ring_entries) || !(sfxge_tx_ring_entries & EFX_TXQ_NDESCS_MASK)) { log(LOG_ERR, "%s=%d must be power of 2 from %u to %u", SFXGE_PARAM_TX_RING, sfxge_tx_ring_entries, EFX_TXQ_MINNDESCS, EFX_TXQ_MAXNDESCS); error = EINVAL; goto fail_tx_ring_entries; } sc->txq_entries = sfxge_tx_ring_entries; /* Initialize MCDI to talk to the microcontroller. */ if ((error = sfxge_mcdi_init(sc)) != 0) goto fail4; /* Probe the NIC and build the configuration data area. */ if ((error = efx_nic_probe(enp)) != 0) goto fail5; /* Initialize the NVRAM. */ if ((error = efx_nvram_init(enp)) != 0) goto fail6; /* Initialize the VPD. */ if ((error = efx_vpd_init(enp)) != 0) goto fail7; /* Reset the NIC. */ if ((error = efx_nic_reset(enp)) != 0) goto fail8; /* Initialize buffer table allocation. */ sc->buffer_table_next = 0; /* Set up interrupts. */ if ((error = sfxge_intr_init(sc)) != 0) goto fail8; /* Initialize event processing state. */ if ((error = sfxge_ev_init(sc)) != 0) goto fail11; /* Initialize receive state. */ if ((error = sfxge_rx_init(sc)) != 0) goto fail12; /* Initialize transmit state. */ if ((error = sfxge_tx_init(sc)) != 0) goto fail13; /* Initialize port state. */ if ((error = sfxge_port_init(sc)) != 0) goto fail14; sc->init_state = SFXGE_INITIALIZED; return (0); fail14: sfxge_tx_fini(sc); fail13: sfxge_rx_fini(sc); fail12: sfxge_ev_fini(sc); fail11: sfxge_intr_fini(sc); fail8: efx_vpd_fini(enp); fail7: efx_nvram_fini(enp); fail6: efx_nic_unprobe(enp); fail5: sfxge_mcdi_fini(sc); fail4: fail_tx_ring_entries: fail_rx_ring_entries: sc->enp = NULL; efx_nic_destroy(enp); mtx_destroy(&sc->enp_lock); fail3: sfxge_bar_fini(sc); (void) pci_disable_busmaster(sc->dev); fail: sc->dev = NULL; sx_destroy(&sc->softc_lock); return (error); } static void sfxge_destroy(struct sfxge_softc *sc) { efx_nic_t *enp; /* Clean up port state. */ sfxge_port_fini(sc); /* Clean up transmit state. */ sfxge_tx_fini(sc); /* Clean up receive state. */ sfxge_rx_fini(sc); /* Clean up event processing state. */ sfxge_ev_fini(sc); /* Clean up interrupts. */ sfxge_intr_fini(sc); /* Tear down common code subsystems. */ efx_nic_reset(sc->enp); efx_vpd_fini(sc->enp); efx_nvram_fini(sc->enp); efx_nic_unprobe(sc->enp); /* Tear down MCDI. */ sfxge_mcdi_fini(sc); /* Destroy common code context. */ enp = sc->enp; sc->enp = NULL; efx_nic_destroy(enp); /* Free DMA memory. */ sfxge_dma_fini(sc); /* Free mapped BARs. */ sfxge_bar_fini(sc); (void) pci_disable_busmaster(sc->dev); taskqueue_drain(taskqueue_thread, &sc->task_reset); /* Destroy the softc lock. 
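* This must follow the taskqueue_drain() above, so that no queued reset task can still be trying to acquire the lock.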
*/ sx_destroy(&sc->softc_lock); } static int sfxge_vpd_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; efx_vpd_value_t value; int rc; value.evv_tag = arg2 >> 16; value.evv_keyword = arg2 & 0xffff; if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value)) != 0) return (rc); return (SYSCTL_OUT(req, value.evv_value, value.evv_length)); } static void sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list, efx_vpd_tag_t tag, const char *keyword) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); efx_vpd_value_t value; /* Check whether VPD tag/keyword is present */ value.evv_tag = tag; value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]); if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0) return; SYSCTL_ADD_PROC( ctx, list, OID_AUTO, keyword, CTLTYPE_STRING|CTLFLAG_RD, sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]), sfxge_vpd_handler, "A", ""); } static int sfxge_vpd_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *vpd_node; struct sysctl_oid_list *vpd_list; char keyword[3]; efx_vpd_value_t value; int rc; if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0) goto fail; sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK); if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0) goto fail2; /* Copy ID (product name) into device description, and log it. */ value.evv_tag = EFX_VPD_ID; if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) { value.evv_value[value.evv_length] = 0; device_set_desc_copy(sc->dev, value.evv_value); device_printf(sc->dev, "%s\n", value.evv_value); } vpd_node = SYSCTL_ADD_NODE( ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "vpd", CTLFLAG_RD, NULL, "Vital Product Data"); vpd_list = SYSCTL_CHILDREN(vpd_node); /* Add sysctls for all expected and any vendor-defined keywords. */ sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN"); sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC"); sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN"); keyword[0] = 'V'; keyword[2] = 0; for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++) sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword); for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++) sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword); return (0); fail2: free(sc->vpd_data, M_SFXGE); fail: return (rc); } static void sfxge_vpd_fini(struct sfxge_softc *sc) { free(sc->vpd_data, M_SFXGE); } static void sfxge_reset(void *arg, int npending) { struct sfxge_softc *sc; int rc; (void)npending; sc = (struct sfxge_softc *)arg; sx_xlock(&sc->softc_lock); if (sc->init_state != SFXGE_STARTED) goto done; sfxge_stop(sc); efx_nic_reset(sc->enp); if ((rc = sfxge_start(sc)) != 0) device_printf(sc->dev, "reset failed (%d); interface is now stopped\n", rc); done: sx_xunlock(&sc->softc_lock); } void sfxge_schedule_reset(struct sfxge_softc *sc) { taskqueue_enqueue(taskqueue_thread, &sc->task_reset); } static int sfxge_attach(device_t dev) { struct sfxge_softc *sc; struct ifnet *ifp; int error; sc = device_get_softc(dev); sc->dev = dev; /* Allocate ifnet. */ ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Couldn't allocate ifnet\n"); error = ENOMEM; goto fail; } sc->ifnet = ifp; /* Initialize hardware. */ if ((error = sfxge_create(sc)) != 0) goto fail2; /* Create the ifnet for the port. 
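* sfxge_ifnet_init() also attaches the ether and ifmedia layers, so failures after this point must unwind through sfxge_ifnet_fini().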
*/ if ((error = sfxge_ifnet_init(ifp, sc)) != 0) goto fail3; if ((error = sfxge_vpd_init(sc)) != 0) goto fail4; sc->init_state = SFXGE_REGISTERED; return (0); fail4: sfxge_ifnet_fini(ifp); fail3: sfxge_destroy(sc); fail2: if_free(sc->ifnet); fail: return (error); } static int sfxge_detach(device_t dev) { struct sfxge_softc *sc; sc = device_get_softc(dev); sfxge_vpd_fini(sc); /* Destroy the ifnet. */ sfxge_ifnet_fini(sc->ifnet); /* Tear down hardware. */ sfxge_destroy(sc); return (0); } static int sfxge_probe(device_t dev) { uint16_t pci_vendor_id; uint16_t pci_device_id; efx_family_t family; int rc; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); rc = efx_family(pci_vendor_id, pci_device_id, &family); if (rc != 0) return (ENXIO); KASSERT(family == EFX_FAMILY_SIENA, ("impossible controller family")); device_set_desc(dev, "Solarflare SFC9000 family"); return (0); } static device_method_t sfxge_methods[] = { DEVMETHOD(device_probe, sfxge_probe), DEVMETHOD(device_attach, sfxge_attach), DEVMETHOD(device_detach, sfxge_detach), DEVMETHOD_END }; static devclass_t sfxge_devclass; static driver_t sfxge_driver = { "sfxge", sfxge_methods, sizeof(struct sfxge_softc) }; DRIVER_MODULE(sfxge, pci, sfxge_driver, sfxge_devclass, 0, 0); Index: projects/clang360-import/sys/dev/sfxge/sfxge.h =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge.h (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge.h (revision 277896) @@ -1,314 +1,317 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SFXGE_H #define _SFXGE_H #include #include #include #include #include #include #include #include #include #include #include #include /* * Backward-compatibility */ #ifndef CACHE_LINE_SIZE /* This should be right on most machines the driver will be used on, and * we needn't care too much about wasting a few KB per interface. 
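* (128 bytes covers the 64-byte lines typical of x86 as well as the larger lines used by some other platforms.)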
*/ #define CACHE_LINE_SIZE 128 #endif #ifndef IFCAP_LINKSTATE #define IFCAP_LINKSTATE 0 #endif #ifndef IFCAP_VLAN_HWTSO #define IFCAP_VLAN_HWTSO 0 #endif #ifndef IFM_10G_T #define IFM_10G_T IFM_UNKNOWN #endif #ifndef IFM_10G_KX4 #define IFM_10G_KX4 IFM_10G_CX4 #endif #if __FreeBSD_version >= 800054 /* Networking core is multiqueue aware. We can manage our own TX * queues and use m_pkthdr.flowid. */ #define SFXGE_HAVE_MQ #endif #if (__FreeBSD_version >= 800501 && __FreeBSD_version < 900000) || \ __FreeBSD_version >= 900003 #define SFXGE_HAVE_DESCRIBE_INTR #endif #ifdef IFM_ETH_RXPAUSE #define SFXGE_HAVE_PAUSE_MEDIAOPTS #endif #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #endif #include "sfxge_rx.h" #include "sfxge_tx.h" #define ROUNDUP_POW_OF_TWO(_n) (1ULL << flsl((_n) - 1)) #define SFXGE_IP_ALIGN 2 #define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */ enum sfxge_evq_state { SFXGE_EVQ_UNINITIALIZED = 0, SFXGE_EVQ_INITIALIZED, SFXGE_EVQ_STARTING, SFXGE_EVQ_STARTED }; #define SFXGE_EV_BATCH 16384 struct sfxge_evq { - struct sfxge_softc *sc __aligned(CACHE_LINE_SIZE); - struct mtx lock __aligned(CACHE_LINE_SIZE); - - enum sfxge_evq_state init_state; + /* Structure members below are sorted by usage order */ + struct sfxge_softc *sc; + struct mtx lock; unsigned int index; - unsigned int entries; + enum sfxge_evq_state init_state; efsys_mem_t mem; - unsigned int buf_base_id; - - boolean_t exception; - efx_evq_t *common; unsigned int read_ptr; + boolean_t exception; unsigned int rx_done; unsigned int tx_done; /* Linked list of TX queues with completions to process */ struct sfxge_txq *txq; struct sfxge_txq **txqs; -}; + /* Structure members not used on event processing path */ + unsigned int buf_base_id; + unsigned int entries; +} __aligned(CACHE_LINE_SIZE); + #define SFXGE_NDESCS 1024 #define SFXGE_MODERATION 30 enum sfxge_intr_state { SFXGE_INTR_UNINITIALIZED = 0, SFXGE_INTR_INITIALIZED, SFXGE_INTR_TESTING, SFXGE_INTR_STARTED }; struct sfxge_intr_hdl { int eih_rid; void *eih_tag; struct resource *eih_res; }; struct sfxge_intr { enum sfxge_intr_state state; struct resource *msix_res; struct sfxge_intr_hdl *table; int n_alloc; int type; efsys_mem_t status; uint32_t zero_count; }; enum sfxge_mcdi_state { SFXGE_MCDI_UNINITIALIZED = 0, SFXGE_MCDI_INITIALIZED, SFXGE_MCDI_BUSY, SFXGE_MCDI_COMPLETED }; struct sfxge_mcdi { struct mtx lock; struct cv cv; enum sfxge_mcdi_state state; efx_mcdi_transport_t transport; }; struct sfxge_hw_stats { clock_t update_time; efsys_mem_t dma_buf; void *decode_buf; }; enum sfxge_port_state { SFXGE_PORT_UNINITIALIZED = 0, SFXGE_PORT_INITIALIZED, SFXGE_PORT_STARTED }; struct sfxge_port { struct sfxge_softc *sc; struct mtx lock; enum sfxge_port_state init_state; #ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS unsigned int wanted_fc; #endif struct sfxge_hw_stats phy_stats; struct sfxge_hw_stats mac_stats; efx_link_mode_t link_mode; }; enum sfxge_softc_state { SFXGE_UNINITIALIZED = 0, SFXGE_INITIALIZED, SFXGE_REGISTERED, SFXGE_STARTED }; struct sfxge_softc { device_t dev; struct sx softc_lock; enum sfxge_softc_state init_state; struct ifnet *ifnet; unsigned int if_flags; struct sysctl_oid *stats_node; struct sysctl_oid *txqs_node; struct task task_reset; efx_family_t family; caddr_t vpd_data; size_t vpd_size; efx_nic_t *enp; struct mtx enp_lock; unsigned int rxq_entries; unsigned int txq_entries; bus_dma_tag_t parent_dma_tag; efsys_bar_t bar; struct sfxge_intr intr; struct sfxge_mcdi mcdi; struct sfxge_port port; uint32_t buffer_table_next; struct sfxge_evq 
*evq[SFXGE_RX_SCALE_MAX]; unsigned int ev_moderation; +#if EFSYS_OPT_QSTATS clock_t ev_stats_update_time; uint64_t ev_stats[EV_NQSTATS]; +#endif + unsigned int max_rss_channels; uma_zone_t rxq_cache; struct sfxge_rxq *rxq[SFXGE_RX_SCALE_MAX]; unsigned int rx_indir_table[SFXGE_RX_SCALE_MAX]; #ifdef SFXGE_HAVE_MQ struct sfxge_txq *txq[SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX]; #else struct sfxge_txq *txq[SFXGE_TXQ_NTYPES]; #endif struct ifmedia media; size_t rx_prefix_size; size_t rx_buffer_size; uma_zone_t rx_buffer_zone; #ifndef SFXGE_HAVE_MQ struct mtx tx_lock __aligned(CACHE_LINE_SIZE); #endif }; #define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN) #define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING) #define SFXGE_PARAM(_name) "hw.sfxge." #_name SYSCTL_DECL(_hw_sfxge); /* * From sfxge.c. */ extern void sfxge_schedule_reset(struct sfxge_softc *sc); extern void sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp); /* * From sfxge_dma.c. */ extern int sfxge_dma_init(struct sfxge_softc *sc); extern void sfxge_dma_fini(struct sfxge_softc *sc); extern int sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp); extern void sfxge_dma_free(efsys_mem_t *esmp); extern int sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs); /* * From sfxge_ev.c. */ extern int sfxge_ev_init(struct sfxge_softc *sc); extern void sfxge_ev_fini(struct sfxge_softc *sc); extern int sfxge_ev_start(struct sfxge_softc *sc); extern void sfxge_ev_stop(struct sfxge_softc *sc); -extern int sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index); +extern int sfxge_ev_qpoll(struct sfxge_evq *evq); /* * From sfxge_intr.c. */ extern int sfxge_intr_init(struct sfxge_softc *sc); extern void sfxge_intr_fini(struct sfxge_softc *sc); extern int sfxge_intr_start(struct sfxge_softc *sc); extern void sfxge_intr_stop(struct sfxge_softc *sc); /* * From sfxge_mcdi.c. */ extern int sfxge_mcdi_init(struct sfxge_softc *sc); extern void sfxge_mcdi_fini(struct sfxge_softc *sc); /* * From sfxge_port.c. */ extern int sfxge_port_init(struct sfxge_softc *sc); extern void sfxge_port_fini(struct sfxge_softc *sc); extern int sfxge_port_start(struct sfxge_softc *sc); extern void sfxge_port_stop(struct sfxge_softc *sc); extern void sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode); extern int sfxge_mac_filter_set(struct sfxge_softc *sc); extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc); #define SFXGE_MAX_MTU (9 * 1024) #endif /* _SFXGE_H */ Index: projects/clang360-import/sys/dev/sfxge/sfxge_dma.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_dma.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_dma.c (revision 277896) @@ -1,209 +1,209 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" static void sfxge_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; addr = arg; if (error != 0) { *addr = 0; return; } *addr = segs[0].ds_addr; } int sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs) { bus_dma_segment_t *psegs; struct mbuf *m; int seg_count; int defragged; int err; m = *mp; defragged = err = seg_count = 0; KASSERT(m->m_pkthdr.len, ("packet has zero header length")); retry: psegs = segs; seg_count = 0; if (m->m_next == NULL) { sfxge_map_mbuf_fast(tag, map, m, segs); *nsegs = 1; return (0); } #if defined(__i386__) || defined(__amd64__) while (m != NULL && seg_count < maxsegs) { /* * firmware doesn't like empty segments */ if (m->m_len != 0) { seg_count++; sfxge_map_mbuf_fast(tag, map, m, psegs); psegs++; } m = m->m_next; } #else err = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &seg_count, 0); #endif if (seg_count == 0) { err = EFBIG; goto err_out; } else if (err == EFBIG || seg_count >= maxsegs) { if (!defragged) { m = m_defrag(*mp, M_NOWAIT); if (m == NULL) { err = ENOBUFS; goto err_out; } *mp = m; defragged = 1; goto retry; } err = EFBIG; goto err_out; } *nsegs = seg_count; err_out: return (err); } void sfxge_dma_free(efsys_mem_t *esmp) { bus_dmamap_unload(esmp->esm_tag, esmp->esm_map); bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map); bus_dma_tag_destroy(esmp->esm_tag); esmp->esm_addr = 0; esmp->esm_base = NULL; } int sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp) { void *vaddr; /* Create the child DMA tag. */ if (bus_dma_tag_create(sc->parent_dma_tag, PAGE_SIZE, 0, MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, &esmp->esm_tag) != 0) { device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); return (ENOMEM); } /* Allocate kernel memory. */ if (bus_dmamem_alloc(esmp->esm_tag, (void **)&vaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &esmp->esm_map) != 0) { device_printf(sc->dev, "Couldn't allocate DMA memory\n"); bus_dma_tag_destroy(esmp->esm_tag); return (ENOMEM); } /* Load map into device memory. 
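* On failure, free using the local vaddr: esmp->esm_base is only assigned once the whole mapping has succeeded.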
*/ if (bus_dmamap_load(esmp->esm_tag, esmp->esm_map, vaddr, len, sfxge_dma_cb, &esmp->esm_addr, 0) != 0) { device_printf(sc->dev, "Couldn't load DMA mapping\n"); - bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map); + bus_dmamem_free(esmp->esm_tag, vaddr, esmp->esm_map); bus_dma_tag_destroy(esmp->esm_tag); return (ENOMEM); } /* * The callback gets error information about the mapping * and will have set esm_addr to 0 if something went * wrong. */ if (esmp->esm_addr == 0) { bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map); bus_dma_tag_destroy(esmp->esm_tag); return (ENOMEM); } esmp->esm_base = vaddr; return (0); } void sfxge_dma_fini(struct sfxge_softc *sc) { bus_dma_tag_destroy(sc->parent_dma_tag); } int sfxge_dma_init(struct sfxge_softc *sc) { /* Create the parent dma tag. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock, lockarg */ &sc->parent_dma_tag) != 0) { device_printf(sc->dev, "Cannot allocate parent DMA tag\n"); return (ENOMEM); } return (0); } Index: projects/clang360-import/sys/dev/sfxge/sfxge_ev.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_ev.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_ev.c (revision 277896) @@ -1,898 +1,903 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" static void sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop) { struct sfxge_softc *sc; unsigned int index; struct sfxge_rxq *rxq; struct sfxge_txq *txq; sc = evq->sc; index = evq->index; rxq = sc->rxq[index]; if ((txq = evq->txq) != NULL) { evq->txq = NULL; evq->txqs = &(evq->txq); do { struct sfxge_txq *next; next = txq->next; txq->next = NULL; KASSERT(txq->evq_index == index, ("txq->evq_index != index")); if (txq->pending != txq->completed) - sfxge_tx_qcomplete(txq); + sfxge_tx_qcomplete(txq, evq); txq = next; } while (txq != NULL); } if (rxq->pending != rxq->completed) sfxge_rx_qcomplete(rxq, eop); } static boolean_t sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size, uint16_t flags) { struct sfxge_evq *evq; struct sfxge_softc *sc; struct sfxge_rxq *rxq; unsigned int expected; struct sfxge_rx_sw_desc *rx_desc; evq = arg; sc = evq->sc; if (evq->exception) goto done; rxq = sc->rxq[label]; KASSERT(rxq != NULL, ("rxq == NULL")); KASSERT(evq->index == rxq->index, ("evq->index != rxq->index")); if (rxq->init_state != SFXGE_RXQ_STARTED) goto done; expected = rxq->pending++ & rxq->ptr_mask; if (id != expected) { evq->exception = B_TRUE; device_printf(sc->dev, "RX completion out of order" " (id=%#x expected=%#x flags=%#x); resetting\n", id, expected, flags); sfxge_schedule_reset(sc); goto done; } rx_desc = &rxq->queue[id]; KASSERT(rx_desc->flags == EFX_DISCARD, ("rx_desc->flags != EFX_DISCARD")); rx_desc->flags = flags; KASSERT(size < (1 << 16), ("size > (1 << 16)")); rx_desc->size = (uint16_t)size; prefetch_read_many(rx_desc->mbuf); evq->rx_done++; if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH) sfxge_ev_qcomplete(evq, B_FALSE); done: return (evq->rx_done >= SFXGE_EV_BATCH); } static boolean_t sfxge_ev_exception(void *arg, uint32_t code, uint32_t data) { struct sfxge_evq *evq; struct sfxge_softc *sc; evq = (struct sfxge_evq *)arg; sc = evq->sc; evq->exception = B_TRUE; if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) { device_printf(sc->dev, "hardware exception (code=%u); resetting\n", code); sfxge_schedule_reset(sc); } return (B_FALSE); } static boolean_t sfxge_ev_rxq_flush_done(void *arg, uint32_t rxq_index) { struct sfxge_evq *evq; struct sfxge_softc *sc; struct sfxge_rxq *rxq; unsigned int index; unsigned int label; uint16_t magic; evq = (struct sfxge_evq *)arg; sc = evq->sc; rxq = sc->rxq[rxq_index]; KASSERT(rxq != NULL, ("rxq == NULL")); /* Resend a software event on the correct queue */ index = rxq->index; evq = sc->evq[index]; label = rxq_index; KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label, ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label")); magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label; KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq not started")); efx_ev_qpost(evq->common, magic); return (B_FALSE); } static boolean_t sfxge_ev_rxq_flush_failed(void *arg, uint32_t rxq_index) { struct sfxge_evq *evq; struct sfxge_softc *sc; struct sfxge_rxq *rxq; unsigned int index; unsigned int label; uint16_t magic; evq = (struct sfxge_evq *)arg; sc = evq->sc; rxq = sc->rxq[rxq_index]; KASSERT(rxq != NULL, ("rxq == NULL")); /* Resend a software event on the correct queue */ index = rxq->index; evq = sc->evq[index]; label = rxq_index; KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label, ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label")); magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label; KASSERT(evq->init_state ==
SFXGE_EVQ_STARTED, ("evq not started")); efx_ev_qpost(evq->common, magic); return (B_FALSE); } static struct sfxge_txq * sfxge_get_txq_by_label(struct sfxge_evq *evq, enum sfxge_txq_type label) { unsigned int index; KASSERT((evq->index == 0 && label < SFXGE_TXQ_NTYPES) || (label == SFXGE_TXQ_IP_TCP_UDP_CKSUM), ("unexpected txq label")); index = (evq->index == 0) ? label : (evq->index - 1 + SFXGE_TXQ_NTYPES); return (evq->sc->txq[index]); } static boolean_t sfxge_ev_tx(void *arg, uint32_t label, uint32_t id) { struct sfxge_evq *evq; struct sfxge_txq *txq; unsigned int stop; unsigned int delta; evq = (struct sfxge_evq *)arg; txq = sfxge_get_txq_by_label(evq, label); KASSERT(txq != NULL, ("txq == NULL")); KASSERT(evq->index == txq->evq_index, ("evq->index != txq->evq_index")); if (txq->init_state != SFXGE_TXQ_STARTED) goto done; stop = (id + 1) & txq->ptr_mask; id = txq->pending & txq->ptr_mask; delta = (stop >= id) ? (stop - id) : (txq->entries - id + stop); txq->pending += delta; evq->tx_done++; if (txq->next == NULL && evq->txqs != &(txq->next)) { *(evq->txqs) = txq; evq->txqs = &(txq->next); } if (txq->pending - txq->completed >= SFXGE_TX_BATCH) - sfxge_tx_qcomplete(txq); + sfxge_tx_qcomplete(txq, evq); done: return (evq->tx_done >= SFXGE_EV_BATCH); } static boolean_t sfxge_ev_txq_flush_done(void *arg, uint32_t txq_index) { struct sfxge_evq *evq; struct sfxge_softc *sc; struct sfxge_txq *txq; unsigned int label; uint16_t magic; evq = (struct sfxge_evq *)arg; sc = evq->sc; txq = sc->txq[txq_index]; KASSERT(txq != NULL, ("txq == NULL")); KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq not initialized")); /* Resend a software event on the correct queue */ evq = sc->evq[txq->evq_index]; label = txq->type; KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label, ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label")); magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label; KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq not started")); efx_ev_qpost(evq->common, magic); return (B_FALSE); } static boolean_t sfxge_ev_software(void *arg, uint16_t magic) { struct sfxge_evq *evq; struct sfxge_softc *sc; unsigned int label; evq = (struct sfxge_evq *)arg; sc = evq->sc; label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK; magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK; switch (magic) { case SFXGE_MAGIC_RX_QFLUSH_DONE: { struct sfxge_rxq *rxq = sc->rxq[label]; KASSERT(rxq != NULL, ("rxq == NULL")); KASSERT(evq->index == rxq->index, ("evq->index != rxq->index")); sfxge_rx_qflush_done(rxq); break; } case SFXGE_MAGIC_RX_QFLUSH_FAILED: { struct sfxge_rxq *rxq = sc->rxq[label]; KASSERT(rxq != NULL, ("rxq == NULL")); KASSERT(evq->index == rxq->index, ("evq->index != rxq->index")); sfxge_rx_qflush_failed(rxq); break; } case SFXGE_MAGIC_RX_QREFILL: { struct sfxge_rxq *rxq = sc->rxq[label]; KASSERT(rxq != NULL, ("rxq == NULL")); KASSERT(evq->index == rxq->index, ("evq->index != rxq->index")); sfxge_rx_qrefill(rxq); break; } case SFXGE_MAGIC_TX_QFLUSH_DONE: { struct sfxge_txq *txq = sfxge_get_txq_by_label(evq, label); KASSERT(txq != NULL, ("txq == NULL")); KASSERT(evq->index == txq->evq_index, ("evq->index != txq->evq_index")); sfxge_tx_qflush_done(txq); break; } default: break; } return (B_FALSE); } static boolean_t sfxge_ev_sram(void *arg, uint32_t code) { (void)arg; (void)code; switch (code) { case EFX_SRAM_UPDATE: EFSYS_PROBE(sram_update); break; case EFX_SRAM_CLEAR: EFSYS_PROBE(sram_clear); break; case EFX_SRAM_ILLEGAL_CLEAR: EFSYS_PROBE(sram_illegal_clear); break; default: KASSERT(B_FALSE, ("Impossible SRAM event")); break; } 
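/* SRAM events are informational only; returning B_FALSE keeps efx_ev_qpoll() processing the rest of the batch. */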
return (B_FALSE); } static boolean_t sfxge_ev_timer(void *arg, uint32_t index) { (void)arg; (void)index; return (B_FALSE); } static boolean_t sfxge_ev_wake_up(void *arg, uint32_t index) { (void)arg; (void)index; return (B_FALSE); } +#if EFSYS_OPT_QSTATS + static void sfxge_ev_stat_update(struct sfxge_softc *sc) { struct sfxge_evq *evq; unsigned int index; clock_t now; sx_xlock(&sc->softc_lock); if (sc->evq[0]->init_state != SFXGE_EVQ_STARTED) goto out; now = ticks; if (now - sc->ev_stats_update_time < hz) goto out; sc->ev_stats_update_time = now; /* Add event counts from each event queue in turn */ for (index = 0; index < sc->intr.n_alloc; index++) { evq = sc->evq[index]; mtx_lock(&evq->lock); efx_ev_qstats_update(evq->common, sc->ev_stats); mtx_unlock(&evq->lock); } out: sx_xunlock(&sc->softc_lock); } static int sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; sfxge_ev_stat_update(sc); return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id]))); } static void sfxge_ev_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; char name[40]; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < EV_NQSTATS; id++) { snprintf(name, sizeof(name), "ev_%s", efx_ev_qstat_name(sc->enp, id)); SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD, sc, id, sfxge_ev_stat_handler, "Q", ""); } } +#endif /* EFSYS_OPT_QSTATS */ + static void sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us) { struct sfxge_evq *evq; efx_evq_t *eep; evq = sc->evq[idx]; eep = evq->common; KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); (void)efx_ev_qmoderate(eep, us); } static int sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; struct sfxge_intr *intr = &sc->intr; unsigned int moderation; int error; int index; sx_xlock(&sc->softc_lock); if (req->newptr != NULL) { if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation))) != 0) goto out; /* We may not be calling efx_ev_qmoderate() now, * so we have to range-check the value ourselves. 
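* (The limit comes from the NIC configuration; a per-device sysctl, e.g. dev.sfxge.0.int_mod, is the intended tuning knob.)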
*/ if (moderation > efx_nic_cfg_get(sc->enp)->enc_evq_moderation_max) { error = EINVAL; goto out; } sc->ev_moderation = moderation; if (intr->state == SFXGE_INTR_STARTED) { for (index = 0; index < intr->n_alloc; index++) sfxge_ev_qmoderate(sc, index, moderation); } } else { error = SYSCTL_OUT(req, &sc->ev_moderation, sizeof(sc->ev_moderation)); } out: sx_xunlock(&sc->softc_lock); return (error); } static boolean_t sfxge_ev_initialized(void *arg) { struct sfxge_evq *evq; evq = (struct sfxge_evq *)arg; KASSERT(evq->init_state == SFXGE_EVQ_STARTING, ("evq not starting")); evq->init_state = SFXGE_EVQ_STARTED; return (0); } static boolean_t sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode) { struct sfxge_evq *evq; struct sfxge_softc *sc; evq = (struct sfxge_evq *)arg; sc = evq->sc; sfxge_mac_link_update(sc, link_mode); return (0); } static const efx_ev_callbacks_t sfxge_ev_callbacks = { .eec_initialized = sfxge_ev_initialized, .eec_rx = sfxge_ev_rx, .eec_tx = sfxge_ev_tx, .eec_exception = sfxge_ev_exception, .eec_rxq_flush_done = sfxge_ev_rxq_flush_done, .eec_rxq_flush_failed = sfxge_ev_rxq_flush_failed, .eec_txq_flush_done = sfxge_ev_txq_flush_done, .eec_software = sfxge_ev_software, .eec_sram = sfxge_ev_sram, .eec_wake_up = sfxge_ev_wake_up, .eec_timer = sfxge_ev_timer, .eec_link_change = sfxge_ev_link_change, }; int -sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index) +sfxge_ev_qpoll(struct sfxge_evq *evq) { - struct sfxge_evq *evq; int rc; - evq = sc->evq[index]; - mtx_lock(&evq->lock); if (evq->init_state != SFXGE_EVQ_STARTING && evq->init_state != SFXGE_EVQ_STARTED) { rc = EINVAL; goto fail; } /* Synchronize the DMA memory for reading */ bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map, BUS_DMASYNC_POSTREAD); KASSERT(evq->rx_done == 0, ("evq->rx_done != 0")); KASSERT(evq->tx_done == 0, ("evq->tx_done != 0")); KASSERT(evq->txq == NULL, ("evq->txq != NULL")); KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq")); /* Poll the queue */ efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq); evq->rx_done = 0; evq->tx_done = 0; /* Perform any pending completion processing */ sfxge_ev_qcomplete(evq, B_TRUE); /* Re-prime the event queue for interrupts */ if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0) goto fail; mtx_unlock(&evq->lock); return (0); fail: mtx_unlock(&(evq->lock)); return (rc); } static void sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_evq *evq; evq = sc->evq[index]; KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); mtx_lock(&evq->lock); evq->init_state = SFXGE_EVQ_INITIALIZED; evq->read_ptr = 0; evq->exception = B_FALSE; +#if EFSYS_OPT_QSTATS /* Add event counts before discarding the common evq state */ efx_ev_qstats_update(evq->common, sc->ev_stats); +#endif efx_ev_qdestroy(evq->common); efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id, EFX_EVQ_NBUFS(evq->entries)); mtx_unlock(&evq->lock); } static int sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_evq *evq; efsys_mem_t *esmp; int count; int rc; evq = sc->evq[index]; esmp = &evq->mem; KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED, ("evq->init_state != SFXGE_EVQ_INITIALIZED")); /* Clear all events. */ (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries)); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp, EFX_EVQ_NBUFS(evq->entries))) != 0) return (rc); /* Create the common code event queue. 
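* efx_ev_qcreate() (efx_ev.c above) validates that evq->entries is a power of two in the supported range and that the buffer-table window at buf_base_id fits below enc_buftbl_limit.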
*/ if ((rc = efx_ev_qcreate(sc->enp, index, esmp, evq->entries, evq->buf_base_id, &evq->common)) != 0) goto fail; mtx_lock(&evq->lock); /* Set the default moderation */ (void)efx_ev_qmoderate(evq->common, sc->ev_moderation); /* Prime the event queue for interrupts */ if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0) goto fail2; evq->init_state = SFXGE_EVQ_STARTING; mtx_unlock(&evq->lock); /* Wait for the initialization event */ count = 0; do { /* Pause for 100 ms */ pause("sfxge evq init", hz / 10); /* Check to see if the test event has been processed */ if (evq->init_state == SFXGE_EVQ_STARTED) goto done; } while (++count < 20); rc = ETIMEDOUT; goto fail3; done: return (0); fail3: mtx_lock(&evq->lock); evq->init_state = SFXGE_EVQ_INITIALIZED; fail2: mtx_unlock(&evq->lock); efx_ev_qdestroy(evq->common); fail: efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id, EFX_EVQ_NBUFS(evq->entries)); return (rc); } void sfxge_ev_stop(struct sfxge_softc *sc) { struct sfxge_intr *intr; efx_nic_t *enp; int index; intr = &sc->intr; enp = sc->enp; KASSERT(intr->state == SFXGE_INTR_STARTED, ("Interrupts not started")); /* Stop the event queue(s) */ index = intr->n_alloc; while (--index >= 0) sfxge_ev_qstop(sc, index); /* Tear down the event module */ efx_ev_fini(enp); } int sfxge_ev_start(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; int rc; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_STARTED, ("intr->state != SFXGE_INTR_STARTED")); /* Initialize the event module */ if ((rc = efx_ev_init(sc->enp)) != 0) return (rc); /* Start the event queues */ for (index = 0; index < intr->n_alloc; index++) { if ((rc = sfxge_ev_qstart(sc, index)) != 0) goto fail; } return (0); fail: /* Stop the event queue(s) */ while (--index >= 0) sfxge_ev_qstop(sc, index); /* Tear down the event module */ efx_ev_fini(sc->enp); return (rc); } static void sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_evq *evq; evq = sc->evq[index]; KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED, ("evq->init_state != SFXGE_EVQ_INITIALIZED")); KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq")); sfxge_dma_free(&evq->mem); sc->evq[index] = NULL; mtx_destroy(&evq->lock); free(evq, M_SFXGE); } static int sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index) { struct sfxge_evq *evq; efsys_mem_t *esmp; int rc; KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX")); evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK); evq->sc = sc; evq->index = index; sc->evq[index] = evq; esmp = &evq->mem; /* Build an event queue with room for one event per tx and rx buffer, * plus some extra for link state events and MCDI completions. * There are three tx queues in the first event queue and one in each * of the others. */ if (index == 0) evq->entries = ROUNDUP_POW_OF_TWO(sc->rxq_entries + 3 * sc->txq_entries + 128); else evq->entries = ROUNDUP_POW_OF_TWO(sc->rxq_entries + sc->txq_entries + 128); /* Initialise TX completion list */ evq->txqs = &evq->txq; /* Allocate DMA space. */ if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(evq->entries), esmp)) != 0) return (rc); /* Allocate buffer table entries.
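* IDs are handed out linearly by sfxge_sram_buf_tbl_alloc() (sfxge.c above) and are not recycled; each queue allocates its window once at init time.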
*/ sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(evq->entries), &evq->buf_base_id); mtx_init(&evq->lock, "evq", NULL, MTX_DEF); evq->init_state = SFXGE_EVQ_INITIALIZED; return (0); } void sfxge_ev_fini(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); sc->ev_moderation = 0; /* Tear down the event queue(s). */ index = intr->n_alloc; while (--index >= 0) sfxge_ev_qfini(sc, index); } int sfxge_ev_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev); struct sfxge_intr *intr; int index; int rc; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); /* Set default interrupt moderation; add a sysctl to * read and change it. */ - sc->ev_moderation = 30; + sc->ev_moderation = SFXGE_MODERATION; SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW, sc, 0, sfxge_int_mod_handler, "IU", "sfxge interrupt moderation (us)"); /* * Initialize the event queue(s) - one per interrupt. */ for (index = 0; index < intr->n_alloc; index++) { if ((rc = sfxge_ev_qinit(sc, index)) != 0) goto fail; } +#if EFSYS_OPT_QSTATS sfxge_ev_stat_init(sc); +#endif return (0); fail: while (--index >= 0) sfxge_ev_qfini(sc, index); return (rc); } Index: projects/clang360-import/sys/dev/sfxge/sfxge_intr.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_intr.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_intr.c (revision 277896) @@ -1,560 +1,562 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" static int sfxge_intr_line_filter(void *arg) { struct sfxge_evq *evq; struct sfxge_softc *sc; efx_nic_t *enp; struct sfxge_intr *intr; boolean_t fatal; uint32_t qmask; evq = (struct sfxge_evq *)arg; sc = evq->sc; enp = sc->enp; intr = &sc->intr; KASSERT(intr != NULL, ("intr == NULL")); KASSERT(intr->type == EFX_INTR_LINE, ("intr->type != EFX_INTR_LINE")); if (intr->state != SFXGE_INTR_STARTED) return (FILTER_STRAY); (void)efx_intr_status_line(enp, &fatal, &qmask); if (fatal) { (void) efx_intr_disable(enp); (void) efx_intr_fatal(enp); return (FILTER_HANDLED); } if (qmask != 0) { intr->zero_count = 0; return (FILTER_SCHEDULE_THREAD); } /* SF bug 15783: If the function is not asserting its IRQ and * we read the queue mask on the cycle before a flag is added * to the mask, this inhibits the function from asserting the * IRQ even though we don't see the flag set. To work around * this, we must re-prime all event queues and report the IRQ * as handled when we see a mask of zero. To allow for shared * IRQs, we don't repeat this if we see a mask of zero twice * or more in a row. */ if (intr->zero_count++ == 0) { if (evq->init_state == SFXGE_EVQ_STARTED) { if (efx_ev_qpending(evq->common, evq->read_ptr)) return (FILTER_SCHEDULE_THREAD); efx_ev_qprime(evq->common, evq->read_ptr); return (FILTER_HANDLED); } } return (FILTER_STRAY); } static void sfxge_intr_line(void *arg) { struct sfxge_evq *evq = arg; - struct sfxge_softc *sc = evq->sc; - (void)sfxge_ev_qpoll(sc, 0); + (void)sfxge_ev_qpoll(evq); } static void sfxge_intr_message(void *arg) { struct sfxge_evq *evq; struct sfxge_softc *sc; efx_nic_t *enp; struct sfxge_intr *intr; unsigned int index; boolean_t fatal; evq = (struct sfxge_evq *)arg; sc = evq->sc; enp = sc->enp; intr = &sc->intr; index = evq->index; KASSERT(intr != NULL, ("intr == NULL")); KASSERT(intr->type == EFX_INTR_MESSAGE, ("intr->type != EFX_INTR_MESSAGE")); if (intr->state != SFXGE_INTR_STARTED) return; (void)efx_intr_status_message(enp, index, &fatal); if (fatal) { (void)efx_intr_disable(enp); (void)efx_intr_fatal(enp); return; } - (void)sfxge_ev_qpoll(sc, index); + (void)sfxge_ev_qpoll(evq); } static int sfxge_intr_bus_enable(struct sfxge_softc *sc) { struct sfxge_intr *intr; struct sfxge_intr_hdl *table; driver_filter_t *filter; driver_intr_t *handler; int index; int err; intr = &sc->intr; table = intr->table; switch (intr->type) { case EFX_INTR_MESSAGE: filter = NULL; /* not shared */ handler = sfxge_intr_message; break; case EFX_INTR_LINE: filter = sfxge_intr_line_filter; handler = sfxge_intr_line; break; default: KASSERT(0, ("Invalid interrupt type")); return (EINVAL); } /* Try to add the handlers */ for (index = 0; index < intr->n_alloc; index++) { if ((err = bus_setup_intr(sc->dev, table[index].eih_res, INTR_MPSAFE|INTR_TYPE_NET, filter, handler, sc->evq[index], &table[index].eih_tag)) != 0) { goto fail; } #ifdef SFXGE_HAVE_DESCRIBE_INTR if (intr->n_alloc > 1) bus_describe_intr(sc->dev, table[index].eih_res, table[index].eih_tag, "%d", index); #endif bus_bind_intr(sc->dev, table[index].eih_res, index); } return (0); fail: /* Remove remaining handlers */ while (--index >= 0) bus_teardown_intr(sc->dev, table[index].eih_res, table[index].eih_tag); return (err); } static void sfxge_intr_bus_disable(struct sfxge_softc *sc) { struct sfxge_intr *intr; struct sfxge_intr_hdl *table; int i; 
intr = &sc->intr; table = intr->table; /* Remove all handlers */ for (i = 0; i < intr->n_alloc; i++) bus_teardown_intr(sc->dev, table[i].eih_res, table[i].eih_tag); } static int sfxge_intr_alloc(struct sfxge_softc *sc, int count) { device_t dev; struct sfxge_intr_hdl *table; struct sfxge_intr *intr; struct resource *res; int rid; int error; int i; dev = sc->dev; intr = &sc->intr; error = 0; table = malloc(count * sizeof(struct sfxge_intr_hdl), M_SFXGE, M_WAITOK); intr->table = table; for (i = 0; i < count; i++) { rid = i + 1; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { device_printf(dev, "Couldn't allocate interrupts for " "message %d\n", rid); error = ENOMEM; break; } table[i].eih_rid = rid; table[i].eih_res = res; } if (error != 0) { count = i; /* entries 0 .. i-1 hold allocated resources */ for (i = 0; i < count; i++) bus_release_resource(dev, SYS_RES_IRQ, table[i].eih_rid, table[i].eih_res); } return (error); } static void sfxge_intr_teardown_msix(struct sfxge_softc *sc) { device_t dev; struct resource *resp; int rid; dev = sc->dev; resp = sc->intr.msix_res; rid = rman_get_rid(resp); bus_release_resource(dev, SYS_RES_MEMORY, rid, resp); } static int sfxge_intr_setup_msix(struct sfxge_softc *sc) { struct sfxge_intr *intr; struct resource *resp; device_t dev; int count; int rid; dev = sc->dev; intr = &sc->intr; /* Check if MSI-X is available. */ count = pci_msix_count(dev); if (count == 0) return (EINVAL); /* Limit the number of interrupts to the number of CPUs. */ if (count > mp_ncpus) count = mp_ncpus; /* Not very likely these days... */ if (count > EFX_MAXRSS) count = EFX_MAXRSS; + + if (sc->max_rss_channels > 0 && count > sc->max_rss_channels) + count = sc->max_rss_channels; rid = PCIR_BAR(4); resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (resp == NULL) return (ENOMEM); if (pci_alloc_msix(dev, &count) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, resp); return (ENOMEM); } /* Allocate interrupt handlers. */ if (sfxge_intr_alloc(sc, count) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, resp); pci_release_msi(dev); return (ENOMEM); } intr->type = EFX_INTR_MESSAGE; intr->n_alloc = count; intr->msix_res = resp; return (0); } static int sfxge_intr_setup_msi(struct sfxge_softc *sc) { struct sfxge_intr_hdl *table; struct sfxge_intr *intr; device_t dev; int count; int error; dev = sc->dev; intr = &sc->intr; table = intr->table; /* * Check if MSI is available. All messages must be written to * the same address and on x86 this means the IRQs have the * same CPU affinity. So we only ever allocate 1. */ count = pci_msi_count(dev) ? 1 : 0; if (count == 0) return (EINVAL); if ((error = pci_alloc_msi(dev, &count)) != 0) return (ENOMEM); /* Allocate interrupt handler.
*/ if (sfxge_intr_alloc(sc, count) != 0) { pci_release_msi(dev); return (ENOMEM); } intr->type = EFX_INTR_MESSAGE; intr->n_alloc = count; return (0); } static int sfxge_intr_setup_fixed(struct sfxge_softc *sc) { struct sfxge_intr_hdl *table; struct sfxge_intr *intr; struct resource *res; device_t dev; int rid; dev = sc->dev; intr = &sc->intr; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) return (ENOMEM); table = malloc(sizeof(struct sfxge_intr_hdl), M_SFXGE, M_WAITOK); table[0].eih_rid = rid; table[0].eih_res = res; intr->type = EFX_INTR_LINE; intr->n_alloc = 1; intr->table = table; return (0); } static const char *const __sfxge_err[] = { "", "SRAM out-of-bounds", "Buffer ID out-of-bounds", "Internal memory parity", "Receive buffer ownership", "Transmit buffer ownership", "Receive descriptor ownership", "Transmit descriptor ownership", "Event queue ownership", "Event queue FIFO overflow", "Illegal address", "SRAM parity" }; void sfxge_err(efsys_identifier_t *arg, unsigned int code, uint32_t dword0, uint32_t dword1) { struct sfxge_softc *sc = (struct sfxge_softc *)arg; device_t dev = sc->dev; log(LOG_WARNING, "[%s%d] FATAL ERROR: %s (0x%08x%08x)", device_get_name(dev), device_get_unit(dev), __sfxge_err[code], dword1, dword0); } void sfxge_intr_stop(struct sfxge_softc *sc) { struct sfxge_intr *intr; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_STARTED, ("Interrupts not started")); intr->state = SFXGE_INTR_INITIALIZED; /* Disable interrupts at the NIC */ efx_intr_disable(sc->enp); /* Disable interrupts at the bus */ sfxge_intr_bus_disable(sc); /* Tear down common code interrupt bits. */ efx_intr_fini(sc->enp); } int sfxge_intr_start(struct sfxge_softc *sc) { struct sfxge_intr *intr; efsys_mem_t *esmp; int rc; intr = &sc->intr; esmp = &intr->status; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("Interrupts not initialized")); /* Zero the memory. */ (void)memset(esmp->esm_base, 0, EFX_INTR_SIZE); /* Initialize common code interrupt bits. */ (void)efx_intr_init(sc->enp, intr->type, esmp); /* Enable interrupts at the bus */ if ((rc = sfxge_intr_bus_enable(sc)) != 0) goto fail; intr->state = SFXGE_INTR_STARTED; /* Enable interrupts at the NIC */ efx_intr_enable(sc->enp); return (0); fail: /* Tear down common code interrupt bits. */ efx_intr_fini(sc->enp); intr->state = SFXGE_INTR_INITIALIZED; return (rc); } void sfxge_intr_fini(struct sfxge_softc *sc) { struct sfxge_intr_hdl *table; struct sfxge_intr *intr; efsys_mem_t *esmp; device_t dev; int i; dev = sc->dev; intr = &sc->intr; esmp = &intr->status; table = intr->table; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); /* Free DMA memory. */ sfxge_dma_free(esmp); /* Free interrupt handles. */ for (i = 0; i < intr->n_alloc; i++) bus_release_resource(dev, SYS_RES_IRQ, table[i].eih_rid, table[i].eih_res); if (table[0].eih_rid != 0) pci_release_msi(dev); if (intr->msix_res != NULL) sfxge_intr_teardown_msix(sc); /* Free the handle table */ free(table, M_SFXGE); intr->table = NULL; intr->n_alloc = 0; /* Clear the interrupt type */ intr->type = EFX_INTR_INVALID; intr->state = SFXGE_INTR_UNINITIALIZED; } int sfxge_intr_init(struct sfxge_softc *sc) { device_t dev; struct sfxge_intr *intr; efsys_mem_t *esmp; int rc; dev = sc->dev; intr = &sc->intr; esmp = &intr->status; KASSERT(intr->state == SFXGE_INTR_UNINITIALIZED, ("Interrupts already initialized")); /* Try to setup MSI-X or MSI interrupts if available. 
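 * The fallbacks below are tried strictly in order of preference: MSI-X
 * (one vector per event queue, each later bound to its own CPU in
 * sfxge_intr_bus_enable()), then a single MSI vector, then the legacy
 * line interrupt; the first mechanism that sets up successfully wins.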
*/ if ((rc = sfxge_intr_setup_msix(sc)) == 0) device_printf(dev, "Using MSI-X interrupts\n"); else if ((rc = sfxge_intr_setup_msi(sc)) == 0) device_printf(dev, "Using MSI interrupts\n"); else if ((rc = sfxge_intr_setup_fixed(sc)) == 0) { device_printf(dev, "Using fixed interrupts\n"); } else { device_printf(dev, "Couldn't setup interrupts\n"); return (ENOMEM); } /* Set up DMA for interrupts. */ if ((rc = sfxge_dma_alloc(sc, EFX_INTR_SIZE, esmp)) != 0) return (ENOMEM); intr->state = SFXGE_INTR_INITIALIZED; return (0); } Index: projects/clang360-import/sys/dev/sfxge/sfxge_port.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_port.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_port.c (revision 277896) @@ -1,800 +1,798 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "common/efx.h" #include "sfxge.h" static int sfxge_mac_stat_update(struct sfxge_softc *sc) { struct sfxge_port *port = &sc->port; efsys_mem_t *esmp = &(port->mac_stats.dma_buf); clock_t now; unsigned int count; int rc; mtx_lock(&port->lock); if (port->init_state != SFXGE_PORT_STARTED) { rc = 0; goto out; } now = ticks; if (now - port->mac_stats.update_time < hz) { rc = 0; goto out; } port->mac_stats.update_time = now; /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (count = 0; count < 100; ++count) { EFSYS_PROBE1(wait, unsigned int, count); /* Synchronize the DMA memory for reading */ bus_dmamap_sync(esmp->esm_tag, esmp->esm_map, BUS_DMASYNC_POSTREAD); /* Try to update the cached counters */ if ((rc = efx_mac_stats_update(sc->enp, esmp, port->mac_stats.decode_buf, NULL)) != EAGAIN) goto out; DELAY(100); } rc = ETIMEDOUT; out: mtx_unlock(&port->lock); return (rc); } static int sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; int rc; if ((rc = sfxge_mac_stat_update(sc)) != 0) return (rc); return (SYSCTL_OUT(req, (uint64_t *)sc->port.mac_stats.decode_buf + id, sizeof(uint64_t))); } static void sfxge_mac_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; const char *name; stat_list = SYSCTL_CHILDREN(sc->stats_node); /* Initialise the named stats */ for (id = 0; id < EFX_MAC_NSTATS; id++) { name = efx_mac_stat_name(sc->enp, id); SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD, sc, id, sfxge_mac_stat_handler, "Q", ""); } } #ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS static unsigned int sfxge_port_wanted_fc(struct sfxge_softc *sc) { struct ifmedia_entry *ifm = sc->media.ifm_cur; if (ifm->ifm_media == (IFM_ETHER | IFM_AUTO)) return (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE); return (((ifm->ifm_media & IFM_ETH_RXPAUSE) ? EFX_FCNTL_RESPOND : 0) | ((ifm->ifm_media & IFM_ETH_TXPAUSE) ? EFX_FCNTL_GENERATE : 0)); } static unsigned int sfxge_port_link_fc_ifm(struct sfxge_softc *sc) { unsigned int wanted_fc, link_fc; efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc); return ((link_fc & EFX_FCNTL_RESPOND) ? IFM_ETH_RXPAUSE : 0) | ((link_fc & EFX_FCNTL_GENERATE) ?
IFM_ETH_TXPAUSE : 0); } #else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */ static unsigned int sfxge_port_wanted_fc(struct sfxge_softc *sc) { return (sc->port.wanted_fc); } static unsigned int sfxge_port_link_fc_ifm(struct sfxge_softc *sc) { return (0); } static int sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc; struct sfxge_port *port; unsigned int fcntl; int error; sc = arg1; port = &sc->port; mtx_lock(&port->lock); if (req->newptr != NULL) { if ((error = SYSCTL_IN(req, &fcntl, sizeof(fcntl))) != 0) goto out; if (port->wanted_fc == fcntl) goto out; port->wanted_fc = fcntl; if (port->init_state != SFXGE_PORT_STARTED) goto out; error = efx_mac_fcntl_set(sc->enp, port->wanted_fc, B_TRUE); } else { error = SYSCTL_OUT(req, &port->wanted_fc, sizeof(port->wanted_fc)); } out: mtx_unlock(&port->lock); return (error); } static int sfxge_port_link_fc_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc; struct sfxge_port *port; unsigned int wanted_fc, link_fc; int error; sc = arg1; port = &sc->port; mtx_lock(&port->lock); if (port->init_state == SFXGE_PORT_STARTED && SFXGE_LINK_UP(sc)) efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc); else link_fc = 0; error = SYSCTL_OUT(req, &link_fc, sizeof(link_fc)); mtx_unlock(&port->lock); return (error); } #endif /* SFXGE_HAVE_PAUSE_MEDIAOPTS */ static const uint64_t sfxge_link_baudrate[EFX_LINK_NMODES] = { [EFX_LINK_10HDX] = IF_Mbps(10), [EFX_LINK_10FDX] = IF_Mbps(10), [EFX_LINK_100HDX] = IF_Mbps(100), [EFX_LINK_100FDX] = IF_Mbps(100), [EFX_LINK_1000HDX] = IF_Gbps(1), [EFX_LINK_1000FDX] = IF_Gbps(1), [EFX_LINK_10000FDX] = IF_Gbps(10), }; void sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode) { struct sfxge_port *port; int link_state; port = &sc->port; if (port->link_mode == mode) return; port->link_mode = mode; /* Push link state update to the OS */ link_state = (port->link_mode != EFX_LINK_DOWN ? LINK_STATE_UP : LINK_STATE_DOWN); sc->ifnet->if_baudrate = sfxge_link_baudrate[port->link_mode]; if_link_state_change(sc->ifnet, link_state); } static void sfxge_mac_poll_work(void *arg, int npending) { struct sfxge_softc *sc; efx_nic_t *enp; struct sfxge_port *port; efx_link_mode_t mode; sc = (struct sfxge_softc *)arg; enp = sc->enp; port = &sc->port; mtx_lock(&port->lock); if (port->init_state != SFXGE_PORT_STARTED) goto done; /* This may sleep waiting for MCDI completion */ (void)efx_port_poll(enp, &mode); sfxge_mac_link_update(sc, mode); done: mtx_unlock(&port->lock); } static int sfxge_mac_filter_set_locked(struct sfxge_softc *sc) { unsigned int bucket[EFX_MAC_HASH_BITS]; struct ifnet *ifp = sc->ifnet; struct ifmultiaddr *ifma; struct sockaddr_dl *sa; efx_nic_t *enp = sc->enp; unsigned int index; int rc; /* Set promisc-unicast and broadcast filter bits */ if ((rc = efx_mac_filter_set(enp, !!(ifp->if_flags & IFF_PROMISC), B_TRUE)) != 0) return (rc); /* Set multicast hash filter */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { for (index = 0; index < EFX_MAC_HASH_BITS; index++) bucket[index] = 1; } else { /* Broadcast frames also go through the multicast * filter, and the broadcast address hashes to * 0xff. 
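 * To illustrate the else branch: each subscribed multicast address is
 * folded down to ether_crc32_le(LLADDR(sa), 6) & 0xff, selecting one of
 * the EFX_MAC_HASH_BITS (256) bucket bits, so the all-ones broadcast
 * address always lands on bucket 0xff, set unconditionally below.  Hash
 * filtering is imperfect, so the NIC may pass extra multicast frames
 * for the stack to discard.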
*/ bucket[0xff] = 1; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family == AF_LINK) { sa = (struct sockaddr_dl *)ifma->ifma_addr; index = ether_crc32_le(LLADDR(sa), 6) & 0xff; bucket[index] = 1; } } if_maddr_runlock(ifp); } return (efx_mac_hash_set(enp, bucket)); } int sfxge_mac_filter_set(struct sfxge_softc *sc) { struct sfxge_port *port = &sc->port; int rc; mtx_lock(&port->lock); /* * The function may be called without softc_lock held in the * case of SIOCADDMULTI and SIOCDELMULTI ioctls. ioctl handler * checks IFF_DRV_RUNNING flag which implies port started, but * it is not guaranteed to remain. softc_lock shared lock can't * be held in the case of these ioctls processing, since it * results in failure where kernel complains that non-sleepable * lock is held in sleeping thread. Both problems are repeatable * on LAG with LACP proto bring up. */ if (port->init_state == SFXGE_PORT_STARTED) rc = sfxge_mac_filter_set_locked(sc); else rc = 0; mtx_unlock(&port->lock); return (rc); } void sfxge_port_stop(struct sfxge_softc *sc) { struct sfxge_port *port; efx_nic_t *enp; port = &sc->port; enp = sc->enp; mtx_lock(&port->lock); KASSERT(port->init_state == SFXGE_PORT_STARTED, ("port not started")); port->init_state = SFXGE_PORT_INITIALIZED; port->mac_stats.update_time = 0; /* This may call MCDI */ (void)efx_mac_drain(enp, B_TRUE); (void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, 0, B_FALSE); port->link_mode = EFX_LINK_UNKNOWN; /* Destroy the common code port object. */ efx_port_fini(sc->enp); mtx_unlock(&port->lock); } int sfxge_port_start(struct sfxge_softc *sc) { uint8_t mac_addr[ETHER_ADDR_LEN]; struct ifnet *ifp = sc->ifnet; struct sfxge_port *port; efx_nic_t *enp; size_t pdu; int rc; port = &sc->port; enp = sc->enp; mtx_lock(&port->lock); KASSERT(port->init_state == SFXGE_PORT_INITIALIZED, ("port not initialized")); /* Initialize the port object in the common code. 
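 * The bring-up sequence below is order-sensitive: port init, MAC PDU
 * (derived from the interface MTU), flow control, unicast address,
 * multicast hash filter, periodic MAC stats DMA, un-draining the MAC
 * and finally advertising PHY capabilities; the fail labels unwind the
 * same steps in reverse.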
*/ if ((rc = efx_port_init(sc->enp)) != 0) goto fail; /* Set the SDU */ pdu = EFX_MAC_PDU(ifp->if_mtu); if ((rc = efx_mac_pdu_set(enp, pdu)) != 0) goto fail2; if ((rc = efx_mac_fcntl_set(enp, sfxge_port_wanted_fc(sc), B_TRUE)) != 0) goto fail2; /* Set the unicast address */ if_addr_rlock(ifp); bcopy(LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr), mac_addr, sizeof(mac_addr)); if_addr_runlock(ifp); if ((rc = efx_mac_addr_set(enp, mac_addr)) != 0) goto fail2; /* must undo efx_port_init() */ sfxge_mac_filter_set_locked(sc); /* Update MAC stats by DMA every second */ if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, 1000, B_FALSE)) != 0) goto fail2; if ((rc = efx_mac_drain(enp, B_FALSE)) != 0) goto fail3; if ((rc = efx_phy_adv_cap_set(sc->enp, sc->media.ifm_cur->ifm_data)) != 0) goto fail4; port->init_state = SFXGE_PORT_STARTED; /* Single poll in case there were missing initial events */ mtx_unlock(&port->lock); sfxge_mac_poll_work(sc, 0); return (0); fail4: (void)efx_mac_drain(enp, B_TRUE); fail3: (void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, 0, B_FALSE); fail2: efx_port_fini(sc->enp); fail: mtx_unlock(&port->lock); return (rc); } static int sfxge_phy_stat_update(struct sfxge_softc *sc) { struct sfxge_port *port = &sc->port; efsys_mem_t *esmp = &port->phy_stats.dma_buf; clock_t now; unsigned int count; int rc; mtx_lock(&port->lock); if (port->init_state != SFXGE_PORT_STARTED) { rc = 0; goto out; } now = ticks; if (now - port->phy_stats.update_time < hz) { rc = 0; goto out; } port->phy_stats.update_time = now; /* If we're unlucky enough to read statistics during the DMA, wait * up to 10ms for it to finish (typically takes <500us) */ for (count = 0; count < 100; ++count) { EFSYS_PROBE1(wait, unsigned int, count); /* Synchronize the DMA memory for reading */ bus_dmamap_sync(esmp->esm_tag, esmp->esm_map, BUS_DMASYNC_POSTREAD); /* Try to update the cached counters */ if ((rc = efx_phy_stats_update(sc->enp, esmp, port->phy_stats.decode_buf)) != EAGAIN) goto out; DELAY(100); } rc = ETIMEDOUT; out: mtx_unlock(&port->lock); return (rc); } static int sfxge_phy_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; int rc; if ((rc = sfxge_phy_stat_update(sc)) != 0) return (rc); return (SYSCTL_OUT(req, (uint32_t *)sc->port.phy_stats.decode_buf + id, sizeof(uint32_t))); } static void sfxge_phy_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; unsigned int id; const char *name; uint64_t stat_mask = efx_nic_cfg_get(sc->enp)->enc_phy_stat_mask; stat_list = SYSCTL_CHILDREN(sc->stats_node); /* Initialise the named stats */ for (id = 0; id < EFX_PHY_NSTATS; id++) { if (!(stat_mask & ((uint64_t)1 << id))) continue; name = efx_phy_stat_name(sc->enp, id); SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, name, CTLTYPE_UINT|CTLFLAG_RD, sc, id, sfxge_phy_stat_handler, id == EFX_PHY_STAT_OUI ?
"IX" : "IU", ""); } } void sfxge_port_fini(struct sfxge_softc *sc) { struct sfxge_port *port; efsys_mem_t *esmp; port = &sc->port; esmp = &port->mac_stats.dma_buf; KASSERT(port->init_state == SFXGE_PORT_INITIALIZED, ("Port not initialized")); port->init_state = SFXGE_PORT_UNINITIALIZED; port->link_mode = EFX_LINK_UNKNOWN; /* Finish with PHY DMA memory */ sfxge_dma_free(&port->phy_stats.dma_buf); free(port->phy_stats.decode_buf, M_SFXGE); sfxge_dma_free(esmp); free(port->mac_stats.decode_buf, M_SFXGE); mtx_destroy(&port->lock); port->sc = NULL; } int sfxge_port_init(struct sfxge_softc *sc) { struct sfxge_port *port; struct sysctl_ctx_list *sysctl_ctx; struct sysctl_oid *sysctl_tree; efsys_mem_t *mac_stats_buf, *phy_stats_buf; int rc; port = &sc->port; mac_stats_buf = &port->mac_stats.dma_buf; phy_stats_buf = &port->phy_stats.dma_buf; KASSERT(port->init_state == SFXGE_PORT_UNINITIALIZED, ("Port already initialized")); port->sc = sc; mtx_init(&port->lock, "sfxge_port", NULL, MTX_DEF); port->phy_stats.decode_buf = malloc(EFX_PHY_NSTATS * sizeof(uint32_t), M_SFXGE, M_WAITOK | M_ZERO); if ((rc = sfxge_dma_alloc(sc, EFX_PHY_STATS_SIZE, phy_stats_buf)) != 0) goto fail; - bzero(phy_stats_buf->esm_base, phy_stats_buf->esm_size); sfxge_phy_stat_init(sc); sysctl_ctx = device_get_sysctl_ctx(sc->dev); sysctl_tree = device_get_sysctl_tree(sc->dev); #ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS /* If flow control cannot be configured or reported through * ifmedia, provide sysctls for it. */ port->wanted_fc = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE; SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "wanted_fc", CTLTYPE_UINT|CTLFLAG_RW, sc, 0, sfxge_port_wanted_fc_handler, "IU", "wanted flow control mode"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "link_fc", CTLTYPE_UINT|CTLFLAG_RD, sc, 0, sfxge_port_link_fc_handler, "IU", "link flow control mode"); #endif port->mac_stats.decode_buf = malloc(EFX_MAC_NSTATS * sizeof(uint64_t), M_SFXGE, M_WAITOK | M_ZERO); if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0) goto fail2; - bzero(mac_stats_buf->esm_base, mac_stats_buf->esm_size); sfxge_mac_stat_init(sc); port->init_state = SFXGE_PORT_INITIALIZED; return (0); fail2: free(port->mac_stats.decode_buf, M_SFXGE); sfxge_dma_free(phy_stats_buf); fail: free(port->phy_stats.decode_buf, M_SFXGE); (void)mtx_destroy(&port->lock); port->sc = NULL; return (rc); } static int sfxge_link_mode[EFX_PHY_MEDIA_NTYPES][EFX_LINK_NMODES] = { [EFX_PHY_MEDIA_CX4] = { [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_CX4, }, [EFX_PHY_MEDIA_KX4] = { [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_KX4, }, [EFX_PHY_MEDIA_XFP] = { /* Don't know the module type, but assume SR for now. */ [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR, }, [EFX_PHY_MEDIA_SFP_PLUS] = { /* Don't know the module type, but assume SX/SR for now. 
*/ [EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_SX, [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR, }, [EFX_PHY_MEDIA_BASE_T] = { [EFX_LINK_10HDX] = IFM_ETHER | IFM_HDX | IFM_10_T, [EFX_LINK_10FDX] = IFM_ETHER | IFM_FDX | IFM_10_T, [EFX_LINK_100HDX] = IFM_ETHER | IFM_HDX | IFM_100_TX, [EFX_LINK_100FDX] = IFM_ETHER | IFM_FDX | IFM_100_TX, [EFX_LINK_1000HDX] = IFM_ETHER | IFM_HDX | IFM_1000_T, [EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_T, [EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_T, }, }; static void sfxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct sfxge_softc *sc; efx_phy_media_type_t medium_type; efx_link_mode_t mode; sc = ifp->if_softc; sx_xlock(&sc->softc_lock); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (SFXGE_RUNNING(sc) && SFXGE_LINK_UP(sc)) { ifmr->ifm_status |= IFM_ACTIVE; efx_phy_media_type_get(sc->enp, &medium_type); mode = sc->port.link_mode; ifmr->ifm_active |= sfxge_link_mode[medium_type][mode]; ifmr->ifm_active |= sfxge_port_link_fc_ifm(sc); } sx_xunlock(&sc->softc_lock); } static int sfxge_media_change(struct ifnet *ifp) { struct sfxge_softc *sc; struct ifmedia_entry *ifm; int rc; sc = ifp->if_softc; ifm = sc->media.ifm_cur; sx_xlock(&sc->softc_lock); if (!SFXGE_RUNNING(sc)) { rc = 0; goto out; } rc = efx_mac_fcntl_set(sc->enp, sfxge_port_wanted_fc(sc), B_TRUE); if (rc != 0) goto out; rc = efx_phy_adv_cap_set(sc->enp, ifm->ifm_data); out: sx_xunlock(&sc->softc_lock); return (rc); } int sfxge_port_ifmedia_init(struct sfxge_softc *sc) { efx_phy_media_type_t medium_type; uint32_t cap_mask, mode_cap_mask; efx_link_mode_t mode; int mode_ifm, best_mode_ifm = 0; int rc; /* We need port state to initialise the ifmedia list. */ if ((rc = efx_nic_init(sc->enp)) != 0) goto out; if ((rc = efx_port_init(sc->enp)) != 0) goto out2; /* * Register ifconfig callbacks for querying and setting the * link mode and link status. */ ifmedia_init(&sc->media, IFM_IMASK, sfxge_media_change, sfxge_media_status); /* * Map firmware medium type and capabilities to ifmedia types. * ifmedia does not distinguish between forcing the link mode * and disabling auto-negotiation. 1000BASE-T and 10GBASE-T * require AN even if only one link mode is enabled, and for * 100BASE-TX it is useful even if the link mode is forced. * Therefore we never disable auto-negotiation. * * Also enable and advertise flow control by default. */ efx_phy_media_type_get(sc->enp, &medium_type); efx_phy_adv_cap_get(sc->enp, EFX_PHY_CAP_PERM, &cap_mask); EFX_STATIC_ASSERT(EFX_LINK_10HDX == EFX_PHY_CAP_10HDX + 1); EFX_STATIC_ASSERT(EFX_LINK_10FDX == EFX_PHY_CAP_10FDX + 1); EFX_STATIC_ASSERT(EFX_LINK_100HDX == EFX_PHY_CAP_100HDX + 1); EFX_STATIC_ASSERT(EFX_LINK_100FDX == EFX_PHY_CAP_100FDX + 1); EFX_STATIC_ASSERT(EFX_LINK_1000HDX == EFX_PHY_CAP_1000HDX + 1); EFX_STATIC_ASSERT(EFX_LINK_1000FDX == EFX_PHY_CAP_1000FDX + 1); EFX_STATIC_ASSERT(EFX_LINK_10000FDX == EFX_PHY_CAP_10000FDX + 1); for (mode = EFX_LINK_10HDX; mode <= EFX_LINK_10000FDX; mode++) { mode_cap_mask = 1 << (mode - 1); mode_ifm = sfxge_link_mode[medium_type][mode]; if ((cap_mask & mode_cap_mask) && mode_ifm) { mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_AN); #ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS /* No flow-control */ ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL); /* Respond-only. If using AN, we implicitly * offer symmetric as well, but that doesn't * mean we *have* to generate pause frames. 
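 * The net effect (assuming the medium supports pause at all) is up to
 * three ifmedia entries per link mode: the bare mode, the mode plus
 * rxpause, and the mode plus rxpause,txpause, matching the no-pause,
 * respond-only and symmetric flow-control choices described here.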
*/ mode_cap_mask |= cap_mask & ((1 << EFX_PHY_CAP_PAUSE) | (1 << EFX_PHY_CAP_ASYM)); mode_ifm |= IFM_ETH_RXPAUSE; ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL); /* Symmetric */ mode_cap_mask &= ~(1 << EFX_PHY_CAP_ASYM); mode_ifm |= IFM_ETH_TXPAUSE; #else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */ mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_PAUSE); #endif ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL); /* Link modes are numbered in order of speed, * so assume the last one available is the best. */ best_mode_ifm = mode_ifm; } } if (cap_mask & (1 << EFX_PHY_CAP_AN)) { /* Add autoselect mode. */ mode_ifm = IFM_ETHER | IFM_AUTO; ifmedia_add(&sc->media, mode_ifm, cap_mask & ~(1 << EFX_PHY_CAP_ASYM), NULL); best_mode_ifm = mode_ifm; } if (best_mode_ifm != 0) ifmedia_set(&sc->media, best_mode_ifm); /* Now discard port state until interface is started. */ efx_port_fini(sc->enp); out2: efx_nic_fini(sc->enp); out: return (rc); } Index: projects/clang360-import/sys/dev/sfxge/sfxge_tx.c =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_tx.c (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_tx.c (revision 277896) @@ -1,1579 +1,1634 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Theory of operation: * * Tx queues allocation and mapping * * One Tx queue with enabled checksum offload is allocated per Rx channel * (event queue). Also 2 Tx queues (one without checksum offload and one * with IP checksum offload only) are allocated and bound to event queue 0. * sfxge_txq_type is used as Tx queue label. 
* * So, event queue plus label mapping to Tx queue index is: * if event queue index is 0, TxQ-index = TxQ-label * [0..SFXGE_TXQ_NTYPES) * else TxQ-index = SFXGE_TXQ_NTYPES + EvQ-index - 1 * See sfxge_get_txq_by_label() sfxge_ev.c */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "common/efx.h" #include "sfxge.h" #include "sfxge_tx.h" /* Set the block level to ensure there is space to generate a * large number of descriptors for TSO. With minimum MSS and * maximum mbuf length we might need more than a ring-ful of * descriptors, but this should not happen in practice except * due to deliberate attack. In that case we will truncate * the output at a packet boundary. Allow for a reasonable * minimum MSS of 512. */ #define SFXGE_TSO_MAX_DESC ((65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1) #define SFXGE_TXQ_BLOCK_LEVEL(_entries) ((_entries) - SFXGE_TSO_MAX_DESC) #ifdef SFXGE_HAVE_MQ #define SFXGE_PARAM_TX_DPL_GET_MAX SFXGE_PARAM(tx_dpl_get_max) static int sfxge_tx_dpl_get_max = SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_MAX, &sfxge_tx_dpl_get_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_get_max, 0, - "Maximum number of packets in deferred packet get-list"); + "Maximum number of packets of any type in deferred packet get-list"); +#define SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX \ + SFXGE_PARAM(tx_dpl_get_non_tcp_max) +static int sfxge_tx_dpl_get_non_tcp_max = + SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT; +TUNABLE_INT(SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, &sfxge_tx_dpl_get_non_tcp_max); +SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_get_non_tcp_max, CTLFLAG_RDTUN, + &sfxge_tx_dpl_get_non_tcp_max, 0, + "Maximum number of non-TCP packets in deferred packet get-list"); + #define SFXGE_PARAM_TX_DPL_PUT_MAX SFXGE_PARAM(tx_dpl_put_max) static int sfxge_tx_dpl_put_max = SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT; TUNABLE_INT(SFXGE_PARAM_TX_DPL_PUT_MAX, &sfxge_tx_dpl_put_max); SYSCTL_INT(_hw_sfxge, OID_AUTO, tx_dpl_put_max, CTLFLAG_RDTUN, &sfxge_tx_dpl_put_max, 0, - "Maximum number of packets in deferred packet put-list"); + "Maximum number of packets of any type in deferred packet put-list"); #endif /* Forward declarations. */ static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq); static void sfxge_tx_qlist_post(struct sfxge_txq *txq); static void sfxge_tx_qunblock(struct sfxge_txq *txq); static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg); void -sfxge_tx_qcomplete(struct sfxge_txq *txq) +sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq) { - struct sfxge_softc *sc; - struct sfxge_evq *evq; unsigned int completed; - sc = txq->sc; - evq = sc->evq[txq->evq_index]; - mtx_assert(&evq->lock, MA_OWNED); completed = txq->completed; while (completed != txq->pending) { struct sfxge_tx_mapping *stmp; unsigned int id; id = completed++ & txq->ptr_mask; stmp = &txq->stmp[id]; if (stmp->flags & TX_BUF_UNMAP) { bus_dmamap_unload(txq->packet_dma_tag, stmp->map); if (stmp->flags & TX_BUF_MBUF) { struct mbuf *m = stmp->u.mbuf; do m = m_free(m); while (m != NULL); } else { free(stmp->u.heap_buf, M_SFXGE); } stmp->flags = 0; } } txq->completed = completed; /* Check whether we need to unblock the queue.
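 * A queue blocks in sfxge_tx_qlist_post() once its fill level reaches
 * SFXGE_TXQ_BLOCK_LEVEL(entries), i.e. once fewer than
 * SFXGE_TSO_MAX_DESC = (65535 / 512) * 2 + SFXGE_TX_MAPPING_MAX_SEG - 1
 * = 253 + SFXGE_TX_MAPPING_MAX_SEG free descriptors remain, and is only
 * unblocked here once completions bring the level back down to
 * SFXGE_TXQ_UNBLOCK_LEVEL(entries), a separate threshold defined in
 * sfxge_tx.h.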
*/ mb(); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) sfxge_tx_qunblock(txq); } } #ifdef SFXGE_HAVE_MQ +static inline unsigned int +sfxge_is_mbuf_non_tcp(struct mbuf *mbuf) +{ + /* Absence of TCP checksum flags does not mean that it is non-TCP, + * but it should be true if the user wants to achieve high throughput. + */ + return (!(mbuf->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP))); +} + /* * Reorder the put list and append it to the get list. */ static void sfxge_tx_qdpl_swizzle(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *get_next, **get_tailp; volatile uintptr_t *putp; uintptr_t put; unsigned int count; + unsigned int non_tcp_count; mtx_assert(&txq->lock, MA_OWNED); stdp = &txq->dpl; /* Acquire the put list. */ putp = &stdp->std_put; put = atomic_readandclear_ptr(putp); mbuf = (void *)put; if (mbuf == NULL) return; /* Reverse the put list. */ get_tailp = &mbuf->m_nextpkt; get_next = NULL; count = 0; + non_tcp_count = 0; do { struct mbuf *put_next; + non_tcp_count += sfxge_is_mbuf_non_tcp(mbuf); put_next = mbuf->m_nextpkt; mbuf->m_nextpkt = get_next; get_next = mbuf; mbuf = put_next; count++; } while (mbuf != NULL); /* Append the reversed put list to the get list. */ KASSERT(*get_tailp == NULL, ("*get_tailp != NULL")); *stdp->std_getp = get_next; stdp->std_getp = get_tailp; stdp->std_get_count += count; + stdp->std_get_non_tcp_count += non_tcp_count; } #endif /* SFXGE_HAVE_MQ */ static void sfxge_tx_qreap(struct sfxge_txq *txq) { mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED); txq->reaped = txq->completed; } static void sfxge_tx_qlist_post(struct sfxge_txq *txq) { unsigned int old_added; unsigned int level; int rc; mtx_assert(SFXGE_TXQ_LOCK(txq), MA_OWNED); KASSERT(txq->n_pend_desc != 0, ("txq->n_pend_desc == 0")); KASSERT(txq->n_pend_desc <= SFXGE_TSO_MAX_DESC, ("txq->n_pend_desc too large")); KASSERT(!txq->blocked, ("txq->blocked")); old_added = txq->added; /* Post the fragment list. */ rc = efx_tx_qpost(txq->common, txq->pend_desc, txq->n_pend_desc, txq->reaped, &txq->added); KASSERT(rc == 0, ("efx_tx_qpost() failed")); /* If efx_tx_qpost() had to refragment, our information about * buffers to free may be associated with the wrong * descriptors. */ KASSERT(txq->added - old_added == txq->n_pend_desc, ("efx_tx_qpost() refragmented descriptors")); level = txq->added - txq->reaped; KASSERT(level <= txq->entries, ("overfilled TX queue")); /* Clear the fragment list. */ txq->n_pend_desc = 0; /* Have we reached the block level? */ if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) return; /* Reap, and check again */ sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) return; txq->blocked = 1; /* * Avoid a race with completion interrupt handling that could leave * the queue blocked. */ mb(); sfxge_tx_qreap(txq); level = txq->added - txq->reaped; if (level < SFXGE_TXQ_BLOCK_LEVEL(txq->entries)) { mb(); txq->blocked = 0; } } static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf) { bus_dmamap_t *used_map; bus_dmamap_t map; bus_dma_segment_t dma_seg[SFXGE_TX_MAPPING_MAX_SEG]; unsigned int id; struct sfxge_tx_mapping *stmp; efx_buffer_t *desc; int n_dma_seg; int rc; int i; KASSERT(!txq->blocked, ("txq->blocked")); if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) prefetch_read_many(mbuf->m_data); if (txq->init_state != SFXGE_TXQ_STARTED) { rc = EINTR; goto reject; } /* Load the packet for DMA.
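 * bus_dmamap_load_mbuf_sg() below may fail with EFBIG when the chain
 * needs more than SFXGE_TX_MAPPING_MAX_SEG segments; in that case the
 * chain is m_collapse()d once and the load retried (counted in
 * txq->collapses) before the packet is finally dropped.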
*/ id = txq->added & txq->ptr_mask; stmp = &txq->stmp[id]; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); if (rc == EFBIG) { /* Try again. */ struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT, SFXGE_TX_MAPPING_MAX_SEG); if (new_mbuf == NULL) goto reject; ++txq->collapses; mbuf = new_mbuf; rc = bus_dmamap_load_mbuf_sg(txq->packet_dma_tag, stmp->map, mbuf, dma_seg, &n_dma_seg, 0); } if (rc != 0) goto reject; /* Make the packet visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, stmp->map, BUS_DMASYNC_PREWRITE); used_map = &stmp->map; if (mbuf->m_pkthdr.csum_flags & CSUM_TSO) { rc = sfxge_tx_queue_tso(txq, mbuf, dma_seg, n_dma_seg); if (rc < 0) goto reject_mapped; stmp = &txq->stmp[rc]; } else { /* Add the mapping to the fragment list, and set flags * for the buffer. */ i = 0; for (;;) { desc = &txq->pend_desc[i]; desc->eb_addr = dma_seg[i].ds_addr; desc->eb_size = dma_seg[i].ds_len; if (i == n_dma_seg - 1) { desc->eb_eop = 1; break; } desc->eb_eop = 0; i++; stmp->flags = 0; if (__predict_false(stmp == &txq->stmp[txq->ptr_mask])) stmp = &txq->stmp[0]; else stmp++; } txq->n_pend_desc = n_dma_seg; } /* * If the mapping required more than one descriptor * then we need to associate the DMA map with the last * descriptor, not the first. */ if (used_map != &stmp->map) { map = stmp->map; stmp->map = *used_map; *used_map = map; } stmp->u.mbuf = mbuf; stmp->flags = TX_BUF_UNMAP | TX_BUF_MBUF; /* Post the fragment list. */ sfxge_tx_qlist_post(txq); return (0); reject_mapped: bus_dmamap_unload(txq->packet_dma_tag, *used_map); reject: /* Drop the packet on the floor. */ m_freem(mbuf); ++txq->drops; return (rc); } #ifdef SFXGE_HAVE_MQ /* * Drain the deferred packet list into the transmit queue. */ static void sfxge_tx_qdpl_drain(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_tx_dpl *stdp; struct mbuf *mbuf, *next; unsigned int count; + unsigned int non_tcp_count; unsigned int pushed; int rc; mtx_assert(&txq->lock, MA_OWNED); sc = txq->sc; stdp = &txq->dpl; pushed = txq->added; prefetch_read_many(sc->enp); prefetch_read_many(txq->common); mbuf = stdp->std_get; count = stdp->std_get_count; + non_tcp_count = stdp->std_get_non_tcp_count; + if (count > stdp->std_get_hiwat) + stdp->std_get_hiwat = count; + while (count != 0) { KASSERT(mbuf != NULL, ("mbuf == NULL")); next = mbuf->m_nextpkt; mbuf->m_nextpkt = NULL; ETHER_BPF_MTAP(sc->ifnet, mbuf); /* packet capture */ if (next != NULL) prefetch_read_many(next); rc = sfxge_tx_queue_mbuf(txq, mbuf); --count; + non_tcp_count -= sfxge_is_mbuf_non_tcp(mbuf); mbuf = next; if (rc != 0) continue; if (txq->blocked) break; /* Push the fragments to the hardware in batches. */ if (txq->added - pushed >= SFXGE_TX_BATCH) { efx_tx_qpush(txq->common, txq->added); pushed = txq->added; } } if (count == 0) { KASSERT(mbuf == NULL, ("mbuf != NULL")); + KASSERT(non_tcp_count == 0, + ("inconsistent TCP/non-TCP detection")); stdp->std_get = NULL; stdp->std_get_count = 0; + stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; } else { stdp->std_get = mbuf; stdp->std_get_count = count; + stdp->std_get_non_tcp_count = non_tcp_count; } if (txq->added != pushed) efx_tx_qpush(txq->common, txq->added); KASSERT(txq->blocked || stdp->std_get_count == 0, ("queue unblocked but count is non-zero")); } #define SFXGE_TX_QDPL_PENDING(_txq) \ ((_txq)->dpl.std_put != 0) /* * Service the deferred packet list. * * NOTE: drops the txq mutex! 
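 * A sketch of the loop that follows: move any packets that raced onto
 * the lock-free put list over to the get list (swizzle), drain the get
 * list into the hardware ring, drop the mutex, and start over only if
 * yet more packets arrived and the mutex can be re-acquired without
 * sleeping (mtx_trylock); callers must not assume the lock is held on
 * return.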
*/ static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq) { mtx_assert(&txq->lock, MA_OWNED); do { if (SFXGE_TX_QDPL_PENDING(txq)) sfxge_tx_qdpl_swizzle(txq); if (!txq->blocked) sfxge_tx_qdpl_drain(txq); mtx_unlock(&txq->lock); } while (SFXGE_TX_QDPL_PENDING(txq) && mtx_trylock(&txq->lock)); } /* * Put a packet on the deferred packet list. * * If we are called with the txq lock held, we put the packet on the "get * list", otherwise we atomically push it on the "put list". The swizzle * function takes care of ordering. * * The length of the put list is bounded by SFXGE_TX_MAX_DEFFERED. We * overload the csum_data field in the mbuf to keep track of this length * because there is no cheap alternative to avoid races. */ static inline int sfxge_tx_qdpl_put(struct sfxge_txq *txq, struct mbuf *mbuf, int locked) { struct sfxge_tx_dpl *stdp; stdp = &txq->dpl; KASSERT(mbuf->m_nextpkt == NULL, ("mbuf->m_nextpkt != NULL")); if (locked) { mtx_assert(&txq->lock, MA_OWNED); sfxge_tx_qdpl_swizzle(txq); - if (stdp->std_get_count >= stdp->std_get_max) + if (stdp->std_get_count >= stdp->std_get_max) { + txq->get_overflow++; return (ENOBUFS); + } + if (sfxge_is_mbuf_non_tcp(mbuf)) { + if (stdp->std_get_non_tcp_count >= + stdp->std_get_non_tcp_max) { + txq->get_non_tcp_overflow++; + return (ENOBUFS); + } + stdp->std_get_non_tcp_count++; + } *(stdp->std_getp) = mbuf; stdp->std_getp = &mbuf->m_nextpkt; stdp->std_get_count++; } else { volatile uintptr_t *putp; uintptr_t old; uintptr_t new; unsigned old_len; putp = &stdp->std_put; new = (uintptr_t)mbuf; do { old = *putp; if (old != 0) { struct mbuf *mp = (struct mbuf *)old; old_len = mp->m_pkthdr.csum_data; } else old_len = 0; - if (old_len >= stdp->std_put_max) + if (old_len >= stdp->std_put_max) { + atomic_add_long(&txq->put_overflow, 1); return (ENOBUFS); + } mbuf->m_pkthdr.csum_data = old_len + 1; mbuf->m_nextpkt = (void *)old; } while (atomic_cmpset_ptr(putp, old, new) == 0); } return (0); } /* * Called from if_transmit - will try to grab the txq lock and enqueue to the * put list if it succeeds, otherwise will push onto the defer list. */ int sfxge_tx_packet_add(struct sfxge_txq *txq, struct mbuf *m) { int locked; int rc; if (!SFXGE_LINK_UP(txq->sc)) { rc = ENETDOWN; + atomic_add_long(&txq->netdown_drops, 1); goto fail; } /* * Try to grab the txq lock. If we are able to get the lock, * the packet will be appended to the "get list" of the deferred * packet list. Otherwise, it will be pushed on the "put list". */ locked = mtx_trylock(&txq->lock); if (sfxge_tx_qdpl_put(txq, m, locked) != 0) { if (locked) mtx_unlock(&txq->lock); rc = ENOBUFS; goto fail; } /* * Try to grab the lock again. * * If we are able to get the lock, we need to process the deferred * packet list. If we are not able to get the lock, another thread * is processing the list. */ if (!locked) locked = mtx_trylock(&txq->lock); if (locked) { /* Try to service the list. */ sfxge_tx_qdpl_service(txq); /* Lock has been dropped. 
*/ } return (0); fail: m_freem(m); - atomic_add_long(&txq->early_drops, 1); return (rc); } static void sfxge_tx_qdpl_flush(struct sfxge_txq *txq) { struct sfxge_tx_dpl *stdp = &txq->dpl; struct mbuf *mbuf, *next; mtx_lock(&txq->lock); sfxge_tx_qdpl_swizzle(txq); for (mbuf = stdp->std_get; mbuf != NULL; mbuf = next) { next = mbuf->m_nextpkt; m_freem(mbuf); } stdp->std_get = NULL; stdp->std_get_count = 0; + stdp->std_get_non_tcp_count = 0; stdp->std_getp = &stdp->std_get; mtx_unlock(&txq->lock); } void sfxge_if_qflush(struct ifnet *ifp) { struct sfxge_softc *sc; int i; sc = ifp->if_softc; for (i = 0; i < SFXGE_TX_SCALE(sc); i++) sfxge_tx_qdpl_flush(sc->txq[i]); } /* * TX start -- called by the stack. */ int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m) { struct sfxge_softc *sc; struct sfxge_txq *txq; int rc; sc = (struct sfxge_softc *)ifp->if_softc; KASSERT(ifp->if_flags & IFF_UP, ("interface not up")); /* Pick the desired transmit queue. */ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) { int index = 0; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { uint32_t hash = m->m_pkthdr.flowid; index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX]; } txq = sc->txq[SFXGE_TXQ_IP_TCP_UDP_CKSUM + index]; } else if (m->m_pkthdr.csum_flags & CSUM_DELAY_IP) { txq = sc->txq[SFXGE_TXQ_IP_CKSUM]; } else { txq = sc->txq[SFXGE_TXQ_NON_CKSUM]; } rc = sfxge_tx_packet_add(txq, m); return (rc); } #else /* !SFXGE_HAVE_MQ */ static void sfxge_if_start_locked(struct ifnet *ifp) { struct sfxge_softc *sc = ifp->if_softc; struct sfxge_txq *txq; struct mbuf *mbuf; unsigned int pushed[SFXGE_TXQ_NTYPES]; unsigned int q_index; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!sc->port.link_up) return; for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) { txq = sc->txq[q_index]; pushed[q_index] = txq->added; } while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, mbuf); if (mbuf == NULL) break; ETHER_BPF_MTAP(ifp, mbuf); /* packet capture */ /* Pick the desired transmit queue. */ if (mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) q_index = SFXGE_TXQ_IP_TCP_UDP_CKSUM; else if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) q_index = SFXGE_TXQ_IP_CKSUM; else q_index = SFXGE_TXQ_NON_CKSUM; txq = sc->txq[q_index]; if (sfxge_tx_queue_mbuf(txq, mbuf) != 0) continue; if (txq->blocked) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* Push the fragments to the hardware in batches. */ if (txq->added - pushed[q_index] >= SFXGE_TX_BATCH) { efx_tx_qpush(txq->common, txq->added); pushed[q_index] = txq->added; } } for (q_index = 0; q_index < SFXGE_TXQ_NTYPES; q_index++) { txq = sc->txq[q_index]; if (txq->added != pushed[q_index]) efx_tx_qpush(txq->common, txq->added); } } void sfxge_if_start(struct ifnet *ifp) { struct sfxge_softc *sc = ifp->if_softc; mtx_lock(&sc->tx_lock); sfxge_if_start_locked(ifp); mtx_unlock(&sc->tx_lock); } static inline void sfxge_tx_qdpl_service(struct sfxge_txq *txq) { struct sfxge_softc *sc = txq->sc; struct ifnet *ifp = sc->ifnet; mtx_assert(&sc->tx_lock, MA_OWNED); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sfxge_if_start_locked(ifp); mtx_unlock(&sc->tx_lock); } #endif /* SFXGE_HAVE_MQ */ /* * Software "TSO". Not quite as good as doing it in hardware, but * still faster than segmenting in the stack. 
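 * Worked example (figures assumed for illustration): a 44 KB TCP
 * payload with tso_segsz = 1460 leaves this path as
 * howmany(44 * 1024, 1460) = 31 packets on the wire; each one gets a
 * freshly written copy of the headers, with the sequence number
 * advanced by 1460, in a TSOH_STD_SIZE (128 byte) buffer, followed by
 * payload descriptors carved out of the original DMA segments.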
*/ struct sfxge_tso_state { /* Output position */ unsigned out_len; /* Remaining length in current segment */ unsigned seqnum; /* Current sequence number */ unsigned packet_space; /* Remaining space in current packet */ /* Input position */ unsigned dma_seg_i; /* Current DMA segment number */ uint64_t dma_addr; /* DMA address of current position */ unsigned in_len; /* Remaining length in current mbuf */ const struct mbuf *mbuf; /* Input mbuf (head of chain) */ u_short protocol; /* Network protocol (after VLAN decap) */ ssize_t nh_off; /* Offset of network header */ ssize_t tcph_off; /* Offset of TCP header */ unsigned header_len; /* Number of bytes of header */ int full_packet_size; /* Number of bytes to put in each outgoing * segment */ }; static inline const struct ip *tso_iph(const struct sfxge_tso_state *tso) { KASSERT(tso->protocol == htons(ETHERTYPE_IP), ("tso_iph() in non-IPv4 state")); return (const struct ip *)(tso->mbuf->m_data + tso->nh_off); } static inline const struct ip6_hdr *tso_ip6h(const struct sfxge_tso_state *tso) { KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), ("tso_ip6h() in non-IPv6 state")); return (const struct ip6_hdr *)(tso->mbuf->m_data + tso->nh_off); } static inline const struct tcphdr *tso_tcph(const struct sfxge_tso_state *tso) { return (const struct tcphdr *)(tso->mbuf->m_data + tso->tcph_off); } /* Size of preallocated TSO header buffers. Larger blocks must be * allocated from the heap. */ #define TSOH_STD_SIZE 128 /* At most half the descriptors in the queue at any time will refer to * a TSO header buffer, since they must always be followed by a * payload descriptor referring to an mbuf. */ #define TSOH_COUNT(_txq_entries) ((_txq_entries) / 2u) #define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) #define TSOH_PAGE_COUNT(_txq_entries) \ ((TSOH_COUNT(_txq_entries) + TSOH_PER_PAGE - 1) / TSOH_PER_PAGE) static int tso_init(struct sfxge_txq *txq) { struct sfxge_softc *sc = txq->sc; unsigned int tsoh_page_count = TSOH_PAGE_COUNT(sc->txq_entries); int i, rc; /* Allocate TSO header buffers */ txq->tsoh_buffer = malloc(tsoh_page_count * sizeof(txq->tsoh_buffer[0]), M_SFXGE, M_WAITOK); for (i = 0; i < tsoh_page_count; i++) { rc = sfxge_dma_alloc(sc, PAGE_SIZE, &txq->tsoh_buffer[i]); if (rc != 0) goto fail; } return (0); fail: while (i-- > 0) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); txq->tsoh_buffer = NULL; return (rc); } static void tso_fini(struct sfxge_txq *txq) { int i; if (txq->tsoh_buffer != NULL) { for (i = 0; i < TSOH_PAGE_COUNT(txq->sc->txq_entries); i++) sfxge_dma_free(&txq->tsoh_buffer[i]); free(txq->tsoh_buffer, M_SFXGE); } } static void tso_start(struct sfxge_tso_state *tso, struct mbuf *mbuf) { struct ether_header *eh = mtod(mbuf, struct ether_header *); tso->mbuf = mbuf; /* Find network protocol and header */ tso->protocol = eh->ether_type; if (tso->protocol == htons(ETHERTYPE_VLAN)) { struct ether_vlan_header *veh = mtod(mbuf, struct ether_vlan_header *); tso->protocol = veh->evl_proto; tso->nh_off = sizeof(*veh); } else { tso->nh_off = sizeof(*eh); } /* Find TCP header */ if (tso->protocol == htons(ETHERTYPE_IP)) { KASSERT(tso_iph(tso)->ip_p == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + 4 * tso_iph(tso)->ip_hl; } else { KASSERT(tso->protocol == htons(ETHERTYPE_IPV6), ("TSO required on non-IP packet")); KASSERT(tso_ip6h(tso)->ip6_nxt == IPPROTO_TCP, ("TSO required on non-TCP packet")); tso->tcph_off = tso->nh_off + sizeof(struct ip6_hdr); } - /* We assume all headers are linear in the 
head mbuf */ tso->header_len = tso->tcph_off + 4 * tso_tcph(tso)->th_off; - KASSERT(tso->header_len <= mbuf->m_len, ("packet headers fragmented")); tso->full_packet_size = tso->header_len + mbuf->m_pkthdr.tso_segsz; tso->seqnum = ntohl(tso_tcph(tso)->th_seq); /* These flags must not be duplicated */ KASSERT(!(tso_tcph(tso)->th_flags & (TH_URG | TH_SYN | TH_RST)), ("incompatible TCP flag on TSO packet")); tso->out_len = mbuf->m_pkthdr.len - tso->header_len; } /* * tso_fill_packet_with_fragment - form descriptors for the current fragment * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space. */ static void tso_fill_packet_with_fragment(struct sfxge_txq *txq, struct sfxge_tso_state *tso) { efx_buffer_t *desc; int n; if (tso->in_len == 0 || tso->packet_space == 0) return; KASSERT(tso->in_len > 0, ("TSO input length went negative")); KASSERT(tso->packet_space > 0, ("TSO packet space went negative")); n = min(tso->in_len, tso->packet_space); tso->packet_space -= n; tso->out_len -= n; tso->in_len -= n; desc = &txq->pend_desc[txq->n_pend_desc++]; desc->eb_addr = tso->dma_addr; desc->eb_size = n; desc->eb_eop = tso->out_len == 0 || tso->packet_space == 0; tso->dma_addr += n; } /* Callback from bus_dmamap_load() for long TSO headers. */ static void tso_map_long_header(void *dma_addr_ret, bus_dma_segment_t *segs, int nseg, int error) { *(uint64_t *)dma_addr_ret = ((__predict_true(error == 0) && __predict_true(nseg == 1)) ? segs->ds_addr : 0); } /* * tso_start_new_packet - generate a new header and prepare for the new packet * * Generate a new header and prepare for the new packet. Return 0 on * success, or an error code if failed to alloc header. */ static int tso_start_new_packet(struct sfxge_txq *txq, struct sfxge_tso_state *tso, unsigned int id) { struct sfxge_tx_mapping *stmp = &txq->stmp[id]; struct tcphdr *tsoh_th; unsigned ip_length; caddr_t header; uint64_t dma_addr; bus_dmamap_t map; efx_buffer_t *desc; int rc; /* Allocate a DMA-mapped header buffer. */ if (__predict_true(tso->header_len <= TSOH_STD_SIZE)) { unsigned int page_index = (id / 2) / TSOH_PER_PAGE; unsigned int buf_index = (id / 2) % TSOH_PER_PAGE; header = (txq->tsoh_buffer[page_index].esm_base + buf_index * TSOH_STD_SIZE); dma_addr = (txq->tsoh_buffer[page_index].esm_addr + buf_index * TSOH_STD_SIZE); map = txq->tsoh_buffer[page_index].esm_map; stmp->flags = 0; } else { /* We cannot use bus_dmamem_alloc() as that may sleep */ header = malloc(tso->header_len, M_SFXGE, M_NOWAIT); if (__predict_false(!header)) return (ENOMEM); rc = bus_dmamap_load(txq->packet_dma_tag, stmp->map, header, tso->header_len, tso_map_long_header, &dma_addr, BUS_DMA_NOWAIT); if (__predict_false(dma_addr == 0)) { if (rc == 0) { /* Succeeded but got >1 segment */ bus_dmamap_unload(txq->packet_dma_tag, stmp->map); rc = EINVAL; } free(header, M_SFXGE); return (rc); } map = stmp->map; txq->tso_long_headers++; stmp->u.heap_buf = header; stmp->flags = TX_BUF_UNMAP; } tsoh_th = (struct tcphdr *)(header + tso->tcph_off); /* Copy and update the headers. */ - memcpy(header, tso->mbuf->m_data, tso->header_len); + m_copydata(tso->mbuf, 0, tso->header_len, header); tsoh_th->th_seq = htonl(tso->seqnum); tso->seqnum += tso->mbuf->m_pkthdr.tso_segsz; if (tso->out_len > tso->mbuf->m_pkthdr.tso_segsz) { /* This packet will not finish the TSO burst. 
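 * It therefore carries a full tso_segsz of payload: ip_length is
 * everything after the L2 header (full_packet_size - nh_off), and
 * TH_FIN and TH_PUSH are masked off since only the last segment of the
 * burst may carry them; the else branch instead sizes the final,
 * possibly short, segment from the residual out_len.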
*/ ip_length = tso->full_packet_size - tso->nh_off; tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH); } else { /* This packet will be the last in the TSO burst. */ ip_length = tso->header_len - tso->nh_off + tso->out_len; } if (tso->protocol == htons(ETHERTYPE_IP)) { struct ip *tsoh_iph = (struct ip *)(header + tso->nh_off); tsoh_iph->ip_len = htons(ip_length); /* XXX We should increment ip_id, but FreeBSD doesn't * currently allocate extra IDs for multiple segments. */ } else { struct ip6_hdr *tsoh_iph = (struct ip6_hdr *)(header + tso->nh_off); tsoh_iph->ip6_plen = htons(ip_length - sizeof(*tsoh_iph)); } /* Make the header visible to the hardware. */ bus_dmamap_sync(txq->packet_dma_tag, map, BUS_DMASYNC_PREWRITE); tso->packet_space = tso->mbuf->m_pkthdr.tso_segsz; txq->tso_packets++; /* Form a descriptor for this header. */ desc = &txq->pend_desc[txq->n_pend_desc++]; desc->eb_addr = dma_addr; desc->eb_size = tso->header_len; desc->eb_eop = 0; return (0); } static int sfxge_tx_queue_tso(struct sfxge_txq *txq, struct mbuf *mbuf, const bus_dma_segment_t *dma_seg, int n_dma_seg) { struct sfxge_tso_state tso; unsigned int id, next_id; + unsigned skipped = 0; tso_start(&tso, mbuf); - /* Grab the first payload fragment. */ - if (dma_seg->ds_len == tso.header_len) { + while (dma_seg->ds_len + skipped <= tso.header_len) { + skipped += dma_seg->ds_len; --n_dma_seg; KASSERT(n_dma_seg, ("no payload found in TSO packet")); ++dma_seg; - tso.in_len = dma_seg->ds_len; - tso.dma_addr = dma_seg->ds_addr; - } else { - tso.in_len = dma_seg->ds_len - tso.header_len; - tso.dma_addr = dma_seg->ds_addr + tso.header_len; } + tso.in_len = dma_seg->ds_len - (tso.header_len - skipped); + tso.dma_addr = dma_seg->ds_addr + (tso.header_len - skipped); id = txq->added & txq->ptr_mask; if (__predict_false(tso_start_new_packet(txq, &tso, id))) return (-1); while (1) { id = (id + 1) & txq->ptr_mask; tso_fill_packet_with_fragment(txq, &tso); /* Move onto the next fragment? */ if (tso.in_len == 0) { --n_dma_seg; if (n_dma_seg == 0) break; ++dma_seg; tso.in_len = dma_seg->ds_len; tso.dma_addr = dma_seg->ds_addr; } /* End of packet? */ if (tso.packet_space == 0) { /* If the queue is now full due to tiny MSS, * or we can't create another header, discard * the remainder of the input mbuf but do not * roll back the work we have done. 
*/ if (txq->n_pend_desc > SFXGE_TSO_MAX_DESC - (1 + SFXGE_TX_MAPPING_MAX_SEG)) break; next_id = (id + 1) & txq->ptr_mask; if (__predict_false(tso_start_new_packet(txq, &tso, next_id))) break; id = next_id; } } txq->tso_bursts++; return (id); } static void sfxge_tx_qunblock(struct sfxge_txq *txq) { struct sfxge_softc *sc; struct sfxge_evq *evq; sc = txq->sc; evq = sc->evq[txq->evq_index]; mtx_assert(&evq->lock, MA_OWNED); if (txq->init_state != SFXGE_TXQ_STARTED) return; mtx_lock(SFXGE_TXQ_LOCK(txq)); if (txq->blocked) { unsigned int level; level = txq->added - txq->completed; if (level <= SFXGE_TXQ_UNBLOCK_LEVEL(txq->entries)) txq->blocked = 0; } sfxge_tx_qdpl_service(txq); /* note: lock has been dropped */ } void sfxge_tx_qflush_done(struct sfxge_txq *txq) { txq->flush_state = SFXGE_FLUSH_DONE; } static void sfxge_tx_qstop(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; struct sfxge_evq *evq; unsigned int count; txq = sc->txq[index]; evq = sc->evq[txq->evq_index]; mtx_lock(SFXGE_TXQ_LOCK(txq)); KASSERT(txq->init_state == SFXGE_TXQ_STARTED, ("txq->init_state != SFXGE_TXQ_STARTED")); txq->init_state = SFXGE_TXQ_INITIALIZED; txq->flush_state = SFXGE_FLUSH_PENDING; /* Flush the transmit queue. */ efx_tx_qflush(txq->common); mtx_unlock(SFXGE_TXQ_LOCK(txq)); count = 0; do { /* Spin for 100ms. */ DELAY(100000); if (txq->flush_state != SFXGE_FLUSH_PENDING) break; } while (++count < 20); mtx_lock(&evq->lock); mtx_lock(SFXGE_TXQ_LOCK(txq)); KASSERT(txq->flush_state != SFXGE_FLUSH_FAILED, ("txq->flush_state == SFXGE_FLUSH_FAILED")); txq->flush_state = SFXGE_FLUSH_DONE; txq->blocked = 0; txq->pending = txq->added; - sfxge_tx_qcomplete(txq); + sfxge_tx_qcomplete(txq, evq); KASSERT(txq->completed == txq->added, ("txq->completed != txq->added")); sfxge_tx_qreap(txq); KASSERT(txq->reaped == txq->completed, ("txq->reaped != txq->completed")); txq->added = 0; txq->pending = 0; txq->completed = 0; txq->reaped = 0; /* Destroy the common code transmit queue. */ efx_tx_qdestroy(txq->common); txq->common = NULL; efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); mtx_unlock(&evq->lock); mtx_unlock(SFXGE_TXQ_LOCK(txq)); } static int sfxge_tx_qstart(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; efsys_mem_t *esmp; uint16_t flags; struct sfxge_evq *evq; int rc; txq = sc->txq[index]; esmp = &txq->mem; evq = sc->evq[txq->evq_index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); KASSERT(evq->init_state == SFXGE_EVQ_STARTED, ("evq->init_state != SFXGE_EVQ_STARTED")); /* Program the buffer table. */ if ((rc = efx_sram_buf_tbl_set(sc->enp, txq->buf_base_id, esmp, EFX_TXQ_NBUFS(sc->txq_entries))) != 0) return (rc); /* Determine the kind of queue we are creating. */ switch (txq->type) { case SFXGE_TXQ_NON_CKSUM: flags = 0; break; case SFXGE_TXQ_IP_CKSUM: flags = EFX_CKSUM_IPV4; break; case SFXGE_TXQ_IP_TCP_UDP_CKSUM: flags = EFX_CKSUM_IPV4 | EFX_CKSUM_TCPUDP; break; default: KASSERT(0, ("Impossible TX queue")); flags = 0; break; } /* Create the common code transmit queue. */ if ((rc = efx_tx_qcreate(sc->enp, index, txq->type, esmp, sc->txq_entries, txq->buf_base_id, flags, evq->common, &txq->common)) != 0) goto fail; mtx_lock(SFXGE_TXQ_LOCK(txq)); /* Enable the transmit queue. 
*/ efx_tx_qenable(txq->common); txq->init_state = SFXGE_TXQ_STARTED; mtx_unlock(SFXGE_TXQ_LOCK(txq)); return (0); fail: efx_sram_buf_tbl_clear(sc->enp, txq->buf_base_id, EFX_TXQ_NBUFS(sc->txq_entries)); return (rc); } void sfxge_tx_stop(struct sfxge_softc *sc) { const efx_nic_cfg_t *encp; int index; index = SFXGE_TX_SCALE(sc); while (--index >= 0) sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM); encp = efx_nic_cfg_get(sc->enp); sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM); /* Tear down the transmit module */ efx_tx_fini(sc->enp); } int sfxge_tx_start(struct sfxge_softc *sc) { int index; int rc; /* Initialize the common code transmit module. */ if ((rc = efx_tx_init(sc->enp)) != 0) return (rc); if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_NON_CKSUM)) != 0) goto fail; if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_CKSUM)) != 0) goto fail2; for (index = 0; index < SFXGE_TX_SCALE(sc); index++) { if ((rc = sfxge_tx_qstart(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index)) != 0) goto fail3; } return (0); fail3: while (--index >= 0) sfxge_tx_qstop(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); sfxge_tx_qstop(sc, SFXGE_TXQ_IP_CKSUM); fail2: sfxge_tx_qstop(sc, SFXGE_TXQ_NON_CKSUM); fail: efx_tx_fini(sc->enp); return (rc); } /** * Destroy a transmit queue. */ static void sfxge_tx_qfini(struct sfxge_softc *sc, unsigned int index) { struct sfxge_txq *txq; unsigned int nmaps; txq = sc->txq[index]; KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED, ("txq->init_state != SFXGE_TXQ_INITIALIZED")); if (txq->type == SFXGE_TXQ_IP_TCP_UDP_CKSUM) tso_fini(txq); /* Free the context arrays. */ free(txq->pend_desc, M_SFXGE); nmaps = sc->txq_entries; while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); /* Release DMA memory mapping. */ sfxge_dma_free(&txq->mem); sc->txq[index] = NULL; #ifdef SFXGE_HAVE_MQ mtx_destroy(&txq->lock); #endif free(txq, M_SFXGE); } static int sfxge_tx_qinit(struct sfxge_softc *sc, unsigned int txq_index, enum sfxge_txq_type type, unsigned int evq_index) { char name[16]; struct sysctl_oid *txq_node; struct sfxge_txq *txq; struct sfxge_evq *evq; #ifdef SFXGE_HAVE_MQ struct sfxge_tx_dpl *stdp; #endif efsys_mem_t *esmp; unsigned int nmaps; int rc; txq = malloc(sizeof(struct sfxge_txq), M_SFXGE, M_ZERO | M_WAITOK); txq->sc = sc; txq->entries = sc->txq_entries; txq->ptr_mask = txq->entries - 1; sc->txq[txq_index] = txq; esmp = &txq->mem; evq = sc->evq[evq_index]; /* Allocate and zero DMA space for the descriptor ring. */ if ((rc = sfxge_dma_alloc(sc, EFX_TXQ_SIZE(sc->txq_entries), esmp)) != 0) return (rc); (void)memset(esmp->esm_base, 0, EFX_TXQ_SIZE(sc->txq_entries)); /* Allocate buffer table entries. */ sfxge_sram_buf_tbl_alloc(sc, EFX_TXQ_NBUFS(sc->txq_entries), &txq->buf_base_id); /* Create a DMA tag for packet mappings. */ if (bus_dma_tag_create(sc->parent_dma_tag, 1, 0x1000, MIN(0x3FFFFFFFFFFFUL, BUS_SPACE_MAXADDR), BUS_SPACE_MAXADDR, NULL, NULL, 0x11000, SFXGE_TX_MAPPING_MAX_SEG, 0x1000, 0, NULL, NULL, &txq->packet_dma_tag) != 0) { device_printf(sc->dev, "Couldn't allocate txq DMA tag\n"); rc = ENOMEM; goto fail; } /* Allocate pending descriptor array for batching writes. */ txq->pend_desc = malloc(sizeof(efx_buffer_t) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); /* Allocate and initialise mbuf DMA mapping array. 
*/ txq->stmp = malloc(sizeof(struct sfxge_tx_mapping) * sc->txq_entries, M_SFXGE, M_ZERO | M_WAITOK); for (nmaps = 0; nmaps < sc->txq_entries; nmaps++) { rc = bus_dmamap_create(txq->packet_dma_tag, 0, &txq->stmp[nmaps].map); if (rc != 0) goto fail2; } snprintf(name, sizeof(name), "%u", txq_index); txq_node = SYSCTL_ADD_NODE( device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(sc->txqs_node), OID_AUTO, name, CTLFLAG_RD, NULL, ""); if (txq_node == NULL) { rc = ENOMEM; goto fail_txq_node; } if (type == SFXGE_TXQ_IP_TCP_UDP_CKSUM && (rc = tso_init(txq)) != 0) goto fail3; #ifdef SFXGE_HAVE_MQ if (sfxge_tx_dpl_get_max <= 0) { log(LOG_ERR, "%s=%d must be greater than 0", SFXGE_PARAM_TX_DPL_GET_MAX, sfxge_tx_dpl_get_max); rc = EINVAL; goto fail_tx_dpl_get_max; } + if (sfxge_tx_dpl_get_non_tcp_max <= 0) { + log(LOG_ERR, "%s=%d must be greater than 0", + SFXGE_PARAM_TX_DPL_GET_NON_TCP_MAX, + sfxge_tx_dpl_get_non_tcp_max); + rc = EINVAL; + goto fail_tx_dpl_get_max; + } if (sfxge_tx_dpl_put_max < 0) { log(LOG_ERR, "%s=%d must be greater or equal to 0", SFXGE_PARAM_TX_DPL_PUT_MAX, sfxge_tx_dpl_put_max); rc = EINVAL; goto fail_tx_dpl_put_max; } /* Initialize the deferred packet list. */ stdp = &txq->dpl; stdp->std_put_max = sfxge_tx_dpl_put_max; stdp->std_get_max = sfxge_tx_dpl_get_max; + stdp->std_get_non_tcp_max = sfxge_tx_dpl_get_non_tcp_max; stdp->std_getp = &stdp->std_get; mtx_init(&txq->lock, "txq", NULL, MTX_DEF); SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(txq_node), OID_AUTO, "dpl_get_count", CTLFLAG_RD | CTLFLAG_STATS, &stdp->std_get_count, 0, ""); + SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev), + SYSCTL_CHILDREN(txq_node), OID_AUTO, + "dpl_get_non_tcp_count", CTLFLAG_RD | CTLFLAG_STATS, + &stdp->std_get_non_tcp_count, 0, ""); + SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev), + SYSCTL_CHILDREN(txq_node), OID_AUTO, + "dpl_get_hiwat", CTLFLAG_RD | CTLFLAG_STATS, + &stdp->std_get_hiwat, 0, ""); #endif txq->type = type; txq->evq_index = evq_index; txq->txq_index = txq_index; txq->init_state = SFXGE_TXQ_INITIALIZED; return (0); fail_tx_dpl_put_max: fail_tx_dpl_get_max: fail3: fail_txq_node: free(txq->pend_desc, M_SFXGE); fail2: while (nmaps-- != 0) bus_dmamap_destroy(txq->packet_dma_tag, txq->stmp[nmaps].map); free(txq->stmp, M_SFXGE); bus_dma_tag_destroy(txq->packet_dma_tag); fail: sfxge_dma_free(esmp); return (rc); } static const struct { const char *name; size_t offset; } sfxge_tx_stats[] = { #define SFXGE_TX_STAT(name, member) \ { #name, offsetof(struct sfxge_txq, member) } SFXGE_TX_STAT(tso_bursts, tso_bursts), SFXGE_TX_STAT(tso_packets, tso_packets), SFXGE_TX_STAT(tso_long_headers, tso_long_headers), SFXGE_TX_STAT(tx_collapses, collapses), SFXGE_TX_STAT(tx_drops, drops), - SFXGE_TX_STAT(tx_early_drops, early_drops), + SFXGE_TX_STAT(tx_get_overflow, get_overflow), + SFXGE_TX_STAT(tx_get_non_tcp_overflow, get_non_tcp_overflow), + SFXGE_TX_STAT(tx_put_overflow, put_overflow), + SFXGE_TX_STAT(tx_netdown_drops, netdown_drops), }; static int sfxge_tx_stat_handler(SYSCTL_HANDLER_ARGS) { struct sfxge_softc *sc = arg1; unsigned int id = arg2; unsigned long sum; unsigned int index; /* Sum across all TX queues */ sum = 0; for (index = 0; index < SFXGE_TXQ_IP_TCP_UDP_CKSUM + SFXGE_TX_SCALE(sc); index++) sum += *(unsigned long *)((caddr_t)sc->txq[index] + sfxge_tx_stats[id].offset); return (SYSCTL_OUT(req, &sum, sizeof(sum))); } static void sfxge_tx_stat_init(struct sfxge_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev); struct sysctl_oid_list *stat_list; 
unsigned int id; stat_list = SYSCTL_CHILDREN(sc->stats_node); for (id = 0; id < sizeof(sfxge_tx_stats) / sizeof(sfxge_tx_stats[0]); id++) { SYSCTL_ADD_PROC( ctx, stat_list, OID_AUTO, sfxge_tx_stats[id].name, CTLTYPE_ULONG|CTLFLAG_RD, sc, id, sfxge_tx_stat_handler, "LU", ""); } } void sfxge_tx_fini(struct sfxge_softc *sc) { int index; index = SFXGE_TX_SCALE(sc); while (--index >= 0) sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); } int sfxge_tx_init(struct sfxge_softc *sc) { struct sfxge_intr *intr; int index; int rc; intr = &sc->intr; KASSERT(intr->state == SFXGE_INTR_INITIALIZED, ("intr->state != SFXGE_INTR_INITIALIZED")); sc->txqs_node = SYSCTL_ADD_NODE( device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "txq", CTLFLAG_RD, NULL, "Tx queues"); if (sc->txqs_node == NULL) { rc = ENOMEM; goto fail_txq_node; } /* Initialize the transmit queues */ if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_NON_CKSUM, SFXGE_TXQ_NON_CKSUM, 0)) != 0) goto fail; if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_CKSUM, SFXGE_TXQ_IP_CKSUM, 0)) != 0) goto fail2; for (index = 0; index < SFXGE_TX_SCALE(sc); index++) { if ((rc = sfxge_tx_qinit(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index, SFXGE_TXQ_IP_TCP_UDP_CKSUM, index)) != 0) goto fail3; } sfxge_tx_stat_init(sc); return (0); fail3: sfxge_tx_qfini(sc, SFXGE_TXQ_IP_CKSUM); while (--index >= 0) sfxge_tx_qfini(sc, SFXGE_TXQ_IP_TCP_UDP_CKSUM + index); fail2: sfxge_tx_qfini(sc, SFXGE_TXQ_NON_CKSUM); fail: fail_txq_node: return (rc); } Index: projects/clang360-import/sys/dev/sfxge/sfxge_tx.h =================================================================== --- projects/clang360-import/sys/dev/sfxge/sfxge_tx.h (revision 277895) +++ projects/clang360-import/sys/dev/sfxge/sfxge_tx.h (revision 277896) @@ -1,193 +1,206 @@ /*- * Copyright (c) 2010-2011 Solarflare Communications, Inc. * All rights reserved. * * This software was developed in part by Philip Paeps under contract for * Solarflare Communications, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SFXGE_TX_H #define _SFXGE_TX_H #include #include #include /* Maximum number of DMA segments needed to map an mbuf chain. 
With * TSO, the mbuf length may be just over 64K, divided into 2K mbuf * clusters. (The chain could be longer than this initially, but can * be shortened with m_collapse().) */ #define SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1) /* Maximum number of DMA segments needed to map an output packet. It * could overlap all mbufs in the chain and also require an extra * segment for a TSO header. */ #define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1) /* * Buffer mapping flags. * * Buffers and DMA mappings must be freed when the last descriptor * referring to them is completed. Set the TX_BUF_UNMAP and * TX_BUF_MBUF flags on the last descriptor generated for an mbuf * chain. Set only the TX_BUF_UNMAP flag on a descriptor referring to * a heap buffer. */ enum sfxge_tx_buf_flags { TX_BUF_UNMAP = 1, TX_BUF_MBUF = 2, }; /* * Buffer mapping information for descriptors in flight. */ struct sfxge_tx_mapping { union { struct mbuf *mbuf; caddr_t heap_buf; } u; bus_dmamap_t map; enum sfxge_tx_buf_flags flags; }; -#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT 1024 -#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 64 +#define SFXGE_TX_DPL_GET_PKT_LIMIT_DEFAULT (64 * 1024) +#define SFXGE_TX_DPL_GET_NON_TCP_PKT_LIMIT_DEFAULT 1024 +#define SFXGE_TX_DPL_PUT_PKT_LIMIT_DEFAULT 64 /* * Deferred packet list. */ struct sfxge_tx_dpl { - unsigned int std_get_max; /* Maximum number of packets + unsigned int std_get_max; /* Maximum number of packets * in get list */ - unsigned int std_put_max; /* Maximum number of packets + unsigned int std_get_non_tcp_max; /* Maximum number + * of non-TCP packets + * in get list */ + unsigned int std_put_max; /* Maximum number of packets * in put list */ - uintptr_t std_put; /* Head of put list. */ - struct mbuf *std_get; /* Head of get list. */ - struct mbuf **std_getp; /* Tail of get list. */ - unsigned int std_get_count; /* Packets in get list. */ + uintptr_t std_put; /* Head of put list. */ + struct mbuf *std_get; /* Head of get list. */ + struct mbuf **std_getp; /* Tail of get list. */ + unsigned int std_get_count; /* Packets in get list. */ + unsigned int std_get_non_tcp_count; /* Non-TCP packets + * in get list */ + unsigned int std_get_hiwat; /* Packets in get list + * high watermark */ }; #define SFXGE_TX_BUFFER_SIZE 0x400 #define SFXGE_TX_HEADER_SIZE 0x100 #define SFXGE_TX_COPY_THRESHOLD 0x200 enum sfxge_txq_state { SFXGE_TXQ_UNINITIALIZED = 0, SFXGE_TXQ_INITIALIZED, SFXGE_TXQ_STARTED }; enum sfxge_txq_type { SFXGE_TXQ_NON_CKSUM = 0, SFXGE_TXQ_IP_CKSUM, SFXGE_TXQ_IP_TCP_UDP_CKSUM, SFXGE_TXQ_NTYPES }; #define SFXGE_TXQ_UNBLOCK_LEVEL(_entries) (EFX_TXQ_LIMIT(_entries) / 4) #define SFXGE_TX_BATCH 64 #ifdef SFXGE_HAVE_MQ #define SFXGE_TXQ_LOCK(txq) (&(txq)->lock) #define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc) #else #define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock) #define SFXGE_TX_SCALE(sc) 1 #endif struct sfxge_txq { /* The following fields should be written very rarely */ struct sfxge_softc *sc; enum sfxge_txq_state init_state; enum sfxge_flush_state flush_state; enum sfxge_txq_type type; unsigned int txq_index; unsigned int evq_index; efsys_mem_t mem; unsigned int buf_base_id; unsigned int entries; unsigned int ptr_mask; struct sfxge_tx_mapping *stmp; /* Packets in flight. 
*/ bus_dma_tag_t packet_dma_tag; efx_buffer_t *pend_desc; efx_txq_t *common; - struct sfxge_txq *next; efsys_mem_t *tsoh_buffer; /* This field changes more often and is read regularly on both * the initiation and completion paths */ int blocked __aligned(CACHE_LINE_SIZE); /* The following fields change more often, and are used mostly * on the initiation path */ #ifdef SFXGE_HAVE_MQ struct mtx lock __aligned(CACHE_LINE_SIZE); struct sfxge_tx_dpl dpl; /* Deferred packet list. */ unsigned int n_pend_desc; #else unsigned int n_pend_desc __aligned(CACHE_LINE_SIZE); #endif unsigned int added; unsigned int reaped; /* Statistics */ unsigned long tso_bursts; unsigned long tso_packets; unsigned long tso_long_headers; unsigned long collapses; unsigned long drops; - unsigned long early_drops; + unsigned long get_overflow; + unsigned long get_non_tcp_overflow; + unsigned long put_overflow; + unsigned long netdown_drops; /* The following fields change more often, and are used mostly * on the completion path */ unsigned int pending __aligned(CACHE_LINE_SIZE); unsigned int completed; + struct sfxge_txq *next; }; +struct sfxge_evq; + extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *); extern int sfxge_tx_init(struct sfxge_softc *sc); extern void sfxge_tx_fini(struct sfxge_softc *sc); extern int sfxge_tx_start(struct sfxge_softc *sc); extern void sfxge_tx_stop(struct sfxge_softc *sc); -extern void sfxge_tx_qcomplete(struct sfxge_txq *txq); +extern void sfxge_tx_qcomplete(struct sfxge_txq *txq, struct sfxge_evq *evq); extern void sfxge_tx_qflush_done(struct sfxge_txq *txq); #ifdef SFXGE_HAVE_MQ extern void sfxge_if_qflush(struct ifnet *ifp); extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m); #else extern void sfxge_if_start(struct ifnet *ifp); #endif #endif Index: projects/clang360-import/sys/sys/cdefs.h =================================================================== --- projects/clang360-import/sys/sys/cdefs.h (revision 277895) +++ projects/clang360-import/sys/sys/cdefs.h (revision 277896) @@ -1,806 +1,806 @@ /*- * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Berkeley Software Design, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)cdefs.h 8.8 (Berkeley) 1/9/95 * $FreeBSD$ */ #ifndef _SYS_CDEFS_H_ #define _SYS_CDEFS_H_ /* * Testing against Clang-specific extensions. */ #ifndef __has_extension #define __has_extension __has_feature #endif #ifndef __has_feature #define __has_feature(x) 0 #endif #ifndef __has_include #define __has_include(x) 0 #endif #ifndef __has_builtin #define __has_builtin(x) 0 #endif #if defined(__cplusplus) #define __BEGIN_DECLS extern "C" { #define __END_DECLS } #else #define __BEGIN_DECLS #define __END_DECLS #endif /* * This code has been put in place to help reduce the addition of * compiler specific defines in FreeBSD code. It helps to aid in * having a compiler-agnostic source tree. */ #if defined(__GNUC__) || defined(__INTEL_COMPILER) #if __GNUC__ >= 3 || defined(__INTEL_COMPILER) #define __GNUCLIKE_ASM 3 #define __GNUCLIKE_MATH_BUILTIN_CONSTANTS #else #define __GNUCLIKE_ASM 2 #endif #define __GNUCLIKE___TYPEOF 1 #define __GNUCLIKE___OFFSETOF 1 #define __GNUCLIKE___SECTION 1 #ifndef __INTEL_COMPILER # define __GNUCLIKE_CTOR_SECTION_HANDLING 1 #endif #define __GNUCLIKE_BUILTIN_CONSTANT_P 1 # if defined(__INTEL_COMPILER) && defined(__cplusplus) \ && __INTEL_COMPILER < 800 # undef __GNUCLIKE_BUILTIN_CONSTANT_P # endif #if (__GNUC_MINOR__ > 95 || __GNUC__ >= 3) && !defined(__INTEL_COMPILER) # define __GNUCLIKE_BUILTIN_VARARGS 1 # define __GNUCLIKE_BUILTIN_STDARG 1 # define __GNUCLIKE_BUILTIN_VAALIST 1 #endif #if defined(__GNUC__) # define __GNUC_VA_LIST_COMPATIBILITY 1 #endif /* * Compiler memory barriers, specific to gcc and clang. */ #if defined(__GNUC__) #define __compiler_membar() __asm __volatile(" " : : : "memory") #endif #ifndef __INTEL_COMPILER # define __GNUCLIKE_BUILTIN_NEXT_ARG 1 # define __GNUCLIKE_MATH_BUILTIN_RELOPS #endif #define __GNUCLIKE_BUILTIN_MEMCPY 1 /* XXX: if __GNUC__ >= 2: not tested everywhere originally, where replaced */ #define __CC_SUPPORTS_INLINE 1 #define __CC_SUPPORTS___INLINE 1 #define __CC_SUPPORTS___INLINE__ 1 #define __CC_SUPPORTS___FUNC__ 1 #define __CC_SUPPORTS_WARNING 1 #define __CC_SUPPORTS_VARADIC_XXX 1 /* see varargs.h */ #define __CC_SUPPORTS_DYNAMIC_ARRAY_INIT 1 #endif /* __GNUC__ || __INTEL_COMPILER */ /* * Macro to test if we're using a specific version of gcc or later. */ #if defined(__GNUC__) && !defined(__INTEL_COMPILER) #define __GNUC_PREREQ__(ma, mi) \ (__GNUC__ > (ma) || __GNUC__ == (ma) && __GNUC_MINOR__ >= (mi)) #else #define __GNUC_PREREQ__(ma, mi) 0 #endif /* * The __CONCAT macro is used to concatenate parts of symbol names, e.g. * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo. * The __CONCAT macro is a bit tricky to use if it must work in non-ANSI * mode -- there must be no spaces between its arguments, and for nested * __CONCAT's, all the __CONCAT's must be at the left. __CONCAT can also * concatenate double-quoted strings produced by the __STRING macro, but * this only works with ANSI C. * * __XSTRING is like __STRING, but it expands any macros in its argument * first. 
It is only available with ANSI C. */ #if defined(__STDC__) || defined(__cplusplus) #define __P(protos) protos /* full-blown ANSI C */ #define __CONCAT1(x,y) x ## y #define __CONCAT(x,y) __CONCAT1(x,y) #define __STRING(x) #x /* stringify without expanding x */ #define __XSTRING(x) __STRING(x) /* expand x, then stringify */ #define __const const /* define reserved names to standard */ #define __signed signed #define __volatile volatile #if defined(__cplusplus) #define __inline inline /* convert to C++ keyword */ #else #if !(defined(__CC_SUPPORTS___INLINE)) #define __inline /* delete GCC keyword */ #endif /* ! __CC_SUPPORTS___INLINE */ #endif /* !__cplusplus */ #else /* !(__STDC__ || __cplusplus) */ #define __P(protos) () /* traditional C preprocessor */ #define __CONCAT(x,y) x/**/y #define __STRING(x) "x" #if !defined(__CC_SUPPORTS___INLINE) #define __const /* delete pseudo-ANSI C keywords */ #define __inline #define __signed #define __volatile /* * In non-ANSI C environments, new programs will want ANSI-only C keywords * deleted from the program and old programs will want them left alone. * When using a compiler other than gcc, programs using the ANSI C keywords * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS. * When using "gcc -traditional", we assume that this is the intent; if * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone. */ #ifndef NO_ANSI_KEYWORDS #define const /* delete ANSI C keywords */ #define inline #define signed #define volatile #endif /* !NO_ANSI_KEYWORDS */ #endif /* !__CC_SUPPORTS___INLINE */ #endif /* !(__STDC__ || __cplusplus) */ /* * Compiler-dependent macros to help declare dead (non-returning) and * pure (no side effects) functions, and unused variables. They are * null except for versions of gcc that are known to support the features * properly (old versions of gcc-2 supported the dead and pure features * in a different (wrong) way). If we do not provide an implementation * for a given compiler, let the compile fail if it is told to use * a feature that we cannot live without. */ #ifdef lint #define __dead2 #define __pure2 #define __unused #define __packed #define __aligned(x) #define __section(x) #define __weak #else #define __weak __attribute__((__weak__)) #if !__GNUC_PREREQ__(2, 5) && !defined(__INTEL_COMPILER) #define __dead2 #define __pure2 #define __unused #endif #if __GNUC__ == 2 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 7 && !defined(__INTEL_COMPILER) #define __dead2 __attribute__((__noreturn__)) #define __pure2 __attribute__((__const__)) #define __unused /* XXX Find out what to do for __packed, __aligned and __section */ #endif #if __GNUC_PREREQ__(2, 7) #define __dead2 __attribute__((__noreturn__)) #define __pure2 __attribute__((__const__)) #define __unused __attribute__((__unused__)) #define __used __attribute__((__used__)) #define __packed __attribute__((__packed__)) #define __aligned(x) __attribute__((__aligned__(x))) #define __section(x) __attribute__((__section__(x))) #endif #if defined(__INTEL_COMPILER) #define __dead2 __attribute__((__noreturn__)) #define __pure2 __attribute__((__const__)) #define __unused __attribute__((__unused__)) #define __used __attribute__((__used__)) #define __packed __attribute__((__packed__)) #define __aligned(x) __attribute__((__aligned__(x))) #define __section(x) __attribute__((__section__(x))) #endif #endif #if !__GNUC_PREREQ__(2, 95) #define __alignof(x) __offsetof(struct { char __a; x __b; }, __b) #endif /* * Keywords added in C11. 
*/ -#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L +#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L || defined(lint) #if !__has_extension(c_alignas) #if (defined(__cplusplus) && __cplusplus >= 201103L) || \ __has_extension(cxx_alignas) #define _Alignas(x) alignas(x) #else /* XXX: Only emulates _Alignas(constant-expression); not _Alignas(type-name). */ #define _Alignas(x) __aligned(x) #endif #endif #if defined(__cplusplus) && __cplusplus >= 201103L #define _Alignof(x) alignof(x) #else #define _Alignof(x) __alignof(x) #endif #if !__has_extension(c_atomic) && !__has_extension(cxx_atomic) /* * No native support for _Atomic(). Place object in structure to prevent * most forms of direct non-atomic access. */ #define _Atomic(T) struct { T volatile __val; } #endif #if defined(__cplusplus) && __cplusplus >= 201103L #define _Noreturn [[noreturn]] #else #define _Noreturn __dead2 #endif #if !__has_extension(c_static_assert) #if (defined(__cplusplus) && __cplusplus >= 201103L) || \ __has_extension(cxx_static_assert) #define _Static_assert(x, y) static_assert(x, y) #elif __GNUC_PREREQ__(4,6) /* Nothing, gcc 4.6 and higher has _Static_assert built-in */ #elif defined(__COUNTER__) #define _Static_assert(x, y) __Static_assert(x, __COUNTER__) #define __Static_assert(x, y) ___Static_assert(x, y) #define ___Static_assert(x, y) typedef char __assert_ ## y[(x) ? 1 : -1] #else #define _Static_assert(x, y) struct __hack #endif #endif #if !__has_extension(c_thread_local) /* * XXX: Some compilers (Clang 3.3, GCC 4.7) falsely announce C++11 mode * without actually supporting the thread_local keyword. Don't check for * the presence of C++11 when defining _Thread_local. */ #if /* (defined(__cplusplus) && __cplusplus >= 201103L) || */ \ __has_extension(cxx_thread_local) #define _Thread_local thread_local #else #define _Thread_local __thread #endif #endif #endif /* __STDC_VERSION__ || __STDC_VERSION__ < 201112L */ /* * Emulation of C11 _Generic(). Unlike the previously defined C11 * keywords, it is not possible to implement this using exactly the same * syntax. Therefore implement something similar under the name * __generic(). Unlike _Generic(), this macro can only distinguish * between a single type, so it requires nested invocations to * distinguish multiple cases. */ #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \ __has_extension(c_generic_selections) #define __generic(expr, t, yes, no) \ _Generic(expr, t: yes, default: no) #elif __GNUC_PREREQ__(3, 1) && !defined(__cplusplus) #define __generic(expr, t, yes, no) \ __builtin_choose_expr( \ __builtin_types_compatible_p(__typeof(expr), t), yes, no) #endif #if __GNUC_PREREQ__(2, 96) #define __malloc_like __attribute__((__malloc__)) #define __pure __attribute__((__pure__)) #else #define __malloc_like #define __pure #endif #if __GNUC_PREREQ__(3, 1) || (defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 800) #define __always_inline __attribute__((__always_inline__)) #else #define __always_inline #endif #if __GNUC_PREREQ__(3, 1) #define __noinline __attribute__ ((__noinline__)) #else #define __noinline #endif #if __GNUC_PREREQ__(3, 3) #define __nonnull(x) __attribute__((__nonnull__(x))) #else #define __nonnull(x) #endif #if __GNUC_PREREQ__(3, 4) #define __fastcall __attribute__((__fastcall__)) #else #define __fastcall #endif #if __GNUC_PREREQ__(4, 1) #define __returns_twice __attribute__((__returns_twice__)) #else #define __returns_twice #endif /* XXX: should use `#if __STDC_VERSION__ < 199901'. 
*/ #if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER) #define __func__ NULL #endif #if (defined(__INTEL_COMPILER) || (defined(__GNUC__) && __GNUC__ >= 2)) && !defined(__STRICT_ANSI__) || __STDC_VERSION__ >= 199901 #define __LONG_LONG_SUPPORTED #endif /* C++11 exposes a load of C99 stuff */ #if defined(__cplusplus) && __cplusplus >= 201103L #define __LONG_LONG_SUPPORTED #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #endif /* * GCC 2.95 provides `__restrict' as an extension to C90 to support the * C99-specific `restrict' type qualifier. We happen to use `__restrict' as * a way to define the `restrict' type qualifier without disturbing older * software that is unaware of C99 keywords. */ #if !(__GNUC__ == 2 && __GNUC_MINOR__ == 95) #if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901 || defined(lint) #define __restrict #else #define __restrict restrict #endif #endif /* * GNU C version 2.96 adds explicit branch prediction so that * the CPU back-end can hint the processor and also so that * code blocks can be reordered such that the predicted path * sees a more linear flow, thus improving cache behavior, etc. * * The following two macros provide us with a way to utilize this * compiler feature. Use __predict_true() if you expect the expression * to evaluate to true, and __predict_false() if you expect the * expression to evaluate to false. * * A few notes about usage: * * * Generally, __predict_false() error condition checks (unless * you have some _strong_ reason to do otherwise, in which case * document it), and/or __predict_true() `no-error' condition * checks, assuming you want to optimize for the no-error case. * * * Other than that, if you don't know the likelihood of a test * succeeding from empirical or other `hard' evidence, don't * make predictions. * * * These are meant to be used in places that are run `a lot'. * It is wasteful to make predictions in code that is run * seldomly (e.g. at subsystem initialization time) as the * basic block reordering that this affects can often generate * larger code. */ #if __GNUC_PREREQ__(2, 96) #define __predict_true(exp) __builtin_expect((exp), 1) #define __predict_false(exp) __builtin_expect((exp), 0) #else #define __predict_true(exp) (exp) #define __predict_false(exp) (exp) #endif #if __GNUC_PREREQ__(4, 2) #define __hidden __attribute__((__visibility__("hidden"))) #define __exported __attribute__((__visibility__("default"))) #else #define __hidden #define __exported #endif /* * We define this here since , , and * require it. */ #if __GNUC_PREREQ__(4, 1) #define __offsetof(type, field) __builtin_offsetof(type, field) #else #ifndef __cplusplus #define __offsetof(type, field) \ ((__size_t)(__uintptr_t)((const volatile void *)&((type *)0)->field)) #else #define __offsetof(type, field) \ (__offsetof__ (reinterpret_cast <__size_t> \ (&reinterpret_cast \ (static_cast (0)->field)))) #endif #endif #define __rangeof(type, start, end) \ (__offsetof(type, end) - __offsetof(type, start)) /* * Given the pointer x to the member m of the struct s, return * a pointer to the containing structure. When using GCC, we first * assign pointer x to a local variable, to check that its type is * compatible with member m. 
*/ #if __GNUC_PREREQ__(3, 1) #define __containerof(x, s, m) ({ \ const volatile __typeof(((s *)0)->m) *__x = (x); \ __DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\ }) #else #define __containerof(x, s, m) \ __DEQUALIFY(s *, (const volatile char *)(x) - __offsetof(s, m)) #endif /* * Compiler-dependent macros to declare that functions take printf-like * or scanf-like arguments. They are null except for versions of gcc * that are known to support the features properly (old versions of gcc-2 * didn't permit keeping the keywords out of the application namespace). */ #if !__GNUC_PREREQ__(2, 7) && !defined(__INTEL_COMPILER) #define __printflike(fmtarg, firstvararg) #define __scanflike(fmtarg, firstvararg) #define __format_arg(fmtarg) #define __strfmonlike(fmtarg, firstvararg) #define __strftimelike(fmtarg, firstvararg) #else #define __printflike(fmtarg, firstvararg) \ __attribute__((__format__ (__printf__, fmtarg, firstvararg))) #define __scanflike(fmtarg, firstvararg) \ __attribute__((__format__ (__scanf__, fmtarg, firstvararg))) #define __format_arg(fmtarg) __attribute__((__format_arg__ (fmtarg))) #define __strfmonlike(fmtarg, firstvararg) \ __attribute__((__format__ (__strfmon__, fmtarg, firstvararg))) #define __strftimelike(fmtarg, firstvararg) \ __attribute__((__format__ (__strftime__, fmtarg, firstvararg))) #endif /* Compiler-dependent macros that rely on FreeBSD-specific extensions. */ #if defined(__FreeBSD_cc_version) && __FreeBSD_cc_version >= 300001 && \ defined(__GNUC__) && !defined(__INTEL_COMPILER) #define __printf0like(fmtarg, firstvararg) \ __attribute__((__format__ (__printf0__, fmtarg, firstvararg))) #else #define __printf0like(fmtarg, firstvararg) #endif #if defined(__GNUC__) || defined(__INTEL_COMPILER) #ifndef __INTEL_COMPILER #define __strong_reference(sym,aliassym) \ extern __typeof (sym) aliassym __attribute__ ((__alias__ (#sym))) #endif #ifdef __STDC__ #define __weak_reference(sym,alias) \ __asm__(".weak " #alias); \ __asm__(".equ " #alias ", " #sym) #define __warn_references(sym,msg) \ __asm__(".section .gnu.warning." #sym); \ __asm__(".asciz \"" msg "\""); \ __asm__(".previous") #define __sym_compat(sym,impl,verid) \ __asm__(".symver " #impl ", " #sym "@" #verid) #define __sym_default(sym,impl,verid) \ __asm__(".symver " #impl ", " #sym "@@" #verid) #else #define __weak_reference(sym,alias) \ __asm__(".weak alias"); \ __asm__(".equ alias, sym") #define __warn_references(sym,msg) \ __asm__(".section .gnu.warning.sym"); \ __asm__(".asciz \"msg\""); \ __asm__(".previous") #define __sym_compat(sym,impl,verid) \ __asm__(".symver impl, sym@verid") #define __sym_default(impl,sym,verid) \ __asm__(".symver impl, sym@@verid") #endif /* __STDC__ */ #endif /* __GNUC__ || __INTEL_COMPILER */ #define __GLOBL1(sym) __asm__(".globl " #sym) #define __GLOBL(sym) __GLOBL1(sym) #if defined(__GNUC__) || defined(__INTEL_COMPILER) #define __IDSTRING(name,string) __asm__(".ident\t\"" string "\"") #else /* * The following definition might not work well if used in header files, * but it should be better than nothing. If you want a "do nothing" * version, then it should generate some harmless declaration, such as: * #define __IDSTRING(name,string) struct __hack */ #define __IDSTRING(name,string) static const char name[] __unused = string #endif /* * Embed the rcs id of a source file in the resulting library. Note that in * more recent ELF binutils, we use .ident allowing the ID to be stripped. 
* Usage: * __FBSDID("$FreeBSD$"); */ #ifndef __FBSDID #if !defined(lint) && !defined(STRIP_FBSDID) #define __FBSDID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s) #else #define __FBSDID(s) struct __hack #endif #endif #ifndef __RCSID #ifndef NO__RCSID #define __RCSID(s) __IDSTRING(__CONCAT(__rcsid_,__LINE__),s) #else #define __RCSID(s) struct __hack #endif #endif #ifndef __RCSID_SOURCE #ifndef NO__RCSID_SOURCE #define __RCSID_SOURCE(s) __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s) #else #define __RCSID_SOURCE(s) struct __hack #endif #endif #ifndef __SCCSID #ifndef NO__SCCSID #define __SCCSID(s) __IDSTRING(__CONCAT(__sccsid_,__LINE__),s) #else #define __SCCSID(s) struct __hack #endif #endif #ifndef __COPYRIGHT #ifndef NO__COPYRIGHT #define __COPYRIGHT(s) __IDSTRING(__CONCAT(__copyright_,__LINE__),s) #else #define __COPYRIGHT(s) struct __hack #endif #endif #ifndef __DECONST #define __DECONST(type, var) ((type)(__uintptr_t)(const void *)(var)) #endif #ifndef __DEVOLATILE #define __DEVOLATILE(type, var) ((type)(__uintptr_t)(volatile void *)(var)) #endif #ifndef __DEQUALIFY #define __DEQUALIFY(type, var) ((type)(__uintptr_t)(const volatile void *)(var)) #endif /*- * The following definitions are an extension of the behavior originally * implemented in , but with a different level of granularity. * POSIX.1 requires that the macros we test be defined before any standard * header file is included. * * Here's a quick run-down of the versions: * defined(_POSIX_SOURCE) 1003.1-1988 * _POSIX_C_SOURCE == 1 1003.1-1990 * _POSIX_C_SOURCE == 2 1003.2-1992 C Language Binding Option * _POSIX_C_SOURCE == 199309 1003.1b-1993 * _POSIX_C_SOURCE == 199506 1003.1c-1995, 1003.1i-1995, * and the omnibus ISO/IEC 9945-1: 1996 * _POSIX_C_SOURCE == 200112 1003.1-2001 * _POSIX_C_SOURCE == 200809 1003.1-2008 * * In addition, the X/Open Portability Guide, which is now the Single UNIX * Specification, defines a feature-test macro which indicates the version of * that specification, and which subsumes _POSIX_C_SOURCE. * * Our macros begin with two underscores to avoid namespace screwage. */ /* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1. */ #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1 #undef _POSIX_C_SOURCE /* Probably illegal, but beyond caring now. */ #define _POSIX_C_SOURCE 199009 #endif /* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2. */ #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2 #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 199209 #endif /* Deal with various X/Open Portability Guides and Single UNIX Spec. */ #ifdef _XOPEN_SOURCE #if _XOPEN_SOURCE - 0 >= 700 #define __XSI_VISIBLE 700 #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 200809 #elif _XOPEN_SOURCE - 0 >= 600 #define __XSI_VISIBLE 600 #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 200112 #elif _XOPEN_SOURCE - 0 >= 500 #define __XSI_VISIBLE 500 #undef _POSIX_C_SOURCE #define _POSIX_C_SOURCE 199506 #endif #endif /* * Deal with all versions of POSIX. The ordering relative to the tests above is * important. 
*/ #if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE) #define _POSIX_C_SOURCE 198808 #endif #ifdef _POSIX_C_SOURCE #if _POSIX_C_SOURCE >= 200809 #define __POSIX_VISIBLE 200809 #define __ISO_C_VISIBLE 1999 #elif _POSIX_C_SOURCE >= 200112 #define __POSIX_VISIBLE 200112 #define __ISO_C_VISIBLE 1999 #elif _POSIX_C_SOURCE >= 199506 #define __POSIX_VISIBLE 199506 #define __ISO_C_VISIBLE 1990 #elif _POSIX_C_SOURCE >= 199309 #define __POSIX_VISIBLE 199309 #define __ISO_C_VISIBLE 1990 #elif _POSIX_C_SOURCE >= 199209 #define __POSIX_VISIBLE 199209 #define __ISO_C_VISIBLE 1990 #elif _POSIX_C_SOURCE >= 199009 #define __POSIX_VISIBLE 199009 #define __ISO_C_VISIBLE 1990 #else #define __POSIX_VISIBLE 198808 #define __ISO_C_VISIBLE 0 #endif /* _POSIX_C_SOURCE */ #else /*- * Deal with _ANSI_SOURCE: * If it is defined, and no other compilation environment is explicitly * requested, then define our internal feature-test macros to zero. This * makes no difference to the preprocessor (undefined symbols in preprocessing * expressions are defined to have value zero), but makes it more convenient for * a test program to print out the values. * * If a program mistakenly defines _ANSI_SOURCE and some other macro such as * _POSIX_C_SOURCE, we will assume that it wants the broader compilation * environment (and in fact we will never get here). */ #if defined(_ANSI_SOURCE) /* Hide almost everything. */ #define __POSIX_VISIBLE 0 #define __XSI_VISIBLE 0 #define __BSD_VISIBLE 0 #define __ISO_C_VISIBLE 1990 #elif defined(_C99_SOURCE) /* Localism to specify strict C99 env. */ #define __POSIX_VISIBLE 0 #define __XSI_VISIBLE 0 #define __BSD_VISIBLE 0 #define __ISO_C_VISIBLE 1999 #elif defined(_C11_SOURCE) /* Localism to specify strict C11 env. */ #define __POSIX_VISIBLE 0 #define __XSI_VISIBLE 0 #define __BSD_VISIBLE 0 #define __ISO_C_VISIBLE 2011 #else /* Default environment: show everything. */ #define __POSIX_VISIBLE 200809 #define __XSI_VISIBLE 700 #define __BSD_VISIBLE 1 #define __ISO_C_VISIBLE 2011 #endif #endif #if defined(__mips) || defined(__powerpc64__) #define __NO_TLS 1 #endif /* * Lock annotations. * * Clang provides support for doing basic thread-safety tests at * compile-time, by marking which locks will/should be held when * entering/leaving a functions. * * Furthermore, it is also possible to annotate variables and structure * members to enforce that they are only accessed when certain locks are * held. */ #if __has_extension(c_thread_safety_attributes) #define __lock_annotate(x) __attribute__((x)) #else #define __lock_annotate(x) #endif /* Structure implements a lock. */ #define __lockable __lock_annotate(lockable) /* Function acquires an exclusive or shared lock. */ #define __locks_exclusive(...) \ __lock_annotate(exclusive_lock_function(__VA_ARGS__)) #define __locks_shared(...) \ __lock_annotate(shared_lock_function(__VA_ARGS__)) /* Function attempts to acquire an exclusive or shared lock. */ #define __trylocks_exclusive(...) \ __lock_annotate(exclusive_trylock_function(__VA_ARGS__)) #define __trylocks_shared(...) \ __lock_annotate(shared_trylock_function(__VA_ARGS__)) /* Function releases a lock. */ #define __unlocks(...) __lock_annotate(unlock_function(__VA_ARGS__)) /* Function asserts that an exclusive or shared lock is held. */ #define __asserts_exclusive(...) \ __lock_annotate(assert_exclusive_lock(__VA_ARGS__)) #define __asserts_shared(...) \ __lock_annotate(assert_shared_lock(__VA_ARGS__)) /* Function requires that an exclusive or shared lock is or is not held. 
*/ #define __requires_exclusive(...) \ __lock_annotate(exclusive_locks_required(__VA_ARGS__)) #define __requires_shared(...) \ __lock_annotate(shared_locks_required(__VA_ARGS__)) #define __requires_unlocked(...) \ __lock_annotate(locks_excluded(__VA_ARGS__)) /* Function should not be analyzed. */ #define __no_lock_analysis __lock_annotate(no_thread_safety_analysis) /* Guard variables and structure members by lock. */ #define __guarded_by(x) __lock_annotate(guarded_by(x)) #define __pt_guarded_by(x) __lock_annotate(pt_guarded_by(x)) #endif /* !_SYS_CDEFS_H_ */ Index: projects/clang360-import/sys =================================================================== --- projects/clang360-import/sys (revision 277895) +++ projects/clang360-import/sys (revision 277896) Property changes on: projects/clang360-import/sys ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head/sys:r277858-277895 Index: projects/clang360-import =================================================================== --- projects/clang360-import (revision 277895) +++ projects/clang360-import (revision 277896) Property changes on: projects/clang360-import ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r277861-277895
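
The sfxge_tx_queue_tso() change above removes the assumption that the packet headers end at or inside the first DMA segment: the headers of a TSO packet may span several segments, so the new loop accumulates whole segments into skipped until the current segment extends past header_len, then starts the payload at the remainder. The following stand-alone sketch reproduces that arithmetic with a hypothetical seg type and made-up segment sizes, not the driver's bus_dma_segment_t:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for bus_dma_segment_t; only the two fields
 * the arithmetic needs. */
struct seg {
	unsigned long addr;	/* bus address of the segment */
	unsigned int len;	/* length of the segment in bytes */
};

int
main(void)
{
	/* Made-up mapping: 118 bytes of Ethernet/IP/TCP headers spread
	 * across the first two segments, payload in the third. */
	struct seg segs[] = {
		{ 0x10000, 64 },
		{ 0x20000, 32 },
		{ 0x30000, 900 },
	};
	const struct seg *dma_seg = segs;
	unsigned int n_seg = sizeof(segs) / sizeof(segs[0]);
	unsigned int header_len = 118;
	unsigned int skipped = 0;
	unsigned int in_len;
	unsigned long dma_addr;

	/* Skip every segment wholly consumed by headers, as the new
	 * while loop in sfxge_tx_queue_tso() does. */
	while (dma_seg->len + skipped <= header_len) {
		skipped += dma_seg->len;
		--n_seg;
		assert(n_seg > 0);	/* no payload found in TSO packet */
		++dma_seg;
	}

	/* The first (header_len - skipped) bytes of this segment are
	 * still header; the payload begins immediately after them. */
	in_len = dma_seg->len - (header_len - skipped);
	dma_addr = dma_seg->addr + (header_len - skipped);

	printf("skipped %u header bytes; payload: %u bytes at 0x%lx\n",
	    skipped, in_len, dma_addr);
	return (0);
}

With these numbers the loop consumes the first two segments (skipped becomes 96) and finds the payload 22 bytes into the third: in_len = 878 at bus address 0x30016, mirroring what tso.in_len and tso.dma_addr now compute.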
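
The cdefs.h hunk above extends the C11 keyword emulation to lint builds. One non-obvious piece of that emulation is the _Static_assert() fallback for compilers that predate C11: it pastes __COUNTER__ into a typedef name so each assertion is unique, and relies on a negative array size to break the build when the condition is false. A reduced rendition of the same trick under hypothetical MY_* names, not part of cdefs.h:

#include <stdio.h>

/* Two levels of indirection make the preprocessor expand __COUNTER__
 * to a number before ## pastes it, giving each assertion a unique
 * typedef name; the message argument is simply discarded, as in the
 * cdefs.h fallback. */
#define MY_STATIC_ASSERT(x, msg)	MY_SASSERT1(x, __COUNTER__)
#define MY_SASSERT1(x, y)		MY_SASSERT2(x, y)
#define MY_SASSERT2(x, y)		typedef char my_assert_ ## y[(x) ? 1 : -1]

MY_STATIC_ASSERT(sizeof(int) >= 2, "int is too small");
MY_STATIC_ASSERT(sizeof(long) >= sizeof(int), "long is narrower than int");
/* A false condition would declare a char[-1] and fail to compile. */

int
main(void)
{
	printf("both compile-time assertions passed\n");
	return (0);
}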