diff --git a/include/stdlib.h b/include/stdlib.h index bf1a612190ee..754e8f5f5fd4 100644 --- a/include/stdlib.h +++ b/include/stdlib.h @@ -1,368 +1,402 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1990, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)stdlib.h 8.5 (Berkeley) 5/19/95 * $FreeBSD$ */ #ifndef _STDLIB_H_ #define _STDLIB_H_ #include <sys/cdefs.h> #include <sys/_null.h> #include <sys/_types.h> __NULLABILITY_PRAGMA_PUSH #if __BSD_VISIBLE #ifndef _RUNE_T_DECLARED typedef __rune_t rune_t; #define _RUNE_T_DECLARED #endif #endif #ifndef _SIZE_T_DECLARED typedef __size_t size_t; #define _SIZE_T_DECLARED #endif #ifndef __cplusplus #ifndef _WCHAR_T_DECLARED typedef ___wchar_t wchar_t; #define _WCHAR_T_DECLARED #endif #endif typedef struct { int quot; /* quotient */ int rem; /* remainder */ } div_t; typedef struct { long quot; long rem; } ldiv_t; #define EXIT_FAILURE 1 #define EXIT_SUCCESS 0 /* * I.e., INT_MAX; rand(3) returns a signed integer but must produce output in * the range [0, RAND_MAX], so half of the possible output range is unused.
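 *
 * Illustrative sketch only (a hypothetical caller, not part of this
 * header): code scaling rand() into [0, n) should use the whole range
 * rather than the low-order bits, e.g.
 *
 *	r = (int)((double)rand() / ((double)RAND_MAX + 1) * n);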
*/ #define RAND_MAX 0x7fffffff __BEGIN_DECLS #ifdef _XLOCALE_H_ #include <xlocale/_stdlib.h> #endif extern int __mb_cur_max; extern int ___mb_cur_max(void); #define MB_CUR_MAX ((size_t)___mb_cur_max()) _Noreturn void abort(void); int abs(int) __pure2; int atexit(void (* _Nonnull)(void)); double atof(const char *); int atoi(const char *); long atol(const char *); void *bsearch(const void *, const void *, size_t, size_t, int (*)(const void * _Nonnull, const void *)); void *calloc(size_t, size_t) __malloc_like __result_use_check __alloc_size2(1, 2); div_t div(int, int) __pure2; _Noreturn void exit(int); void free(void *); char *getenv(const char *); long labs(long) __pure2; ldiv_t ldiv(long, long) __pure2; void *malloc(size_t) __malloc_like __result_use_check __alloc_size(1); int mblen(const char *, size_t); size_t mbstowcs(wchar_t * __restrict, const char * __restrict, size_t); int mbtowc(wchar_t * __restrict, const char * __restrict, size_t); void qsort(void *, size_t, size_t, int (* _Nonnull)(const void *, const void *)); int rand(void); void *realloc(void *, size_t) __result_use_check __alloc_size(2); void srand(unsigned); double strtod(const char * __restrict, char ** __restrict); float strtof(const char * __restrict, char ** __restrict); long strtol(const char * __restrict, char ** __restrict, int); long double strtold(const char * __restrict, char ** __restrict); unsigned long strtoul(const char * __restrict, char ** __restrict, int); int system(const char *); int wctomb(char *, wchar_t); size_t wcstombs(char * __restrict, const wchar_t * __restrict, size_t); /* * Functions added in C99 which we make conditionally available in the * BSD^C89 namespace if the compiler supports `long long'. * The #if test is more complicated than it ought to be because * __BSD_VISIBLE implies __ISO_C_VISIBLE == 1999 *even if* `long long' * is not supported in the compilation environment (which therefore means * that it can't really be ISO C99). * * (The only other extension made by C99 in this header is _Exit().) */ #if __ISO_C_VISIBLE >= 1999 || defined(__cplusplus) #ifdef __LONG_LONG_SUPPORTED /* LONGLONG */ typedef struct { long long quot; long long rem; } lldiv_t; /* LONGLONG */ long long atoll(const char *); /* LONGLONG */ long long llabs(long long) __pure2; /* LONGLONG */ lldiv_t lldiv(long long, long long) __pure2; /* LONGLONG */ long long strtoll(const char * __restrict, char ** __restrict, int); /* LONGLONG */ unsigned long long strtoull(const char * __restrict, char ** __restrict, int); #endif /* __LONG_LONG_SUPPORTED */ _Noreturn void _Exit(int); #endif /* __ISO_C_VISIBLE >= 1999 */ /* * If we're in a mode greater than C99, expose C11 functions. */ #if __ISO_C_VISIBLE >= 2011 || __cplusplus >= 201103L void * aligned_alloc(size_t, size_t) __malloc_like __alloc_align(1) __alloc_size(2); int at_quick_exit(void (*)(void)); _Noreturn void quick_exit(int); #endif /* __ISO_C_VISIBLE >= 2011 */ /* * Extensions made by POSIX relative to C.
*/ #if __POSIX_VISIBLE >= 199506 || __XSI_VISIBLE char *realpath(const char * __restrict, char * __restrict); #endif #if __POSIX_VISIBLE >= 199506 int rand_r(unsigned *); /* (TSF) */ #endif #if __POSIX_VISIBLE >= 200112 int posix_memalign(void **, size_t, size_t); /* (ADV) */ int setenv(const char *, const char *, int); int unsetenv(const char *); #endif #if __POSIX_VISIBLE >= 200809 || __XSI_VISIBLE int getsubopt(char **, char *const *, char **); #ifndef _MKDTEMP_DECLARED char *mkdtemp(char *); #define _MKDTEMP_DECLARED #endif #ifndef _MKSTEMP_DECLARED int mkstemp(char *); #define _MKSTEMP_DECLARED #endif #endif /* __POSIX_VISIBLE >= 200809 || __XSI_VISIBLE */ /* * The only changes to the XSI namespace in revision 6 were the deletion * of the ttyslot() and valloc() functions, which FreeBSD never declared * in this header. For revision 7, ecvt(), fcvt(), and gcvt(), which * FreeBSD also does not have, and mktemp(), are to be deleted. */ #if __XSI_VISIBLE /* XXX XSI requires pollution from here. We'd rather not. */ long a64l(const char *); double drand48(void); /* char *ecvt(double, int, int * __restrict, int * __restrict); */ double erand48(unsigned short[3]); /* char *fcvt(double, int, int * __restrict, int * __restrict); */ /* char *gcvt(double, int, int * __restrict, int * __restrict); */ char *initstate(unsigned int, char *, size_t); long jrand48(unsigned short[3]); char *l64a(long); void lcong48(unsigned short[7]); long lrand48(void); #if !defined(_MKTEMP_DECLARED) && (__BSD_VISIBLE || __XSI_VISIBLE <= 600) char *mktemp(char *); #define _MKTEMP_DECLARED #endif long mrand48(void); long nrand48(unsigned short[3]); int putenv(char *); long random(void); unsigned short *seed48(unsigned short[3]); char *setstate(/* const */ char *); void srand48(long); void srandom(unsigned int); #endif /* __XSI_VISIBLE */ #if __XSI_VISIBLE int grantpt(int); int posix_openpt(int); char *ptsname(int); int unlockpt(int); #endif /* __XSI_VISIBLE */ #if __BSD_VISIBLE /* ptsname_r will be included in POSIX issue 8 */ int ptsname_r(int, char *, size_t); #endif #if __BSD_VISIBLE extern const char *malloc_conf; extern void (*malloc_message)(void *, const char *); /* * The alloca() function can't be implemented in C, and on some * platforms it can't be implemented at all as a callable function. * The GNU C compiler provides a built-in alloca() which we can use. * On platforms where alloca() is not in libc, programs which use it * will fail to link when compiled with non-GNU compilers. 
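 *
 * As a sketch (hypothetical caller): with a GNU-compatible compiler,
 *
 *	char *buf = alloca(len);
 *
 * expands to __builtin_alloca(len), reserving len bytes in the caller's
 * stack frame; the space is reclaimed automatically on function return.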
*/ #if __GNUC__ >= 2 #undef alloca /* some GNU bits try to get cute and define this on their own */ #define alloca(sz) __builtin_alloca(sz) #endif void abort2(const char *, int, void **) __dead2; __uint32_t arc4random(void); void arc4random_buf(void *, size_t); __uint32_t arc4random_uniform(__uint32_t); #ifdef __BLOCKS__ int atexit_b(void (^ _Nonnull)(void)); void *bsearch_b(const void *, const void *, size_t, size_t, int (^ _Nonnull)(const void *, const void *)); #endif char *getbsize(int *, long *); /* getcap(3) functions */ char *cgetcap(char *, const char *, int); int cgetclose(void); int cgetent(char **, char **, const char *); int cgetfirst(char **, char **); int cgetmatch(const char *, const char *); int cgetnext(char **, char **); int cgetnum(char *, const char *, long *); int cgetset(const char *); int cgetstr(char *, const char *, char **); int cgetustr(char *, const char *, char **); int clearenv(void); int daemon(int, int); int daemonfd(int, int); char *devname(__dev_t, __mode_t); char *devname_r(__dev_t, __mode_t, char *, int); char *fdevname(int); char *fdevname_r(int, char *, int); int getloadavg(double [], int); const char * getprogname(void); int heapsort(void *, size_t, size_t, int (* _Nonnull)(const void *, const void *)); #ifdef __BLOCKS__ int heapsort_b(void *, size_t, size_t, int (^ _Nonnull)(const void *, const void *)); void qsort_b(void *, size_t, size_t, int (^ _Nonnull)(const void *, const void *)); #endif int l64a_r(long, char *, int); int mergesort(void *, size_t, size_t, int (*)(const void *, const void *)); #ifdef __BLOCKS__ int mergesort_b(void *, size_t, size_t, int (^)(const void *, const void *)); #endif int mkostemp(char *, int); int mkostemps(char *, int, int); int mkostempsat(int, char *, int, int); -void qsort_r(void *, size_t, size_t, void *, - int (*)(void *, const void *, const void *)); +void qsort_r(void *, size_t, size_t, + int (*)(const void *, const void *, void *), void *); int radixsort(const unsigned char **, int, const unsigned char *, unsigned); void *reallocarray(void *, size_t, size_t) __result_use_check __alloc_size2(2, 3); void *reallocf(void *, size_t) __result_use_check __alloc_size(2); int rpmatch(const char *); void setprogname(const char *); int sradixsort(const unsigned char **, int, const unsigned char *, unsigned); void srandomdev(void); long long strtonum(const char *, long long, long long, const char **); /* Deprecated interfaces, to be removed. */ __int64_t strtoq(const char *, char **, int); __uint64_t strtouq(const char *, char **, int); +/* + * In FreeBSD 14, the prototype of qsort_r() was modified to comply with + * POSIX: the standardized qsort_r() swaps the order of the last two + * parameters, and the comparator now takes the thunk as its last + * parameter; both differ from the historical FreeBSD qsort_r() interface. + * + * As a workaround, explicitly link against the historical interface, + * qsort_r@FBSD_1.0, whenever qsort_r() is called with a last argument + * whose type exactly matches the historical FreeBSD qsort_r() comparator + * signature, so that applications written for the historical interface + * can continue to work without modification.
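+ *
+ * For illustration (hypothetical callers; the static type of the last
+ * argument alone selects the symbol):
+ *
+ *	int new_cmp(const void *, const void *, void *);
+ *	int old_cmp(void *, const void *, const void *);
+ *
+ *	qsort_r(b, n, sz, new_cmp, arg);   selects qsort_r (POSIX, FBSD_1.7)
+ *	qsort_r(b, n, sz, arg, old_cmp);   selects __qsort_r_compat (FBSD_1.0)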
+ */ +#if defined(__generic) || defined(__cplusplus) +void __qsort_r_compat(void *, size_t, size_t, void *, + int (*)(void *, const void *, const void *)); +__sym_compat(qsort_r, __qsort_r_compat, FBSD_1.0); +#endif +#if defined(__generic) && !defined(__cplusplus) +#define qsort_r(base, nel, width, arg4, arg5) \ + __generic(arg5, int (*)(void *, const void *, const void *), \ + __qsort_r_compat, qsort_r)(base, nel, width, arg4, arg5) +#elif defined(__cplusplus) +__END_DECLS +extern "C++" { +static inline void qsort_r(void *base, size_t nmemb, size_t size, + void *thunk, int (*compar)(void *, const void *, const void *)) { + __qsort_r_compat(base, nmemb, size, thunk, compar); +} +} +__BEGIN_DECLS +#endif + extern char *suboptarg; /* getsubopt(3) external variable */ #endif /* __BSD_VISIBLE */ #if __EXT1_VISIBLE #ifndef _RSIZE_T_DEFINED #define _RSIZE_T_DEFINED typedef size_t rsize_t; #endif #ifndef _ERRNO_T_DEFINED #define _ERRNO_T_DEFINED typedef int errno_t; #endif /* K.3.6 */ typedef void (*constraint_handler_t)(const char * __restrict, void * __restrict, errno_t); /* K.3.6.1.1 */ constraint_handler_t set_constraint_handler_s(constraint_handler_t handler); /* K.3.6.1.2 */ _Noreturn void abort_handler_s(const char * __restrict, void * __restrict, errno_t); /* K3.6.1.3 */ void ignore_handler_s(const char * __restrict, void * __restrict, errno_t); /* K.3.6.3.2 */ errno_t qsort_s(void *, rsize_t, rsize_t, int (*)(const void *, const void *, void *), void *); #endif /* __EXT1_VISIBLE */ __END_DECLS __NULLABILITY_PRAGMA_POP #endif /* !_STDLIB_H_ */ diff --git a/lib/libc/gen/scandir-compat11.c b/lib/libc/gen/scandir-compat11.c index e6af1929a4d2..fe8d34d4b7d6 100644 --- a/lib/libc/gen/scandir-compat11.c +++ b/lib/libc/gen/scandir-compat11.c @@ -1,156 +1,156 @@ /* * Copyright (c) 1983, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * From: @(#)scandir.c 8.3 (Berkeley) 1/2/94 * From: FreeBSD: head/lib/libc/gen/scandir.c 317372 2017-04-24 14:56:41Z pfg */ #include __FBSDID("$FreeBSD$"); /* * Scan the directory dirname, calling select to make a list of selected * directory entries, then sort using qsort and the comparison routine dcomp. * Returns the number of entries and a pointer to a list of pointers to * struct dirent (through namelist). Returns -1 if there were any errors. */ #include "namespace.h" #define _WANT_FREEBSD11_DIRENT #include <dirent.h> #include <stdlib.h> #include <string.h> #include "un-namespace.h" #include "gen-compat.h" /* * scandir_b@FBSD_1.4 was never exported from libc.so.7 due to a * mistake, so there is no point in exporting it now with an earlier * symbol version. As a result, we do not need to implement a compat * function freebsd11_scandir_b(). */ #define SELECT(x) select(x) -static int freebsd11_scandir_thunk_cmp(void *thunk, const void *p1, - const void *p2); +static int freebsd11_scandir_thunk_cmp(const void *p1, const void *p2, + void *thunk); int freebsd11_scandir(const char *dirname, struct freebsd11_dirent ***namelist, int (*select)(const struct freebsd11_dirent *), int (*dcomp)(const struct freebsd11_dirent **, const struct freebsd11_dirent **)) { struct freebsd11_dirent *d, *p, **names = NULL; size_t arraysz, numitems; DIR *dirp; if ((dirp = opendir(dirname)) == NULL) return(-1); numitems = 0; arraysz = 32; /* initial estimate of the array size */ names = (struct freebsd11_dirent **)malloc( arraysz * sizeof(struct freebsd11_dirent *)); if (names == NULL) goto fail; while ((d = freebsd11_readdir(dirp)) != NULL) { if (select != NULL && !SELECT(d)) continue; /* just selected names */ /* * Make a minimum size copy of the data */ p = (struct freebsd11_dirent *)malloc(FREEBSD11_DIRSIZ(d)); if (p == NULL) goto fail; p->d_fileno = d->d_fileno; p->d_type = d->d_type; p->d_reclen = d->d_reclen; p->d_namlen = d->d_namlen; bcopy(d->d_name, p->d_name, p->d_namlen + 1); /* * Check to make sure the array has space left and * realloc the maximum size. */ if (numitems >= arraysz) { struct freebsd11_dirent **names2; names2 = reallocarray(names, arraysz, 2 * sizeof(struct freebsd11_dirent *)); if (names2 == NULL) { free(p); goto fail; } names = names2; arraysz *= 2; } names[numitems++] = p; } closedir(dirp); if (numitems && dcomp != NULL) qsort_r(names, numitems, sizeof(struct freebsd11_dirent *), - &dcomp, freebsd11_scandir_thunk_cmp); + freebsd11_scandir_thunk_cmp, &dcomp); *namelist = names; return (numitems); fail: while (numitems > 0) free(names[--numitems]); free(names); closedir(dirp); return (-1); } /* * Alphabetic order comparison routine for those who want it. * POSIX 2008 requires that alphasort() uses strcoll().
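 *
 * (For example, and depending on the locale: under a dictionary-style
 * collation strcoll() may order "a" before "B", whereas a raw strcmp()
 * of the bytes would order "B" first.)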
*/ int freebsd11_alphasort(const struct freebsd11_dirent **d1, const struct freebsd11_dirent **d2) { return (strcoll((*d1)->d_name, (*d2)->d_name)); } static int -freebsd11_scandir_thunk_cmp(void *thunk, const void *p1, const void *p2) +freebsd11_scandir_thunk_cmp(const void *p1, const void *p2, void *thunk) { int (*dc)(const struct freebsd11_dirent **, const struct freebsd11_dirent **); dc = *(int (**)(const struct freebsd11_dirent **, const struct freebsd11_dirent **))thunk; return (dc((const struct freebsd11_dirent **)p1, (const struct freebsd11_dirent **)p2)); } __sym_compat(alphasort, freebsd11_alphasort, FBSD_1.0); __sym_compat(scandir, freebsd11_scandir, FBSD_1.0); diff --git a/lib/libc/gen/scandir.c b/lib/libc/gen/scandir.c index 496b1ddc29db..4f40678513dd 100644 --- a/lib/libc/gen/scandir.c +++ b/lib/libc/gen/scandir.c @@ -1,209 +1,209 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1983, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __SCCSID("@(#)scandir.c 8.3 (Berkeley) 1/2/94"); __FBSDID("$FreeBSD$"); /* * Scan the directory dirname calling select to make a list of selected * directory entries then sort using qsort and compare routine dcomp. * Returns the number of entries and a pointer to a list of pointers to * struct dirent (through namelist). Returns -1 if there were any errors. 
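 *
 * Typical use (a hypothetical caller; error handling omitted):
 *
 *	struct dirent **namelist;
 *	int i, n;
 *
 *	n = scandir(".", &namelist, NULL, alphasort);
 *	for (i = 0; i < n; i++) {
 *		printf("%s\n", namelist[i]->d_name);
 *		free(namelist[i]);
 *	}
 *	free(namelist);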
*/ #include "namespace.h" #include #include #include #include #include #include "un-namespace.h" #ifdef I_AM_SCANDIR_B #include "block_abi.h" #define SELECT(x) CALL_BLOCK(select, x) #ifndef __BLOCKS__ void qsort_b(void *, size_t, size_t, void *); #endif #else #define SELECT(x) select(x) #endif #ifdef I_AM_SCANDIR_B typedef DECLARE_BLOCK(int, select_block, const struct dirent *); typedef DECLARE_BLOCK(int, dcomp_block, const struct dirent **, const struct dirent **); #else -static int scandir_thunk_cmp(void *thunk, const void *p1, const void *p2); +static int scandir_thunk_cmp(const void *p1, const void *p2, void *thunk); #endif static int #ifdef I_AM_SCANDIR_B scandir_b_dirp(DIR *dirp, struct dirent ***namelist, select_block select, dcomp_block dcomp) #else scandir_dirp(DIR *dirp, struct dirent ***namelist, int (*select)(const struct dirent *), int (*dcomp)(const struct dirent **, const struct dirent **)) #endif { struct dirent *d, *p, **names = NULL; size_t arraysz, numitems; numitems = 0; arraysz = 32; /* initial estimate of the array size */ names = (struct dirent **)malloc(arraysz * sizeof(struct dirent *)); if (names == NULL) goto fail; while ((d = readdir(dirp)) != NULL) { if (select != NULL && !SELECT(d)) continue; /* just selected names */ /* * Make a minimum size copy of the data */ p = (struct dirent *)malloc(_GENERIC_DIRSIZ(d)); if (p == NULL) goto fail; p->d_fileno = d->d_fileno; p->d_type = d->d_type; p->d_reclen = d->d_reclen; p->d_namlen = d->d_namlen; bcopy(d->d_name, p->d_name, p->d_namlen + 1); /* * Check to make sure the array has space left and * realloc the maximum size. */ if (numitems >= arraysz) { struct dirent **names2; names2 = reallocarray(names, arraysz, 2 * sizeof(struct dirent *)); if (names2 == NULL) { free(p); goto fail; } names = names2; arraysz *= 2; } names[numitems++] = p; } closedir(dirp); if (numitems && dcomp != NULL) #ifdef I_AM_SCANDIR_B qsort_b(names, numitems, sizeof(struct dirent *), (void*)dcomp); #else qsort_r(names, numitems, sizeof(struct dirent *), - &dcomp, scandir_thunk_cmp); + scandir_thunk_cmp, &dcomp); #endif *namelist = names; return (numitems); fail: while (numitems > 0) free(names[--numitems]); free(names); closedir(dirp); return (-1); } int #ifdef I_AM_SCANDIR_B scandir_b(const char *dirname, struct dirent ***namelist, select_block select, dcomp_block dcomp) #else scandir(const char *dirname, struct dirent ***namelist, int (*select)(const struct dirent *), int (*dcomp)(const struct dirent **, const struct dirent **)) #endif { DIR *dirp; dirp = opendir(dirname); if (dirp == NULL) return (-1); return ( #ifdef I_AM_SCANDIR_B scandir_b_dirp #else scandir_dirp #endif (dirp, namelist, select, dcomp)); } #ifndef I_AM_SCANDIR_B int scandirat(int dirfd, const char *dirname, struct dirent ***namelist, int (*select)(const struct dirent *), int (*dcomp)(const struct dirent **, const struct dirent **)) { DIR *dirp; int fd; fd = _openat(dirfd, dirname, O_RDONLY | O_DIRECTORY | O_CLOEXEC); if (fd == -1) return (-1); dirp = fdopendir(fd); if (dirp == NULL) { _close(fd); return (-1); } return (scandir_dirp(dirp, namelist, select, dcomp)); } /* * Alphabetic order comparison routine for those who want it. * POSIX 2008 requires that alphasort() uses strcoll(). 
*/ int alphasort(const struct dirent **d1, const struct dirent **d2) { return (strcoll((*d1)->d_name, (*d2)->d_name)); } int versionsort(const struct dirent **d1, const struct dirent **d2) { return (strverscmp((*d1)->d_name, (*d2)->d_name)); } static int -scandir_thunk_cmp(void *thunk, const void *p1, const void *p2) +scandir_thunk_cmp(const void *p1, const void *p2, void *thunk) { int (*dc)(const struct dirent **, const struct dirent **); dc = *(int (**)(const struct dirent **, const struct dirent **))thunk; return (dc((const struct dirent **)p1, (const struct dirent **)p2)); } #endif diff --git a/lib/libc/stdlib/Makefile.inc b/lib/libc/stdlib/Makefile.inc index a658fd78e862..8ace2c051b82 100644 --- a/lib/libc/stdlib/Makefile.inc +++ b/lib/libc/stdlib/Makefile.inc @@ -1,69 +1,69 @@ # from @(#)Makefile.inc 8.3 (Berkeley) 2/4/95 # $FreeBSD$ # machine-independent stdlib sources .PATH: ${LIBC_SRCTOP}/${LIBC_ARCH}/stdlib ${LIBC_SRCTOP}/stdlib MISRCS+=C99_Exit.c a64l.c abort.c abs.c atexit.c atof.c atoi.c atol.c atoll.c \ bsearch.c \ cxa_thread_atexit.c cxa_thread_atexit_impl.c \ div.c exit.c getenv.c getopt.c getopt_long.c \ getsubopt.c hcreate.c hcreate_r.c hdestroy_r.c heapsort.c heapsort_b.c \ hsearch_r.c imaxabs.c imaxdiv.c \ insque.c l64a.c labs.c ldiv.c llabs.c lldiv.c lsearch.c \ - merge.c mergesort_b.c ptsname.c qsort.c qsort_r.c qsort_s.c \ - quick_exit.c radixsort.c rand.c \ + merge.c mergesort_b.c ptsname.c qsort.c qsort_r.c qsort_r_compat.c \ + qsort_s.c quick_exit.c radixsort.c rand.c \ random.c reallocarray.c reallocf.c realpath.c remque.c \ set_constraint_handler_s.c strfmon.c strtoimax.c \ strtol.c strtold.c strtoll.c strtoq.c strtoul.c strtonum.c strtoull.c \ strtoumax.c strtouq.c system.c tdelete.c tfind.c tsearch.c twalk.c CFLAGS.qsort.c+= -Wsign-compare # Work around an issue on case-insensitive file systems. # libc has both _Exit.c and _exit.s and they both yield # _exit.o (case insensitively speaking). 
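# Compiling _Exit.c under the name C99_Exit.c (via the symlink rule below)
# gives it the distinct object name C99_Exit.o, avoiding the clash.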
CLEANFILES+=C99_Exit.c C99_Exit.c: ${LIBC_SRCTOP}/stdlib/_Exit.c .NOMETA ln -sf ${.ALLSRC} ${.TARGET} SYM_MAPS+= ${LIBC_SRCTOP}/stdlib/Symbol.map # machine-dependent stdlib sources .sinclude "${LIBC_SRCTOP}/${LIBC_ARCH}/stdlib/Makefile.inc" MAN+= a64l.3 abort.3 abs.3 alloca.3 atexit.3 atof.3 \ atoi.3 atol.3 at_quick_exit.3 bsearch.3 \ div.3 exit.3 getenv.3 getopt.3 getopt_long.3 getsubopt.3 \ hcreate.3 imaxabs.3 imaxdiv.3 insque.3 labs.3 ldiv.3 llabs.3 lldiv.3 \ lsearch.3 memory.3 ptsname.3 qsort.3 \ quick_exit.3 \ radixsort.3 rand.3 random.3 reallocarray.3 reallocf.3 realpath.3 \ set_constraint_handler_s.3 \ strfmon.3 strtod.3 strtol.3 strtonum.3 strtoul.3 system.3 \ tsearch.3 MLINKS+=a64l.3 l64a.3 a64l.3 l64a_r.3 MLINKS+=atol.3 atoll.3 MLINKS+=exit.3 _Exit.3 MLINKS+=getenv.3 clearenv.3 getenv.3 putenv.3 getenv.3 setenv.3 \ getenv.3 unsetenv.3 MLINKS+=getopt_long.3 getopt_long_only.3 MLINKS+=hcreate.3 hdestroy.3 hcreate.3 hsearch.3 MLINKS+=hcreate.3 hcreate_r.3 hcreate.3 hdestroy_r.3 hcreate.3 hsearch_r.3 MLINKS+=insque.3 remque.3 MLINKS+=lsearch.3 lfind.3 MLINKS+=ptsname.3 grantpt.3 ptsname.3 ptsname_r.3 ptsname.3 unlockpt.3 MLINKS+=qsort.3 heapsort.3 qsort.3 mergesort.3 qsort.3 qsort_r.3 \ qsort.3 qsort_s.3 MLINKS+=rand.3 rand_r.3 rand.3 srand.3 MLINKS+=random.3 initstate.3 random.3 setstate.3 random.3 srandom.3 \ random.3 srandomdev.3 MLINKS+=radixsort.3 sradixsort.3 MLINKS+=set_constraint_handler_s.3 abort_handler_s.3 MLINKS+=set_constraint_handler_s.3 ignore_handler_s.3 MLINKS+=strfmon.3 strfmon_l.3 MLINKS+=strtod.3 strtof.3 strtod.3 strtold.3 MLINKS+=strtol.3 strtoll.3 strtol.3 strtoq.3 strtol.3 strtoimax.3 MLINKS+=strtoul.3 strtoull.3 strtoul.3 strtouq.3 strtoul.3 strtoumax.3 MLINKS+=tsearch.3 tdelete.3 tsearch.3 tfind.3 tsearch.3 twalk.3 diff --git a/lib/libc/stdlib/Symbol.map b/lib/libc/stdlib/Symbol.map index 6524c6097b96..7e0b141a21b6 100644 --- a/lib/libc/stdlib/Symbol.map +++ b/lib/libc/stdlib/Symbol.map @@ -1,141 +1,141 @@ /* * $FreeBSD$ */ FBSD_1.0 { _Exit; a64l; abort; abs; atexit; __cxa_atexit; __cxa_finalize; atof; atoi; atol; atoll; bsearch; div; __isthreaded; exit; getenv; opterr; optind; optopt; optreset; optarg; getopt; getopt_long; getopt_long_only; suboptarg; getsubopt; grantpt; ptsname; unlockpt; hcreate; hdestroy; hsearch; heapsort; imaxabs; imaxdiv; insque; l64a; l64a_r; labs; ldiv; llabs; lldiv; lsearch; lfind; mergesort; putenv; - qsort_r; qsort; radixsort; sradixsort; rand_r; srandom; srandomdev; initstate; setstate; random; reallocf; realpath; remque; setenv; unsetenv; strfmon; strtoimax; strtol; strtoll; strtonum; strtoq; strtoul; strtoull; strtoumax; strtouq; system; tdelete; tfind; tsearch; twalk; }; FBSD_1.3 { at_quick_exit; atof_l; atoi_l; atol_l; atoll_l; quick_exit; strtod_l; strtof_l; strtoimax_l; strtol_l; strtold_l; strtoll_l; strtoq_l; strtoul_l; strtoull_l; strtoumax_l; strtouq_l; }; FBSD_1.4 { atexit_b; bsearch_b; heapsort_b; mergesort_b; qsort_b; hcreate_r; hdestroy_r; hsearch_r; reallocarray; }; FBSD_1.5 { __cxa_thread_atexit; __cxa_thread_atexit_impl; abort_handler_s; ignore_handler_s; set_constraint_handler_s; }; FBSD_1.6 { ptsname_r; qsort_s; rand; srand; }; FBSD_1.7 { clearenv; + qsort_r; }; FBSDprivate_1.0 { __system; _system; __libc_system; __cxa_thread_call_dtors; __libc_atexit; }; diff --git a/lib/libc/stdlib/qsort.3 b/lib/libc/stdlib/qsort.3 index 606185f9baee..6449849fc490 100644 --- a/lib/libc/stdlib/qsort.3 +++ b/lib/libc/stdlib/qsort.3 @@ -1,438 +1,444 @@ .\" Copyright (c) 1990, 1991, 1993 .\" The Regents of the University of 
California. All rights reserved. .\" .\" This code is derived from software contributed to Berkeley by .\" the American National Standards Committee X3, on Information .\" Processing Systems. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" 3. Neither the name of the University nor the names of its contributors .\" may be used to endorse or promote products derived from this software .\" without specific prior written permission. .\" .\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND .\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE .\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL .\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS .\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) .\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT .\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .\" @(#)qsort.3 8.1 (Berkeley) 6/4/93 .\" $FreeBSD$ .\" -.Dd January 20, 2020 +.Dd September 30, 2022 .Dt QSORT 3 .Os .Sh NAME .Nm qsort , .Nm qsort_b , .Nm qsort_r , .Nm heapsort , .Nm heapsort_b , .Nm mergesort , .Nm mergesort_b .Nd sort functions .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In stdlib.h .Ft void .Fo qsort .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Ft void .Fo qsort_b .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Ft void .Fo qsort_r .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" +.Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *, void *\*[rp]" .Fa "void *thunk" -.Fa "int \*[lp]*compar\*[rp]\*[lp]void *, const void *, const void *\*[rp]" .Fc .Ft int .Fo heapsort .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Ft int .Fo heapsort_b .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Ft int .Fo mergesort .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Ft int .Fo mergesort_b .Fa "void *base" .Fa "size_t nmemb" .Fa "size_t size" .Fa "int \*[lp]^compar\*[rp]\*[lp]const void *, const void *\*[rp]" .Fc .Fd #define __STDC_WANT_LIB_EXT1__ 1 .Ft errno_t .Fo qsort_s .Fa "void *base" .Fa "rsize_t nmemb" .Fa "rsize_t size" .Fa "int \*[lp]*compar\*[rp]\*[lp]const void *, const void *, void *\*[rp]" .Fa "void *thunk" .Fc .Sh DESCRIPTION The .Fn qsort function is a modified partition-exchange sort, or quicksort. The .Fn heapsort function is a modified selection sort. 
The .Fn mergesort function is a modified merge sort with exponential search intended for sorting data with pre-existing order. .Pp The .Fn qsort and .Fn heapsort functions sort an array of .Fa nmemb objects, the initial member of which is pointed to by .Fa base . The size of each object is specified by .Fa size . The .Fn mergesort function behaves similarly, but .Em requires that .Fa size be greater than .Dq "sizeof(void *) / 2" . .Pp The contents of the array .Fa base are sorted in ascending order according to a comparison function pointed to by .Fa compar , which requires two arguments pointing to the objects being compared. .Pp The comparison function must return an integer less than, equal to, or greater than zero if the first argument is considered to be respectively less than, equal to, or greater than the second. .Pp The .Fn qsort_r function behaves identically to .Fn qsort , except that it takes an additional argument, .Fa thunk , which is passed unchanged as the last argument to the function pointed to by .Fa compar . This allows the comparison function to access additional data without using global variables, and thus .Fn qsort_r is suitable for use in functions which must be reentrant. The .Fn qsort_b function behaves identically to .Fn qsort , except that it takes a block, rather than a function pointer. .Pp The algorithms implemented by .Fn qsort , .Fn qsort_r , and .Fn heapsort are .Em not stable, that is, if two members compare as equal, their order in the sorted array is undefined. The .Fn heapsort_b function behaves identically to .Fn heapsort , except that it takes a block, rather than a function pointer. The .Fn mergesort algorithm is stable. The .Fn mergesort_b function behaves identically to .Fn mergesort , except that it takes a block, rather than a function pointer. .Pp The .Fn qsort and .Fn qsort_r functions are an implementation of C.A.R. Hoare's .Dq quicksort algorithm, a variant of partition-exchange sorting; in particular, see .An D.E. Knuth Ns 's .%T "Algorithm Q" . .Sy Quicksort takes O N lg N average time. This implementation uses median selection to avoid its O N**2 worst-case behavior. .Pp The .Fn heapsort function is an implementation of .An "J.W.J. Williams" Ns 's .Dq heapsort algorithm, a variant of selection sorting; in particular, see .An "D.E. Knuth" Ns 's .%T "Algorithm H" . .Sy Heapsort takes O N lg N worst-case time. Its .Em only advantage over .Fn qsort is that it uses almost no additional memory; while .Fn qsort does not allocate memory, it is implemented using recursion. .Pp The function .Fn mergesort requires additional memory of size .Fa nmemb * .Fa size bytes; it should be used only when space is not at a premium. The .Fn mergesort function is optimized for data with pre-existing order; its worst case time is O N lg N; its best case is O N. .Pp Normally, .Fn qsort is faster than .Fn mergesort is faster than .Fn heapsort . Memory availability and pre-existing order in the data can make this untrue. .Pp The .Fn qsort_s function behaves the same as .Fn qsort_r , except that: .Bl -dash .It The .Fa nmemb and .Fa size arguments have type .Vt rsize_t rather than .Vt size_t , and .Fn qsort_s returns a value of type .Vt errno_t .It If .Fa nmemb or .Fa size are greater than .Dv RSIZE_MAX , or .Fa nmemb is not zero and .Fa compar is .Dv NULL , then the runtime-constraint handler is called, and .Fn qsort_s returns an error. Note that the handler is called before .Fn qsort_s returns the error, and the handler function might not return.
.El .Sh RETURN VALUES The .Fn qsort and .Fn qsort_r functions return no value. The .Fn qsort_s function returns zero on success, non-zero on error. .Pp .Rv -std heapsort mergesort .Sh EXAMPLES A sample program that sorts an array of .Vt int values in place using .Fn qsort , and then prints the sorted array to standard output is: .Bd -literal #include <stdio.h> #include <stdlib.h> /* * Custom comparison function that compares 'int' values through pointers * passed by qsort(3). */ static int int_compare(const void *p1, const void *p2) { int left = *(const int *)p1; int right = *(const int *)p2; return ((left > right) - (left < right)); } /* * Sort an array of 'int' values and print it to standard output. */ int main(void) { int int_array[] = { 4, 5, 9, 3, 0, 1, 7, 2, 8, 6 }; size_t array_size = sizeof(int_array) / sizeof(int_array[0]); size_t k; qsort(&int_array, array_size, sizeof(int_array[0]), int_compare); for (k = 0; k < array_size; k++) printf(" %d", int_array[k]); puts(""); return (EXIT_SUCCESS); } .Ed .Sh COMPATIBILITY Before .Fx 14.0 , the comparison function used with .Fn qsort_r took .Fa thunk as its first argument rather than its last, an order different from the one used by .Fn qsort_s and by the GNU libc implementation of .Fn qsort_r . The current interface matches both; applications built against the historical interface are directed to a binary-compatible implementation at compile time and continue to work without modification. .Pp .Fn qsort_s is part of the .Em optional Annex K portion of .St -isoC-2011 and may not be portable to other standards-conforming platforms. .Pp Previous versions of .Fn qsort did not permit the comparison routine itself to call .Fn qsort 3 . This is no longer true. .Sh ERRORS The .Fn heapsort and .Fn mergesort functions succeed unless: .Bl -tag -width Er .It Bq Er EINVAL The .Fa size argument is zero, or the .Fa size argument to .Fn mergesort is less than .Dq "sizeof(void *) / 2" . .It Bq Er ENOMEM The .Fn heapsort or .Fn mergesort functions were unable to allocate memory. .El .Sh SEE ALSO .Xr sort 1 , .Xr radixsort 3 .Rs .%A Hoare, C.A.R. .%D 1962 .%T "Quicksort" .%J "The Computer Journal" .%V 5:1 .%P pp. 10-15 .Re .Rs .%A Williams, J.W.J. .%D 1964 .%T "Heapsort" .%J "Communications of the ACM" .%V 7:1 .%P pp. 347-348 .Re .Rs .%A Knuth, D.E. .%D 1968 .%B "The Art of Computer Programming" .%V Vol. 3 .%T "Sorting and Searching" .%P pp. 114-123, 145-149 .Re .Rs .%A McIlroy, P.M. .%T "Optimistic Sorting and Information Theoretic Complexity" .%J "Fourth Annual ACM-SIAM Symposium on Discrete Algorithms" .%V January 1992 .Re .Rs .%A Bentley, J.L. .%A McIlroy, M.D. .%T "Engineering a Sort Function" .%J "Software--Practice and Experience" .%V Vol. 23(11) .%P pp. 1249-1265 .%D November\ 1993 .Re .Sh STANDARDS The .Fn qsort function conforms to .St -isoC . .Fn qsort_s conforms to .St -isoC-2011 K.3.6.3.2. .Sh HISTORY The variants of these functions that take blocks as arguments first appeared in Mac OS X. This implementation was created by David Chisnall. +.Pp +In +.Fx 14.0 , +the prototype of +.Fn qsort_r +was updated to match POSIX. diff --git a/lib/libc/stdlib/qsort.c b/lib/libc/stdlib/qsort.c index 0b99c04507d3..11a3c5508143 100644 --- a/lib/libc/stdlib/qsort.c +++ b/lib/libc/stdlib/qsort.c @@ -1,250 +1,262 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #if defined(LIBC_SCCS) && !defined(lint) static char sccsid[] = "@(#)qsort.c 8.1 (Berkeley) 6/4/93"; #endif /* LIBC_SCCS and not lint */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include "libc_private.h" #if defined(I_AM_QSORT_R) +typedef int cmp_t(const void *, const void *, void *); +#elif defined(I_AM_QSORT_R_COMPAT) typedef int cmp_t(void *, const void *, const void *); #elif defined(I_AM_QSORT_S) typedef int cmp_t(const void *, const void *, void *); #else typedef int cmp_t(const void *, const void *); #endif static inline char *med3(char *, char *, char *, cmp_t *, void *); #define MIN(a, b) ((a) < (b) ? a : b) /* * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". */ static inline void swapfunc(char *a, char *b, size_t es) { char t; do { t = *a; *a++ = *b; *b++ = t; } while (--es > 0); } #define vecswap(a, b, n) \ if ((n) > 0) swapfunc(a, b, n) #if defined(I_AM_QSORT_R) +#define CMP(t, x, y) (cmp((x), (y), (t))) +#elif defined(I_AM_QSORT_R_COMPAT) #define CMP(t, x, y) (cmp((t), (x), (y))) #elif defined(I_AM_QSORT_S) #define CMP(t, x, y) (cmp((x), (y), (t))) #else #define CMP(t, x, y) (cmp((x), (y))) #endif static inline char * med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk -#if !defined(I_AM_QSORT_R) && !defined(I_AM_QSORT_S) +#if !defined(I_AM_QSORT_R) && !defined(I_AM_QSORT_R_COMPAT) && !defined(I_AM_QSORT_S) __unused #endif ) { return CMP(thunk, a, b) < 0 ? (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a )) :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? a : c )); } /* * The actual qsort() implementation is static to avoid preemptible calls when * recursing. Also give them different names for improved debugging. 
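 *
 * Concretely (from the defines above): building qsort.c directly yields
 * local_qsort() and qsort(); with I_AM_QSORT_R it yields local_qsort_r()
 * and the POSIX qsort_r(); with I_AM_QSORT_R_COMPAT, local_qsort_r_compat()
 * and __qsort_r_compat(); and with I_AM_QSORT_S, local_qsort_s() and
 * qsort_s().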
*/ #if defined(I_AM_QSORT_R) #define local_qsort local_qsort_r +#elif defined(I_AM_QSORT_R_COMPAT) +#define local_qsort local_qsort_r_compat #elif defined(I_AM_QSORT_S) #define local_qsort local_qsort_s #endif static void local_qsort(void *a, size_t n, size_t es, cmp_t *cmp, void *thunk) { char *pa, *pb, *pc, *pd, *pl, *pm, *pn; size_t d1, d2; int cmp_result; int swap_cnt; if (__predict_false(n == 0)) return; loop: swap_cnt = 0; if (n < 7) { for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es) for (pl = pm; pl > (char *)a && CMP(thunk, pl - es, pl) > 0; pl -= es) swapfunc(pl, pl - es, es); return; } pm = (char *)a + (n / 2) * es; if (n > 7) { pl = a; pn = (char *)a + (n - 1) * es; if (n > 40) { size_t d = (n / 8) * es; pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk); pm = med3(pm - d, pm, pm + d, cmp, thunk); pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk); } pm = med3(pl, pm, pn, cmp, thunk); } swapfunc(a, pm, es); pa = pb = (char *)a + es; pc = pd = (char *)a + (n - 1) * es; for (;;) { while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) { if (cmp_result == 0) { swap_cnt = 1; swapfunc(pa, pb, es); pa += es; } pb += es; } while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) { if (cmp_result == 0) { swap_cnt = 1; swapfunc(pc, pd, es); pd -= es; } pc -= es; } if (pb > pc) break; swapfunc(pb, pc, es); swap_cnt = 1; pb += es; pc -= es; } if (swap_cnt == 0) { /* Switch to insertion sort */ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es) for (pl = pm; pl > (char *)a && CMP(thunk, pl - es, pl) > 0; pl -= es) swapfunc(pl, pl - es, es); return; } pn = (char *)a + n * es; d1 = MIN(pa - (char *)a, pb - pa); vecswap(a, pb - d1, d1); /* * Cast es to preserve signedness of right-hand side of MIN() * expression, to avoid sign ambiguity in the implied comparison. es * is safely within [0, SSIZE_MAX]. 
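 *
 * (Without the cast, subtracting the size_t value es from the signed
 * pointer difference pn - pd would convert the whole right-hand side to
 * an unsigned type, and the signed left-hand side pd - pc would then be
 * compared as unsigned as well; casting es keeps both operands of the
 * implied comparison signed.)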
*/ d1 = MIN(pd - pc, pn - pd - (ssize_t)es); vecswap(pb, pn - d1, d1); d1 = pb - pa; d2 = pd - pc; if (d1 <= d2) { /* Recurse on left partition, then iterate on right partition */ if (d1 > es) { local_qsort(a, d1 / es, es, cmp, thunk); } if (d2 > es) { /* Iterate rather than recurse to save stack space */ /* qsort(pn - d2, d2 / es, es, cmp); */ a = pn - d2; n = d2 / es; goto loop; } } else { /* Recurse on right partition, then iterate on left partition */ if (d2 > es) { local_qsort(pn - d2, d2 / es, es, cmp, thunk); } if (d1 > es) { /* Iterate rather than recurse to save stack space */ /* qsort(a, d1 / es, es, cmp); */ n = d1 / es; goto loop; } } } #if defined(I_AM_QSORT_R) void -qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp) +(qsort_r)(void *a, size_t n, size_t es, cmp_t *cmp, void *thunk) { local_qsort_r(a, n, es, cmp, thunk); } +#elif defined(I_AM_QSORT_R_COMPAT) +void +__qsort_r_compat(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp) +{ + local_qsort_r_compat(a, n, es, cmp, thunk); +} #elif defined(I_AM_QSORT_S) errno_t qsort_s(void *a, rsize_t n, rsize_t es, cmp_t *cmp, void *thunk) { if (n > RSIZE_MAX) { __throw_constraint_handler_s("qsort_s : n > RSIZE_MAX", EINVAL); return (EINVAL); } else if (es > RSIZE_MAX) { __throw_constraint_handler_s("qsort_s : es > RSIZE_MAX", EINVAL); return (EINVAL); } else if (n != 0) { if (a == NULL) { __throw_constraint_handler_s("qsort_s : a == NULL", EINVAL); return (EINVAL); } else if (cmp == NULL) { __throw_constraint_handler_s("qsort_s : cmp == NULL", EINVAL); return (EINVAL); } } local_qsort_s(a, n, es, cmp, thunk); return (0); } #else void qsort(void *a, size_t n, size_t es, cmp_t *cmp) { local_qsort(a, n, es, cmp, NULL); } #endif diff --git a/lib/libc/stdlib/qsort_r.c b/lib/libc/stdlib/qsort_r.c index f489d31c2335..d86873604f59 100644 --- a/lib/libc/stdlib/qsort_r.c +++ b/lib/libc/stdlib/qsort_r.c @@ -1,19 +1,8 @@ /* * This file is in the public domain. Originally written by Garrett * A. Wollman. * * $FreeBSD$ */ -#include "block_abi.h" #define I_AM_QSORT_R #include "qsort.c" - -typedef DECLARE_BLOCK(int, qsort_block, const void *, const void *); - -void -qsort_b(void *base, size_t nel, size_t width, qsort_block compar) -{ - qsort_r(base, nel, width, compar, - (int (*)(void *, const void *, const void *)) - GET_BLOCK_FUNCTION(compar)); -} diff --git a/lib/libc/stdlib/qsort_r.c b/lib/libc/stdlib/qsort_r_compat.c similarity index 75% copy from lib/libc/stdlib/qsort_r.c copy to lib/libc/stdlib/qsort_r_compat.c index f489d31c2335..239a5f307ceb 100644 --- a/lib/libc/stdlib/qsort_r.c +++ b/lib/libc/stdlib/qsort_r_compat.c @@ -1,19 +1,21 @@ /* * This file is in the public domain. Originally written by Garrett * A. Wollman. 
* * $FreeBSD$ */ #include "block_abi.h" -#define I_AM_QSORT_R +#define I_AM_QSORT_R_COMPAT #include "qsort.c" typedef DECLARE_BLOCK(int, qsort_block, const void *, const void *); void qsort_b(void *base, size_t nel, size_t width, qsort_block compar) { - qsort_r(base, nel, width, compar, + __qsort_r_compat(base, nel, width, compar, (int (*)(void *, const void *, const void *)) GET_BLOCK_FUNCTION(compar)); } + +__sym_compat(qsort_r, __qsort_r_compat, FBSD_1.0); diff --git a/lib/libc/tests/stdlib/Makefile b/lib/libc/tests/stdlib/Makefile index 47841a92ba32..df0ecc66b067 100644 --- a/lib/libc/tests/stdlib/Makefile +++ b/lib/libc/tests/stdlib/Makefile @@ -1,80 +1,81 @@ # $FreeBSD$ .include ATF_TESTS_C+= clearenv_test ATF_TESTS_C+= dynthr_test ATF_TESTS_C+= heapsort_test ATF_TESTS_C+= mergesort_test ATF_TESTS_C+= qsort_test ATF_TESTS_C+= qsort_b_test +ATF_TESTS_C+= qsort_r_compat_test ATF_TESTS_C+= qsort_r_test ATF_TESTS_C+= qsort_s_test ATF_TESTS_C+= set_constraint_handler_s_test ATF_TESTS_C+= strfmon_test ATF_TESTS_C+= tsearch_test ATF_TESTS_CXX+= cxa_thread_atexit_test ATF_TESTS_CXX+= cxa_thread_atexit_nothr_test # All architectures on FreeBSD have fenv.h CFLAGS+= -D__HAVE_FENV # Define __HAVE_LONG_DOUBLE for architectures whose long double has greater # precision than their double. .if ${MACHINE_CPUARCH} == "aarch64" || \ ${MACHINE_CPUARCH} == "amd64" || \ ${MACHINE_CPUARCH} == "i386" || \ ${MACHINE_CPUARCH} == "riscv" CFLAGS+= -D__HAVE_LONG_DOUBLE .endif # TODO: t_getenv_thread, t_mi_vector_hash, t_strtoi NETBSD_ATF_TESTS_C+= abs_test NETBSD_ATF_TESTS_C+= atoi_test NETBSD_ATF_TESTS_C+= div_test NETBSD_ATF_TESTS_C+= getenv_test NETBSD_ATF_TESTS_C+= exit_test NETBSD_ATF_TESTS_C+= hsearch_test NETBSD_ATF_TESTS_C+= posix_memalign_test NETBSD_ATF_TESTS_C+= random_test NETBSD_ATF_TESTS_C+= strtod_test NETBSD_ATF_TESTS_C+= strtol_test NETBSD_ATF_TESTS_C+= system_test # TODO: need to come up with a correct explanation of what the patch pho does # with h_atexit #ATF_TESTS_SH= atexit_test NETBSD_ATF_TESTS_SH= getopt_test .include "../Makefile.netbsd-tests" BINDIR= ${TESTSDIR} # TODO: see comment above #PROGS+= h_atexit PROGS+= h_getopt h_getopt_long CFLAGS+= -I${.CURDIR} CXXSTD.cxa_thread_atexit_test= c++11 CXXSTD.cxa_thread_atexit_nothr_test= c++11 LIBADD.cxa_thread_atexit_test+= pthread # Tests that requires Blocks feature .for t in qsort_b_test CFLAGS.${t}.c+= -fblocks LIBADD.${t}+= BlocksRuntime .endfor .for t in h_getopt h_getopt_long CFLAGS.$t+= -I${LIBNETBSD_SRCDIR} -I${SRCTOP}/contrib/netbsd-tests LDFLAGS.$t+= -L${LIBNETBSD_OBJDIR} LIBADD.${t}+= netbsd util .endfor LIBADD.strtod_test+= m SUBDIR+= dynthr_mod .include diff --git a/lib/libc/tests/stdlib/qsort_r_test.c b/lib/libc/tests/stdlib/qsort_r_compat_test.c similarity index 93% copy from lib/libc/tests/stdlib/qsort_r_test.c copy to lib/libc/tests/stdlib/qsort_r_compat_test.c index c27e6d92d587..84fd1b116b82 100644 --- a/lib/libc/tests/stdlib/qsort_r_test.c +++ b/lib/libc/tests/stdlib/qsort_r_compat_test.c @@ -1,92 +1,92 @@ /*- * Copyright (C) 2020 Edward Tomasz Napierala * Copyright (C) 2004 Maxim Sobolev * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* - * Test for qsort_r(3) routine. + * Test for historical qsort_r(3) routine. */ #include __FBSDID("$FreeBSD$"); #include #include #include "test-sort.h" #define THUNK 42 static int sorthelp_r(void *thunk, const void *a, const void *b) { const int *oa, *ob; ATF_REQUIRE_EQ(*(int *)thunk, THUNK); oa = a; ob = b; /* Don't use "return *oa - *ob" since it's easy to cause overflow! */ if (*oa > *ob) return (1); if (*oa < *ob) return (-1); return (0); } -ATF_TC_WITHOUT_HEAD(qsort_r_test); -ATF_TC_BODY(qsort_r_test, tc) +ATF_TC_WITHOUT_HEAD(qsort_r_compat_test); +ATF_TC_BODY(qsort_r_compat_test, tc) { int testvector[IVEC_LEN]; int sresvector[IVEC_LEN]; int i, j; int thunk = THUNK; for (j = 2; j < IVEC_LEN; j++) { /* Populate test vectors */ for (i = 0; i < j; i++) testvector[i] = sresvector[i] = initvector[i]; /* Sort using qsort_r(3) */ qsort_r(testvector, j, sizeof(testvector[0]), &thunk, sorthelp_r); /* Sort using reference slow sorting routine */ ssort(sresvector, j); /* Compare results */ for (i = 0; i < j; i++) ATF_CHECK_MSG(testvector[i] == sresvector[i], "item at index %d didn't match: %d != %d", i, testvector[i], sresvector[i]); } } ATF_TP_ADD_TCS(tp) { - ATF_TP_ADD_TC(tp, qsort_r_test); + ATF_TP_ADD_TC(tp, qsort_r_compat_test); return (atf_no_error()); } diff --git a/lib/libc/tests/stdlib/qsort_r_test.c b/lib/libc/tests/stdlib/qsort_r_test.c index c27e6d92d587..c55563eaea8a 100644 --- a/lib/libc/tests/stdlib/qsort_r_test.c +++ b/lib/libc/tests/stdlib/qsort_r_test.c @@ -1,92 +1,92 @@ /*- * Copyright (C) 2020 Edward Tomasz Napierala * Copyright (C) 2004 Maxim Sobolev * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Test for qsort_r(3) routine. */ #include __FBSDID("$FreeBSD$"); #include #include #include "test-sort.h" #define THUNK 42 static int -sorthelp_r(void *thunk, const void *a, const void *b) +sorthelp_r(const void *a, const void *b, void *thunk) { const int *oa, *ob; ATF_REQUIRE_EQ(*(int *)thunk, THUNK); oa = a; ob = b; /* Don't use "return *oa - *ob" since it's easy to cause overflow! */ if (*oa > *ob) return (1); if (*oa < *ob) return (-1); return (0); } ATF_TC_WITHOUT_HEAD(qsort_r_test); ATF_TC_BODY(qsort_r_test, tc) { int testvector[IVEC_LEN]; int sresvector[IVEC_LEN]; int i, j; int thunk = THUNK; for (j = 2; j < IVEC_LEN; j++) { /* Populate test vectors */ for (i = 0; i < j; i++) testvector[i] = sresvector[i] = initvector[i]; /* Sort using qsort_r(3) */ - qsort_r(testvector, j, sizeof(testvector[0]), &thunk, - sorthelp_r); + qsort_r(testvector, j, sizeof(testvector[0]), sorthelp_r, + &thunk); /* Sort using reference slow sorting routine */ ssort(sresvector, j); /* Compare results */ for (i = 0; i < j; i++) ATF_CHECK_MSG(testvector[i] == sresvector[i], "item at index %d didn't match: %d != %d", i, testvector[i], sresvector[i]); } } ATF_TP_ADD_TCS(tp) { ATF_TP_ADD_TC(tp, qsort_r_test); return (atf_no_error()); } diff --git a/lib/libproc/proc_sym.c b/lib/libproc/proc_sym.c index 27b0fc15a2ab..fdcf133d8457 100644 --- a/lib/libproc/proc_sym.c +++ b/lib/libproc/proc_sym.c @@ -1,717 +1,717 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2016-2017 Mark Johnston * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2008 John Birrell (jb@freebsd.org) * All rights reserved. * * Portions of this software were developed by Rui Paulo under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #ifndef NO_CTF #include #include #endif #include #include #include #include #include #include #include #include #include #ifndef NO_CTF #include #endif #include #include #include "_libproc.h" #define PATH_DEBUG_DIR "/usr/lib/debug" #ifdef NO_CTF typedef struct ctf_file ctf_file_t; #endif #ifndef NO_CXA_DEMANGLE extern char *__cxa_demangle(const char *, char *, size_t *, int *); #endif /* NO_CXA_DEMANGLE */ static int crc32_file(int fd, uint32_t *crc) { char buf[MAXPHYS]; ssize_t nr; *crc = crc32(0L, Z_NULL, 0); while ((nr = read(fd, buf, sizeof(buf))) > 0) { *crc = crc32(*crc, (char *)buf, nr); } return (!!nr); } static void demangle(const char *symbol, char *buf, size_t len) { #ifndef NO_CXA_DEMANGLE char *dembuf; if (symbol[0] == '_' && symbol[1] == 'Z' && symbol[2]) { dembuf = __cxa_demangle(symbol, NULL, NULL, NULL); if (!dembuf) goto fail; strlcpy(buf, dembuf, len); free(dembuf); return; } fail: #endif /* NO_CXA_DEMANGLE */ strlcpy(buf, symbol, len); } struct symsort_thunk { Elf *e; struct symtab *symtab; }; static int -symvalcmp(void *_thunk, const void *a1, const void *a2) +symvalcmp(const void *a1, const void *a2, void *_thunk) { GElf_Sym sym1, sym2; struct symsort_thunk *thunk; const char *s1, *s2; u_int i1, i2; int bind1, bind2; i1 = *(const u_int *)a1; i2 = *(const u_int *)a2; thunk = _thunk; (void)gelf_getsym(thunk->symtab->data, i1, &sym1); (void)gelf_getsym(thunk->symtab->data, i2, &sym2); if (sym1.st_value != sym2.st_value) return (sym1.st_value < sym2.st_value ? -1 : 1); /* Prefer non-local symbols. */ bind1 = GELF_ST_BIND(sym1.st_info); bind2 = GELF_ST_BIND(sym2.st_info); if (bind1 != bind2) { if (bind1 == STB_LOCAL && bind2 != STB_LOCAL) return (-1); if (bind1 != STB_LOCAL && bind2 == STB_LOCAL) return (1); } s1 = elf_strptr(thunk->e, thunk->symtab->stridx, sym1.st_name); s2 = elf_strptr(thunk->e, thunk->symtab->stridx, sym2.st_name); if (s1 != NULL && s2 != NULL) { /* Prefer symbols without a leading '$'. */ if (*s1 == '$') return (-1); if (*s2 == '$') return (1); /* Prefer symbols with fewer leading underscores. 
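symvalcmp() above never inspects the array being sorted except as indices; the GElf_Sym records it actually compares are reached through the thunk. A reduced sketch of that index-sorting idiom under the new qsort_r(3) convention, with a hypothetical record type standing in for the ELF symbol data:

#define _GNU_SOURCE     /* for qsort_r() on glibc */
#include <stdlib.h>

struct rec {
    unsigned long value;        /* stand-in for GElf_Sym st_value */
};

struct sort_ctx {               /* stand-in for struct symsort_thunk */
    const struct rec *recs;
};

static int
idxcmp(const void *a, const void *b, void *thunk)
{
    const struct sort_ctx *ctx = thunk;
    unsigned int i1 = *(const unsigned int *)a;
    unsigned int i2 = *(const unsigned int *)b;

    if (ctx->recs[i1].value != ctx->recs[i2].value)
        return (ctx->recs[i1].value < ctx->recs[i2].value ? -1 : 1);
    return (0);     /* the real comparator breaks ties on binding and name */
}

void
sort_indices(unsigned int *index, size_t n, const struct rec *recs)
{
    struct sort_ctx ctx = { recs };

    qsort_r(index, n, sizeof(index[0]), idxcmp, &ctx);
}

Sorting indices rather than records keeps the records at stable positions, which load_symtab() relies on when it later fetches symbols by index with gelf_getsym().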
*/ for (; *s1 == '_' && *s2 == '_'; s1++, s2++) ; if (*s1 == '_') return (-1); if (*s2 == '_') return (1); } return (0); } static int load_symtab(Elf *e, struct symtab *symtab, u_long sh_type) { GElf_Ehdr ehdr; GElf_Shdr shdr; struct symsort_thunk thunk; Elf_Scn *scn; u_int nsyms; if (gelf_getehdr(e, &ehdr) == NULL) return (-1); scn = NULL; while ((scn = elf_nextscn(e, scn)) != NULL) { (void)gelf_getshdr(scn, &shdr); if (shdr.sh_type == sh_type) break; } if (scn == NULL) return (-1); nsyms = shdr.sh_size / shdr.sh_entsize; if (nsyms > (1 << 20)) return (-1); if ((symtab->data = elf_getdata(scn, NULL)) == NULL) return (-1); symtab->index = calloc(nsyms, sizeof(u_int)); if (symtab->index == NULL) return (-1); for (u_int i = 0; i < nsyms; i++) symtab->index[i] = i; symtab->nsyms = nsyms; symtab->stridx = shdr.sh_link; thunk.e = e; thunk.symtab = symtab; - qsort_r(symtab->index, nsyms, sizeof(u_int), &thunk, symvalcmp); + qsort_r(symtab->index, nsyms, sizeof(u_int), symvalcmp, &thunk); return (0); } static void load_symtabs(struct file_info *file) { file->symtab.nsyms = file->dynsymtab.nsyms = 0; (void)load_symtab(file->elf, &file->symtab, SHT_SYMTAB); (void)load_symtab(file->elf, &file->dynsymtab, SHT_DYNSYM); } static int open_debug_file(char *path, const char *debugfile, uint32_t crc) { size_t n; uint32_t compcrc; int fd; fd = -1; if ((n = strlcat(path, "/", PATH_MAX)) >= PATH_MAX) return (fd); if (strlcat(path, debugfile, PATH_MAX) >= PATH_MAX) goto out; if ((fd = open(path, O_RDONLY | O_CLOEXEC)) < 0) goto out; if (crc32_file(fd, &compcrc) != 0 || crc != compcrc) { DPRINTFX("ERROR: CRC32 mismatch for %s", path); (void)close(fd); fd = -1; } out: path[n] = '\0'; return (fd); } /* * Obtain an ELF descriptor for the specified mapped object. If a GNU debuglink * section is present, a descriptor for the corresponding debug file is * returned. */ static int open_object(struct map_info *mapping) { char path[PATH_MAX]; GElf_Shdr shdr; Elf *e, *e2; Elf_Data *data; Elf_Scn *scn; struct file_info *file; prmap_t *map; const char *debugfile, *scnname; size_t ndx; uint32_t crc; int fd, fd2; if (mapping->map.pr_mapname[0] == '\0') return (-1); /* anonymous object */ if (mapping->file->elf != NULL) return (0); /* already loaded */ file = mapping->file; map = &mapping->map; if ((fd = open(map->pr_mapname, O_RDONLY | O_CLOEXEC)) < 0) { DPRINTF("ERROR: open %s failed", map->pr_mapname); return (-1); } if ((e = elf_begin(fd, ELF_C_READ, NULL)) == NULL) { DPRINTFX("ERROR: elf_begin() failed: %s", elf_errmsg(-1)); goto err; } if (gelf_getehdr(e, &file->ehdr) != &file->ehdr) { DPRINTFX("ERROR: elf_getehdr() failed: %s", elf_errmsg(-1)); goto err; } scn = NULL; while ((scn = elf_nextscn(e, scn)) != NULL) { if (gelf_getshdr(scn, &shdr) != &shdr) { DPRINTFX("ERROR: gelf_getshdr failed: %s", elf_errmsg(-1)); goto err; } if (shdr.sh_type != SHT_PROGBITS) continue; if (elf_getshdrstrndx(e, &ndx) != 0) { DPRINTFX("ERROR: elf_getshdrstrndx failed: %s", elf_errmsg(-1)); goto err; } if ((scnname = elf_strptr(e, ndx, shdr.sh_name)) == NULL) continue; if (strcmp(scnname, ".gnu_debuglink") == 0) break; } if (scn == NULL) goto internal; if ((data = elf_getdata(scn, NULL)) == NULL) { DPRINTFX("ERROR: elf_getdata failed: %s", elf_errmsg(-1)); goto err; } /* * The data contains a null-terminated file name followed by a 4-byte * CRC. 
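A distilled sketch of the payload validation that follows: the name must be NUL-terminated before the CRC, and because the .gnu_debuglink format pads the name so the CRC sits 4-byte aligned at the very end of the section, reading the final four bytes recovers it:

#include <stdint.h>
#include <string.h>

static int
split_debuglink(const void *buf, size_t size,
    const char **namep, uint32_t *crcp)
{
    if (size < sizeof(*crcp) + 1)
        return (-1);    /* too small to hold a name plus the CRC */
    if (strnlen(buf, size) >= size - sizeof(*crcp))
        return (-1);    /* name not terminated before the CRC */
    *namep = buf;
    memcpy(crcp, (const char *)buf + size - sizeof(*crcp), sizeof(*crcp));
    return (0);
}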
*/ if (data->d_size < sizeof(crc) + 1) { DPRINTFX("ERROR: debuglink section is too small (%zd bytes)", (ssize_t)data->d_size); goto internal; } if (strnlen(data->d_buf, data->d_size) >= data->d_size - sizeof(crc)) { DPRINTFX("ERROR: no null-terminator in gnu_debuglink section"); goto internal; } debugfile = data->d_buf; memcpy(&crc, (char *)data->d_buf + data->d_size - sizeof(crc), sizeof(crc)); /* * Search for the debug file using the algorithm described in the gdb * documentation: * - look in the directory containing the object, * - look in the subdirectory ".debug" of the directory containing the * object, * - look in the global debug directories (currently /usr/lib/debug). */ (void)strlcpy(path, map->pr_mapname, sizeof(path)); (void)dirname(path); if ((fd2 = open_debug_file(path, debugfile, crc)) >= 0) goto external; if (strlcat(path, "/.debug", sizeof(path)) < sizeof(path) && (fd2 = open_debug_file(path, debugfile, crc)) >= 0) goto external; (void)snprintf(path, sizeof(path), PATH_DEBUG_DIR); if (strlcat(path, map->pr_mapname, sizeof(path)) < sizeof(path)) { (void)dirname(path); if ((fd2 = open_debug_file(path, debugfile, crc)) >= 0) goto external; } internal: /* We didn't find a debug file, just return the object's descriptor. */ file->elf = e; file->fd = fd; load_symtabs(file); return (0); external: if ((e2 = elf_begin(fd2, ELF_C_READ, NULL)) == NULL) { DPRINTFX("ERROR: elf_begin failed: %s", elf_errmsg(-1)); (void)close(fd2); goto err; } (void)elf_end(e); (void)close(fd); file->elf = e2; file->fd = fd2; load_symtabs(file); return (0); err: if (e != NULL) (void)elf_end(e); (void)close(fd); return (-1); } char * proc_objname(struct proc_handle *p, uintptr_t addr, char *objname, size_t objnamesz) { prmap_t *map; size_t i; if (p->nmappings == 0) if (proc_rdagent(p) == NULL) return (NULL); for (i = 0; i < p->nmappings; i++) { map = &p->mappings[i].map; if (addr >= map->pr_vaddr && addr < map->pr_vaddr + map->pr_size) { strlcpy(objname, map->pr_mapname, objnamesz); return (objname); } } return (NULL); } int proc_iter_objs(struct proc_handle *p, proc_map_f *func, void *cd) { char last[MAXPATHLEN], path[MAXPATHLEN], *base; prmap_t *map; size_t i; int error; if (p->nmappings == 0) if (proc_rdagent(p) == NULL) return (-1); error = 0; memset(last, 0, sizeof(last)); for (i = 0; i < p->nmappings; i++) { map = &p->mappings[i].map; strlcpy(path, map->pr_mapname, sizeof(path)); base = basename(path); /* * We shouldn't call the callback twice with the same object. * To do that we are assuming the fact that if there are * repeated object names (i.e. different mappings for the * same object) they occur next to each other. */ if (strcmp(base, last) == 0) continue; if ((error = (*func)(cd, map, base)) != 0) break; strlcpy(last, path, sizeof(last)); } return (error); } static struct map_info * _proc_addr2map(struct proc_handle *p, uintptr_t addr) { struct map_info *mapping; size_t i; if (p->nmappings == 0) if (proc_rdagent(p) == NULL) return (NULL); for (i = 0; i < p->nmappings; i++) { mapping = &p->mappings[i]; if (addr >= mapping->map.pr_vaddr && addr < mapping->map.pr_vaddr + mapping->map.pr_size) return (mapping); } return (NULL); } prmap_t * proc_addr2map(struct proc_handle *p, uintptr_t addr) { return (&_proc_addr2map(p, addr)->map); } /* * Look up the symbol at addr using a binary search, returning a copy of the * symbol and its name. 
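Because each entry covers the half-open address range [st_value, st_value + st_size) and the index is sorted by st_value, the lookup below is a range variant of binary search. A generic sketch over a hypothetical range type; the half-open [lo, hi) loop also sidesteps the unsigned wrap-around a closed-interval "mid - 1" form risks when mid is 0:

#include <stddef.h>
#include <stdint.h>

struct range {
    uintptr_t start;
    size_t len;
};

/* Return the index of the range containing addr, or -1 if none does. */
static long
range_lookup(const struct range *r, size_t n, uintptr_t addr)
{
    size_t lo = 0, hi = n;

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (addr < r[mid].start)
            hi = mid;
        else if (addr >= r[mid].start + r[mid].len)
            lo = mid + 1;
        else
            return ((long)mid);
    }
    return (-1);
}

The real routine additionally scans forward after a hit to prefer the matching entry with the largest index.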
*/ static int lookup_symbol_by_addr(Elf *e, struct symtab *symtab, uintptr_t addr, const char **namep, GElf_Sym *symp) { GElf_Sym sym; Elf_Data *data; const char *s; u_int i, min, max, mid; if (symtab->nsyms == 0) return (ENOENT); data = symtab->data; min = 0; max = symtab->nsyms - 1; while (min <= max) { mid = (max + min) / 2; (void)gelf_getsym(data, symtab->index[mid], &sym); if (addr >= sym.st_value && addr < sym.st_value + sym.st_size) break; if (addr < sym.st_value) max = mid - 1; else min = mid + 1; } if (min > max) return (ENOENT); /* * Advance until we find the matching symbol with largest index. */ for (i = mid; i < symtab->nsyms; i++) { (void)gelf_getsym(data, symtab->index[i], &sym); if (addr < sym.st_value || addr >= sym.st_value + sym.st_size) break; } (void)gelf_getsym(data, symtab->index[i - 1], symp); s = elf_strptr(e, symtab->stridx, symp->st_name); if (s != NULL && namep != NULL) *namep = s; return (0); } int proc_addr2sym(struct proc_handle *p, uintptr_t addr, char *name, size_t namesz, GElf_Sym *symcopy) { struct file_info *file; struct map_info *mapping; const char *s; uintptr_t off; int error; if ((mapping = _proc_addr2map(p, addr)) == NULL) { DPRINTFX("ERROR: proc_addr2map failed to resolve 0x%jx", (uintmax_t)addr); return (-1); } if (open_object(mapping) != 0) { DPRINTFX("ERROR: failed to open object %s", mapping->map.pr_mapname); return (-1); } file = mapping->file; off = file->ehdr.e_type == ET_DYN ? mapping->map.pr_vaddr - mapping->map.pr_offset : 0; if (addr < off) return (ENOENT); addr -= off; error = lookup_symbol_by_addr(file->elf, &file->dynsymtab, addr, &s, symcopy); if (error == ENOENT) error = lookup_symbol_by_addr(file->elf, &file->symtab, addr, &s, symcopy); if (error == 0) { symcopy->st_value += off; demangle(s, name, namesz); } return (error); } static struct map_info * _proc_name2map(struct proc_handle *p, const char *name) { char path[MAXPATHLEN], *base; struct map_info *mapping; size_t i, len; if ((len = strlen(name)) == 0) return (NULL); if (p->nmappings == 0) if (proc_rdagent(p) == NULL) return (NULL); for (i = 0; i < p->nmappings; i++) { mapping = &p->mappings[i]; (void)strlcpy(path, mapping->map.pr_mapname, sizeof(path)); base = basename(path); if (strcmp(base, name) == 0) return (mapping); } /* If we didn't find a match, try matching prefixes of the basename. */ for (i = 0; i < p->nmappings; i++) { mapping = &p->mappings[i]; strlcpy(path, mapping->map.pr_mapname, sizeof(path)); base = basename(path); if (strncmp(base, name, len) == 0) return (mapping); } if (strcmp(name, "a.out") == 0) return (_proc_addr2map(p, p->mappings[p->exec_map].map.pr_vaddr)); return (NULL); } prmap_t * proc_name2map(struct proc_handle *p, const char *name) { return (&_proc_name2map(p, name)->map); } /* * Look up the symbol with the given name and return a copy of it. 
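Both lookup paths, proc_addr2sym() above and proc_name2sym() below, rebase symbol values by the same load offset: it is nonzero only for ET_DYN objects (shared libraries and PIEs), which are linked at address 0 and relocated at map time. A minimal sketch with illustrative field names:

#include <stdint.h>

struct mapped_obj {
    uintptr_t map_vaddr;    /* where the mapping starts in memory */
    uintptr_t map_offset;   /* file offset backing that mapping */
    int is_et_dyn;          /* ELF header e_type == ET_DYN */
};

static uintptr_t
load_offset(const struct mapped_obj *obj)
{
    return (obj->is_et_dyn ? obj->map_vaddr - obj->map_offset : 0);
}

/* Runtime address of a symbol whose on-disk value is st_value. */
static uintptr_t
sym_runtime_addr(const struct mapped_obj *obj, uintptr_t st_value)
{
    return (st_value + load_offset(obj));
}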
*/ static int lookup_symbol_by_name(Elf *elf, struct symtab *symtab, const char *symbol, GElf_Sym *symcopy, prsyminfo_t *si) { GElf_Sym sym; Elf_Data *data; char *s; int i; if (symtab->nsyms == 0) return (ENOENT); data = symtab->data; for (i = 0; gelf_getsym(data, i, &sym) != NULL; i++) { s = elf_strptr(elf, symtab->stridx, sym.st_name); if (s != NULL && strcmp(s, symbol) == 0) { memcpy(symcopy, &sym, sizeof(*symcopy)); if (si != NULL) si->prs_id = i; return (0); } } return (ENOENT); } int proc_name2sym(struct proc_handle *p, const char *object, const char *symbol, GElf_Sym *symcopy, prsyminfo_t *si) { struct file_info *file; struct map_info *mapping; uintptr_t off; int error; if ((mapping = _proc_name2map(p, object)) == NULL) { DPRINTFX("ERROR: proc_name2map failed to resolve %s", object); return (-1); } if (open_object(mapping) != 0) { DPRINTFX("ERROR: failed to open object %s", mapping->map.pr_mapname); return (-1); } file = mapping->file; off = file->ehdr.e_type == ET_DYN ? mapping->map.pr_vaddr - mapping->map.pr_offset : 0; error = lookup_symbol_by_name(file->elf, &file->dynsymtab, symbol, symcopy, si); if (error == ENOENT) error = lookup_symbol_by_name(file->elf, &file->symtab, symbol, symcopy, si); if (error == 0) symcopy->st_value += off; return (error); } ctf_file_t * proc_name2ctf(struct proc_handle *p, const char *name) { #ifndef NO_CTF ctf_file_t *ctf; prmap_t *map; int error; if ((map = proc_name2map(p, name)) == NULL) return (NULL); ctf = ctf_open(map->pr_mapname, &error); return (ctf); #else (void)p; (void)name; return (NULL); #endif } int proc_iter_symbyaddr(struct proc_handle *p, const char *object, int which, int mask, proc_sym_f *func, void *cd) { GElf_Sym sym; struct file_info *file; struct map_info *mapping; struct symtab *symtab; const char *s; int error, i; if ((mapping = _proc_name2map(p, object)) == NULL) { DPRINTFX("ERROR: proc_name2map failed to resolve %s", object); return (-1); } if (open_object(mapping) != 0) { DPRINTFX("ERROR: failed to open object %s", mapping->map.pr_mapname); return (-1); } file = mapping->file; symtab = which == PR_SYMTAB ? &file->symtab : &file->dynsymtab; if (symtab->nsyms == 0) return (-1); error = 0; for (i = 0; gelf_getsym(symtab->data, i, &sym) != NULL; i++) { if (GELF_ST_BIND(sym.st_info) == STB_LOCAL && (mask & BIND_LOCAL) == 0) continue; if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL && (mask & BIND_GLOBAL) == 0) continue; if (GELF_ST_BIND(sym.st_info) == STB_WEAK && (mask & BIND_WEAK) == 0) continue; if (GELF_ST_TYPE(sym.st_info) == STT_NOTYPE && (mask & TYPE_NOTYPE) == 0) continue; if (GELF_ST_TYPE(sym.st_info) == STT_OBJECT && (mask & TYPE_OBJECT) == 0) continue; if (GELF_ST_TYPE(sym.st_info) == STT_FUNC && (mask & TYPE_FUNC) == 0) continue; if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && (mask & TYPE_SECTION) == 0) continue; if (GELF_ST_TYPE(sym.st_info) == STT_FILE && (mask & TYPE_FILE) == 0) continue; s = elf_strptr(file->elf, symtab->stridx, sym.st_name); if (file->ehdr.e_type == ET_DYN) sym.st_value += mapping->map.pr_vaddr; if ((error = (*func)(cd, &sym, s)) != 0) break; } return (error); } diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c index 6f1def64fa0f..8c5d2e4ecd16 100644 --- a/sys/compat/linuxkpi/common/src/linux_compat.c +++ b/sys/compat/linuxkpi/common/src/linux_compat.c @@ -1,2796 +1,2796 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. 
* Copyright (c) 2013-2021 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_stack.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__i386__) || defined(__amd64__) #include #include #endif SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, "LinuxKPI parameters"); int linuxkpi_debug; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, debug, CTLFLAG_RWTUN, &linuxkpi_debug, 0, "Set to enable pr_debug() prints. Clear to disable."); int linuxkpi_warn_dump_stack = 0; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, warn_dump_stack, CTLFLAG_RWTUN, &linuxkpi_warn_dump_stack, 0, "Set to enable stack traces from WARN_ON(). Clear to disable."); static struct timeval lkpi_net_lastlog; static int lkpi_net_curpps; static int lkpi_net_maxpps = 99; SYSCTL_INT(_compat_linuxkpi, OID_AUTO, net_ratelimit, CTLFLAG_RWTUN, &lkpi_net_maxpps, 0, "Limit number of LinuxKPI net messages per second."); MALLOC_DEFINE(M_KMALLOC, "lkpikmalloc", "Linux kmalloc compat"); #include /* Undo Linux compat changes. 
*/ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root static void linux_destroy_dev(struct linux_cdev *); static void linux_cdev_deref(struct linux_cdev *ldev); static struct vm_area_struct *linux_cdev_handle_find(void *handle); cpumask_t cpu_online_mask; struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; spinlock_t pci_lock; unsigned long linux_timer_hz_mask; wait_queue_head_t linux_bit_waitq; wait_queue_head_t linux_var_waitq; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); #define START(node) ((node)->start) #define LAST(node) ((node)->last) INTERVAL_TREE_DEFINE(struct interval_tree_node, rb, unsigned long,, START, LAST,, lkpi_interval_tree) struct kobject * kobject_create(void) { struct kobject *kobj; kobj = kzalloc(sizeof(*kobj), GFP_KERNEL); if (kobj == NULL) return (NULL); kobject_init(kobj, &linux_kfree_type); return (kobj); } int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) { va_list tmp_va; int len; char *old; char *name; char dummy; old = kobj->name; if (old && fmt == NULL) return (0); /* compute length of string */ va_copy(tmp_va, args); len = vsnprintf(&dummy, 0, fmt, tmp_va); va_end(tmp_va); /* account for zero termination */ len++; /* check for error */ if (len < 1) return (-EINVAL); /* allocate memory for string */ name = kzalloc(len, GFP_KERNEL); if (name == NULL) return (-ENOMEM); vsnprintf(name, len, fmt, args); kobj->name = name; /* free old string */ kfree(old); /* filter new string */ for (; *name != '\0'; name++) if (*name == '/') *name = '!'; return (0); } int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); return (error); } static int kobject_add_complete(struct kobject *kobj, struct kobject *parent) { const struct kobj_type *t; int error; kobj->parent = parent; error = sysfs_create_dir(kobj); if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { struct attribute **attr; t = kobj->ktype; for (attr = t->default_attrs; *attr != NULL; attr++) { error = sysfs_create_file(kobj, *attr); if (error) break; } if (error) sysfs_remove_dir(kobj); } return (error); } int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } void linux_kobject_release(struct kref *kref) { struct kobject *kobj; char *name; kobj = container_of(kref, struct kobject, kref); sysfs_remove_dir(kobj); name = kobj->name; if (kobj->ktype && kobj->ktype->release) kobj->ktype->release(kobj); kfree(name); } static void linux_kobject_kfree(struct kobject *kobj) { kfree(kobj); } static void linux_kobject_kfree_name(struct kobject *kobj) { if (kobj) { kfree(kobj->name); } } const struct kobj_type linux_kfree_type = { .release = linux_kobject_kfree }; static ssize_t lkpi_kobj_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct kobj_attribute *ka = container_of(attr, struct kobj_attribute, attr); if (ka->show == NULL) return (-EIO); return (ka->show(kobj, ka, buf)); } static ssize_t lkpi_kobj_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct kobj_attribute *ka = container_of(attr, struct kobj_attribute, attr); if (ka->store == NULL) return (-EIO); return (ka->store(kobj, ka, buf, count)); } const struct sysfs_ops kobj_sysfs_ops = { .show = lkpi_kobj_attr_show, .store = lkpi_kobj_attr_store, }; static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. 
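A standalone sketch of that precedence with simplified stand-in types: the per-device release callback wins, and the class-wide one is only a fallback, which is how linux_dev_release() below dispatches it:

struct device;

struct device_class {
    void (*dev_release)(struct device *);
};

struct device {
    const struct device_class *class;
    void (*release)(struct device *);
};

static void
dev_release_dispatch(struct device *dev)
{
    if (dev->release != NULL)
        dev->release(dev);                      /* device-specific wins */
    else if (dev->class != NULL && dev->class->dev_release != NULL)
        dev->class->dev_release(dev);           /* class-wide fallback */
}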
*/ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) { struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } struct device * device_create_groups_vargs(struct class *class, struct device *parent, dev_t devt, void *drvdata, const struct attribute_group **groups, const char *fmt, va_list args) { struct device *dev = NULL; int retval = -ENODEV; if (class == NULL || IS_ERR(class)) goto error; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { retval = -ENOMEM; goto error; } dev->devt = devt; dev->class = class; dev->parent = parent; dev->groups = groups; dev->release = device_create_release; /* device_initialize() needs the class and parent to be set */ device_initialize(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); if (retval) goto error; retval = device_add(dev); if (retval) goto error; return dev; error: put_device(dev); return ERR_PTR(retval); } struct class * class_create(struct module *owner, const char *name) { struct class *class; int error; class = kzalloc(sizeof(*class), M_WAITOK); class->owner = owner; class->name = name; class->class_release = linux_class_kfree; error = class_register(class); if (error) { kfree(class); return (NULL); } return (class); } int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; kobject_init(kobj, ktype); kobj->ktype = ktype; kobj->parent = parent; kobj->name = NULL; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } static void linux_kq_lock(void *arg) { spinlock_t *s = arg; spin_lock(s); } static void linux_kq_unlock(void *arg) { spinlock_t *s = arg; spin_unlock(s); } static void linux_kq_assert_lock(void *arg, int what) { #ifdef INVARIANTS spinlock_t *s = arg; if (what == LA_LOCKED) mtx_assert(&s->m, MA_OWNED); else mtx_assert(&s->m, MA_NOTOWNED); #endif } static void linux_file_kqfilter_poll(struct linux_file *, int); struct linux_file * linux_file_alloc(void) { struct linux_file *filp; filp = kzalloc(sizeof(*filp), GFP_KERNEL); /* set initial refcount */ filp->f_count = 1; /* setup fields needed by kqueue support */ spin_lock_init(&filp->f_kqlock); knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock, linux_kq_lock, linux_kq_unlock, linux_kq_assert_lock); return (filp); } void linux_file_free(struct linux_file *filp) { if (filp->_file == NULL) { if (filp->f_op != NULL && filp->f_op->release != NULL) filp->f_op->release(filp->f_vnode, filp); if (filp->f_shmem != NULL) vm_object_deallocate(filp->f_shmem); kfree_rcu(filp, rcu); } else { /* * The close method of the character device or file * will free the linux_file structure: */ _fdrop(filp->_file, curthread); } } struct linux_cdev * cdev_alloc(void) { struct linux_cdev *cdev; cdev = kzalloc(sizeof(struct linux_cdev), M_WAITOK); kobject_init(&cdev->kobj, &linux_cdev_ktype); cdev->refs = 1; return (cdev); } static int linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, vm_page_t *mres) { struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) { vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset; vm_page_t page; if (((*mres)->flags & PG_FICTITIOUS) != 0) { /* * If the passed in result page is a fake * page, update it with the new physical * address. */ page = *mres; vm_page_updatefake(page, paddr, vm_obj->memattr); } else { /* * Replace the passed in "mres" page with our * own fake page and free up the all of the * original pages. */ VM_OBJECT_WUNLOCK(vm_obj); page = vm_page_getfake(paddr, vm_obj->memattr); VM_OBJECT_WLOCK(vm_obj); vm_page_replace(page, vm_obj, (*mres)->pindex, *mres); *mres = page; } vm_page_valid(page); return (VM_PAGER_OK); } return (VM_PAGER_FAIL); } static int linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) { struct vm_area_struct *vmap; int err; /* get VM area structure */ vmap = linux_cdev_handle_find(vm_obj->handle); MPASS(vmap != NULL); MPASS(vmap->vm_private_data == vm_obj->handle); VM_OBJECT_WUNLOCK(vm_obj); linux_set_current(curthread); down_write(&vmap->vm_mm->mmap_sem); if (unlikely(vmap->vm_ops == NULL)) { err = VM_FAULT_SIGBUS; } else { struct vm_fault vmf; /* fill out VM fault structure */ vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx); vmf.flags = (fault_type & VM_PROT_WRITE) ? 
FAULT_FLAG_WRITE : 0; vmf.pgoff = 0; vmf.page = NULL; vmf.vma = vmap; vmap->vm_pfn_count = 0; vmap->vm_pfn_pcount = &vmap->vm_pfn_count; vmap->vm_obj = vm_obj; err = vmap->vm_ops->fault(&vmf); while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { kern_yield(PRI_USER); err = vmap->vm_ops->fault(&vmf); } } /* translate return code */ switch (err) { case VM_FAULT_OOM: err = VM_PAGER_AGAIN; break; case VM_FAULT_SIGBUS: err = VM_PAGER_BAD; break; case VM_FAULT_NOPAGE: /* * By contract the fault handler will return having * busied all the pages itself. If pidx is already * found in the object, it will simply xbusy the first * page and return with vm_pfn_count set to 1. */ *first = vmap->vm_pfn_first; *last = *first + vmap->vm_pfn_count - 1; err = VM_PAGER_OK; break; default: err = VM_PAGER_ERROR; break; } up_write(&vmap->vm_mm->mmap_sem); VM_OBJECT_WLOCK(vm_obj); return (err); } static struct rwlock linux_vma_lock; static TAILQ_HEAD(, vm_area_struct) linux_vma_head = TAILQ_HEAD_INITIALIZER(linux_vma_head); static void linux_cdev_handle_free(struct vm_area_struct *vmap) { /* Drop reference on vm_file */ if (vmap->vm_file != NULL) fput(vmap->vm_file); /* Drop reference on mm_struct */ mmput(vmap->vm_mm); kfree(vmap); } static void linux_cdev_handle_remove(struct vm_area_struct *vmap) { rw_wlock(&linux_vma_lock); TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); rw_wunlock(&linux_vma_lock); } static struct vm_area_struct * linux_cdev_handle_find(void *handle) { struct vm_area_struct *vmap; rw_rlock(&linux_vma_lock); TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { if (vmap->vm_private_data == handle) break; } rw_runlock(&linux_vma_lock); return (vmap); } static int linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color) { MPASS(linux_cdev_handle_find(handle) != NULL); *color = 0; return (0); } static void linux_cdev_pager_dtor(void *handle) { const struct vm_operations_struct *vm_ops; struct vm_area_struct *vmap; vmap = linux_cdev_handle_find(handle); MPASS(vmap != NULL); /* * Remove handle before calling close operation to prevent * other threads from reusing the handle pointer. 
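The same unlink-before-teardown discipline can be shown in a userspace analogue, with a pthreads read/write lock standing in for the kernel rwlock: once the handle is off the list, no concurrent linux_cdev_handle_find()-style lookup can return it, so teardown and free become race-free:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct handle {
    TAILQ_ENTRY(handle) entry;
    void *key;
};

static TAILQ_HEAD(, handle) head = TAILQ_HEAD_INITIALIZER(head);
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

struct handle *
handle_find(void *key)
{
    struct handle *h;

    pthread_rwlock_rdlock(&lock);
    TAILQ_FOREACH(h, &head, entry)
        if (h->key == key)
            break;
    pthread_rwlock_unlock(&lock);
    return (h);     /* valid because destroyers unlink before freeing */
}

void
handle_destroy(struct handle *h)
{
    /* 1. Make the handle unreachable to handle_find(). */
    pthread_rwlock_wrlock(&lock);
    TAILQ_REMOVE(&head, h, entry);
    pthread_rwlock_unlock(&lock);

    /* 2. Only now run teardown (close callback, etc.) and free it. */
    free(h);
}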
*/ linux_cdev_handle_remove(vmap); down_write(&vmap->vm_mm->mmap_sem); vm_ops = vmap->vm_ops; if (likely(vm_ops != NULL)) vm_ops->close(vmap); up_write(&vmap->vm_mm->mmap_sem); linux_cdev_handle_free(vmap); } static struct cdev_pager_ops linux_cdev_pager_ops[2] = { { /* OBJT_MGTDEVICE */ .cdev_pg_populate = linux_cdev_pager_populate, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, { /* OBJT_DEVICE */ .cdev_pg_fault = linux_cdev_pager_fault, .cdev_pg_ctor = linux_cdev_pager_ctor, .cdev_pg_dtor = linux_cdev_pager_dtor }, }; int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) { vm_object_t obj; vm_page_t m; obj = vma->vm_obj; if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0) return (-ENOTSUP); VM_OBJECT_RLOCK(obj); for (m = vm_page_find_least(obj, OFF_TO_IDX(address)); m != NULL && m->pindex < OFF_TO_IDX(address + size); m = TAILQ_NEXT(m, listq)) pmap_remove_all(m); VM_OBJECT_RUNLOCK(obj); return (0); } static struct file_operations dummy_ldev_ops = { /* XXXKIB */ }; static struct linux_cdev dummy_ldev = { .ops = &dummy_ldev_ops, }; #define LDEV_SI_DTR 0x0001 #define LDEV_SI_REF 0x0002 static void linux_get_fop(struct linux_file *filp, const struct file_operations **fop, struct linux_cdev **dev) { struct linux_cdev *ldev; u_int siref; ldev = filp->f_cdev; *fop = filp->f_op; if (ldev != NULL) { if (ldev->kobj.ktype == &linux_cdev_static_ktype) { refcount_acquire(&ldev->refs); } else { for (siref = ldev->siref;;) { if ((siref & LDEV_SI_DTR) != 0) { ldev = &dummy_ldev; *fop = ldev->ops; siref = ldev->siref; MPASS((ldev->siref & LDEV_SI_DTR) == 0); } else if (atomic_fcmpset_int(&ldev->siref, &siref, siref + LDEV_SI_REF)) { break; } } } } *dev = ldev; } static void linux_drop_fop(struct linux_cdev *ldev) { if (ldev == NULL) return; if (ldev->kobj.ktype == &linux_cdev_static_ktype) { linux_cdev_deref(ldev); } else { MPASS(ldev->kobj.ktype == &linux_cdev_ktype); MPASS((ldev->siref & ~LDEV_SI_DTR) != 0); atomic_subtract_int(&ldev->siref, LDEV_SI_REF); } } #define OPW(fp,td,code) ({ \ struct file *__fpop; \ __typeof(code) __retval; \ \ __fpop = (td)->td_fpop; \ (td)->td_fpop = (fp); \ __retval = (code); \ (td)->td_fpop = __fpop; \ __retval; \ }) static int linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) { struct linux_cdev *ldev; struct linux_file *filp; const struct file_operations *fop; int error; ldev = dev->si_drv1; filp = linux_file_alloc(); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_mode = file->f_flag; filp->f_flags = file->f_flag; filp->f_vnode = file->f_vnode; filp->_file = file; refcount_acquire(&ldev->refs); filp->f_cdev = ldev; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->open != NULL) { error = -fop->open(file->f_vnode, filp); if (error != 0) { linux_drop_fop(ldev); linux_cdev_deref(filp->f_cdev); kfree(filp); return (error); } } /* hold on to the vnode - used for fstat() */ vhold(filp->f_vnode); /* release the file from devfs */ finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); linux_drop_fop(ldev); return (ENXIO); } #define LINUX_IOCTL_MIN_PTR 0x10000UL #define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) static inline int linux_remap_address(void **uaddr, size_t len) { uintptr_t uaddr_val = (uintptr_t)(*uaddr); if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && uaddr_val < LINUX_IOCTL_MAX_PTR)) { struct task_struct *pts = current; if (pts == NULL) { *uaddr = NULL; return (1); } /* compute data offset */ 
uaddr_val -= LINUX_IOCTL_MIN_PTR; /* check that length is within bounds */ if ((len > IOCPARM_MAX) || (uaddr_val + len) > pts->bsd_ioctl_len) { *uaddr = NULL; return (1); } /* re-add kernel buffer address */ uaddr_val += (uintptr_t)pts->bsd_ioctl_data; /* update address location */ *uaddr = (void *)uaddr_val; return (1); } return (0); } int linux_copyin(const void *uaddr, void *kaddr, size_t len) { if (linux_remap_address(__DECONST(void **, &uaddr), len)) { if (uaddr == NULL) return (-EFAULT); memcpy(kaddr, uaddr, len); return (0); } return (-copyin(uaddr, kaddr, len)); } int linux_copyout(const void *kaddr, void *uaddr, size_t len) { if (linux_remap_address(&uaddr, len)) { if (uaddr == NULL) return (-EFAULT); memcpy(uaddr, kaddr, len); return (0); } return (-copyout(kaddr, uaddr, len)); } size_t linux_clear_user(void *_uaddr, size_t _len) { uint8_t *uaddr = _uaddr; size_t len = _len; /* make sure uaddr is aligned before going into the fast loop */ while (((uintptr_t)uaddr & 7) != 0 && len > 7) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } /* zero 8 bytes at a time */ while (len > 7) { #ifdef __LP64__ if (suword64(uaddr, 0)) return (_len); #else if (suword32(uaddr, 0)) return (_len); if (suword32(uaddr + 4, 0)) return (_len); #endif uaddr += 8; len -= 8; } /* zero fill end, if any */ while (len > 0) { if (subyte(uaddr, 0)) return (_len); uaddr++; len--; } return (0); } int linux_access_ok(const void *uaddr, size_t len) { uintptr_t saddr; uintptr_t eaddr; /* get start and end address */ saddr = (uintptr_t)uaddr; eaddr = (uintptr_t)uaddr + len; /* verify addresses are valid for userspace */ return ((saddr == eaddr) || (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS)); } /* * This function should return either EINTR or ERESTART depending on * the signal type sent to this thread: */ static int linux_get_error(struct task_struct *task, int error) { /* check for signal type interrupt code */ if (error == EINTR || error == ERESTARTSYS || error == ERESTART) { error = -linux_schedule_get_interrupt_value(task); if (error == 0) error = EINTR; } return (error); } static int linux_file_ioctl_sub(struct file *fp, struct linux_file *filp, const struct file_operations *fop, u_long cmd, caddr_t data, struct thread *td) { struct task_struct *task = current; unsigned size; int error; size = IOCPARM_LEN(cmd); /* refer to logic in sys_ioctl() */ if (size > 0) { /* * Setup hint for linux_copyin() and linux_copyout(). * * Background: Linux code expects a user-space address * while FreeBSD supplies a kernel-space address. 
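Distilled, the trick is a sentinel address window: the Linux-style handler is given a fake pointer inside [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR), and the copy routines detect that window and translate back to the real kernel buffer. A userspace sketch with hypothetical names:

#include <stdint.h>
#include <string.h>

#define WIN_MIN 0x10000UL               /* hypothetical sentinel window */
#define WIN_MAX (WIN_MIN + 0x4000UL)

static uint8_t *win_buf;                /* the real kernel-side buffer */
static size_t win_len;

static int
win_remap(const void **addr, size_t len)
{
    uintptr_t v = (uintptr_t)*addr;

    if (v < WIN_MIN || v >= WIN_MAX)
        return (0);         /* a genuine user address */
    v -= WIN_MIN;           /* offset into the window */
    if (len > win_len || v + len > win_len) {
        *addr = NULL;       /* out of bounds: fault it */
        return (1);
    }
    *addr = win_buf + v;    /* re-add the buffer base */
    return (1);
}

int
win_copyin(const void *uaddr, void *kaddr, size_t len)
{
    if (win_remap(&uaddr, len)) {
        if (uaddr == NULL)
            return (-1);    /* EFAULT analogue */
        memcpy(kaddr, uaddr, len);
        return (0);
    }
    /* Otherwise a real user pointer: the kernel would copyin(9) here. */
    return (-1);
}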
*/ task->bsd_ioctl_data = data; task->bsd_ioctl_len = size; data = (void *)LINUX_IOCTL_MIN_PTR; } else { /* fetch user-space pointer */ data = *(void **)data; } #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* try the compat IOCTL handler first */ if (fop->compat_ioctl != NULL) { error = -OPW(fp, td, fop->compat_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } /* fallback to the regular IOCTL handler, if any */ if (error == ENOTTY && fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } } else #endif { if (fop->unlocked_ioctl != NULL) { error = -OPW(fp, td, fop->unlocked_ioctl(filp, cmd, (u_long)data)); } else { error = ENOTTY; } } if (size > 0) { task->bsd_ioctl_data = NULL; task->bsd_ioctl_len = 0; } if (error == EWOULDBLOCK) { /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } else { error = linux_get_error(task, error); } return (error); } #define LINUX_POLL_TABLE_NORMAL ((poll_table *)1) /* * This function atomically updates the poll wakeup state and returns * the previous state at the time of update. */ static uint8_t linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) { int c, old; c = v->counter; while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) c = old; return (c); } static int linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ }; struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_QUEUED: linux_poll_wakeup(filp); return (1); default: return (0); } } void linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, }; /* check if we are called inside the select system call */ if (p == LINUX_POLL_TABLE_NORMAL) selrecord(curthread, &filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_INIT: /* NOTE: file handles can only belong to one wait-queue */ filp->f_wait_queue.wqh = wqh; filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; add_wait_queue(wqh, &filp->f_wait_queue.wq); atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); break; default: break; } } static void linux_poll_wait_dequeue(struct linux_file *filp) { static const uint8_t state[LINUX_FWQ_STATE_MAX] = { [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, }; seldrain(&filp->f_selinfo); switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { case LINUX_FWQ_STATE_NOT_READY: case LINUX_FWQ_STATE_QUEUED: case LINUX_FWQ_STATE_READY: remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); break; default: break; } } void linux_poll_wakeup(struct linux_file *filp) { /* this function should be 
NULL-safe */ if (filp == NULL) return; selwakeup(&filp->f_selinfo); spin_lock(&filp->f_kqlock); filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); spin_unlock(&filp->f_kqlock); } static void linux_file_kqfilter_detach(struct knote *kn) { struct linux_file *filp = kn->kn_hook; spin_lock(&filp->f_kqlock); knlist_remove(&filp->f_selinfo.si_note, kn, 1); spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter_read_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0); } static int linux_file_kqfilter_write_event(struct knote *kn, long hint) { struct linux_file *filp = kn->kn_hook; mtx_assert(&filp->f_kqlock.m, MA_OWNED); return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0); } static struct filterops linux_dev_kqfiltops_read = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_read_event, }; static struct filterops linux_dev_kqfiltops_write = { .f_isfd = 1, .f_detach = linux_file_kqfilter_detach, .f_event = linux_file_kqfilter_write_event, }; static void linux_file_kqfilter_poll(struct linux_file *filp, int kqflags) { struct thread *td; const struct file_operations *fop; struct linux_cdev *ldev; int temp; if ((filp->f_kqflags & kqflags) == 0) return; td = curthread; linux_get_fop(filp, &fop, &ldev); /* get the latest polling state */ temp = OPW(filp->_file, td, fop->poll(filp, NULL)); linux_drop_fop(ldev); spin_lock(&filp->f_kqlock); /* clear kqflags */ filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ | LINUX_KQ_FLAG_NEED_WRITE); /* update kqflags */ if ((temp & (POLLIN | POLLOUT)) != 0) { if ((temp & POLLIN) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ; if ((temp & POLLOUT) != 0) filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE; /* make sure the "knote" gets woken up */ KNOTE_LOCKED(&filp->f_selinfo.si_note, 0); } spin_unlock(&filp->f_kqlock); } static int linux_file_kqfilter(struct file *file, struct knote *kn) { struct linux_file *filp; struct thread *td; int error; td = curthread; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll == NULL) return (EINVAL); spin_lock(&filp->f_kqlock); switch (kn->kn_filter) { case EVFILT_READ: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ; kn->kn_fop = &linux_dev_kqfiltops_read; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; case EVFILT_WRITE: filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE; kn->kn_fop = &linux_dev_kqfiltops_write; kn->kn_hook = filp; knlist_add(&filp->f_selinfo.si_note, kn, 1); error = 0; break; default: error = EINVAL; break; } spin_unlock(&filp->f_kqlock); if (error == 0) { linux_set_current(td); /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE); } return (error); } static int linux_file_mmap_single(struct file *fp, const struct file_operations *fop, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot, bool is_shared, struct thread *td) { struct task_struct *task; struct vm_area_struct *vmap; struct mm_struct *mm; struct linux_file *filp; vm_memattr_t attr; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; if (fop->mmap == NULL) return (EOPNOTSUPP); linux_set_current(td); /* * The same VM object might be shared by multiple processes * and the mm_struct is usually freed when a process 
exits. * * The atomic reference below makes sure the mm_struct is * available as long as the vmap is in the linux_vma_head. */ task = current; mm = task->mm; if (atomic_inc_not_zero(&mm->mm_users) == 0) return (EINVAL); vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); vmap->vm_start = 0; vmap->vm_end = size; vmap->vm_pgoff = *offset / PAGE_SIZE; vmap->vm_pfn = 0; vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); if (is_shared) vmap->vm_flags |= VM_SHARED; vmap->vm_ops = NULL; vmap->vm_file = get_file(filp); vmap->vm_mm = mm; if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { error = linux_get_error(task, EINTR); } else { error = -OPW(fp, td, fop->mmap(filp, vmap)); error = linux_get_error(task, error); up_write(&vmap->vm_mm->mmap_sem); } if (error != 0) { linux_cdev_handle_free(vmap); return (error); } attr = pgprot2cachemode(vmap->vm_page_prot); if (vmap->vm_ops != NULL) { struct vm_area_struct *ptr; void *vm_private_data; bool vm_no_fault; if (vmap->vm_ops->open == NULL || vmap->vm_ops->close == NULL || vmap->vm_private_data == NULL) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); return (EINVAL); } vm_private_data = vmap->vm_private_data; rw_wlock(&linux_vma_lock); TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { if (ptr->vm_private_data == vm_private_data) break; } /* check if there is an existing VM area struct */ if (ptr != NULL) { /* check if the VM area structure is invalid */ if (ptr->vm_ops == NULL || ptr->vm_ops->open == NULL || ptr->vm_ops->close == NULL) { error = ESTALE; vm_no_fault = 1; } else { error = EEXIST; vm_no_fault = (ptr->vm_ops->fault == NULL); } } else { /* insert VM area structure into list */ TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); error = 0; vm_no_fault = (vmap->vm_ops->fault == NULL); } rw_wunlock(&linux_vma_lock); if (error != 0) { /* free allocated VM area struct */ linux_cdev_handle_free(vmap); /* check for stale VM area struct */ if (error != EEXIST) return (error); } /* check if there is no fault handler */ if (vm_no_fault) { *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, &linux_cdev_pager_ops[1], size, nprot, *offset, td->td_ucred); } else { *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, &linux_cdev_pager_ops[0], size, nprot, *offset, td->td_ucred); } /* check if allocating the VM object failed */ if (*object == NULL) { if (error == 0) { /* remove VM area struct from list */ linux_cdev_handle_remove(vmap); /* free allocated VM area struct */ linux_cdev_handle_free(vmap); } return (EINVAL); } } else { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, nprot, 0, td->td_ucred); linux_cdev_handle_free(vmap); if (*object == NULL) { sglist_free(sg); return (EINVAL); } } if (attr != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, attr); VM_OBJECT_WUNLOCK(*object); } *offset = 0; return (0); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_fdopen = linux_dev_fdopen, .d_name = "lkpidev", }; static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > 
DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->read != NULL) { bytes = OPW(file, td, fop->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); linux_drop_fop(ldev); return (error); } static int linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; ssize_t bytes; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; /* XXX no support for I/O vectors currently */ if (uio->uio_iovcnt != 1) return (EOPNOTSUPP); if (uio->uio_resid > DEVFS_IOSIZE_MAX) return (EINVAL); linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->write != NULL) { bytes = OPW(file, td, fop->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset)); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; error = 0; } else { error = linux_get_error(current, -bytes); } } else error = ENXIO; /* update kqfilter status, if any */ linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); linux_drop_fop(ldev); return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; linux_set_current(td); linux_get_fop(filp, &fop, &ldev); if (fop->poll != NULL) { revents = OPW(file, td, fop->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; } else { revents = 0; } linux_drop_fop(ldev); return (revents); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int (*release)(struct inode *, struct linux_file *); const struct file_operations *fop; struct linux_cdev *ldev; int error; filp = (struct linux_file *)file->f_data; KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); if (td == NULL) td = curthread; error = 0; filp->f_flags = file->f_flag; linux_set_current(td); linux_poll_wait_dequeue(filp); linux_get_fop(filp, &fop, &ldev); /* * Always use the real release function, if any, to avoid * leaking device resources: */ release = filp->f_op->release; if (release != NULL) error = -OPW(file, td, release(filp->f_vnode, filp)); funsetown(&filp->f_sigio); if (filp->f_vnode != NULL) vdrop(filp->f_vnode); linux_drop_fop(ldev); ldev = filp->f_cdev; if (ldev != NULL) linux_cdev_deref(ldev); linux_synchronize_rcu(RCU_TYPE_REGULAR); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct fiodgname_arg *fgn; const char *p; int error, i; error = 0; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; linux_get_fop(filp, &fop, &ldev); linux_set_current(td); switch (cmd) { case FIONBIO: break; case FIOASYNC: if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); 
break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) { if (fop->fasync == NULL) break; error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC)); } break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; case FIODGNAME: #ifdef COMPAT_FREEBSD32 case FIODGNAME_32: #endif if (filp->f_cdev == NULL || filp->f_cdev->cdev == NULL) { error = ENXIO; break; } fgn = data; p = devtoname(filp->f_cdev->cdev); i = strlen(p) + 1; if (i > fgn->len) { error = EINVAL; break; } error = copyout(p, fiodgname_buf_get_ptr(fgn, cmd), i); break; default: error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td); break; } linux_drop_fop(ldev); return (error); } static int linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot, vm_prot_t maxprot, int flags, struct file *fp, vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp) { /* * Character devices do not provide private mappings * of any kind: */ if ((maxprot & VM_PROT_WRITE) == 0 && (prot & VM_PROT_WRITE) != 0) return (EACCES); if ((flags & (MAP_PRIVATE | MAP_COPY)) != 0) return (EINVAL); return (linux_file_mmap_single(fp, fop, foff, objsize, objp, (int)prot, (flags & MAP_SHARED) ? true : false, td)); } static int linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, struct thread *td) { struct linux_file *filp; const struct file_operations *fop; struct linux_cdev *ldev; struct mount *mp; struct vnode *vp; vm_object_t object; vm_prot_t maxprot; int error; filp = (struct linux_file *)fp->f_data; vp = filp->f_vnode; if (vp == NULL) return (EOPNOTSUPP); /* * Ensure that file and memory protections are * compatible. */ mp = vp->v_mount; if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { maxprot = VM_PROT_NONE; if ((prot & VM_PROT_EXECUTE) != 0) return (EACCES); } else maxprot = VM_PROT_EXECUTE; if ((fp->f_flag & FREAD) != 0) maxprot |= VM_PROT_READ; else if ((prot & VM_PROT_READ) != 0) return (EACCES); /* * If we are sharing potential changes via MAP_SHARED and we * are trying to get write permission although we opened it * without asking for it, bail out. * * Note that most character devices always share mappings. * * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE * requests rather than doing it here. 
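Distilled into a userspace sketch, the negotiation above derives the maximum protection a mapping may ever gain from how the descriptor was opened (hypothetical helper; executable permission, MNT_NOEXEC, and capability clamping omitted). Character-device mappings here are effectively always shared, so requested protections must fit inside that maximum, whereas regular files would additionally allow copy-on-write private write mappings:

#include <fcntl.h>
#include <sys/mman.h>

/* accmode is open_flags & O_ACCMODE; returns -1 for an EACCES analogue. */
static int
mmap_maxprot(int accmode, int prot, int map_flags)
{
    int maxprot = PROT_NONE;

    if (accmode != O_WRONLY)            /* readable descriptor */
        maxprot |= PROT_READ;
    if ((map_flags & MAP_SHARED) != 0 && accmode != O_RDONLY)
        maxprot |= PROT_WRITE;          /* writable and shared */
    if ((prot & ~maxprot) != 0)
        return (-1);
    return (maxprot);
}

For example, mmap_maxprot(O_RDONLY, PROT_WRITE, MAP_SHARED) fails, mirroring the FWRITE check just below.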
*/ if ((flags & MAP_SHARED) != 0) { if ((fp->f_flag & FWRITE) != 0) maxprot |= VM_PROT_WRITE; else if ((prot & VM_PROT_WRITE) != 0) return (EACCES); } maxprot &= cap_maxprot; linux_get_fop(filp, &fop, &ldev); error = linux_file_mmap_sub(td, size, prot, maxprot, flags, fp, &foff, fop, &object); if (error != 0) goto out; error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, foff, FALSE, td); if (error != 0) vm_object_deallocate(object); out: linux_drop_fop(ldev); return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred) { struct linux_file *filp; struct vnode *vp; int error; filp = (struct linux_file *)fp->f_data; if (filp->f_vnode == NULL) return (EOPNOTSUPP); vp = filp->f_vnode; vn_lock(vp, LK_SHARED | LK_RETRY); error = VOP_STAT(vp, sb, curthread->td_ucred, NOCRED); VOP_UNLOCK(vp); return (error); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { struct linux_file *filp; struct vnode *vp; int error; filp = fp->f_data; vp = filp->f_vnode; if (vp == NULL) { error = 0; kif->kf_type = KF_TYPE_DEV; } else { vref(vp); FILEDESC_SUNLOCK(fdp); error = vn_fill_kinfo_vnode(vp, kif); vrele(vp); kif->kf_type = KF_TYPE_VNODE; FILEDESC_SLOCK(fdp); } return (error); } unsigned int linux_iminor(struct inode *inode) { struct linux_cdev *ldev; if (inode == NULL || inode->v_rdev == NULL || inode->v_rdev->si_devsw != &linuxcdevsw) return (-1U); ldev = inode->v_rdev->si_drv1; if (ldev == NULL) return (-1U); return (minor(ldev->dev)); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = linux_file_write, .fo_truncate = invfo_truncate, .fo_kqfilter = linux_file_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_mmap = linux_file_mmap, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, .fo_flags = DFLAG_PASSABLE, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
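The same bookkeeping problem exists in userspace: munmap(2), like pmap_unmapdev(9), needs the original length back, so a side table keyed by address can expose a free()-style single-argument unmap. A sketch with one mutex-protected list standing in for the hash buckets (the kernel version buckets on (addr >> PAGE_SHIFT) purely to keep the rare lookups short):

#include <sys/mman.h>
#include <pthread.h>
#include <stdlib.h>

struct map_ent {
    struct map_ent *next;
    void *addr;
    size_t size;
};

static struct map_ent *maps;
static pthread_mutex_t maps_mtx = PTHREAD_MUTEX_INITIALIZER;

void *
map_alloc(size_t size)
{
    struct map_ent *e;
    void *p;

    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_PRIVATE, -1, 0);
    if (p == MAP_FAILED)
        return (NULL);
    if ((e = malloc(sizeof(*e))) == NULL) {
        munmap(p, size);
        return (NULL);
    }
    e->addr = p;
    e->size = size;     /* remember the length munmap() will need */
    pthread_mutex_lock(&maps_mtx);
    e->next = maps;
    maps = e;
    pthread_mutex_unlock(&maps_mtx);
    return (p);
}

void
map_free(void *p)
{
    struct map_ent **ep, *e;

    pthread_mutex_lock(&maps_mtx);
    for (ep = &maps; (e = *ep) != NULL; ep = &e->next)
        if (e->addr == p)
            break;
    if (e != NULL)
        *ep = e->next;
    pthread_mutex_unlock(&maps_mtx);
    if (e != NULL) {
        munmap(e->addr, e->size);
        free(e);
    }
}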
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv) pmap_unmapdev(addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } static char * devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); if (dev != NULL) p = devm_kmalloc(dev, len + 1, gfp); else p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { return (devm_kvasprintf(NULL, gfp, fmt, ap)); } char * lkpi_devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); p = devm_kvasprintf(dev, gfp, fmt, ap); va_end(ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; timer = context; if (linux_set_current_flags(curthread, M_NOWAIT)) { /* try again later */ callout_reset(&timer->callout, 1, &linux_timer_callback_wrapper, timer); return; } timer->function(timer->data); } int mod_timer(struct timer_list *timer, int expires) { int ret; timer->expires = expires; ret = callout_reset(&timer->callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); MPASS(ret == 0 || ret == 1); return (ret == 1); } void add_timer(struct timer_list *timer) { callout_reset(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } void add_timer_on(struct timer_list *timer, int cpu) { callout_reset_on(&timer->callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer, cpu); } int del_timer(struct timer_list *timer) { if (callout_stop(&(timer)->callout) == -1) return (0); return (1); } int del_timer_sync(struct timer_list *timer) { if (callout_drain(&(timer)->callout) == -1) return (0); return (1); } /* greatest common divisor, Euclid's algorithm */ static uint64_t lkpi_gcd_64(uint64_t a, uint64_t b) { uint64_t an; uint64_t bn; while (b != 0) { an = b; bn = a % b; a = an; b = bn; } return (a); } uint64_t lkpi_nsec2hz_rem; uint64_t lkpi_nsec2hz_div = 1000000000ULL; uint64_t lkpi_nsec2hz_max; uint64_t lkpi_usec2hz_rem; uint64_t lkpi_usec2hz_div = 1000000ULL; uint64_t lkpi_usec2hz_max; uint64_t lkpi_msec2hz_rem; uint64_t lkpi_msec2hz_div = 1000ULL; uint64_t lkpi_msec2hz_max; static void linux_timer_init(void *arg) { uint64_t gcd; /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; /* compute some internal constants */ lkpi_nsec2hz_rem = hz; lkpi_usec2hz_rem = hz; lkpi_msec2hz_rem = hz; gcd = lkpi_gcd_64(lkpi_nsec2hz_rem, lkpi_nsec2hz_div); lkpi_nsec2hz_rem /= gcd; lkpi_nsec2hz_div /= gcd; lkpi_nsec2hz_max = -1ULL / lkpi_nsec2hz_rem; gcd = lkpi_gcd_64(lkpi_usec2hz_rem, lkpi_usec2hz_div); lkpi_usec2hz_rem /= gcd; lkpi_usec2hz_div /= gcd; lkpi_usec2hz_max = -1ULL / lkpi_usec2hz_rem; gcd = lkpi_gcd_64(lkpi_msec2hz_rem, lkpi_msec2hz_div); lkpi_msec2hz_rem /= gcd; lkpi_msec2hz_div /= gcd; lkpi_msec2hz_max = -1ULL / lkpi_msec2hz_rem; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); if (all) { c->done = UINT_MAX; wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); } else { if (c->done != UINT_MAX) c->done++; wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); } sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals.
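Before the wait side is defined, a hedged usage sketch of the completion API that linux_complete_common() and the wait routines below implement; this is a LinuxKPI-consumer-style fragment, not a loadable module, and the producer()/consumer() names and the ev variable are hypothetical:

#include <linux/completion.h>

static struct completion ev;	/* hypothetical event */

static void
producer(void)
{
	complete(&ev);	/* c->done++ (saturating), wake one waiter */
}

static int
consumer(void)
{
	int error;

	init_completion(&ev);
	/* ... arrange for producer() to run ... */
	error = wait_for_completion_interruptible(&ev);
	if (error == -ERESTARTSYS)
		return (error);	/* a signal arrived before done became non-zero */
	return (0);
}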
*/ int linux_wait_for_common(struct completion *c, int flags) { struct task_struct *task; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; error = 0; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { DROP_GIANT(); error = -sleepq_wait_sig(c, 0); PICKUP_GIANT(); if (error != 0) { linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; goto intr; } } else { DROP_GIANT(); sleepq_wait(c, 0); PICKUP_GIANT(); } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); intr: return (error); } /* * Time limited wait for done != 0 with or without signals. */ int linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) { struct task_struct *task; int end = jiffies + timeout; int error; if (SCHEDULER_STOPPED()) return (0); task = current; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); DROP_GIANT(); if (flags & SLEEPQ_INTERRUPTIBLE) error = -sleepq_timedwait_sig(c, 0); else error = -sleepq_timedwait(c, 0); PICKUP_GIANT(); if (error != 0) { /* check for timeout */ if (error == -EWOULDBLOCK) { error = 0; /* timeout */ } else { /* signal happened */ linux_schedule_save_interrupt_value(task, error); error = -ERESTARTSYS; } goto done; } } if (c->done != UINT_MAX) c->done--; sleepq_release(c); /* return how many jiffies are left */ error = linux_timer_jiffies_until(end); done: return (error); } int linux_try_wait_for_completion(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); if (c->done != 0 && c->done != UINT_MAX) c->done--; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; sleepq_lock(c); isdone = (c->done != 0); sleepq_release(c); return (isdone); } static void linux_cdev_deref(struct linux_cdev *ldev) { if (refcount_release(&ldev->refs) && ldev->kobj.ktype == &linux_cdev_ktype) kfree(ldev); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; linux_destroy_dev(cdev); linux_cdev_deref(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct cdev *cdev; struct linux_cdev *ldev; ldev = container_of(kobj, struct linux_cdev, kobj); cdev = ldev->cdev; if (cdev != NULL) { destroy_dev(cdev); ldev->cdev = NULL; } kobject_put(kobj->parent); } int linux_cdev_device_add(struct linux_cdev *ldev, struct device *dev) { int ret; if (dev->devt != 0) { /* Set parent kernel object. 
*/ ldev->kobj.parent = &dev->kobj; /* * Unlike Linux we require the kobject of the * character device structure to have a valid name * before calling this function: */ if (ldev->kobj.name == NULL) return (-EINVAL); ret = cdev_add(ldev, dev->devt, 1); if (ret) return (ret); } ret = device_add(dev); if (ret != 0 && dev->devt != 0) cdev_del(ldev); return (ret); } void linux_cdev_device_del(struct linux_cdev *ldev, struct device *dev) { device_del(dev); if (dev->devt != 0) cdev_del(ldev); } static void linux_destroy_dev(struct linux_cdev *ldev) { if (ldev->cdev == NULL) return; MPASS((ldev->siref & LDEV_SI_DTR) == 0); MPASS(ldev->kobj.ktype == &linux_cdev_ktype); atomic_set_int(&ldev->siref, LDEV_SI_DTR); while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0) pause("ldevdtr", hz / 4); destroy_dev(ldev->cdev); ldev->cdev = NULL; } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, &ni); else nb->notifier_call(nb, NETDEV_DOWN, &ni); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_REGISTER, &ni); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_UNREGISTER, &ni); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEADDR, &ni); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; struct netdev_notifier_info ni; nb = arg; ni.ifp = ifp; ni.dev = (struct net_device *)ifp; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, &ni); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } struct list_sort_thunk { int (*cmp)(void *, 
struct list_head *, struct list_head *); void *priv; }; static inline int -linux_le_cmp(void *priv, const void *d1, const void *d2) +linux_le_cmp(const void *d1, const void *d2, void *priv) { struct list_head *le1, *le2; struct list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct list_sort_thunk thunk; struct list_head **ar, *le; size_t count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; - qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); + qsort_r(ar, count, sizeof(struct list_head *), linux_le_cmp, &thunk); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_KMALLOC); } #if defined(__i386__) || defined(__amd64__) int linux_wbinvd_on_all_cpus(void) { pmap_invalidate_cache(); return (0); } #endif int linux_on_each_cpu(void callback(void *), void *data) { smp_rendezvous(smp_no_rendezvous_barrier, callback, smp_no_rendezvous_barrier, data); return (0); } int linux_in_atomic(void) { return ((curthread->td_pflags & TDP_NOFAULTING) != 0); } struct linux_cdev * linux_find_cdev(const char *name, unsigned major, unsigned minor) { dev_t dev = MKDEV(major, minor); struct cdev *cdev; dev_lock(); LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { struct linux_cdev *ldev = cdev->si_drv1; if (ldev->dev == dev && strcmp(kobject_name(&ldev->kobj), name) == 0) { break; } } dev_unlock(); return (cdev != NULL ? cdev->si_drv1 : NULL); } int __register_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add(cdev, makedev(major, i), 1); if (ret != 0) break; } return (ret); } int __register_chrdev_p(unsigned int major, unsigned int baseminor, unsigned int count, const char *name, const struct file_operations *fops, uid_t uid, gid_t gid, int mode) { struct linux_cdev *cdev; int ret = 0; int i; for (i = baseminor; i < baseminor + count; i++) { cdev = cdev_alloc(); cdev->ops = fops; kobject_set_name(&cdev->kobj, name); ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); if (ret != 0) break; } return (ret); } void __unregister_chrdev(unsigned int major, unsigned int baseminor, unsigned int count, const char *name) { struct linux_cdev *cdevp; int i; for (i = baseminor; i < baseminor + count; i++) { cdevp = linux_find_cdev(name, major, i); if (cdevp != NULL) cdev_del(cdevp); } } void linux_dump_stack(void) { #ifdef STACK struct stack st; stack_save(&st); stack_print(&st); #endif } int linuxkpi_net_ratelimit(void) { return (ppsratecheck(&lkpi_net_lastlog, &lkpi_net_curpps, lkpi_net_maxpps)); } struct io_mapping * io_mapping_create_wc(resource_size_t base, unsigned long size) { struct io_mapping *mapping; mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (mapping == NULL) return (NULL); return (io_mapping_init_wc(mapping, base, size)); } #if defined(__i386__) || defined(__amd64__) bool linux_cpu_has_clflush; struct cpuinfo_x86 boot_cpu_data; #endif static void linux_compat_init(void *arg) { struct 
sysctl_oid *rootoid; int i; #if defined(__i386__) || defined(__amd64__) linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); boot_cpu_data.x86_clflush_size = cpu_clflush_line_size; boot_cpu_data.x86 = ((cpu_id & 0xf0000) >> 12) | ((cpu_id & 0xf0) >> 4); #endif rw_init(&linux_vma_lock, "lkpi-vma-lock"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); init_waitqueue_head(&linux_bit_waitq); init_waitqueue_head(&linux_var_waitq); CPU_COPY(&all_cpus, &cpu_online_mask); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); mtx_destroy(&vmmaplock); spin_lock_destroy(&pci_lock); rw_destroy(&linux_vma_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work as expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); diff --git a/sys/dev/bhnd/nvram/bhnd_nvram_store_subr.c b/sys/dev/bhnd/nvram/bhnd_nvram_store_subr.c index 730b9d51da45..fd05648147f5 100644 --- a/sys/dev/bhnd/nvram/bhnd_nvram_store_subr.c +++ b/sys/dev/bhnd/nvram/bhnd_nvram_store_subr.c @@ -1,1182 +1,1181 @@ /*- * Copyright (c) 2015-2016 Landon Fuller * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include #include #include #ifdef _KERNEL #include #include #include #else /* !_KERNEL */ #include #include #include #include #include #include #include #include #endif /* _KERNEL */ #include "bhnd_nvram_private.h" #include "bhnd_nvram_datavar.h" #include "bhnd_nvram_storevar.h" -static int bhnd_nvstore_idx_cmp(void *ctx, - const void *lhs, const void *rhs); +static int bhnd_nvstore_idx_cmp(const void *lhs, const void *rhs, void *ctx); /** * Allocate and initialize a new path instance. * * The caller is responsible for deallocating the instance via * bhnd_nvstore_path_free(). * * @param path_str The path's canonical string representation. * @param path_len The length of @p path_str. * * @retval non-NULL success * @retval NULL if allocation fails. */ bhnd_nvstore_path * bhnd_nvstore_path_new(const char *path_str, size_t path_len) { bhnd_nvstore_path *path; /* Allocate new entry */ path = bhnd_nv_malloc(sizeof(*path)); if (path == NULL) return (NULL); path->index = NULL; path->num_vars = 0; path->pending = bhnd_nvram_plist_new(); if (path->pending == NULL) goto failed; path->path_str = bhnd_nv_strndup(path_str, path_len); if (path->path_str == NULL) goto failed; return (path); failed: if (path->pending != NULL) bhnd_nvram_plist_release(path->pending); if (path->path_str != NULL) bhnd_nv_free(path->path_str); bhnd_nv_free(path); return (NULL); } /** * Free an NVRAM path instance, releasing all associated resources. */ void bhnd_nvstore_path_free(struct bhnd_nvstore_path *path) { /* Free the per-path index */ if (path->index != NULL) bhnd_nvstore_index_free(path->index); bhnd_nvram_plist_release(path->pending); bhnd_nv_free(path->path_str); bhnd_nv_free(path); } /** * Allocate and initialize a new index instance with @p capacity. * * The caller is responsible for deallocating the instance via * bhnd_nvstore_index_free(). * * @param capacity The maximum number of variables to be indexed. * * @retval non-NULL success * @retval NULL if allocation fails. */ bhnd_nvstore_index * bhnd_nvstore_index_new(size_t capacity) { bhnd_nvstore_index *index; size_t bytes; /* Allocate and populate variable index */ bytes = sizeof(struct bhnd_nvstore_index) + (sizeof(void *) * capacity); index = bhnd_nv_malloc(bytes); if (index == NULL) { BHND_NV_LOG("error allocating %zu byte index\n", bytes); return (NULL); } index->count = 0; index->capacity = capacity; return (index); } /** * Free an index instance, releasing all associated resources. * * @param index An index instance previously allocated via * bhnd_nvstore_index_new(). */ void bhnd_nvstore_index_free(bhnd_nvstore_index *index) { bhnd_nv_free(index); } /** * Append a new NVRAM variable's @p cookiep value to @p index. * * After one or more append requests, the index must be prepared via * bhnd_nvstore_index_prepare() before any indexed lookups are performed. * * @param sc The NVRAM store from which NVRAM values will be queried. * @param index The index to be modified. 
* @param cookiep The cookiep value (as provided by the backing NVRAM * data instance of @p sc) to be included in @p index. * * @retval 0 success * @retval ENOMEM if appending an additional entry would exceed the * capacity of @p index. */ int bhnd_nvstore_index_append(struct bhnd_nvram_store *sc, bhnd_nvstore_index *index, void *cookiep) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); if (index->count >= index->capacity) return (ENOMEM); index->cookiep[index->count] = cookiep; index->count++; return (0); } /* sort function for bhnd_nvstore_index_prepare() */ static int -bhnd_nvstore_idx_cmp(void *ctx, const void *lhs, const void *rhs) +bhnd_nvstore_idx_cmp(const void *lhs, const void *rhs, void *ctx) { struct bhnd_nvram_store *sc; void *l_cookiep, *r_cookiep; const char *l_str, *r_str; const char *l_name, *r_name; int order; sc = ctx; l_cookiep = *(void * const *)lhs; r_cookiep = *(void * const *)rhs; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Fetch string pointers from the cookiep values */ l_str = bhnd_nvram_data_getvar_name(sc->data, l_cookiep); r_str = bhnd_nvram_data_getvar_name(sc->data, r_cookiep); /* Trim device path prefixes */ if (sc->data_caps & BHND_NVRAM_DATA_CAP_DEVPATHS) { l_name = bhnd_nvram_trim_path_name(l_str); r_name = bhnd_nvram_trim_path_name(r_str); } else { l_name = l_str; r_name = r_str; } /* Perform comparison */ order = strcmp(l_name, r_name); if (order != 0 || lhs == rhs) return (order); /* If the backing data incorrectly contains variables with duplicate * names, we need a sort order that provides stable behavior. * * Since Broadcom's own code varies wildly on this question, we just * use a simple precedence rule: The first declaration of a variable * takes precedence. */ return (bhnd_nvram_data_getvar_order(sc->data, l_cookiep, r_cookiep)); } /** * Prepare @p index for querying via bhnd_nvstore_index_lookup(). * * After one or more append requests, the index must be prepared via * bhnd_nvstore_index_prepare() before any indexed lookups are performed. * * @param sc The NVRAM store from which NVRAM values will be queried. * @param index The index to be prepared. * * @retval 0 success * @retval non-zero if preparing @p index otherwise fails, a regular unix * error code will be returned. */ int bhnd_nvstore_index_prepare(struct bhnd_nvram_store *sc, bhnd_nvstore_index *index) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Sort the index table */ - qsort_r(index->cookiep, index->count, sizeof(index->cookiep[0]), sc, - bhnd_nvstore_idx_cmp); + qsort_r(index->cookiep, index->count, sizeof(index->cookiep[0]), + bhnd_nvstore_idx_cmp, sc); return (0); } /** * Return a borrowed reference to the root path node. * * @param sc The NVRAM store. */ bhnd_nvstore_path * bhnd_nvstore_get_root_path(struct bhnd_nvram_store *sc) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); return (sc->root_path); } /** * Return true if @p path is the root path node. * * @param sc The NVRAM store. * @param path The path to query. */ bool bhnd_nvstore_is_root_path(struct bhnd_nvram_store *sc, bhnd_nvstore_path *path) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); return (sc->root_path == path); } /** * Return the update entry matching @p name in @p path, or NULL if no entry * found. * * @param sc The NVRAM store. * @param path The path to query. * @param name The NVRAM variable name to search for in @p path's update list. * * @retval non-NULL success * @retval NULL if @p name is not found in @p path. 
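Taken together, the hunks above show the pattern this change applies across the tree: the comparator now receives its context ("thunk") as the trailing argument, and qsort_r() takes the comparator before the context. A minimal userland sketch of the new calling convention; it assumes a libc providing the POSIX/glibc-style qsort_r(3) (FreeBSD 14, or glibc with _GNU_SOURCE), and the sample names and ncalls counter are made up:

#define _GNU_SOURCE	/* for qsort_r() on glibc; harmless elsewhere */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* POSIX-style comparator: the context pointer is now the last argument. */
static int
cmp_str(const void *lhs, const void *rhs, void *ctx)
{
	int *ncalls = ctx;	/* stands in for the sort thunk */

	(*ncalls)++;
	return (strcmp(*(const char *const *)lhs, *(const char *const *)rhs));
}

int
main(void)
{
	const char *names[] = { "wl0gpio0", "boardtype", "macaddr" };
	int ncalls = 0;

	/* New argument order: comparator before the context pointer. */
	qsort_r(names, 3, sizeof(names[0]), cmp_str, &ncalls);
	printf("%s %s %s (%d comparisons)\n", names[0], names[1], names[2],
	    ncalls);
	return (0);
}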
*/ bhnd_nvram_prop * bhnd_nvstore_path_get_update(struct bhnd_nvram_store *sc, bhnd_nvstore_path *path, const char *name) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); return (bhnd_nvram_plist_get_prop(path->pending, name)); } /** * Register or remove an update record for @p name in @p path. * * @param sc The NVRAM store. * @param path The path to be modified. * @param name The path-relative variable name to be modified. * @param value The new value. A value of BHND_NVRAM_TYPE_NULL denotes deletion. * * @retval 0 success * @retval ENOMEM if allocation fails. * @retval ENOENT if @p name is unknown. * @retval EINVAL if @p value is NULL, and deletion of @p name is not * supported. * @retval EINVAL if @p value cannot be converted to a supported value * type. */ int bhnd_nvstore_path_register_update(struct bhnd_nvram_store *sc, bhnd_nvstore_path *path, const char *name, bhnd_nvram_val *value) { bhnd_nvram_val *prop_val; const char *full_name; void *cookiep; char *namebuf; int error; bool nvram_committed; namebuf = NULL; prop_val = NULL; /* Determine whether the variable is currently defined in the * backing NVRAM data, and derive its full path-prefixed name */ nvram_committed = false; cookiep = bhnd_nvstore_path_data_lookup(sc, path, name); if (cookiep != NULL) { /* Variable is defined in the backing data */ nvram_committed = true; /* Use the existing variable name */ full_name = bhnd_nvram_data_getvar_name(sc->data, cookiep); } else if (path == sc->root_path) { /* No prefix required for root path */ full_name = name; } else { bhnd_nvstore_alias *alias; int len; /* New variable is being set; we need to determine the * appropriate path prefix */ alias = bhnd_nvstore_find_alias(sc, path->path_str); if (alias != NULL) { /* Use <alias>:name */ len = bhnd_nv_asprintf(&namebuf, "%lu:%s", alias->alias, name); } else { /* Use path/name */ len = bhnd_nv_asprintf(&namebuf, "%s/%s", path->path_str, name); } if (len < 0) return (ENOMEM); full_name = namebuf; } /* Allow the data store to filter the NVRAM operation */ if (bhnd_nvram_val_type(value) == BHND_NVRAM_TYPE_NULL) { error = bhnd_nvram_data_filter_unsetvar(sc->data, full_name); if (error) { BHND_NV_LOG("cannot unset property %s: %d\n", full_name, error); goto cleanup; } if ((prop_val = bhnd_nvram_val_copy(value)) == NULL) { error = ENOMEM; goto cleanup; } } else { error = bhnd_nvram_data_filter_setvar(sc->data, full_name, value, &prop_val); if (error) { BHND_NV_LOG("cannot set property %s: %d\n", full_name, error); goto cleanup; } } /* Add relative variable name to the per-path update list */ if (bhnd_nvram_val_type(value) == BHND_NVRAM_TYPE_NULL && !nvram_committed) { /* This is a deletion request for a variable not defined in * our backing store; we can simply remove the corresponding * update entry. */ bhnd_nvram_plist_remove(path->pending, name); } else { /* Update or append a pending update entry */ error = bhnd_nvram_plist_replace_val(path->pending, name, prop_val); if (error) goto cleanup; } /* Success */ error = 0; cleanup: if (namebuf != NULL) bhnd_nv_free(namebuf); if (prop_val != NULL) bhnd_nvram_val_release(prop_val); return (error); } /** * Iterate over all variable cookiep values retrievable from the backing * data store in @p path. * * @warning Pending updates in @p path are ignored by this function. * * @param sc The NVRAM store. * @param path The NVRAM path to be iterated. * @param[in,out] indexp A pointer to an opaque indexp value previously * returned by bhnd_nvstore_path_data_next(), or a * NULL value to begin iteration.
* * @return Returns the next variable name, or NULL if there are no more * variables defined in @p path. */ void * bhnd_nvstore_path_data_next(struct bhnd_nvram_store *sc, bhnd_nvstore_path *path, void **indexp) { void **index_ref; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* No index */ if (path->index == NULL) { /* An index is required for all non-empty, non-root path * instances */ BHND_NV_ASSERT(bhnd_nvstore_is_root_path(sc, path), ("missing index for non-root path %s", path->path_str)); /* Iterate NVRAM data directly, using the NVRAM data's cookiep * value as our indexp context */ if ((bhnd_nvram_data_next(sc->data, indexp)) == NULL) return (NULL); return (*indexp); } /* Empty index */ if (path->index->count == 0) return (NULL); if (*indexp == NULL) { /* First index entry */ index_ref = &path->index->cookiep[0]; } else { size_t idxpos; /* Advance to next index entry */ index_ref = *indexp; index_ref++; /* Hit end of index? */ BHND_NV_ASSERT(index_ref > path->index->cookiep, ("invalid indexp")); idxpos = (index_ref - path->index->cookiep); if (idxpos >= path->index->count) return (NULL); } /* Provide new index position */ *indexp = index_ref; /* Return the data's cookiep value */ return (*index_ref); } /** * Perform a lookup of @p name in the backing NVRAM data for @p path, * returning the associated cookiep value, or NULL if the variable is not found * in the backing NVRAM data. * * @warning Pending updates in @p path are ignored by this function. * * @param sc The NVRAM store from which NVRAM values will be queried. * @param path The path to be queried. * @param name The variable name to be queried. * * @retval non-NULL success * @retval NULL if @p name is not found in @p path. */ void * bhnd_nvstore_path_data_lookup(struct bhnd_nvram_store *sc, bhnd_nvstore_path *path, const char *name) { BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* No index */ if (path->index == NULL) { /* An index is required for all non-empty, non-root path * instances */ BHND_NV_ASSERT(bhnd_nvstore_is_root_path(sc, path), ("missing index for non-root path %s", path->path_str)); /* Look up directly in NVRAM data */ return (bhnd_nvram_data_find(sc->data, name)); } /* Otherwise, delegate to an index-based lookup */ return (bhnd_nvstore_index_lookup(sc, path->index, name)); } /** * Perform an index lookup of @p name, returning the associated cookiep * value, or NULL if the variable does not exist. * * @param sc The NVRAM store from which NVRAM values will be queried. * @param index The index to be queried. * @param name The variable name to be queried. * * @retval non-NULL success * @retval NULL if @p name is not found in @p index. */ void * bhnd_nvstore_index_lookup(struct bhnd_nvram_store *sc, bhnd_nvstore_index *index, const char *name) { void *cookiep; const char *indexed_name; size_t min, mid, max; uint32_t data_caps; int order; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); BHND_NV_ASSERT(index != NULL, ("NULL index")); /* * Locate the requested variable using a binary search.
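The lookup that follows pairs a plain binary search with a backwards walk, because the index may legitimately contain duplicate names sorted by descending precedence and the first declaration must win. A compact, hedged userland sketch of the same walk-back idea over a plain int array (toy data, not tree code):

#include <sys/types.h>
#include <stdio.h>

/* Binary-search a sorted array, then walk backwards so the FIRST of
 * several equal entries wins, mirroring the duplicate-name precedence
 * rule below.  Returns -1 if the key is absent. */
static ssize_t
find_first(const int *a, size_t n, int key)
{
	size_t min, mid, max;

	if (n == 0)
		return (-1);
	min = 0;
	max = n - 1;
	while (max >= min) {
		mid = (min + max) / 2;
		if (a[mid] < key) {
			min = mid + 1;
		} else if (a[mid] > key) {
			if (mid == 0)
				break;		/* lower partition is empty */
			max = mid - 1;
		} else {
			while (mid > 0 && a[mid - 1] == key)
				mid--;		/* prefer the earliest duplicate */
			return ((ssize_t)mid);
		}
	}
	return (-1);
}

int
main(void)
{
	int a[] = { 1, 3, 3, 3, 7 };

	printf("%zd\n", find_first(a, 5, 3));	/* prints 1 */
	return (0);
}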
*/ if (index->count == 0) return (NULL); data_caps = sc->data_caps; min = 0; max = index->count - 1; while (max >= min) { /* Select midpoint */ mid = (min + max) / 2; cookiep = index->cookiep[mid]; /* Fetch variable name */ indexed_name = bhnd_nvram_data_getvar_name(sc->data, cookiep); /* Trim any path prefix */ if (data_caps & BHND_NVRAM_DATA_CAP_DEVPATHS) indexed_name = bhnd_nvram_trim_path_name(indexed_name); /* Determine which side of the partition to search */ order = strcmp(indexed_name, name); if (order < 0) { /* Search upper partition */ min = mid + 1; } else if (order > 0) { /* Search (non-empty) lower partition */ if (mid == 0) break; max = mid - 1; } else if (order == 0) { size_t idx; /* * Match found. * * If this happens to be a key with multiple definitions * in the backing store, we need to find the entry with * the highest declaration precedence. * * Duplicates are sorted in order of descending * precedence; to find the highest precedence entry, * we search backwards through the index. */ idx = mid; while (idx > 0) { void *dup_cookiep; const char *dup_name; /* Fetch preceding index entry */ idx--; dup_cookiep = index->cookiep[idx]; dup_name = bhnd_nvram_data_getvar_name(sc->data, dup_cookiep); /* Trim any path prefix */ if (data_caps & BHND_NVRAM_DATA_CAP_DEVPATHS) { dup_name = bhnd_nvram_trim_path_name( dup_name); } /* If no match, current cookiep is the variable * definition with the highest precedence */ if (strcmp(indexed_name, dup_name) != 0) return (cookiep); /* Otherwise, prefer this earlier definition, * and keep searching for a higher-precedence * definition */ cookiep = dup_cookiep; } return (cookiep); } } /* Not found */ return (NULL); } /** * Return the device path entry registered for @p path, if any. * * @param sc The NVRAM store to be queried. * @param path The device path to search for. * @param path_len The length of @p path. * * @retval non-NULL if found. * @retval NULL if not found. */ bhnd_nvstore_path * bhnd_nvstore_get_path(struct bhnd_nvram_store *sc, const char *path, size_t path_len) { bhnd_nvstore_path_list *plist; bhnd_nvstore_path *p; uint32_t h; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Use hash lookup */ h = hash32_strn(path, path_len, HASHINIT); plist = &sc->paths[h % nitems(sc->paths)]; LIST_FOREACH(p, plist, np_link) { /* Check for prefix match */ if (strncmp(p->path_str, path, path_len) != 0) continue; /* Check for complete match */ if (strnlen(path, path_len) != strlen(p->path_str)) continue; return (p); } /* Not found */ return (NULL); } /** * Resolve @p aval to its corresponding device path entry, if any. * * @param sc The NVRAM store to be queried. * @param aval The device path alias value to search for. * * @retval non-NULL if found. * @retval NULL if not found. */ bhnd_nvstore_path * bhnd_nvstore_resolve_path_alias(struct bhnd_nvram_store *sc, u_long aval) { bhnd_nvstore_alias *alias; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Fetch alias entry */ if ((alias = bhnd_nvstore_get_alias(sc, aval)) == NULL) return (NULL); return (alias->path); } /** * Register a device path entry for the path referenced by variable name * @p info, if any. * * @param sc The NVRAM store to be updated. * @param info The NVRAM variable name info. * @param cookiep The NVRAM variable's cookiep value. * * @retval 0 if the path was successfully registered, or an identical * path or alias entry exists. * @retval EEXIST if a conflicting entry already exists for the path or * alias referenced by @p info.
* @retval ENOENT if @p info contains a dangling alias reference. * @retval EINVAL if @p info contains an unsupported bhnd_nvstore_var_type * and bhnd_nvstore_path_type combination. * @retval ENOMEM if allocation fails. */ int bhnd_nvstore_var_register_path(struct bhnd_nvram_store *sc, bhnd_nvstore_name_info *info, void *cookiep) { switch (info->type) { case BHND_NVSTORE_VAR: /* Variable */ switch (info->path_type) { case BHND_NVSTORE_PATH_STRING: /* Variable contains a full path string * (pci/1/1/varname); register the path */ return (bhnd_nvstore_register_path(sc, info->path.str.value, info->path.str.value_len)); case BHND_NVSTORE_PATH_ALIAS: /* Variable contains an alias reference (0:varname). * There's no path to register */ return (0); } BHND_NV_PANIC("unsupported path type %d", info->path_type); break; case BHND_NVSTORE_ALIAS_DECL: /* Alias declaration */ return (bhnd_nvstore_register_alias(sc, info, cookiep)); } BHND_NV_PANIC("unsupported var type %d", info->type); } /** * Resolve the device path entry referenced by @p info. * * @param sc The NVRAM store to be updated. * @param info Variable name information descriptor containing * the path or path alias to be resolved. * * @retval non-NULL if found. * @retval NULL if not found. */ bhnd_nvstore_path * bhnd_nvstore_var_get_path(struct bhnd_nvram_store *sc, bhnd_nvstore_name_info *info) { switch (info->path_type) { case BHND_NVSTORE_PATH_STRING: return (bhnd_nvstore_get_path(sc, info->path.str.value, info->path.str.value_len)); case BHND_NVSTORE_PATH_ALIAS: return (bhnd_nvstore_resolve_path_alias(sc, info->path.alias.value)); } BHND_NV_PANIC("unsupported path type %d", info->path_type); } /** * Return the device path alias entry registered for @p alias_val, if any. * * @param sc The NVRAM store to be queried. * @param alias_val The alias value to search for. * * @retval non-NULL if found. * @retval NULL if not found. */ bhnd_nvstore_alias * bhnd_nvstore_get_alias(struct bhnd_nvram_store *sc, u_long alias_val) { bhnd_nvstore_alias_list *alist; bhnd_nvstore_alias *alias; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Can use hash lookup */ alist = &sc->aliases[alias_val % nitems(sc->aliases)]; LIST_FOREACH(alias, alist, na_link) { if (alias->alias == alias_val) return (alias); } /* Not found */ return (NULL); } /** * Return the device path alias entry registered for @p path, if any. * * @param sc The NVRAM store to be queried. * @param path The alias path to search for. * * @retval non-NULL if found. * @retval NULL if not found. */ bhnd_nvstore_alias * bhnd_nvstore_find_alias(struct bhnd_nvram_store *sc, const char *path) { bhnd_nvstore_alias *alias; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Have to scan the full table */ for (size_t i = 0; i < nitems(sc->aliases); i++) { LIST_FOREACH(alias, &sc->aliases[i], na_link) { if (strcmp(alias->path->path_str, path) == 0) return (alias); } } /* Not found */ return (NULL); } /** * Register a device path entry for @p path. * * @param sc The NVRAM store to be updated. * @param path_str The absolute device path string. * @param path_len The length of @p path_str. * * @retval 0 if the path was successfully registered, or an identical * path/alias entry already exists. * @retval ENOMEM if allocation fails. */ int bhnd_nvstore_register_path(struct bhnd_nvram_store *sc, const char *path_str, size_t path_len) { bhnd_nvstore_path_list *plist; bhnd_nvstore_path *path; uint32_t h; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); /* Already exists? 
*/ if (bhnd_nvstore_get_path(sc, path_str, path_len) != NULL) return (0); /* Can't represent more than SIZE_MAX paths */ if (sc->num_paths == SIZE_MAX) return (ENOMEM); /* Allocate new entry */ path = bhnd_nvstore_path_new(path_str, path_len); if (path == NULL) return (ENOMEM); /* Insert in path hash table */ h = hash32_str(path->path_str, HASHINIT); plist = &sc->paths[h % nitems(sc->paths)]; LIST_INSERT_HEAD(plist, path, np_link); /* Increment path count */ sc->num_paths++; return (0); } /** * Register a device path alias for an NVRAM 'devpathX' variable. * * The path value for the alias will be fetched from the backing NVRAM data. * * @param sc The NVRAM store to be updated. * @param info The NVRAM variable name info. * @param cookiep The NVRAM variable's cookiep value. * * @retval 0 if the alias was successfully registered, or an * identical alias entry exists. * @retval EEXIST if a conflicting alias or path entry already exists. * @retval EINVAL if @p info is not a BHND_NVSTORE_ALIAS_DECL or does * not contain a BHND_NVSTORE_PATH_ALIAS entry. * @retval ENOMEM if allocation fails. */ int bhnd_nvstore_register_alias(struct bhnd_nvram_store *sc, const bhnd_nvstore_name_info *info, void *cookiep) { bhnd_nvstore_alias_list *alist; bhnd_nvstore_alias *alias; bhnd_nvstore_path *path; char *path_str; size_t path_len; int error; BHND_NVSTORE_LOCK_ASSERT(sc, MA_OWNED); path_str = NULL; alias = NULL; /* Can't represent more than SIZE_MAX aliases */ if (sc->num_aliases == SIZE_MAX) return (ENOMEM); /* Must be an alias declaration */ if (info->type != BHND_NVSTORE_ALIAS_DECL) return (EINVAL); if (info->path_type != BHND_NVSTORE_PATH_ALIAS) return (EINVAL); /* Fetch the devpath variable's value length */ error = bhnd_nvram_data_getvar(sc->data, cookiep, NULL, &path_len, BHND_NVRAM_TYPE_STRING); if (error) return (ENOMEM); /* Allocate path string buffer */ if ((path_str = bhnd_nv_malloc(path_len)) == NULL) return (ENOMEM); /* Decode to our new buffer */ error = bhnd_nvram_data_getvar(sc->data, cookiep, path_str, &path_len, BHND_NVRAM_TYPE_STRING); if (error) goto failed; /* Trim trailing '/' character(s) from the path length */ path_len = strnlen(path_str, path_len); while (path_len > 0 && path_str[path_len-1] == '/') { path_str[path_len-1] = '\0'; path_len--; } /* Is a conflicting alias entry already registered for this alias * value? */ alias = bhnd_nvstore_get_alias(sc, info->path.alias.value); if (alias != NULL) { if (alias->cookiep != cookiep || strcmp(alias->path->path_str, path_str) != 0) { error = EEXIST; goto failed; } } /* Is a conflicting entry already registered for the alias path? 
*/ if ((alias = bhnd_nvstore_find_alias(sc, path_str)) != NULL) { if (alias->alias != info->path.alias.value || alias->cookiep != cookiep || strcmp(alias->path->path_str, path_str) != 0) { error = EEXIST; goto failed; } } /* Get (or register) the target path entry */ path = bhnd_nvstore_get_path(sc, path_str, path_len); if (path == NULL) { error = bhnd_nvstore_register_path(sc, path_str, path_len); if (error) goto failed; path = bhnd_nvstore_get_path(sc, path_str, path_len); BHND_NV_ASSERT(path != NULL, ("missing registered path")); } /* Allocate alias entry */ alias = bhnd_nv_calloc(1, sizeof(*alias)); if (alias == NULL) { error = ENOMEM; goto failed; } alias->path = path; alias->cookiep = cookiep; alias->alias = info->path.alias.value; /* Insert in alias hash table */ alist = &sc->aliases[alias->alias % nitems(sc->aliases)]; LIST_INSERT_HEAD(alist, alias, na_link); /* Increment alias count */ sc->num_aliases++; bhnd_nv_free(path_str); return (0); failed: if (path_str != NULL) bhnd_nv_free(path_str); if (alias != NULL) bhnd_nv_free(alias); return (error); } /** * If @p child is equal to or a child path of @p parent, return a pointer to * @p child's path component(s) relative to @p parent; otherwise, return NULL. */ const char * bhnd_nvstore_parse_relpath(const char *parent, const char *child) { size_t prefix_len; /* All paths have an implicit leading '/'; this allows us to treat * our manufactured root path of "/" as a prefix to all NVRAM-defined * paths (which do not necessarily include a leading '/') */ if (*parent == '/') parent++; if (*child == '/') child++; /* Is parent a prefix of child? */ prefix_len = strlen(parent); if (strncmp(parent, child, prefix_len) != 0) return (NULL); /* A zero-length prefix matches everything */ if (prefix_len == 0) return (child); /* Is child equal to parent? */ if (child[prefix_len] == '\0') return (child + prefix_len); /* Is child actually a child of parent? */ if (child[prefix_len] == '/') return (child + prefix_len + 1); /* No match (e.g. parent=/foo..., child=/fooo...) */ return (NULL); } /** * Parse a raw NVRAM variable name and return its @p entry_type, its * type-specific @p prefix (e.g. '0:', 'pci/1/1', 'devpath'), and its * type-specific @p suffix (e.g. 'varname', '0'). * * @param name The NVRAM variable name to be parsed. This * value must remain valid for the lifetime of * @p info. * @param type The NVRAM name type -- either INTERNAL for names * parsed from backing NVRAM data, or EXTERNAL for * names provided by external NVRAM store clients. * @param data_caps The backing NVRAM data capabilities * (see bhnd_nvram_data_caps()). * @param[out] info On success, the parsed variable name info. * * @retval 0 success * @retval non-zero if parsing @p name otherwise fails, a regular unix * error code will be returned. */ int bhnd_nvstore_parse_name_info(const char *name, bhnd_nvstore_name_type type, uint32_t data_caps, bhnd_nvstore_name_info *info) { const char *p; char *endp; /* Skip path parsing? */ if (data_caps & BHND_NVRAM_DATA_CAP_DEVPATHS) { /* devpath declaration?
(devpath0=pci/1/1) */ if (strncmp(name, "devpath", strlen("devpath")) == 0) { u_long alias; /* Perform standard validation on the relative * variable name */ if (type != BHND_NVSTORE_NAME_INTERNAL && !bhnd_nvram_validate_name(name)) { return (ENOENT); } /* Parse alias value that should follow a 'devpath' * prefix */ p = name + strlen("devpath"); alias = strtoul(p, &endp, 10); if (endp != p && *endp == '\0') { info->type = BHND_NVSTORE_ALIAS_DECL; info->path_type = BHND_NVSTORE_PATH_ALIAS; info->name = name; info->path.alias.value = alias; return (0); } } /* device aliased variable? (0:varname) */ if (bhnd_nv_isdigit(*name)) { u_long alias; /* Parse '0:' alias prefix */ alias = strtoul(name, &endp, 10); if (endp != name && *endp == ':') { /* Perform standard validation on the relative * variable name */ if (type != BHND_NVSTORE_NAME_INTERNAL && !bhnd_nvram_validate_name(name)) { return (ENOENT); } info->type = BHND_NVSTORE_VAR; info->path_type = BHND_NVSTORE_PATH_ALIAS; /* name follows 0: prefix */ info->name = endp + 1; info->path.alias.value = alias; return (0); } } /* device variable? (pci/1/1/varname) */ if ((p = strrchr(name, '/')) != NULL) { const char *path, *relative_name; size_t path_len; /* Determine the path length; 'p' points at the last * path separator in 'name' */ path_len = p - name; path = name; /* The relative variable name directly follows the * final path separator '/' */ relative_name = path + path_len + 1; /* Now that we calculated the name offset, exclude all * trailing '/' characters from the path length */ while (path_len > 0 && path[path_len-1] == '/') path_len--; /* Perform standard validation on the relative * variable name */ if (type != BHND_NVSTORE_NAME_INTERNAL && !bhnd_nvram_validate_name(relative_name)) { return (ENOENT); } /* Initialize result with pointers into the name * buffer */ info->type = BHND_NVSTORE_VAR; info->path_type = BHND_NVSTORE_PATH_STRING; info->name = relative_name; info->path.str.value = path; info->path.str.value_len = path_len; return (0); } } /* If all other parsing fails, the result is a simple variable with * an implicit path of "/" */ if (type != BHND_NVSTORE_NAME_INTERNAL && !bhnd_nvram_validate_name(name)) { /* Invalid relative name */ return (ENOENT); } info->type = BHND_NVSTORE_VAR; info->path_type = BHND_NVSTORE_PATH_STRING; info->name = name; info->path.str.value = BHND_NVSTORE_ROOT_PATH; info->path.str.value_len = BHND_NVSTORE_ROOT_PATH_LEN; return (0); } diff --git a/sys/dev/drm2/drm_linux_list_sort.c b/sys/dev/drm2/drm_linux_list_sort.c index f9a64154c796..04cdf28180c8 100644 --- a/sys/dev/drm2/drm_linux_list_sort.c +++ b/sys/dev/drm2/drm_linux_list_sort.c @@ -1,76 +1,76 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011 The FreeBSD Foundation * * This software was developed by Konstantin Belousov under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
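For reference, bhnd_nvstore_parse_name_info() above distinguishes four name forms, in this order: alias declarations (devpath0=pci/1/1), alias-prefixed variables (0:varname), path-prefixed variables (pci/1/1/varname), and bare root-path variables. A hedged toy re-implementation of just that classification order, with validation and the output fields omitted and the sample names made up:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *
classify(const char *name)
{
	char *endp;

	/* devpath declaration: "devpath" followed only by digits */
	if (strncmp(name, "devpath", 7) == 0) {
		(void)strtoul(name + 7, &endp, 10);
		if (endp != name + 7 && *endp == '\0')
			return ("alias declaration");
	}
	/* alias-prefixed: digits followed by ':' */
	if (isdigit((unsigned char)*name)) {
		(void)strtoul(name, &endp, 10);
		if (endp != name && *endp == ':')
			return ("alias-prefixed variable");
	}
	/* path-prefixed: contains a '/' separator */
	if (strrchr(name, '/') != NULL)
		return ("path-prefixed variable");
	return ("root-path variable");
}

int
main(void)
{
	const char *names[] =
	    { "devpath0", "0:boardtype", "pci/1/1/macaddr", "boardrev" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-16s -> %s\n", names[i], classify(names[i]));
	return (0);
}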
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); struct drm_list_sort_thunk { int (*cmp)(void *, struct list_head *, struct list_head *); void *priv; }; static int -drm_le_cmp(void *priv, const void *d1, const void *d2) +drm_le_cmp(const void *d1, const void *d2, void *priv) { struct list_head *le1, *le2; struct drm_list_sort_thunk *thunk; thunk = priv; le1 = *(__DECONST(struct list_head **, d1)); le2 = *(__DECONST(struct list_head **, d2)); return ((thunk->cmp)(thunk->priv, le1, le2)); } /* * Punt and use array sort. */ void drm_list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, struct list_head *b)) { struct drm_list_sort_thunk thunk; struct list_head **ar, *le; int count, i; count = 0; list_for_each(le, head) count++; ar = malloc(sizeof(struct list_head *) * count, M_TEMP, M_WAITOK); i = 0; list_for_each(le, head) ar[i++] = le; thunk.cmp = cmp; thunk.priv = priv; - qsort_r(ar, count, sizeof(struct list_head *), &thunk, drm_le_cmp); + qsort_r(ar, count, sizeof(struct list_head *), drm_le_cmp, &thunk); INIT_LIST_HEAD(head); for (i = 0; i < count; i++) list_add_tail(ar[i], head); free(ar, M_TEMP); } diff --git a/sys/libkern/qsort.c b/sys/libkern/qsort.c index 7602127a59d6..66ca826e265c 100644 --- a/sys/libkern/qsort.c +++ b/sys/libkern/qsort.c @@ -1,223 +1,223 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #ifdef I_AM_QSORT_R -typedef int cmp_t(void *, const void *, const void *); +typedef int cmp_t(const void *, const void *, void *); #else typedef int cmp_t(const void *, const void *); #endif static inline char *med3(char *, char *, char *, cmp_t *, void *); static inline void swapfunc(char *, char *, size_t, int, int); /* * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". */ #define swapcode(TYPE, parmi, parmj, n) { \ size_t i = (n) / sizeof (TYPE); \ TYPE *pi = (TYPE *) (parmi); \ TYPE *pj = (TYPE *) (parmj); \ do { \ TYPE t = *pi; \ *pi++ = *pj; \ *pj++ = t; \ } while (--i > 0); \ } #define SWAPINIT(TYPE, a, es) swaptype_ ## TYPE = \ ((char *)a - (char *)0) % sizeof(TYPE) || \ es % sizeof(TYPE) ? 2 : es == sizeof(TYPE) ? 0 : 1; static inline void swapfunc(char *a, char *b, size_t n, int swaptype_long, int swaptype_int) { if (swaptype_long <= 1) swapcode(long, a, b, n) else if (swaptype_int <= 1) swapcode(int, a, b, n) else swapcode(char, a, b, n) } #define swap(a, b) \ if (swaptype_long == 0) { \ long t = *(long *)(a); \ *(long *)(a) = *(long *)(b); \ *(long *)(b) = t; \ } else if (swaptype_int == 0) { \ int t = *(int *)(a); \ *(int *)(a) = *(int *)(b); \ *(int *)(b) = t; \ } else \ swapfunc(a, b, es, swaptype_long, swaptype_int) #define vecswap(a, b, n) \ if ((n) > 0) swapfunc(a, b, n, swaptype_long, swaptype_int) #ifdef I_AM_QSORT_R -#define CMP(t, x, y) (cmp((t), (x), (y))) +#define CMP(t, x, y) (cmp((x), (y), (t))) #else #define CMP(t, x, y) (cmp((x), (y))) #endif static inline char * med3(char *a, char *b, char *c, cmp_t *cmp, void *thunk #ifndef I_AM_QSORT_R __unused #endif ) { return CMP(thunk, a, b) < 0 ? (CMP(thunk, b, c) < 0 ? b : (CMP(thunk, a, c) < 0 ? c : a )) :(CMP(thunk, b, c) > 0 ? b : (CMP(thunk, a, c) < 0 ? 
a : c )); } #ifdef I_AM_QSORT_R void -qsort_r(void *a, size_t n, size_t es, void *thunk, cmp_t *cmp) +(qsort_r)(void *a, size_t n, size_t es, cmp_t *cmp, void *thunk) #else #define thunk NULL void qsort(void *a, size_t n, size_t es, cmp_t *cmp) #endif { char *pa, *pb, *pc, *pd, *pl, *pm, *pn; size_t d1, d2; int cmp_result; int swaptype_long, swaptype_int, swap_cnt; loop: SWAPINIT(long, a, es); SWAPINIT(int, a, es); swap_cnt = 0; if (n < 7) { for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es) for (pl = pm; pl > (char *)a && CMP(thunk, pl - es, pl) > 0; pl -= es) swap(pl, pl - es); return; } pm = (char *)a + (n / 2) * es; if (n > 7) { pl = a; pn = (char *)a + (n - 1) * es; if (n > 40) { size_t d = (n / 8) * es; pl = med3(pl, pl + d, pl + 2 * d, cmp, thunk); pm = med3(pm - d, pm, pm + d, cmp, thunk); pn = med3(pn - 2 * d, pn - d, pn, cmp, thunk); } pm = med3(pl, pm, pn, cmp, thunk); } swap(a, pm); pa = pb = (char *)a + es; pc = pd = (char *)a + (n - 1) * es; for (;;) { while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) { if (cmp_result == 0) { swap_cnt = 1; swap(pa, pb); pa += es; } pb += es; } while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) { if (cmp_result == 0) { swap_cnt = 1; swap(pc, pd); pd -= es; } pc -= es; } if (pb > pc) break; swap(pb, pc); swap_cnt = 1; pb += es; pc -= es; } if (swap_cnt == 0) { /* Switch to insertion sort */ for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es) for (pl = pm; pl > (char *)a && CMP(thunk, pl - es, pl) > 0; pl -= es) swap(pl, pl - es); return; } pn = (char *)a + n * es; d1 = MIN(pa - (char *)a, pb - pa); vecswap(a, pb - d1, d1); d1 = MIN(pd - pc, pn - pd - es); vecswap(pb, pn - d1, d1); d1 = pb - pa; d2 = pd - pc; if (d1 <= d2) { /* Recurse on left partition, then iterate on right partition */ if (d1 > es) { #ifdef I_AM_QSORT_R - qsort_r(a, d1 / es, es, thunk, cmp); + qsort_r(a, d1 / es, es, cmp, thunk); #else qsort(a, d1 / es, es, cmp); #endif } if (d2 > es) { /* Iterate rather than recurse to save stack space */ /* qsort(pn - d2, d2 / es, es, cmp); */ a = pn - d2; n = d2 / es; goto loop; } } else { /* Recurse on right partition, then iterate on left partition */ if (d2 > es) { #ifdef I_AM_QSORT_R - qsort_r(pn - d2, d2 / es, es, thunk, cmp); + qsort_r(pn - d2, d2 / es, es, cmp, thunk); #else qsort(pn - d2, d2 / es, es, cmp); #endif } if (d1 > es) { /* Iterate rather than recurse to save stack space */ /* qsort(a, d1 / es, es, cmp); */ n = d1 / es; goto loop; } } } diff --git a/sys/netgraph/ng_ppp.c b/sys/netgraph/ng_ppp.c index 6c61bcda0ae3..aafed858a26b 100644 --- a/sys/netgraph/ng_ppp.c +++ b/sys/netgraph/ng_ppp.c @@ -1,2644 +1,2644 @@ /*- * SPDX-License-Identifier: BSD-2-Clause AND BSD-2-Clause-FreeBSD * * Copyright (c) 1996-2000 Whistle Communications, Inc. * All rights reserved. * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; * provided, however, that: * 1. Any and all reproductions of the source or object code must include the * copyright notice above and the following disclaimer of warranties; and * 2. No rights are granted, in any manner or form, to use Whistle * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. 
* * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, * INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT. * WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY * REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS * SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE. * IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES * RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING * WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * Copyright (c) 2007 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Authors: Archie Cobbs , Alexander Motin * * $FreeBSD$ * $Whistle: ng_ppp.c,v 1.24 1999/11/01 09:24:52 julian Exp $ */ /* * PPP node type data-flow. 
 *
 *          hook      xmit        layer        recv      hook
 * ------------------------------------------------------------
 *          inet ->                               -> inet
 *          ipv6 ->                               -> ipv6
 *          ipx ->         proto                  -> ipx
 *          atalk ->                              -> atalk
 *          bypass ->                             -> bypass
 *            -hcomp_xmit()----------proto_recv()-
 *          vjc_ip <-                             <- vjc_ip
 *          vjc_comp ->    header compression     -> vjc_comp
 *          vjc_uncomp ->                         -> vjc_uncomp
 *          vjc_vjip ->
 *            -comp_xmit()-----------hcomp_recv()-
 *          compress <-    compression            <- decompress
 *          compress ->                           -> decompress
 *            -crypt_xmit()-----------comp_recv()-
 *          encrypt <-     encryption             <- decrypt
 *          encrypt ->                            -> decrypt
 *            -ml_xmit()-------------crypt_recv()-
 *                         multilink
 *            -link_xmit()--------------ml_recv()-
 *          linkX <-       link                   <- linkX
 *
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef NG_SEPARATE_MALLOC
static MALLOC_DEFINE(M_NETGRAPH_PPP, "netgraph_ppp", "netgraph ppp node");
#else
#define M_NETGRAPH_PPP M_NETGRAPH
#endif

#define PROT_VALID(p)		(((p) & 0x0101) == 0x0001)
#define PROT_COMPRESSABLE(p)	(((p) & 0xff00) == 0x0000)

/* Some PPP protocol numbers we're interested in */
#define PROT_ATALK		0x0029
#define PROT_COMPD		0x00fd
#define PROT_CRYPTD		0x0053
#define PROT_IP			0x0021
#define PROT_IPV6		0x0057
#define PROT_IPX		0x002b
#define PROT_LCP		0xc021
#define PROT_MP			0x003d
#define PROT_VJCOMP		0x002d
#define PROT_VJUNCOMP		0x002f

/* Multilink PPP definitions */
#define MP_INITIAL_SEQ		0		/* per RFC 1990 */
#define MP_MIN_LINK_MRU		32

#define MP_SHORT_SEQ_MASK	0x00000fff	/* short seq # mask */
#define MP_SHORT_SEQ_HIBIT	0x00000800	/* short seq # high bit */
#define MP_SHORT_FIRST_FLAG	0x00008000	/* first fragment in frame */
#define MP_SHORT_LAST_FLAG	0x00004000	/* last fragment in frame */

#define MP_LONG_SEQ_MASK	0x00ffffff	/* long seq # mask */
#define MP_LONG_SEQ_HIBIT	0x00800000	/* long seq # high bit */
#define MP_LONG_FIRST_FLAG	0x80000000	/* first fragment in frame */
#define MP_LONG_LAST_FLAG	0x40000000	/* last fragment in frame */

#define MP_NOSEQ		0x7fffffff	/* impossible sequence number */

/* Sign extension of MP sequence numbers */
#define MP_SHORT_EXTEND(s)	(((s) & MP_SHORT_SEQ_HIBIT) ?		\
				    ((s) | ~MP_SHORT_SEQ_MASK)		\
				    : ((s) & MP_SHORT_SEQ_MASK))
#define MP_LONG_EXTEND(s)	(((s) & MP_LONG_SEQ_HIBIT) ?		\
				    ((s) | ~MP_LONG_SEQ_MASK)		\
				    : ((s) & MP_LONG_SEQ_MASK))

/* Comparison of MP sequence numbers. Note: all sequence numbers
   except priv->xseq are stored with the sign bit extended. */
#define MP_SHORT_SEQ_DIFF(x,y)	MP_SHORT_EXTEND((x) - (y))
#define MP_LONG_SEQ_DIFF(x,y)	MP_LONG_EXTEND((x) - (y))

#define MP_RECV_SEQ_DIFF(priv,x,y)					\
				((priv)->conf.recvShortSeq ?		\
				    MP_SHORT_SEQ_DIFF((x), (y)) :	\
				    MP_LONG_SEQ_DIFF((x), (y)))

/* Increment receive sequence number */
#define MP_NEXT_RECV_SEQ(priv,seq)					\
				((priv)->conf.recvShortSeq ?		\
				    MP_SHORT_EXTEND((seq) + 1) :	\
				    MP_LONG_EXTEND((seq) + 1))

/* Don't fragment transmitted packets to parts smaller than this */
#define MP_MIN_FRAG_LEN		32

/* Maximum fragment reassembly queue length */
#define MP_MAX_QUEUE_LEN	128

/* Fragment queue scanner period */
#define MP_FRAGTIMER_INTERVAL	(hz/2)

/* Average link overhead. XXX: Should be given by user-level */
#define MP_AVERAGE_LINK_OVERHEAD	16

/* Keep this equal to the number of entries in ng_ppp_hook_names below! */
#define HOOK_INDEX_MAX		13

/* We store incoming fragments this way */
struct ng_ppp_frag {
	int				seq;		/* fragment seq# */
	uint8_t				first;		/* First in packet? */
	uint8_t				last;		/* Last in packet? */
	struct timeval			timestamp;	/* time of reception */
	struct mbuf			*data;		/* Fragment data */
	TAILQ_ENTRY(ng_ppp_frag)	f_qent;		/* Fragment queue */
};

/* Per-link private information */
struct ng_ppp_link {
	struct ng_ppp_link_conf	conf;		/* link configuration */
	struct ng_ppp_link_stat64	stats;	/* link stats */
	hook_p			hook;		/* connection to link data */
	int32_t			seq;		/* highest rec'd seq# - MSEQ */
	uint32_t		latency;	/* calculated link latency */
	struct timeval		lastWrite;	/* time of last write for MP */
	int			bytesInQueue;	/* bytes in the output queue for MP */
};

/* Total per-node private information */
struct ng_ppp_private {
	struct ng_ppp_bund_conf	conf;			/* bundle config */
	struct ng_ppp_link_stat64	bundleStats;	/* bundle stats */
	struct ng_ppp_link	links[NG_PPP_MAX_LINKS];/* per-link info */
	int32_t			xseq;			/* next out MP seq # */
	int32_t			mseq;			/* min links[i].seq */
	uint16_t		activeLinks[NG_PPP_MAX_LINKS];	/* indices */
	uint16_t		numActiveLinks;		/* how many links up */
	uint16_t		lastLink;		/* for round robin */
	uint8_t			vjCompHooked;		/* VJ comp hooked up? */
	uint8_t			allLinksEqual;		/* all xmit the same? */
	hook_p			hooks[HOOK_INDEX_MAX];	/* non-link hooks */
	struct ng_ppp_frag	fragsmem[MP_MAX_QUEUE_LEN]; /* fragment storage */
	TAILQ_HEAD(ng_ppp_fraglist, ng_ppp_frag)	/* fragment queue */
				frags;
	TAILQ_HEAD(ng_ppp_fragfreelist, ng_ppp_frag)	/* free fragment queue */
				fragsfree;
	struct callout		fragTimer;		/* frag queue check */
	struct mtx		rmtx;			/* recv mutex */
	struct mtx		xmtx;			/* xmit mutex */
};
typedef struct ng_ppp_private *priv_p;

/* Netgraph node methods */
static ng_constructor_t	ng_ppp_constructor;
static ng_rcvmsg_t	ng_ppp_rcvmsg;
static ng_shutdown_t	ng_ppp_shutdown;
static ng_newhook_t	ng_ppp_newhook;
static ng_rcvdata_t	ng_ppp_rcvdata;
static ng_disconnect_t	ng_ppp_disconnect;

static ng_rcvdata_t	ng_ppp_rcvdata_inet;
static ng_rcvdata_t	ng_ppp_rcvdata_inet_fast;
static ng_rcvdata_t	ng_ppp_rcvdata_ipv6;
static ng_rcvdata_t	ng_ppp_rcvdata_ipx;
static ng_rcvdata_t	ng_ppp_rcvdata_atalk;
static ng_rcvdata_t	ng_ppp_rcvdata_bypass;
static ng_rcvdata_t	ng_ppp_rcvdata_vjc_ip;
static ng_rcvdata_t	ng_ppp_rcvdata_vjc_comp;
static ng_rcvdata_t	ng_ppp_rcvdata_vjc_uncomp;
static ng_rcvdata_t	ng_ppp_rcvdata_vjc_vjip;
static ng_rcvdata_t	ng_ppp_rcvdata_compress;
static ng_rcvdata_t	ng_ppp_rcvdata_decompress;
static ng_rcvdata_t	ng_ppp_rcvdata_encrypt;
static ng_rcvdata_t	ng_ppp_rcvdata_decrypt;

/* We use integer indices to refer to the non-link hooks.
*/ static const struct { char *const name; ng_rcvdata_t *fn; } ng_ppp_hook_names[] = { #define HOOK_INDEX_ATALK 0 { NG_PPP_HOOK_ATALK, ng_ppp_rcvdata_atalk }, #define HOOK_INDEX_BYPASS 1 { NG_PPP_HOOK_BYPASS, ng_ppp_rcvdata_bypass }, #define HOOK_INDEX_COMPRESS 2 { NG_PPP_HOOK_COMPRESS, ng_ppp_rcvdata_compress }, #define HOOK_INDEX_ENCRYPT 3 { NG_PPP_HOOK_ENCRYPT, ng_ppp_rcvdata_encrypt }, #define HOOK_INDEX_DECOMPRESS 4 { NG_PPP_HOOK_DECOMPRESS, ng_ppp_rcvdata_decompress }, #define HOOK_INDEX_DECRYPT 5 { NG_PPP_HOOK_DECRYPT, ng_ppp_rcvdata_decrypt }, #define HOOK_INDEX_INET 6 { NG_PPP_HOOK_INET, ng_ppp_rcvdata_inet }, #define HOOK_INDEX_IPX 7 { NG_PPP_HOOK_IPX, ng_ppp_rcvdata_ipx }, #define HOOK_INDEX_VJC_COMP 8 { NG_PPP_HOOK_VJC_COMP, ng_ppp_rcvdata_vjc_comp }, #define HOOK_INDEX_VJC_IP 9 { NG_PPP_HOOK_VJC_IP, ng_ppp_rcvdata_vjc_ip }, #define HOOK_INDEX_VJC_UNCOMP 10 { NG_PPP_HOOK_VJC_UNCOMP, ng_ppp_rcvdata_vjc_uncomp }, #define HOOK_INDEX_VJC_VJIP 11 { NG_PPP_HOOK_VJC_VJIP, ng_ppp_rcvdata_vjc_vjip }, #define HOOK_INDEX_IPV6 12 { NG_PPP_HOOK_IPV6, ng_ppp_rcvdata_ipv6 }, { NULL, NULL } }; /* Helper functions */ static int ng_ppp_proto_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static int ng_ppp_hcomp_xmit(node_p node, item_p item, uint16_t proto); static int ng_ppp_hcomp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static int ng_ppp_comp_xmit(node_p node, item_p item, uint16_t proto); static int ng_ppp_comp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static int ng_ppp_crypt_xmit(node_p node, item_p item, uint16_t proto); static int ng_ppp_crypt_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static int ng_ppp_mp_xmit(node_p node, item_p item, uint16_t proto); static int ng_ppp_mp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static int ng_ppp_link_xmit(node_p node, item_p item, uint16_t proto, uint16_t linkNum, int plen); static int ng_ppp_bypass(node_p node, item_p item, uint16_t proto, uint16_t linkNum); static void ng_ppp_bump_mseq(node_p node, int32_t new_mseq); static int ng_ppp_frag_drop(node_p node); static int ng_ppp_check_packet(node_p node); static void ng_ppp_get_packet(node_p node, struct mbuf **mp); static int ng_ppp_frag_process(node_p node, item_p oitem); static int ng_ppp_frag_trim(node_p node); static void ng_ppp_frag_timeout(node_p node, hook_p hook, void *arg1, int arg2); static void ng_ppp_frag_checkstale(node_p node); static void ng_ppp_frag_reset(node_p node); static void ng_ppp_mp_strategy(node_p node, int len, int *distrib); -static int ng_ppp_intcmp(void *latency, const void *v1, const void *v2); +static int ng_ppp_intcmp(const void *v1, const void *v2, void *latency); static struct mbuf *ng_ppp_addproto(struct mbuf *m, uint16_t proto, int compOK); static struct mbuf *ng_ppp_cutproto(struct mbuf *m, uint16_t *proto); static struct mbuf *ng_ppp_prepend(struct mbuf *m, const void *buf, int len); static int ng_ppp_config_valid(node_p node, const struct ng_ppp_node_conf *newConf); static void ng_ppp_update(node_p node, int newConf); static void ng_ppp_start_frag_timer(node_p node); static void ng_ppp_stop_frag_timer(node_p node); /* Parse type for struct ng_ppp_mp_state_type */ static const struct ng_parse_fixedarray_info ng_ppp_rseq_array_info = { &ng_parse_hint32_type, NG_PPP_MAX_LINKS }; static const struct ng_parse_type ng_ppp_rseq_array_type = { &ng_parse_fixedarray_type, &ng_ppp_rseq_array_info, }; static const struct ng_parse_struct_field 
ng_ppp_mp_state_type_fields[] = NG_PPP_MP_STATE_TYPE_INFO(&ng_ppp_rseq_array_type); static const struct ng_parse_type ng_ppp_mp_state_type = { &ng_parse_struct_type, &ng_ppp_mp_state_type_fields }; /* Parse type for struct ng_ppp_link_conf */ static const struct ng_parse_struct_field ng_ppp_link_type_fields[] = NG_PPP_LINK_TYPE_INFO; static const struct ng_parse_type ng_ppp_link_type = { &ng_parse_struct_type, &ng_ppp_link_type_fields }; /* Parse type for struct ng_ppp_bund_conf */ static const struct ng_parse_struct_field ng_ppp_bund_type_fields[] = NG_PPP_BUND_TYPE_INFO; static const struct ng_parse_type ng_ppp_bund_type = { &ng_parse_struct_type, &ng_ppp_bund_type_fields }; /* Parse type for struct ng_ppp_node_conf */ static const struct ng_parse_fixedarray_info ng_ppp_array_info = { &ng_ppp_link_type, NG_PPP_MAX_LINKS }; static const struct ng_parse_type ng_ppp_link_array_type = { &ng_parse_fixedarray_type, &ng_ppp_array_info, }; static const struct ng_parse_struct_field ng_ppp_conf_type_fields[] = NG_PPP_CONFIG_TYPE_INFO(&ng_ppp_bund_type, &ng_ppp_link_array_type); static const struct ng_parse_type ng_ppp_conf_type = { &ng_parse_struct_type, &ng_ppp_conf_type_fields }; /* Parse type for struct ng_ppp_link_stat */ static const struct ng_parse_struct_field ng_ppp_stats_type_fields[] = NG_PPP_STATS_TYPE_INFO; static const struct ng_parse_type ng_ppp_stats_type = { &ng_parse_struct_type, &ng_ppp_stats_type_fields }; /* Parse type for struct ng_ppp_link_stat64 */ static const struct ng_parse_struct_field ng_ppp_stats64_type_fields[] = NG_PPP_STATS64_TYPE_INFO; static const struct ng_parse_type ng_ppp_stats64_type = { &ng_parse_struct_type, &ng_ppp_stats64_type_fields }; /* List of commands and how to convert arguments to/from ASCII */ static const struct ng_cmdlist ng_ppp_cmds[] = { { NGM_PPP_COOKIE, NGM_PPP_SET_CONFIG, "setconfig", &ng_ppp_conf_type, NULL }, { NGM_PPP_COOKIE, NGM_PPP_GET_CONFIG, "getconfig", NULL, &ng_ppp_conf_type }, { NGM_PPP_COOKIE, NGM_PPP_GET_MP_STATE, "getmpstate", NULL, &ng_ppp_mp_state_type }, { NGM_PPP_COOKIE, NGM_PPP_GET_LINK_STATS, "getstats", &ng_parse_int16_type, &ng_ppp_stats_type }, { NGM_PPP_COOKIE, NGM_PPP_CLR_LINK_STATS, "clrstats", &ng_parse_int16_type, NULL }, { NGM_PPP_COOKIE, NGM_PPP_GETCLR_LINK_STATS, "getclrstats", &ng_parse_int16_type, &ng_ppp_stats_type }, { NGM_PPP_COOKIE, NGM_PPP_GET_LINK_STATS64, "getstats64", &ng_parse_int16_type, &ng_ppp_stats64_type }, { NGM_PPP_COOKIE, NGM_PPP_GETCLR_LINK_STATS64, "getclrstats64", &ng_parse_int16_type, &ng_ppp_stats64_type }, { 0 } }; /* Node type descriptor */ static struct ng_type ng_ppp_typestruct = { .version = NG_ABI_VERSION, .name = NG_PPP_NODE_TYPE, .constructor = ng_ppp_constructor, .rcvmsg = ng_ppp_rcvmsg, .shutdown = ng_ppp_shutdown, .newhook = ng_ppp_newhook, .rcvdata = ng_ppp_rcvdata, .disconnect = ng_ppp_disconnect, .cmdlist = ng_ppp_cmds, }; NETGRAPH_INIT(ppp, &ng_ppp_typestruct); /* Address and control field header */ static const uint8_t ng_ppp_acf[2] = { 0xff, 0x03 }; /* Maximum time we'll let a complete incoming packet sit in the queue */ static const struct timeval ng_ppp_max_staleness = { 2, 0 }; /* 2 seconds */ #define ERROUT(x) do { error = (x); goto done; } while (0) /************************************************************************ NETGRAPH NODE STUFF ************************************************************************/ /* * Node type constructor */ static int ng_ppp_constructor(node_p node) { priv_p priv; int i; /* Allocate private structure */ priv = 
malloc(sizeof(*priv), M_NETGRAPH_PPP, M_WAITOK | M_ZERO);
	NG_NODE_SET_PRIVATE(node, priv);

	/* Initialize state */
	TAILQ_INIT(&priv->frags);
	TAILQ_INIT(&priv->fragsfree);
	for (i = 0; i < MP_MAX_QUEUE_LEN; i++)
		TAILQ_INSERT_TAIL(&priv->fragsfree, &priv->fragsmem[i], f_qent);
	for (i = 0; i < NG_PPP_MAX_LINKS; i++)
		priv->links[i].seq = MP_NOSEQ;
	ng_callout_init(&priv->fragTimer);

	mtx_init(&priv->rmtx, "ng_ppp_recv", NULL, MTX_DEF);
	mtx_init(&priv->xmtx, "ng_ppp_xmit", NULL, MTX_DEF);

	/* Done */
	return (0);
}

/*
 * Give our OK for a hook to be added
 */
static int
ng_ppp_newhook(node_p node, hook_p hook, const char *name)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	hook_p *hookPtr = NULL;
	int linkNum = -1;
	int hookIndex = -1;

	/* Figure out which hook it is */
	if (strncmp(name, NG_PPP_HOOK_LINK_PREFIX,	/* a link hook? */
	    strlen(NG_PPP_HOOK_LINK_PREFIX)) == 0) {
		const char *cp;
		char *eptr;

		cp = name + strlen(NG_PPP_HOOK_LINK_PREFIX);
		if (!isdigit(*cp) || (cp[0] == '0' && cp[1] != '\0'))
			return (EINVAL);
		linkNum = (int)strtoul(cp, &eptr, 10);
		if (*eptr != '\0' || linkNum < 0 || linkNum >= NG_PPP_MAX_LINKS)
			return (EINVAL);
		hookPtr = &priv->links[linkNum].hook;
		hookIndex = ~linkNum;

		/* See if hook is already connected. */
		if (*hookPtr != NULL)
			return (EISCONN);

		/* Disallow more than one link unless multilink is enabled. */
		if (priv->links[linkNum].conf.enableLink &&
		    !priv->conf.enableMultilink && priv->numActiveLinks >= 1)
			return (ENODEV);

	} else {				/* must be a non-link hook */
		int i;

		for (i = 0; ng_ppp_hook_names[i].name != NULL; i++) {
			if (strcmp(name, ng_ppp_hook_names[i].name) == 0) {
				hookPtr = &priv->hooks[i];
				hookIndex = i;
				break;
			}
		}
		if (ng_ppp_hook_names[i].name == NULL)
			return (EINVAL);	/* no such hook */

		/* See if hook is already connected */
		if (*hookPtr != NULL)
			return (EISCONN);

		/* Every non-linkX hook has its own function.
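ng_ppp_newhook() packs two namespaces into the single hook-private integer: a non-link hook stores its non-negative table index, while a link hook stores ~linkNum, which is always negative; ng_ppp_disconnect() later inverts the encoding with another ~. A standalone sketch of the convention (function names here are illustrative only):

	#include <assert.h>

	/* Link hooks encode ~linkNum (< 0); non-link hooks encode i (>= 0). */
	static int
	decode_hook_index(int index, int *is_link)
	{
		*is_link = (index < 0);
		return (*is_link ? ~index : index);	/* ~ is self-inverse */
	}

	int
	main(void)
	{
		int is_link;

		assert(decode_hook_index(~3, &is_link) == 3 && is_link);
		assert(decode_hook_index(7, &is_link) == 7 && !is_link);
		return (0);
	}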
*/ NG_HOOK_SET_RCVDATA(hook, ng_ppp_hook_names[i].fn); } /* OK */ *hookPtr = hook; NG_HOOK_SET_PRIVATE(hook, (void *)(intptr_t)hookIndex); ng_ppp_update(node, 0); return (0); } /* * Receive a control message */ static int ng_ppp_rcvmsg(node_p node, item_p item, hook_p lasthook) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_mesg *resp = NULL; int error = 0; struct ng_mesg *msg; NGI_GET_MSG(item, msg); switch (msg->header.typecookie) { case NGM_PPP_COOKIE: switch (msg->header.cmd) { case NGM_PPP_SET_CONFIG: { struct ng_ppp_node_conf *const conf = (struct ng_ppp_node_conf *)msg->data; int i; /* Check for invalid or illegal config */ if (msg->header.arglen != sizeof(*conf)) ERROUT(EINVAL); if (!ng_ppp_config_valid(node, conf)) ERROUT(EINVAL); /* Copy config */ priv->conf = conf->bund; for (i = 0; i < NG_PPP_MAX_LINKS; i++) priv->links[i].conf = conf->links[i]; ng_ppp_update(node, 1); break; } case NGM_PPP_GET_CONFIG: { struct ng_ppp_node_conf *conf; int i; NG_MKRESPONSE(resp, msg, sizeof(*conf), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); conf = (struct ng_ppp_node_conf *)resp->data; conf->bund = priv->conf; for (i = 0; i < NG_PPP_MAX_LINKS; i++) conf->links[i] = priv->links[i].conf; break; } case NGM_PPP_GET_MP_STATE: { struct ng_ppp_mp_state *info; int i; NG_MKRESPONSE(resp, msg, sizeof(*info), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); info = (struct ng_ppp_mp_state *)resp->data; bzero(info, sizeof(*info)); for (i = 0; i < NG_PPP_MAX_LINKS; i++) { if (priv->links[i].seq != MP_NOSEQ) info->rseq[i] = priv->links[i].seq; } info->mseq = priv->mseq; info->xseq = priv->xseq; break; } case NGM_PPP_GET_LINK_STATS: case NGM_PPP_CLR_LINK_STATS: case NGM_PPP_GETCLR_LINK_STATS: case NGM_PPP_GET_LINK_STATS64: case NGM_PPP_GETCLR_LINK_STATS64: { struct ng_ppp_link_stat64 *stats; uint16_t linkNum; /* Process request. */ if (msg->header.arglen != sizeof(uint16_t)) ERROUT(EINVAL); linkNum = *((uint16_t *) msg->data); if (linkNum >= NG_PPP_MAX_LINKS && linkNum != NG_PPP_BUNDLE_LINKNUM) ERROUT(EINVAL); stats = (linkNum == NG_PPP_BUNDLE_LINKNUM) ? &priv->bundleStats : &priv->links[linkNum].stats; /* Make 64bit reply. */ if (msg->header.cmd == NGM_PPP_GET_LINK_STATS64 || msg->header.cmd == NGM_PPP_GETCLR_LINK_STATS64) { NG_MKRESPONSE(resp, msg, sizeof(struct ng_ppp_link_stat64), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); bcopy(stats, resp->data, sizeof(*stats)); } else /* Make 32bit reply. */ if (msg->header.cmd == NGM_PPP_GET_LINK_STATS || msg->header.cmd == NGM_PPP_GETCLR_LINK_STATS) { struct ng_ppp_link_stat *rs; NG_MKRESPONSE(resp, msg, sizeof(struct ng_ppp_link_stat), M_NOWAIT); if (resp == NULL) ERROUT(ENOMEM); rs = (struct ng_ppp_link_stat *)resp->data; /* Truncate 64->32 bits. */ rs->xmitFrames = stats->xmitFrames; rs->xmitOctets = stats->xmitOctets; rs->recvFrames = stats->recvFrames; rs->recvOctets = stats->recvOctets; rs->badProtos = stats->badProtos; rs->runts = stats->runts; rs->dupFragments = stats->dupFragments; rs->dropFragments = stats->dropFragments; } /* Clear stats. */ if (msg->header.cmd != NGM_PPP_GET_LINK_STATS && msg->header.cmd != NGM_PPP_GET_LINK_STATS64) bzero(stats, sizeof(*stats)); break; } default: error = EINVAL; break; } break; case NGM_VJC_COOKIE: { /* * Forward it to the vjc node. leave the * old return address alone. * If we have no hook, let NG_RESPOND_MSG * clean up any remaining resources. * Because we have no resp, the item will be freed * along with anything it references. Don't * let msg be freed twice. 
*/ NGI_MSG(item) = msg; /* put it back in the item */ msg = NULL; if ((lasthook = priv->hooks[HOOK_INDEX_VJC_IP])) { NG_FWD_ITEM_HOOK(error, item, lasthook); } return (error); } default: error = EINVAL; break; } done: NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); return (error); } /* * Destroy node */ static int ng_ppp_shutdown(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); /* Stop fragment queue timer */ ng_ppp_stop_frag_timer(node); /* Take down netgraph node */ ng_ppp_frag_reset(node); mtx_destroy(&priv->rmtx); mtx_destroy(&priv->xmtx); bzero(priv, sizeof(*priv)); free(priv, M_NETGRAPH_PPP); NG_NODE_SET_PRIVATE(node, NULL); NG_NODE_UNREF(node); /* let the node escape */ return (0); } /* * Hook disconnection */ static int ng_ppp_disconnect(hook_p hook) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); const int index = (intptr_t)NG_HOOK_PRIVATE(hook); /* Zero out hook pointer */ if (index < 0) priv->links[~index].hook = NULL; else priv->hooks[index] = NULL; /* Update derived info (or go away if no hooks left). */ if (NG_NODE_NUMHOOKS(node) > 0) ng_ppp_update(node, 0); else if (NG_NODE_IS_VALID(node)) ng_rmnode_self(node); return (0); } /* * Proto layer */ /* * Receive data on a hook inet. */ static int ng_ppp_rcvdata_inet(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableIP) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_hcomp_xmit(NG_HOOK_NODE(hook), item, PROT_IP)); } /* * Receive data on a hook inet and pass it directly to first link. */ static int ng_ppp_rcvdata_inet_fast(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); return (ng_ppp_link_xmit(node, item, PROT_IP, priv->activeLinks[0], NGI_M(item)->m_pkthdr.len)); } /* * Receive data on a hook ipv6. */ static int ng_ppp_rcvdata_ipv6(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableIPv6) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_hcomp_xmit(NG_HOOK_NODE(hook), item, PROT_IPV6)); } /* * Receive data on a hook atalk. 
*/ static int ng_ppp_rcvdata_atalk(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableAtalk) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_hcomp_xmit(NG_HOOK_NODE(hook), item, PROT_ATALK)); } /* * Receive data on a hook ipx */ static int ng_ppp_rcvdata_ipx(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableIPX) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_hcomp_xmit(NG_HOOK_NODE(hook), item, PROT_IPX)); } /* * Receive data on a hook bypass */ static int ng_ppp_rcvdata_bypass(hook_p hook, item_p item) { uint16_t linkNum; uint16_t proto; struct mbuf *m; NGI_GET_M(item, m); if (m->m_pkthdr.len < 4) { NG_FREE_ITEM(item); return (EINVAL); } if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } linkNum = be16dec(mtod(m, uint8_t *)); proto = be16dec(mtod(m, uint8_t *) + 2); m_adj(m, 4); NGI_M(item) = m; if (linkNum == NG_PPP_BUNDLE_LINKNUM) return (ng_ppp_hcomp_xmit(NG_HOOK_NODE(hook), item, proto)); else return (ng_ppp_link_xmit(NG_HOOK_NODE(hook), item, proto, linkNum, 0)); } static int ng_ppp_bypass(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); uint16_t hdr[2]; struct mbuf *m; int error; if (priv->hooks[HOOK_INDEX_BYPASS] == NULL) { NG_FREE_ITEM(item); return (ENXIO); } /* Add 4-byte bypass header. */ hdr[0] = htons(linkNum); hdr[1] = htons(proto); NGI_GET_M(item, m); if ((m = ng_ppp_prepend(m, &hdr, 4)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } NGI_M(item) = m; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_BYPASS]); return (error); } static int ng_ppp_proto_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); hook_p outHook = NULL; int error; #ifdef ALIGNED_POINTER struct mbuf *m, *n; NGI_GET_M(item, m); if (!ALIGNED_POINTER(mtod(m, caddr_t), uint32_t)) { n = m_defrag(m, M_NOWAIT); if (n == NULL) { m_freem(m); NG_FREE_ITEM(item); return (ENOBUFS); } m = n; } NGI_M(item) = m; #endif /* ALIGNED_POINTER */ switch (proto) { case PROT_IP: if (priv->conf.enableIP) outHook = priv->hooks[HOOK_INDEX_INET]; break; case PROT_IPV6: if (priv->conf.enableIPv6) outHook = priv->hooks[HOOK_INDEX_IPV6]; break; case PROT_ATALK: if (priv->conf.enableAtalk) outHook = priv->hooks[HOOK_INDEX_ATALK]; break; case PROT_IPX: if (priv->conf.enableIPX) outHook = priv->hooks[HOOK_INDEX_IPX]; break; } if (outHook == NULL) return (ng_ppp_bypass(node, item, proto, linkNum)); /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, outHook); return (error); } /* * Header compression layer */ static int ng_ppp_hcomp_xmit(node_p node, item_p item, uint16_t proto) { const priv_p priv = NG_NODE_PRIVATE(node); if (proto == PROT_IP && priv->conf.enableVJCompression && priv->vjCompHooked) { int error; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_VJC_IP]); return (error); } return (ng_ppp_comp_xmit(node, item, proto)); } /* * Receive data on a hook vjc_comp. */ static int ng_ppp_rcvdata_vjc_comp(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableVJCompression) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_comp_xmit(node, item, PROT_VJCOMP)); } /* * Receive data on a hook vjc_uncomp. 
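ng_ppp_rcvdata_bypass() above expects every bypass frame to begin with a four-byte header: a 16-bit link number followed by a 16-bit PPP protocol, both big-endian, where NG_PPP_BUNDLE_LINKNUM addresses the whole bundle; ng_ppp_bypass() prepends the same header on the way out. A userland sketch that builds such a frame before writing it to the bypass hook (a minimal sketch; buffer management is the caller's problem, and buf must hold at least plen + 4 bytes):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	/* Prefix payload with the 4-byte bypass header: linkNum, then proto. */
	static size_t
	build_bypass_frame(uint8_t *buf, uint16_t linkNum, uint16_t proto,
	    const void *payload, size_t plen)
	{
		uint16_t hdr[2];

		hdr[0] = htons(linkNum);	/* or NG_PPP_BUNDLE_LINKNUM */
		hdr[1] = htons(proto);		/* e.g. 0xc021 for LCP */
		memcpy(buf, hdr, sizeof(hdr));
		memcpy(buf + sizeof(hdr), payload, plen);
		return (sizeof(hdr) + plen);
	}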
*/ static int ng_ppp_rcvdata_vjc_uncomp(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableVJCompression) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_comp_xmit(node, item, PROT_VJUNCOMP)); } /* * Receive data on a hook vjc_vjip. */ static int ng_ppp_rcvdata_vjc_vjip(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableVJCompression) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_comp_xmit(node, item, PROT_IP)); } static int ng_ppp_hcomp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); if (priv->conf.enableVJDecompression && priv->vjCompHooked) { hook_p outHook = NULL; switch (proto) { case PROT_VJCOMP: outHook = priv->hooks[HOOK_INDEX_VJC_COMP]; break; case PROT_VJUNCOMP: outHook = priv->hooks[HOOK_INDEX_VJC_UNCOMP]; break; } if (outHook) { int error; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, outHook); return (error); } } return (ng_ppp_proto_recv(node, item, proto, linkNum)); } /* * Receive data on a hook vjc_ip. */ static int ng_ppp_rcvdata_vjc_ip(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableVJDecompression) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_proto_recv(node, item, PROT_IP, NG_PPP_BUNDLE_LINKNUM)); } /* * Compression layer */ static int ng_ppp_comp_xmit(node_p node, item_p item, uint16_t proto) { const priv_p priv = NG_NODE_PRIVATE(node); if (priv->conf.enableCompression && proto < 0x4000 && proto != PROT_COMPD && proto != PROT_CRYPTD && priv->hooks[HOOK_INDEX_COMPRESS] != NULL) { struct mbuf *m; int error; NGI_GET_M(item, m); if ((m = ng_ppp_addproto(m, proto, 0)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } NGI_M(item) = m; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_COMPRESS]); return (error); } return (ng_ppp_crypt_xmit(node, item, proto)); } /* * Receive data on a hook compress. */ static int ng_ppp_rcvdata_compress(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); uint16_t proto; switch (priv->conf.enableCompression) { case NG_PPP_COMPRESS_NONE: NG_FREE_ITEM(item); return (ENXIO); case NG_PPP_COMPRESS_FULL: { struct mbuf *m; NGI_GET_M(item, m); if ((m = ng_ppp_cutproto(m, &proto)) == NULL) { NG_FREE_ITEM(item); return (EIO); } NGI_M(item) = m; if (!PROT_VALID(proto)) { NG_FREE_ITEM(item); return (EIO); } } break; default: proto = PROT_COMPD; break; } return (ng_ppp_crypt_xmit(node, item, proto)); } static int ng_ppp_comp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); if (proto < 0x4000 && ((proto == PROT_COMPD && priv->conf.enableDecompression) || priv->conf.enableDecompression == NG_PPP_DECOMPRESS_FULL) && priv->hooks[HOOK_INDEX_DECOMPRESS] != NULL) { int error; if (priv->conf.enableDecompression == NG_PPP_DECOMPRESS_FULL) { struct mbuf *m; NGI_GET_M(item, m); if ((m = ng_ppp_addproto(m, proto, 0)) == NULL) { NG_FREE_ITEM(item); return (EIO); } NGI_M(item) = m; } /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_DECOMPRESS]); return (error); } else if (proto == PROT_COMPD) { /* Disabled protos MUST be silently discarded, but * unsupported MUST not. Let user-level decide this. 
*/ return (ng_ppp_bypass(node, item, proto, linkNum)); } return (ng_ppp_hcomp_recv(node, item, proto, linkNum)); } /* * Receive data on a hook decompress. */ static int ng_ppp_rcvdata_decompress(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); uint16_t proto; struct mbuf *m; if (!priv->conf.enableDecompression) { NG_FREE_ITEM(item); return (ENXIO); } NGI_GET_M(item, m); if ((m = ng_ppp_cutproto(m, &proto)) == NULL) { NG_FREE_ITEM(item); return (EIO); } NGI_M(item) = m; if (!PROT_VALID(proto)) { priv->bundleStats.badProtos++; NG_FREE_ITEM(item); return (EIO); } return (ng_ppp_hcomp_recv(node, item, proto, NG_PPP_BUNDLE_LINKNUM)); } /* * Encryption layer */ static int ng_ppp_crypt_xmit(node_p node, item_p item, uint16_t proto) { const priv_p priv = NG_NODE_PRIVATE(node); if (priv->conf.enableEncryption && proto < 0x4000 && proto != PROT_CRYPTD && priv->hooks[HOOK_INDEX_ENCRYPT] != NULL) { struct mbuf *m; int error; NGI_GET_M(item, m); if ((m = ng_ppp_addproto(m, proto, 0)) == NULL) { NG_FREE_ITEM(item); return (ENOBUFS); } NGI_M(item) = m; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_ENCRYPT]); return (error); } return (ng_ppp_mp_xmit(node, item, proto)); } /* * Receive data on a hook encrypt. */ static int ng_ppp_rcvdata_encrypt(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); if (!priv->conf.enableEncryption) { NG_FREE_ITEM(item); return (ENXIO); } return (ng_ppp_mp_xmit(node, item, PROT_CRYPTD)); } static int ng_ppp_crypt_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); if (proto == PROT_CRYPTD) { if (priv->conf.enableDecryption && priv->hooks[HOOK_INDEX_DECRYPT] != NULL) { int error; /* Send packet out hook. */ NG_FWD_ITEM_HOOK(error, item, priv->hooks[HOOK_INDEX_DECRYPT]); return (error); } else { /* Disabled protos MUST be silently discarded, but * unsupported MUST not. Let user-level decide this. */ return (ng_ppp_bypass(node, item, proto, linkNum)); } } return (ng_ppp_comp_recv(node, item, proto, linkNum)); } /* * Receive data on a hook decrypt. */ static int ng_ppp_rcvdata_decrypt(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); uint16_t proto; struct mbuf *m; if (!priv->conf.enableDecryption) { NG_FREE_ITEM(item); return (ENXIO); } NGI_GET_M(item, m); if ((m = ng_ppp_cutproto(m, &proto)) == NULL) { NG_FREE_ITEM(item); return (EIO); } NGI_M(item) = m; if (!PROT_VALID(proto)) { priv->bundleStats.badProtos++; NG_FREE_ITEM(item); return (EIO); } return (ng_ppp_comp_recv(node, item, proto, NG_PPP_BUNDLE_LINKNUM)); } /* * Link layer */ static int ng_ppp_link_xmit(node_p node, item_p item, uint16_t proto, uint16_t linkNum, int plen) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_link *link; int len, error; struct mbuf *m; uint16_t mru; /* Check if link correct. */ if (linkNum >= NG_PPP_MAX_LINKS) { ERROUT(ENETDOWN); } /* Get link pointer (optimization). */ link = &priv->links[linkNum]; /* Check link status (if real). */ if (link->hook == NULL) { ERROUT(ENETDOWN); } /* Extract mbuf. */ NGI_GET_M(item, m); /* Check peer's MRU for this link. */ mru = link->conf.mru; if (mru != 0 && m->m_pkthdr.len > mru) { NG_FREE_M(m); ERROUT(EMSGSIZE); } /* Prepend protocol number, possibly compressed. 
*/ if ((m = ng_ppp_addproto(m, proto, link->conf.enableProtoComp)) == NULL) { ERROUT(ENOBUFS); } /* Prepend address and control field (unless compressed). */ if (proto == PROT_LCP || !link->conf.enableACFComp) { if ((m = ng_ppp_prepend(m, &ng_ppp_acf, 2)) == NULL) ERROUT(ENOBUFS); } /* Deliver frame. */ len = m->m_pkthdr.len; NG_FWD_NEW_DATA(error, item, link->hook, m); mtx_lock(&priv->xmtx); /* Update link stats. */ link->stats.xmitFrames++; link->stats.xmitOctets += len; /* Update bundle stats. */ if (plen > 0) { priv->bundleStats.xmitFrames++; priv->bundleStats.xmitOctets += plen; } /* Update 'bytes in queue' counter. */ if (error == 0) { /* bytesInQueue and lastWrite required only for mp_strategy. */ if (priv->conf.enableMultilink && !priv->allLinksEqual && !priv->conf.enableRoundRobin) { /* If queue was empty, then mark this time. */ if (link->bytesInQueue == 0) getmicrouptime(&link->lastWrite); link->bytesInQueue += len + MP_AVERAGE_LINK_OVERHEAD; /* Limit max queue length to 50 pkts. BW can be defined incorrectly and link may not signal overload. */ if (link->bytesInQueue > 50 * 1600) link->bytesInQueue = 50 * 1600; } } mtx_unlock(&priv->xmtx); return (error); done: NG_FREE_ITEM(item); return (error); } /* * Receive data on a hook linkX. */ static int ng_ppp_rcvdata(hook_p hook, item_p item) { const node_p node = NG_HOOK_NODE(hook); const priv_p priv = NG_NODE_PRIVATE(node); const int index = (intptr_t)NG_HOOK_PRIVATE(hook); const uint16_t linkNum = (uint16_t)~index; struct ng_ppp_link * const link = &priv->links[linkNum]; uint16_t proto; struct mbuf *m; int error = 0; KASSERT(linkNum < NG_PPP_MAX_LINKS, ("%s: bogus index 0x%x", __func__, index)); NGI_GET_M(item, m); mtx_lock(&priv->rmtx); /* Stats */ link->stats.recvFrames++; link->stats.recvOctets += m->m_pkthdr.len; /* Strip address and control fields, if present. */ if (m->m_len < 2 && (m = m_pullup(m, 2)) == NULL) ERROUT(ENOBUFS); if (mtod(m, uint8_t *)[0] == 0xff && mtod(m, uint8_t *)[1] == 0x03) m_adj(m, 2); /* Get protocol number */ if ((m = ng_ppp_cutproto(m, &proto)) == NULL) ERROUT(ENOBUFS); NGI_M(item) = m; /* Put changed m back into item. */ if (!PROT_VALID(proto)) { link->stats.badProtos++; ERROUT(EIO); } /* LCP packets must go directly to bypass. */ if (proto >= 0xB000) { mtx_unlock(&priv->rmtx); return (ng_ppp_bypass(node, item, proto, linkNum)); } /* Other packets are denied on a disabled link. */ if (!link->conf.enableLink) ERROUT(ENXIO); /* Proceed to multilink layer. Mutex will be unlocked inside. */ error = ng_ppp_mp_recv(node, item, proto, linkNum); mtx_assert(&priv->rmtx, MA_NOTOWNED); return (error); done: mtx_unlock(&priv->rmtx); NG_FREE_ITEM(item); return (error); } /* * Multilink layer */ /* * Handle an incoming multi-link fragment * * The fragment reassembly algorithm is somewhat complex. This is mainly * because we are required not to reorder the reconstructed packets, yet * fragments are only guaranteed to arrive in order on a per-link basis. * In other words, when we have a complete packet ready, but the previous * packet is still incomplete, we have to decide between delivering the * complete packet and throwing away the incomplete one, or waiting to * see if the remainder of the incomplete one arrives, at which time we * can deliver both packets, in order. * * This problem is exacerbated by "sequence number slew", which is when * the sequence numbers coming in from different links are far apart from * each other. 
In particular, certain unnamed equipment (*cough* Ascend)
 * has been seen to generate sequence number slew of up to 10 on an ISDN
 * 2B-channel MP link. There is nothing invalid about sequence number slew,
 * but it makes the reassembly process have to work harder.
 *
 * However, the peer is required to transmit fragments in order on each
 * link. That means if we define MSEQ as the minimum over all links of
 * the highest sequence number received on that link, then we can always
 * give up any hope of receiving a fragment with sequence number < MSEQ in
 * the future (all of this using 'wraparound' sequence number space).
 * Therefore we can always immediately throw away incomplete packets
 * missing fragments with sequence numbers < MSEQ.
 *
 * Here is an overview of our algorithm:
 *
 *    o Received fragments are inserted into a queue, for which we
 *	maintain these invariants between calls to this function:
 *
 *	  - Fragments are ordered in the queue by sequence number
 *	  - If a complete packet is at the head of the queue, then
 *	    the first fragment in the packet has seq# > MSEQ + 1
 *	    (otherwise, we could deliver it immediately)
 *	  - If any fragments have seq# < MSEQ, then they are necessarily
 *	    part of a packet whose missing seq#'s are all > MSEQ (otherwise,
 *	    we can throw them away because they'll never be completed)
 *	  - The queue contains at most MP_MAX_QUEUE_LEN fragments
 *
 *    o We have a periodic timer that checks the queue for the first
 *	complete packet that has been sitting in the queue "too long".
 *	When one is detected, all previous (incomplete) fragments are
 *	discarded, their missing fragments are declared lost and MSEQ
 *	is increased.
 *
 *    o If we receive a fragment with seq# < MSEQ, we throw it away
 *	because we've already declared it lost.
 *
 * This assumes linkNum != NG_PPP_BUNDLE_LINKNUM.
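As a concrete illustration of the sign extension and the MSEQ rule described above, here is a standalone sketch using the 12-bit short-sequence format (the sequence numbers are made up; short_extend() mirrors MP_SHORT_EXTEND() and the loop mirrors the MSEQ scan in ng_ppp_mp_recv()):

	#include <stdint.h>
	#include <stdio.h>

	#define SHORT_SEQ_MASK	0x00000fff
	#define SHORT_SEQ_HIBIT	0x00000800

	/* Sign-extend a 12-bit MP sequence number into a full int32_t. */
	static int32_t
	short_extend(int32_t s)
	{
		return ((s & SHORT_SEQ_HIBIT) ?
		    (s | ~SHORT_SEQ_MASK) : (s & SHORT_SEQ_MASK));
	}

	int
	main(void)
	{
		/* Highest extended seq# seen on each of three links. */
		int32_t seq[3] = { 9, 5, 7 }, mseq = seq[0];
		int i;

		/* MSEQ is the minimum, via wraparound-safe comparison. */
		for (i = 1; i < 3; i++)
			if (short_extend(seq[i] - mseq) < 0)
				mseq = seq[i];
		printf("MSEQ = %d\n", mseq);	/* prints MSEQ = 5 */

		/* Wraparound: seq 0xffe extends to -2, so it compares
		 * three behind seq 1 rather than 4093 ahead of it. */
		printf("diff = %d\n", short_extend(0xffe - 1));	/* -3 */
		return (0);
	}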
*/ static int ng_ppp_mp_recv(node_p node, item_p item, uint16_t proto, uint16_t linkNum) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_link *const link = &priv->links[linkNum]; struct ng_ppp_frag *frag; struct ng_ppp_frag *qent; int i, diff, inserted; struct mbuf *m; int error = 0; if ((!priv->conf.enableMultilink) || proto != PROT_MP) { /* Stats */ priv->bundleStats.recvFrames++; priv->bundleStats.recvOctets += NGI_M(item)->m_pkthdr.len; mtx_unlock(&priv->rmtx); return (ng_ppp_crypt_recv(node, item, proto, linkNum)); } NGI_GET_M(item, m); /* Get a new frag struct from the free queue */ if ((frag = TAILQ_FIRST(&priv->fragsfree)) == NULL) { printf("No free fragments headers in ng_ppp!\n"); NG_FREE_M(m); goto process; } /* Extract fragment information from MP header */ if (priv->conf.recvShortSeq) { uint16_t shdr; if (m->m_pkthdr.len < 2) { link->stats.runts++; NG_FREE_M(m); ERROUT(EINVAL); } if (m->m_len < 2 && (m = m_pullup(m, 2)) == NULL) ERROUT(ENOBUFS); shdr = be16dec(mtod(m, void *)); frag->seq = MP_SHORT_EXTEND(shdr); frag->first = (shdr & MP_SHORT_FIRST_FLAG) != 0; frag->last = (shdr & MP_SHORT_LAST_FLAG) != 0; diff = MP_SHORT_SEQ_DIFF(frag->seq, priv->mseq); m_adj(m, 2); } else { uint32_t lhdr; if (m->m_pkthdr.len < 4) { link->stats.runts++; NG_FREE_M(m); ERROUT(EINVAL); } if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL) ERROUT(ENOBUFS); lhdr = be32dec(mtod(m, void *)); frag->seq = MP_LONG_EXTEND(lhdr); frag->first = (lhdr & MP_LONG_FIRST_FLAG) != 0; frag->last = (lhdr & MP_LONG_LAST_FLAG) != 0; diff = MP_LONG_SEQ_DIFF(frag->seq, priv->mseq); m_adj(m, 4); } frag->data = m; getmicrouptime(&frag->timestamp); /* If sequence number is < MSEQ, we've already declared this fragment as lost, so we have no choice now but to drop it */ if (diff < 0) { link->stats.dropFragments++; NG_FREE_M(m); ERROUT(0); } /* Update highest received sequence number on this link and MSEQ */ priv->mseq = link->seq = frag->seq; for (i = 0; i < priv->numActiveLinks; i++) { struct ng_ppp_link *const alink = &priv->links[priv->activeLinks[i]]; if (MP_RECV_SEQ_DIFF(priv, alink->seq, priv->mseq) < 0) priv->mseq = alink->seq; } /* Remove frag struct from free queue. */ TAILQ_REMOVE(&priv->fragsfree, frag, f_qent); /* Add fragment to queue, which is sorted by sequence number */ inserted = 0; TAILQ_FOREACH_REVERSE(qent, &priv->frags, ng_ppp_fraglist, f_qent) { diff = MP_RECV_SEQ_DIFF(priv, frag->seq, qent->seq); if (diff > 0) { TAILQ_INSERT_AFTER(&priv->frags, qent, frag, f_qent); inserted = 1; break; } else if (diff == 0) { /* should never happen! */ link->stats.dupFragments++; NG_FREE_M(frag->data); TAILQ_INSERT_HEAD(&priv->fragsfree, frag, f_qent); ERROUT(EINVAL); } } if (!inserted) TAILQ_INSERT_HEAD(&priv->frags, frag, f_qent); process: /* Process the queue */ /* NOTE: rmtx will be unlocked for sending time! 
*/ error = ng_ppp_frag_process(node, item); mtx_unlock(&priv->rmtx); return (error); done: mtx_unlock(&priv->rmtx); NG_FREE_ITEM(item); return (error); } /************************************************************************ HELPER STUFF ************************************************************************/ /* * If new mseq > current then set it and update all active links */ static void ng_ppp_bump_mseq(node_p node, int32_t new_mseq) { const priv_p priv = NG_NODE_PRIVATE(node); int i; if (MP_RECV_SEQ_DIFF(priv, priv->mseq, new_mseq) < 0) { priv->mseq = new_mseq; for (i = 0; i < priv->numActiveLinks; i++) { struct ng_ppp_link *const alink = &priv->links[priv->activeLinks[i]]; if (MP_RECV_SEQ_DIFF(priv, alink->seq, new_mseq) < 0) alink->seq = new_mseq; } } } /* * Examine our list of fragments, and determine if there is a * complete and deliverable packet at the head of the list. * Return 1 if so, zero otherwise. */ static int ng_ppp_check_packet(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_frag *qent, *qnext; /* Check for empty queue */ if (TAILQ_EMPTY(&priv->frags)) return (0); /* Check first fragment is the start of a deliverable packet */ qent = TAILQ_FIRST(&priv->frags); if (!qent->first || MP_RECV_SEQ_DIFF(priv, qent->seq, priv->mseq) > 1) return (0); /* Check that all the fragments are there */ while (!qent->last) { qnext = TAILQ_NEXT(qent, f_qent); if (qnext == NULL) /* end of queue */ return (0); if (qnext->seq != MP_NEXT_RECV_SEQ(priv, qent->seq)) return (0); qent = qnext; } /* Got one */ return (1); } /* * Pull a completed packet off the head of the incoming fragment queue. * This assumes there is a completed packet there to pull off. */ static void ng_ppp_get_packet(node_p node, struct mbuf **mp) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_frag *qent, *qnext; struct mbuf *m = NULL, *tail; qent = TAILQ_FIRST(&priv->frags); KASSERT(!TAILQ_EMPTY(&priv->frags) && qent->first, ("%s: no packet", __func__)); for (tail = NULL; qent != NULL; qent = qnext) { qnext = TAILQ_NEXT(qent, f_qent); KASSERT(!TAILQ_EMPTY(&priv->frags), ("%s: empty q", __func__)); TAILQ_REMOVE(&priv->frags, qent, f_qent); if (tail == NULL) tail = m = qent->data; else { m->m_pkthdr.len += qent->data->m_pkthdr.len; tail->m_next = qent->data; } while (tail->m_next != NULL) tail = tail->m_next; if (qent->last) { qnext = NULL; /* Bump MSEQ if necessary */ ng_ppp_bump_mseq(node, qent->seq); } TAILQ_INSERT_HEAD(&priv->fragsfree, qent, f_qent); } *mp = m; } /* * Trim fragments from the queue whose packets can never be completed. * This assumes a complete packet is NOT at the beginning of the queue. * Returns 1 if fragments were removed, zero otherwise. 
*/ static int ng_ppp_frag_trim(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_frag *qent, *qnext = NULL; int removed = 0; /* Scan for "dead" fragments and remove them */ while (1) { int dead = 0; /* If queue is empty, we're done */ if (TAILQ_EMPTY(&priv->frags)) break; /* Determine whether first fragment can ever be completed */ TAILQ_FOREACH(qent, &priv->frags, f_qent) { if (MP_RECV_SEQ_DIFF(priv, qent->seq, priv->mseq) >= 0) break; qnext = TAILQ_NEXT(qent, f_qent); KASSERT(qnext != NULL, ("%s: last frag < MSEQ?", __func__)); if (qnext->seq != MP_NEXT_RECV_SEQ(priv, qent->seq) || qent->last || qnext->first) { dead = 1; break; } } if (!dead) break; /* Remove fragment and all others in the same packet */ while ((qent = TAILQ_FIRST(&priv->frags)) != qnext) { KASSERT(!TAILQ_EMPTY(&priv->frags), ("%s: empty q", __func__)); priv->bundleStats.dropFragments++; TAILQ_REMOVE(&priv->frags, qent, f_qent); NG_FREE_M(qent->data); TAILQ_INSERT_HEAD(&priv->fragsfree, qent, f_qent); removed = 1; } } return (removed); } /* * Drop fragments on queue overflow. * Returns 1 if fragments were removed, zero otherwise. */ static int ng_ppp_frag_drop(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); /* Check queue length */ if (TAILQ_EMPTY(&priv->fragsfree)) { struct ng_ppp_frag *qent; /* Get oldest fragment */ KASSERT(!TAILQ_EMPTY(&priv->frags), ("%s: empty q", __func__)); qent = TAILQ_FIRST(&priv->frags); /* Bump MSEQ if necessary */ ng_ppp_bump_mseq(node, qent->seq); /* Drop it */ priv->bundleStats.dropFragments++; TAILQ_REMOVE(&priv->frags, qent, f_qent); NG_FREE_M(qent->data); TAILQ_INSERT_HEAD(&priv->fragsfree, qent, f_qent); return (1); } return (0); } /* * Run the queue, restoring the queue invariants */ static int ng_ppp_frag_process(node_p node, item_p oitem) { const priv_p priv = NG_NODE_PRIVATE(node); struct mbuf *m; item_p item; uint16_t proto; do { /* Deliver any deliverable packets */ while (ng_ppp_check_packet(node)) { ng_ppp_get_packet(node, &m); if ((m = ng_ppp_cutproto(m, &proto)) == NULL) continue; if (!PROT_VALID(proto)) { priv->bundleStats.badProtos++; NG_FREE_M(m); continue; } if (oitem) { /* If original item present - reuse it. */ item = oitem; oitem = NULL; NGI_M(item) = m; } else { item = ng_package_data(m, NG_NOFLAGS); } if (item != NULL) { /* Stats */ priv->bundleStats.recvFrames++; priv->bundleStats.recvOctets += NGI_M(item)->m_pkthdr.len; /* Drop mutex for the sending time. * Priv may change, but we are ready! */ mtx_unlock(&priv->rmtx); ng_ppp_crypt_recv(node, item, proto, NG_PPP_BUNDLE_LINKNUM); mtx_lock(&priv->rmtx); } } /* Delete dead fragments and try again */ } while (ng_ppp_frag_trim(node) || ng_ppp_frag_drop(node)); /* If we haven't reused original item - free it. */ if (oitem) NG_FREE_ITEM(oitem); /* Done */ return (0); } /* * Check for 'stale' completed packets that need to be delivered * * If a link goes down or has a temporary failure, MSEQ can get * "stuck", because no new incoming fragments appear on that link. * This can cause completed packets to never get delivered if * their sequence numbers are all > MSEQ + 1. * * This routine checks how long all of the completed packets have * been sitting in the queue, and if too long, removes fragments * from the queue and increments MSEQ to allow them to be delivered. 
*/ static void ng_ppp_frag_checkstale(node_p node) { const priv_p priv = NG_NODE_PRIVATE(node); struct ng_ppp_frag *qent, *beg, *end; struct timeval now, age; struct mbuf *m; int seq; item_p item; uint16_t proto; now.tv_sec = 0; /* uninitialized state */ while (1) { /* If queue is empty, we're done */ if (TAILQ_EMPTY(&priv->frags)) break; /* Find the first complete packet in the queue */ beg = end = NULL; seq = TAILQ_FIRST(&priv->frags)->seq; TAILQ_FOREACH(qent, &priv->frags, f_qent) { if (qent->first) beg = qent; else if (qent->seq != seq) beg = NULL; if (beg != NULL && qent->last) { end = qent; break; } seq = MP_NEXT_RECV_SEQ(priv, seq); } /* If none found, exit */ if (end == NULL) break; /* Get current time (we assume we've been up for >= 1 second) */ if (now.tv_sec == 0) getmicrouptime(&now); /* Check if packet has been queued too long */ age = now; timevalsub(&age, &beg->timestamp); if (timevalcmp(&age, &ng_ppp_max_staleness, < )) break; /* Throw away junk fragments in front of the completed packet */ while ((qent = TAILQ_FIRST(&priv->frags)) != beg) { KASSERT(!TAILQ_EMPTY(&priv->frags), ("%s: empty q", __func__)); priv->bundleStats.dropFragments++; TAILQ_REMOVE(&priv->frags, qent, f_qent); NG_FREE_M(qent->data); TAILQ_INSERT_HEAD(&priv->fragsfree, qent, f_qent); } /* Extract completed packet */ ng_ppp_get_packet(node, &m); if ((m = ng_ppp_cutproto(m, &proto)) == NULL) continue; if (!PROT_VALID(proto)) { priv->bundleStats.badProtos++; NG_FREE_M(m); continue; } /* Deliver packet */ if ((item = ng_package_data(m, NG_NOFLAGS)) != NULL) { /* Stats */ priv->bundleStats.recvFrames++; priv->bundleStats.recvOctets += NGI_M(item)->m_pkthdr.len; ng_ppp_crypt_recv(node, item, proto, NG_PPP_BUNDLE_LINKNUM); } } } /* * Periodically call ng_ppp_frag_checkstale() */ static void ng_ppp_frag_timeout(node_p node, hook_p hook, void *arg1, int arg2) { /* XXX: is this needed? */ if (NG_NODE_NOT_VALID(node)) return; /* Scan the fragment queue */ ng_ppp_frag_checkstale(node); /* Start timer again */ ng_ppp_start_frag_timer(node); } /* * Deliver a frame out on the bundle, i.e., figure out how to fragment * the frame across the individual PPP links and do so. */ static int ng_ppp_mp_xmit(node_p node, item_p item, uint16_t proto) { const priv_p priv = NG_NODE_PRIVATE(node); const int hdr_len = priv->conf.xmitShortSeq ? 2 : 4; int distrib[NG_PPP_MAX_LINKS]; int firstFragment; int activeLinkNum; struct mbuf *m; int plen; int frags; int32_t seq; /* At least one link must be active */ if (priv->numActiveLinks == 0) { NG_FREE_ITEM(item); return (ENETDOWN); } /* Save length for later stats. */ plen = NGI_M(item)->m_pkthdr.len; if (!priv->conf.enableMultilink) { return (ng_ppp_link_xmit(node, item, proto, priv->activeLinks[0], plen)); } /* Check peer's MRRU for this bundle. */ if (plen > priv->conf.mrru) { NG_FREE_ITEM(item); return (EMSGSIZE); } /* Extract mbuf. */ NGI_GET_M(item, m); /* Prepend protocol number, possibly compressed. 
*/
	if ((m = ng_ppp_addproto(m, proto, 1)) == NULL) {
		NG_FREE_ITEM(item);
		return (ENOBUFS);
	}

	/* Clear distribution plan */
	bzero(&distrib, priv->numActiveLinks * sizeof(distrib[0]));

	mtx_lock(&priv->xmtx);

	/* Round-robin strategy */
	if (priv->conf.enableRoundRobin) {
		activeLinkNum = priv->lastLink++ % priv->numActiveLinks;
		distrib[activeLinkNum] = m->m_pkthdr.len;
		goto deliver;
	}

	/* Strategy when all links are equivalent (optimize the common case) */
	if (priv->allLinksEqual) {
		int numFrags, fraction, remain;
		int i;

		/* Calculate optimal fragment count */
		numFrags = priv->numActiveLinks;
		if (numFrags > m->m_pkthdr.len / MP_MIN_FRAG_LEN)
			numFrags = m->m_pkthdr.len / MP_MIN_FRAG_LEN;
		if (numFrags == 0)
			numFrags = 1;

		fraction = m->m_pkthdr.len / numFrags;
		remain = m->m_pkthdr.len - (fraction * numFrags);

		/* Assign distribution */
		for (i = 0; i < numFrags; i++) {
			distrib[priv->lastLink++ % priv->numActiveLinks] =
			    fraction + (((remain--) > 0)?1:0);
		}
		goto deliver;
	}

	/* Strategy when all links are not equivalent */
	ng_ppp_mp_strategy(node, m->m_pkthdr.len, distrib);

deliver:
	/* Estimate fragment count */
	frags = 0;
	for (activeLinkNum = priv->numActiveLinks - 1;
	    activeLinkNum >= 0; activeLinkNum--) {
		const uint16_t linkNum = priv->activeLinks[activeLinkNum];
		struct ng_ppp_link *const link = &priv->links[linkNum];

		frags += (distrib[activeLinkNum] +
		    link->conf.mru - hdr_len - 1) / (link->conf.mru - hdr_len);
	}

	/* Get our initial sequence number */
	seq = priv->xseq;

	/* Update next sequence number */
	if (priv->conf.xmitShortSeq) {
		priv->xseq = (seq + frags) & MP_SHORT_SEQ_MASK;
	} else {
		priv->xseq = (seq + frags) & MP_LONG_SEQ_MASK;
	}

	mtx_unlock(&priv->xmtx);

	/* Send allotted portions of frame out on the link(s) */
	for (firstFragment = 1, activeLinkNum = priv->numActiveLinks - 1;
	    activeLinkNum >= 0; activeLinkNum--) {
		const uint16_t linkNum = priv->activeLinks[activeLinkNum];
		struct ng_ppp_link *const link = &priv->links[linkNum];

		/* Deliver fragment(s) out the next link */
		for ( ; distrib[activeLinkNum] > 0; firstFragment = 0) {
			int len, lastFragment, error;
			struct mbuf *m2;

			/* Calculate fragment length; don't exceed link MTU */
			len = distrib[activeLinkNum];
			if (len > link->conf.mru - hdr_len)
				len = link->conf.mru - hdr_len;
			distrib[activeLinkNum] -= len;
			lastFragment = (len == m->m_pkthdr.len);

			/* Split off next fragment as "m2" */
			m2 = m;
			if (!lastFragment) {
				struct mbuf *n = m_split(m, len, M_NOWAIT);

				if (n == NULL) {
					NG_FREE_M(m);
					if (firstFragment)
						NG_FREE_ITEM(item);
					return (ENOMEM);
				}
				m_tag_copy_chain(n, m, M_NOWAIT);
				m = n;
			}

			/* Prepend MP header */
			if (priv->conf.xmitShortSeq) {
				uint16_t shdr;

				shdr = seq;
				seq = (seq + 1) & MP_SHORT_SEQ_MASK;
				if (firstFragment)
					shdr |= MP_SHORT_FIRST_FLAG;
				if (lastFragment)
					shdr |= MP_SHORT_LAST_FLAG;
				shdr = htons(shdr);
				m2 = ng_ppp_prepend(m2, &shdr, 2);
			} else {
				uint32_t lhdr;

				lhdr = seq;
				seq = (seq + 1) & MP_LONG_SEQ_MASK;
				if (firstFragment)
					lhdr |= MP_LONG_FIRST_FLAG;
				if (lastFragment)
					lhdr |= MP_LONG_LAST_FLAG;
				lhdr = htonl(lhdr);
				m2 = ng_ppp_prepend(m2, &lhdr, 4);
			}
			if (m2 == NULL) {
				if (!lastFragment)
					m_freem(m);
				if (firstFragment)
					NG_FREE_ITEM(item);
				return (ENOBUFS);
			}

			/* Send fragment */
			if (firstFragment) {
				NGI_M(item) = m2;	/* Reuse original item. */
			} else {
				item = ng_package_data(m2, NG_NOFLAGS);
			}
			if (item != NULL) {
				error = ng_ppp_link_xmit(node, item, PROT_MP,
				    linkNum, (firstFragment?plen:0));
				if (error != 0) {
					if (!lastFragment)
						NG_FREE_M(m);
					return (error);
				}
			}
		}
	}

	/* Done */
	return (0);
}

/*
 * Computing the optimal fragmentation
 * -----------------------------------
 *
 * This routine tries to compute the optimal fragmentation pattern based
 * on each link's latency, bandwidth, and calculated additional latency.
 * The latter quantity is the additional latency caused by previously
 * written data that has not been transmitted yet.
 *
 * This algorithm is only useful when not all of the links have the
 * same latency and bandwidth values.
 *
 * The essential idea is to make the last bit of each fragment of the
 * frame arrive at the opposite end at the exact same time. This greedy
 * algorithm is optimal, in that no other scheduling could result in any
 * packet arriving any sooner unless packets are delivered out of order.
 *
 * Suppose link i has bandwidth b_i (in tens of bytes per millisecond) and
 * latency l_i (in milliseconds). Consider the function f_i(t)
 * which is equal to the number of bytes that will have arrived at
 * the peer after t milliseconds if we start writing continuously at
 * time t = 0. Then f_i(t) = b_i * (t - l_i) = (b_i * t) - (l_i * b_i).
 * That is, f_i(t) is a line with slope b_i and y-intercept -(l_i * b_i).
 * Note that the y-intercept is always <= zero because latency can't be
 * negative. Note also that really the function is f_i(t) except when
 * f_i(t) is negative, in which case the function is zero. To take
 * care of this, let Q_i(t) = { if (f_i(t) > 0) return 1; else return 0; }.
 * So the actual number of bytes that will have arrived at the peer after
 * t milliseconds is f_i(t) * Q_i(t).
 *
 * At any given time, each link has some additional latency a_i >= 0
 * due to previously written fragment(s) which are still in the queue.
 * This value is easily computed from the time since last transmission,
 * the previous latency value, the number of bytes written, and the
 * link's bandwidth.
 *
 * Assume that l_i includes any a_i already, and that the links are
 * sorted by latency, so that l_i <= l_{i+1}.
 *
 * Let N be the total number of bytes in the current frame we are sending.
 *
 * Suppose we were to start writing bytes at time t = 0 on all links
 * simultaneously, which is the most we can possibly do. Then let
 * F(t) be equal to the total number of bytes received by the peer
 * after t milliseconds. Then F(t) = Sum_i (f_i(t) * Q_i(t)).
 *
 * Our goal is simply this: fragment the frame across the links such
 * that the peer is able to reconstruct the completed frame as soon as
 * possible, i.e., at the least possible value of t. Call this value t_0.
 *
 * Then it follows that F(t_0) = N. Our strategy is first to find the value
 * of t_0, and then deduce how many bytes to write to each link.
 *
 * Rewriting F(t_0):
 *
 *   t_0 = ( N + Sum_i ( l_i * b_i * Q_i(t_0) ) ) / Sum_i ( b_i * Q_i(t_0) )
 *
 * Now, we note that Q_i(t) is constant for l_i <= t <= l_{i+1}. t_0 will
 * lie in one of these ranges. To find it, we just need to find the i such
 * that F(l_i) <= N <= F(l_{i+1}). Then we compute all the constant values
 * for Q_i() in this range, plug in the remaining values, solving for t_0.
 *
 * Once t_0 is known, then the number of bytes to send on link i is
 * just f_i(t_0) * Q_i(t_0).
 *
 * In other words, we start allocating bytes to the links one at a time.
 * We keep adding links until the frame is completely sent.
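A worked instance of the formula above, with made-up numbers and simplified units (plain bytes/ms rather than the tens-of-bytes-per-millisecond scaling the kernel code uses): two links with bandwidth 10 bytes/ms each and latencies 0 ms and 10 ms, sending N = 300 bytes. Both links fall inside the active interval since F(l_2) = 100 <= N, so t_0 = (300 + 10*10) / (10 + 10) = 20 ms, and each link carries b_i * (t_0 - l_i) bytes:

	#include <stdio.h>

	int
	main(void)
	{
		/* Illustrative numbers: bandwidth in bytes/ms, latency in ms. */
		int b[2] = { 10, 10 }, l[2] = { 0, 10 }, N = 300;
		int i, top = 0, bot = 0, t0;

		for (i = 0; i < 2; i++) {	/* both links are active here */
			top += l[i] * b[i];
			bot += b[i];
		}
		t0 = (N + top) / bot;		/* (300 + 100) / 20 = 20 ms */
		for (i = 0; i < 2; i++)
			printf("link %d: %d bytes\n", i, b[i] * (t0 - l[i]));
		/* link 0: 200 bytes, link 1: 100 bytes; both finish at 20 ms. */
		return (0);
	}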
Some links
 * may not get any bytes because their latency is too high.
 *
 * Is all this work really worth the trouble? Depends on the situation.
 * The bigger the ratio of computer speed to link speed, and the more
 * important total bundle latency is (e.g., for interactive response time),
 * the more it's worth it. There is however the cost of calling this
 * function for every frame. The running time is O(n^2) where n is the
 * number of links that receive a non-zero number of bytes.
 *
 * Since latency is measured in milliseconds, the "resolution" of this
 * algorithm is one millisecond.
 *
 * To avoid this algorithm altogether, configure all links to have the
 * same latency and bandwidth.
 */
static void
ng_ppp_mp_strategy(node_p node, int len, int *distrib)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	int latency[NG_PPP_MAX_LINKS];
	int sortByLatency[NG_PPP_MAX_LINKS];
	int activeLinkNum;
	int t0, total, topSum, botSum;
	struct timeval now;
	int i, numFragments;

	/* If only one link, this gets real easy */
	if (priv->numActiveLinks == 1) {
		distrib[0] = len;
		return;
	}

	/* Get current time */
	getmicrouptime(&now);

	/* Compute latencies for each link at this point in time */
	for (activeLinkNum = 0;
	    activeLinkNum < priv->numActiveLinks; activeLinkNum++) {
		struct ng_ppp_link *alink;
		struct timeval diff;
		int xmitBytes;

		/* Start with base latency value */
		alink = &priv->links[priv->activeLinks[activeLinkNum]];
		latency[activeLinkNum] = alink->latency;
		sortByLatency[activeLinkNum] = activeLinkNum;	/* see below */

		/* Any additional latency? */
		if (alink->bytesInQueue == 0)
			continue;

		/* Compute time delta since last write */
		diff = now;
		timevalsub(&diff, &alink->lastWrite);

		/* alink->bytesInQueue will be changed, mark change time. */
		alink->lastWrite = now;

		if (diff.tv_sec < 0 || diff.tv_sec >= 10) {	/* sanity */
			alink->bytesInQueue = 0;
			continue;
		}

		/* How many bytes could have been transmitted since last write?
*/ xmitBytes = (alink->conf.bandwidth * 10 * diff.tv_sec) + (alink->conf.bandwidth * (diff.tv_usec / 1000)) / 100; alink->bytesInQueue -= xmitBytes; if (alink->bytesInQueue < 0) alink->bytesInQueue = 0; else latency[activeLinkNum] += (100 * alink->bytesInQueue) / alink->conf.bandwidth; } /* Sort active links by latency */ - qsort_r(sortByLatency, - priv->numActiveLinks, sizeof(*sortByLatency), latency, ng_ppp_intcmp); + qsort_r(sortByLatency, priv->numActiveLinks, sizeof(*sortByLatency), + ng_ppp_intcmp, latency); /* Find the interval we need (add links in sortByLatency[] order) */ for (numFragments = 1; numFragments < priv->numActiveLinks; numFragments++) { for (total = i = 0; i < numFragments; i++) { int flowTime; flowTime = latency[sortByLatency[numFragments]] - latency[sortByLatency[i]]; total += ((flowTime * priv->links[ priv->activeLinks[sortByLatency[i]]].conf.bandwidth) + 99) / 100; } if (total >= len) break; } /* Solve for t_0 in that interval */ for (topSum = botSum = i = 0; i < numFragments; i++) { int bw = priv->links[ priv->activeLinks[sortByLatency[i]]].conf.bandwidth; topSum += latency[sortByLatency[i]] * bw; /* / 100 */ botSum += bw; /* / 100 */ } t0 = ((len * 100) + topSum + botSum / 2) / botSum; /* Compute f_i(t_0) all i */ for (total = i = 0; i < numFragments; i++) { int bw = priv->links[ priv->activeLinks[sortByLatency[i]]].conf.bandwidth; distrib[sortByLatency[i]] = (bw * (t0 - latency[sortByLatency[i]]) + 50) / 100; total += distrib[sortByLatency[i]]; } /* Deal with any rounding error */ if (total < len) { struct ng_ppp_link *fastLink = &priv->links[priv->activeLinks[sortByLatency[0]]]; int fast = 0; /* Find the fastest link */ for (i = 1; i < numFragments; i++) { struct ng_ppp_link *const link = &priv->links[priv->activeLinks[sortByLatency[i]]]; if (link->conf.bandwidth > fastLink->conf.bandwidth) { fast = i; fastLink = link; } } distrib[sortByLatency[fast]] += len - total; } else while (total > len) { struct ng_ppp_link *slowLink = &priv->links[priv->activeLinks[sortByLatency[0]]]; int delta, slow = 0; /* Find the slowest link that still has bytes to remove */ for (i = 1; i < numFragments; i++) { struct ng_ppp_link *const link = &priv->links[priv->activeLinks[sortByLatency[i]]]; if (distrib[sortByLatency[slow]] == 0 || (distrib[sortByLatency[i]] > 0 && link->conf.bandwidth < slowLink->conf.bandwidth)) { slow = i; slowLink = link; } } delta = total - len; if (delta > distrib[sortByLatency[slow]]) delta = distrib[sortByLatency[slow]]; distrib[sortByLatency[slow]] -= delta; total -= delta; } } /* * Compare two integers */ static int -ng_ppp_intcmp(void *latency, const void *v1, const void *v2) +ng_ppp_intcmp(const void *v1, const void *v2, void *latency) { const int index1 = *((const int *) v1); const int index2 = *((const int *) v2); return ((int *)latency)[index1] - ((int *)latency)[index2]; } /* * Prepend a possibly compressed PPP protocol number in front of a frame */ static struct mbuf * ng_ppp_addproto(struct mbuf *m, uint16_t proto, int compOK) { if (compOK && PROT_COMPRESSABLE(proto)) { uint8_t pbyte = (uint8_t)proto; return ng_ppp_prepend(m, &pbyte, 1); } else { uint16_t pword = htons((uint16_t)proto); return ng_ppp_prepend(m, &pword, 2); } } /* * Cut a possibly compressed PPP protocol number from the front of a frame. 
/*
 * Prepend a possibly compressed PPP protocol number in front of a frame
 */
static struct mbuf *
ng_ppp_addproto(struct mbuf *m, uint16_t proto, int compOK)
{
	if (compOK && PROT_COMPRESSABLE(proto)) {
		uint8_t pbyte = (uint8_t)proto;

		return ng_ppp_prepend(m, &pbyte, 1);
	} else {
		uint16_t pword = htons((uint16_t)proto);

		return ng_ppp_prepend(m, &pword, 2);
	}
}

/*
 * Cut a possibly compressed PPP protocol number from the front of a frame.
 */
static struct mbuf *
ng_ppp_cutproto(struct mbuf *m, uint16_t *proto)
{
	*proto = 0;
	if (m->m_len < 1 && (m = m_pullup(m, 1)) == NULL)
		return (NULL);
	*proto = *mtod(m, uint8_t *);
	m_adj(m, 1);
	if (!PROT_VALID(*proto)) {
		if (m->m_len < 1 && (m = m_pullup(m, 1)) == NULL)
			return (NULL);
		*proto = (*proto << 8) + *mtod(m, uint8_t *);
		m_adj(m, 1);
	}
	return (m);
}

/*
 * Prepend some bytes to an mbuf.
 */
static struct mbuf *
ng_ppp_prepend(struct mbuf *m, const void *buf, int len)
{
	M_PREPEND(m, len, M_NOWAIT);
	if (m == NULL || (m->m_len < len && (m = m_pullup(m, len)) == NULL))
		return (NULL);
	bcopy(buf, mtod(m, uint8_t *), len);
	return (m);
}

/*
 * Update private information that is derived from other private information
 */
static void
ng_ppp_update(node_p node, int newConf)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	int i;

	/* Update active status for VJ Compression */
	priv->vjCompHooked = priv->hooks[HOOK_INDEX_VJC_IP] != NULL
	    && priv->hooks[HOOK_INDEX_VJC_COMP] != NULL
	    && priv->hooks[HOOK_INDEX_VJC_UNCOMP] != NULL
	    && priv->hooks[HOOK_INDEX_VJC_VJIP] != NULL;

	/* Increase latency for each link an amount equal to one MP header */
	if (newConf) {
		for (i = 0; i < NG_PPP_MAX_LINKS; i++) {
			int hdrBytes;

			if (priv->links[i].conf.bandwidth == 0)
				continue;

			hdrBytes = MP_AVERAGE_LINK_OVERHEAD
			    + (priv->links[i].conf.enableACFComp ? 0 : 2)
			    + (priv->links[i].conf.enableProtoComp ? 1 : 2)
			    + (priv->conf.xmitShortSeq ? 2 : 4);
			priv->links[i].latency =
			    priv->links[i].conf.latency +
			    (hdrBytes / priv->links[i].conf.bandwidth + 50)
			    / 100;
		}
	}

	/* Update list of active links */
	bzero(&priv->activeLinks, sizeof(priv->activeLinks));
	priv->numActiveLinks = 0;
	priv->allLinksEqual = 1;
	for (i = 0; i < NG_PPP_MAX_LINKS; i++) {
		struct ng_ppp_link *const link = &priv->links[i];

		/* Is link active? */
		if (link->conf.enableLink && link->hook != NULL) {
			struct ng_ppp_link *link0;

			/* Add link to list of active links */
			priv->activeLinks[priv->numActiveLinks++] = i;
			link0 = &priv->links[priv->activeLinks[0]];

			/* Determine if all links are still equal */
			if (link->latency != link0->latency
			    || link->conf.bandwidth != link0->conf.bandwidth)
				priv->allLinksEqual = 0;

			/* Initialize rec'd sequence number */
			if (link->seq == MP_NOSEQ) {
				link->seq = (link == link0) ?
				    MP_INITIAL_SEQ : link0->seq;
			}
		} else
			link->seq = MP_NOSEQ;
	}

	/* Update MP state as multi-link is active or not */
	if (priv->conf.enableMultilink && priv->numActiveLinks > 0)
		ng_ppp_start_frag_timer(node);
	else {
		ng_ppp_stop_frag_timer(node);
		ng_ppp_frag_reset(node);
		priv->xseq = MP_INITIAL_SEQ;
		priv->mseq = MP_INITIAL_SEQ;
		for (i = 0; i < NG_PPP_MAX_LINKS; i++) {
			struct ng_ppp_link *const link = &priv->links[i];

			bzero(&link->lastWrite, sizeof(link->lastWrite));
			link->bytesInQueue = 0;
			link->seq = MP_NOSEQ;
		}
	}

	if (priv->hooks[HOOK_INDEX_INET] != NULL) {
		if (priv->conf.enableIP == 1 &&
		    priv->numActiveLinks == 1 &&
		    priv->conf.enableMultilink == 0 &&
		    priv->conf.enableCompression == 0 &&
		    priv->conf.enableEncryption == 0 &&
		    priv->conf.enableVJCompression == 0)
			NG_HOOK_SET_RCVDATA(priv->hooks[HOOK_INDEX_INET],
			    ng_ppp_rcvdata_inet_fast);
		else
			NG_HOOK_SET_RCVDATA(priv->hooks[HOOK_INDEX_INET],
			    ng_ppp_rcvdata_inet);
	}
}
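/*
 * [Editor's note] A buffer-based sketch of the protocol-field compression
 * that ng_ppp_addproto()/ng_ppp_cutproto() perform on mbufs.  The macro
 * definitions below are assumptions modeled on RFC 1661 (the real
 * PROT_VALID/PROT_COMPRESSABLE macros live elsewhere in ng_ppp.c), and
 * put_proto()/get_proto() are hypothetical helpers, not kernel APIs.
 */
#include <stdint.h>
#include <stdio.h>

#define PROT_VALID(p)		(((p) & 0x0101) == 0x0001)
#define PROT_COMPRESSABLE(p)	(((p) & 0xff00) == 0x0000)

/* Encode a protocol number at the front of buf[]; returns bytes used. */
static int
put_proto(uint8_t *buf, uint16_t proto, int compOK)
{
	if (compOK && PROT_COMPRESSABLE(proto)) {
		buf[0] = (uint8_t)proto;
		return (1);
	}
	buf[0] = proto >> 8;
	buf[1] = proto & 0xff;
	return (2);
}

/* Decode a possibly compressed protocol number; returns bytes consumed. */
static int
get_proto(const uint8_t *buf, uint16_t *proto)
{
	*proto = buf[0];
	if (PROT_VALID(*proto))
		return (1);		/* it was sent compressed */
	*proto = (*proto << 8) | buf[1];
	return (2);
}

int
main(void)
{
	uint8_t buf[2];
	uint16_t proto;
	int n;

	n = put_proto(buf, 0x0021, 1);	/* IP, compresses to one byte */
	printf("encoded %d byte(s)\n", n);
	n = get_proto(buf, &proto);
	printf("decoded 0x%04x from %d byte(s)\n", proto, n);
	return (0);
}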
/*
 * Determine if a new configuration would represent a valid change
 * from the current configuration and link activity status.
 */
static int
ng_ppp_config_valid(node_p node, const struct ng_ppp_node_conf *newConf)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	int i, newNumLinksActive;

	/* Check per-link config and count how many links would be active */
	for (newNumLinksActive = i = 0; i < NG_PPP_MAX_LINKS; i++) {
		if (newConf->links[i].enableLink && priv->links[i].hook != NULL)
			newNumLinksActive++;
		if (!newConf->links[i].enableLink)
			continue;
		if (newConf->links[i].mru < MP_MIN_LINK_MRU)
			return (0);
		if (newConf->links[i].bandwidth == 0)
			return (0);
		if (newConf->links[i].bandwidth > NG_PPP_MAX_BANDWIDTH)
			return (0);
		if (newConf->links[i].latency > NG_PPP_MAX_LATENCY)
			return (0);
	}

	/* Disallow changes to multi-link configuration while MP is active */
	if (priv->numActiveLinks > 0 && newNumLinksActive > 0) {
		if (!priv->conf.enableMultilink !=
		    !newConf->bund.enableMultilink ||
		    !priv->conf.xmitShortSeq != !newConf->bund.xmitShortSeq ||
		    !priv->conf.recvShortSeq != !newConf->bund.recvShortSeq)
			return (0);
	}

	/* At most one link can be active unless multi-link is enabled */
	if (!newConf->bund.enableMultilink && newNumLinksActive > 1)
		return (0);

	/* Configuration change would be valid */
	return (1);
}

/*
 * Free all entries in the fragment queue
 */
static void
ng_ppp_frag_reset(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_ppp_frag *qent, *qnext;

	for (qent = TAILQ_FIRST(&priv->frags); qent; qent = qnext) {
		qnext = TAILQ_NEXT(qent, f_qent);
		NG_FREE_M(qent->data);
		TAILQ_INSERT_HEAD(&priv->fragsfree, qent, f_qent);
	}
	TAILQ_INIT(&priv->frags);
}

/*
 * Start fragment queue timer
 */
static void
ng_ppp_start_frag_timer(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	if (!(callout_pending(&priv->fragTimer)))
		ng_callout(&priv->fragTimer, node, NULL, MP_FRAGTIMER_INTERVAL,
		    ng_ppp_frag_timeout, NULL, 0);
}

/*
 * Stop fragment queue timer
 */
static void
ng_ppp_stop_frag_timer(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);

	if (callout_pending(&priv->fragTimer))
		ng_uncallout(&priv->fragTimer, node);
}
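/*
 * [Editor's note] ng_ppp_config_valid() compares configuration flags with
 * the '!x != !y' idiom.  A tiny sketch of why: '!' normalizes any non-zero
 * value to 1, so two "true" flags stored as different non-zero values
 * still compare equal.  The variable names are illustrative only.
 */
#include <stdio.h>

int
main(void)
{
	int oldFlag = 4;	/* "true", stored as 4 */
	int newFlag = 1;	/* "true", stored as 1 */

	printf("raw compare differ: %d\n", oldFlag != newFlag);	      /* 1 */
	printf("boolean compare differ: %d\n", !oldFlag != !newFlag); /* 0 */
	return (0);
}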
diff --git a/sys/sys/libkern.h b/sys/sys/libkern.h
index 41844fa7490e..8adeeb499984 100644
--- a/sys/sys/libkern.h
+++ b/sys/sys/libkern.h
@@ -1,257 +1,257 @@
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)libkern.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */

#ifndef _SYS_LIBKERN_H_
#define	_SYS_LIBKERN_H_

#include <sys/cdefs.h>
#include <sys/types.h>
#ifdef _KERNEL
#include <sys/systm.h>
#endif

#ifndef	LIBKERN_INLINE
#define	LIBKERN_INLINE	static __inline
#define	LIBKERN_BODY
#endif

/* BCD conversions. */
extern u_char const	bcd2bin_data[];
extern u_char const	bin2bcd_data[];
extern char const	hex2ascii_data[];

#define	LIBKERN_LEN_BCD2BIN	154
#define	LIBKERN_LEN_BIN2BCD	100
#define	LIBKERN_LEN_HEX2ASCII	36

static inline u_char
bcd2bin(int bcd)
{

	KASSERT(bcd >= 0 && bcd < LIBKERN_LEN_BCD2BIN,
	    ("invalid bcd %d", bcd));
	return (bcd2bin_data[bcd]);
}

static inline u_char
bin2bcd(int bin)
{

	KASSERT(bin >= 0 && bin < LIBKERN_LEN_BIN2BCD,
	    ("invalid bin %d", bin));
	return (bin2bcd_data[bin]);
}

static inline char
hex2ascii(int hex)
{

	KASSERT(hex >= 0 && hex < LIBKERN_LEN_HEX2ASCII,
	    ("invalid hex %d", hex));
	return (hex2ascii_data[hex]);
}

static inline bool
validbcd(int bcd)
{

	return (bcd == 0 || (bcd > 0 && bcd <= 0x99 && bcd2bin_data[bcd] != 0));
}

static __inline int imax(int a, int b) { return (a > b ? a : b); }
static __inline int imin(int a, int b) { return (a < b ? a : b); }
static __inline long lmax(long a, long b) { return (a > b ? a : b); }
static __inline long lmin(long a, long b) { return (a < b ? a : b); }
static __inline u_int max(u_int a, u_int b) { return (a > b ? a : b); }
static __inline u_int min(u_int a, u_int b) { return (a < b ? a : b); }
static __inline quad_t qmax(quad_t a, quad_t b) { return (a > b ? a : b); }
static __inline quad_t qmin(quad_t a, quad_t b) { return (a < b ? a : b); }
static __inline u_quad_t uqmax(u_quad_t a, u_quad_t b) { return (a > b ? a : b); }
static __inline u_quad_t uqmin(u_quad_t a, u_quad_t b) { return (a < b ? a : b); }
static __inline u_long ulmax(u_long a, u_long b) { return (a > b ? a : b); }
static __inline u_long ulmin(u_long a, u_long b) { return (a < b ? a : b); }
static __inline __uintmax_t ummax(__uintmax_t a, __uintmax_t b) { return (a > b ? a : b); }
static __inline __uintmax_t ummin(__uintmax_t a, __uintmax_t b) { return (a < b ? a : b); }
static __inline off_t omax(off_t a, off_t b) { return (a > b ? a : b); }
static __inline off_t omin(off_t a, off_t b) { return (a < b ? a : b); }

static __inline int abs(int a) { return (a < 0 ? -a : a); }
static __inline long labs(long a) { return (a < 0 ? -a : a); }
static __inline quad_t qabs(quad_t a) { return (a < 0 ? -a : a); }

#ifndef RANDOM_FENESTRASX
#define	ARC4_ENTR_NONE	0	/* Don't have entropy yet. */
#define	ARC4_ENTR_HAVE	1	/* Have entropy. */
#define	ARC4_ENTR_SEED	2	/* Reseeding. */
extern int arc4rand_iniseed_state;
#endif
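/*
 * [Editor's note] Arithmetic equivalents of the bcd2bin()/bin2bcd()
 * table lookups above, as a self-contained userland sketch: each BCD
 * byte stores one decimal digit per nibble, which is why 0x99 (= 153)
 * is the largest valid input and the table holds 154 entries.  The
 * my_* helper names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t
my_bcd2bin(uint8_t bcd)
{
	return ((bcd >> 4) * 10 + (bcd & 0x0f));
}

static uint8_t
my_bin2bcd(uint8_t bin)
{
	return (((bin / 10) << 4) | (bin % 10));
}

int
main(void)
{
	printf("0x42 in BCD is %u\n", my_bcd2bin(0x42));	/* 42 */
	printf("59 as BCD is 0x%02x\n", my_bin2bcd(59));	/* 0x59 */
	return (0);
}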
/* Prototypes for non-quad routines. */
struct malloc_type;
uint32_t arc4random(void);
void	 arc4random_buf(void *, size_t);
uint32_t arc4random_uniform(uint32_t);
void	 arc4rand(void *, u_int, int);
int	 timingsafe_bcmp(const void *, const void *, size_t);
void	*bsearch(const void *, const void *, size_t,
	    size_t, int (*)(const void *, const void *));
#ifndef	HAVE_INLINE_FFS
int	 ffs(int);
#endif
#ifndef	HAVE_INLINE_FFSL
int	 ffsl(long);
#endif
#ifndef	HAVE_INLINE_FFSLL
int	 ffsll(long long);
#endif
#ifndef	HAVE_INLINE_FLS
int	 fls(int);
#endif
#ifndef	HAVE_INLINE_FLSL
int	 flsl(long);
#endif
#ifndef	HAVE_INLINE_FLSLL
int	 flsll(long long);
#endif
#define	bitcount64(x)	__bitcount64((uint64_t)(x))
#define	bitcount32(x)	__bitcount32((uint32_t)(x))
#define	bitcount16(x)	__bitcount16((uint16_t)(x))
#define	bitcountl(x)	__bitcountl((u_long)(x))
#define	bitcount(x)	__bitcount((u_int)(x))

int	 fnmatch(const char *, const char *, int);
int	 locc(int, char *, u_int);
void	*memchr(const void *s, int c, size_t n);
void	*memcchr(const void *s, int c, size_t n);
void	*memmem(const void *l, size_t l_len, const void *s, size_t s_len);
void	 qsort(void *base, size_t nmemb, size_t size,
	    int (*compar)(const void *, const void *));
-void	 qsort_r(void *base, size_t nmemb, size_t size, void *thunk,
-	    int (*compar)(void *, const void *, const void *));
+void	 qsort_r(void *base, size_t nmemb, size_t size,
+	    int (*compar)(const void *, const void *, void *), void *thunk);
u_long	 random(void);
int	 scanc(u_int, const u_char *, const u_char *, int);
int	 strcasecmp(const char *, const char *);
char	*strcasestr(const char *, const char *);
char	*strcat(char * __restrict, const char * __restrict);
char	*strchr(const char *, int);
char	*strchrnul(const char *, int);
int	 strcmp(const char *, const char *);
char	*strcpy(char * __restrict, const char * __restrict);
char	*strdup_flags(const char *__restrict, struct malloc_type *, int);
size_t	 strcspn(const char *, const char *) __pure;
char	*strdup(const char *__restrict, struct malloc_type *);
char	*strncat(char *, const char *, size_t);
char	*strndup(const char *__restrict, size_t, struct malloc_type *);
size_t	 strlcat(char *, const char *, size_t);
size_t	 strlcpy(char *, const char *, size_t);
size_t	 strlen(const char *);
int	 strncasecmp(const char *, const char *, size_t);
int	 strncmp(const char *, const char *, size_t);
char	*strncpy(char * __restrict, const char * __restrict, size_t);
size_t	 strnlen(const char *, size_t);
char	*strnstr(const char *, const char *, size_t);
char	*strrchr(const char *, int);
char	*strsep(char **, const char *delim);
size_t	 strspn(const char *, const char *);
char	*strstr(const char *, const char *);
int	 strvalid(const char *, size_t);

#ifdef SAN_NEEDS_INTERCEPTORS
#ifndef SAN_INTERCEPTOR
#define	SAN_INTERCEPTOR(func)	\
	__CONCAT(SAN_INTERCEPTOR_PREFIX, __CONCAT(_, func))
#endif
char	*SAN_INTERCEPTOR(strcpy)(char *, const char *);
int	 SAN_INTERCEPTOR(strcmp)(const char *, const char *);
size_t	 SAN_INTERCEPTOR(strlen)(const char *);
#ifndef SAN_RUNTIME
#define	strcpy(d, s)	SAN_INTERCEPTOR(strcpy)((d), (s))
#define	strcmp(s1, s2)	SAN_INTERCEPTOR(strcmp)((s1), (s2))
#define	strlen(s)	SAN_INTERCEPTOR(strlen)(s)
#endif /* !SAN_RUNTIME */
#else /* !SAN_NEEDS_INTERCEPTORS */
#define strcpy(d, s)	__builtin_strcpy((d), (s))
#define strcmp(s1, s2)	__builtin_strcmp((s1), (s2))
#define strlen(s)	__builtin_strlen((s))
#endif /* SAN_NEEDS_INTERCEPTORS */

static __inline char *
index(const char *p, int ch)
{

	return (strchr(p, ch));
}

static __inline char *
rindex(const char *p, int ch)
{

	return (strrchr(p, ch));
}
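/*
 * [Editor's note] The header declares strlcpy()/strlcat() alongside the
 * classic strncpy()/strncat().  A userland sketch of the strlcpy()
 * contract: it always NUL-terminates and returns strlen(src), so
 * truncation is detected with one comparison.  strlcpy() is in libc on
 * FreeBSD; on glibc-based systems it needs libbsd or glibc 2.38+.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char dst[8];
	size_t n;

	n = strlcpy(dst, "a longer string", sizeof(dst));
	if (n >= sizeof(dst))
		printf("truncated: \"%s\" (needed %zu bytes)\n", dst, n + 1);
	return (0);
}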
static __inline int64_t
signed_extend64(uint64_t bitmap, int lsb, int width)
{

	return ((int64_t)(bitmap << (63 - lsb - (width - 1)))) >>
	    (63 - (width - 1));
}

static __inline int32_t
signed_extend32(uint32_t bitmap, int lsb, int width)
{

	return ((int32_t)(bitmap << (31 - lsb - (width - 1)))) >>
	    (31 - (width - 1));
}

/* fnmatch() return values. */
#define	FNM_NOMATCH	1	/* Match failed. */

/* fnmatch() flags. */
#define	FNM_NOESCAPE	0x01	/* Disable backslash escaping. */
#define	FNM_PATHNAME	0x02	/* Slash must be matched by slash. */
#define	FNM_PERIOD	0x04	/* Period must be matched by period. */
#define	FNM_LEADING_DIR	0x08	/* Ignore /<tail> after Imatch. */
#define	FNM_CASEFOLD	0x10	/* Case insensitive search. */
#define	FNM_IGNORECASE	FNM_CASEFOLD
#define	FNM_FILE_NAME	FNM_PATHNAME

#endif /* !_SYS_LIBKERN_H_ */
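/*
 * [Editor's note] A userland restatement of signed_extend32() above,
 * showing how it extracts a signed bit-field: shift the field up to the
 * sign bit, then arithmetic-shift back down.  Like the kernel version,
 * it relies on arithmetic right shift of negative values, which every
 * mainstream compiler provides.  The sample register value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t
signed_extend32(uint32_t bitmap, int lsb, int width)
{
	return ((int32_t)(bitmap << (31 - lsb - (width - 1)))) >>
	    (31 - (width - 1));
}

int
main(void)
{
	/* A 4-bit field at bits 7..4 holding 0xe, i.e. -2 in two's
	   complement. */
	uint32_t reg = 0x000000e0;

	printf("%d\n", signed_extend32(reg, 4, 4));	/* prints -2 */
	return (0);
}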