Index: stable/12/sys/compat/linuxkpi/common/include/linux/rcupdate.h
===================================================================
--- stable/12/sys/compat/linuxkpi/common/include/linux/rcupdate.h	(revision 359956)
+++ stable/12/sys/compat/linuxkpi/common/include/linux/rcupdate.h	(revision 359957)
@@ -1,109 +1,114 @@
 /*-
  * Copyright (c) 2016-2017 Mellanox Technologies, Ltd.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * $FreeBSD$
  */
 
 #ifndef _LINUX_RCUPDATE_H_
 #define	_LINUX_RCUPDATE_H_
 
 #include <linux/compiler.h>
 #include <linux/types.h>
 
 #include <machine/atomic.h>
 
 #define	LINUX_KFREE_RCU_OFFSET_MAX	4096	/* exclusive */
 
+/* BSD specific defines */
+#define	RCU_TYPE_REGULAR 0
+#define	RCU_TYPE_SLEEPABLE 1
+#define	RCU_TYPE_MAX 2
+
 #define	RCU_INITIALIZER(v)			\
 	((__typeof(*(v)) *)(v))
 
 #define	RCU_INIT_POINTER(p, v) do {		\
 	(p) = (v);				\
 } while (0)
 
 #define	call_rcu(ptr, func) do {		\
-	linux_call_rcu(ptr, func);		\
+	linux_call_rcu(RCU_TYPE_REGULAR, ptr, func); \
 } while (0)
 
 #define	rcu_barrier(void) do {			\
-	linux_rcu_barrier();			\
+	linux_rcu_barrier(RCU_TYPE_REGULAR);	\
 } while (0)
 
 #define	rcu_read_lock(void) do {		\
-	linux_rcu_read_lock();			\
+	linux_rcu_read_lock(RCU_TYPE_REGULAR);	\
 } while (0)
 
 #define	rcu_read_unlock(void) do {		\
-	linux_rcu_read_unlock();		\
+	linux_rcu_read_unlock(RCU_TYPE_REGULAR);\
 } while (0)
 
 #define	synchronize_rcu(void) do {		\
-	linux_synchronize_rcu();		\
+	linux_synchronize_rcu(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define	synchronize_rcu_expedited(void) do {	\
-	linux_synchronize_rcu();		\
+	linux_synchronize_rcu(RCU_TYPE_REGULAR); \
 } while (0)
 
 #define	kfree_rcu(ptr, rcu_head) do {		\
 	CTASSERT(offsetof(__typeof(*(ptr)), rcu_head) < \
 	    LINUX_KFREE_RCU_OFFSET_MAX);	\
 	call_rcu(&(ptr)->rcu_head, (rcu_callback_t)(uintptr_t) \
 	    offsetof(__typeof(*(ptr)), rcu_head)); \
 } while (0)
 
 #define	rcu_access_pointer(p)		\
 	((__typeof(*p) *)READ_ONCE(p))
 
 #define	rcu_dereference_protected(p, c)	\
 	((__typeof(*p) *)READ_ONCE(p))
 
 #define	rcu_dereference(p)		\
 	rcu_dereference_protected(p, 0)
 
 #define	rcu_dereference_raw(p)		\
 	((__typeof(*p) *)READ_ONCE(p))
 
 #define	rcu_pointer_handoff(p) (p)
 
 #define	rcu_assign_pointer(p, v) do {	\
 	atomic_store_rel_ptr((volatile uintptr_t *)&(p), \
 	    (uintptr_t)(v));		\
 } while (0)
 
 /* prototypes */
 
-extern void linux_call_rcu(struct rcu_head *ptr, rcu_callback_t func);
-extern void linux_rcu_barrier(void);
-extern void linux_rcu_read_lock(void);
-extern void linux_rcu_read_unlock(void);
-extern void linux_synchronize_rcu(void);
+extern void linux_call_rcu(unsigned type, struct rcu_head *ptr, rcu_callback_t func);
+extern void linux_rcu_barrier(unsigned type);
+extern void linux_rcu_read_lock(unsigned type);
+extern void linux_rcu_read_unlock(unsigned type);
+extern void linux_synchronize_rcu(unsigned type);
 
 /* Empty implementation for !DEBUG */
 #define	init_rcu_head(...)
 #define	destroy_rcu_head(...)
 #define	init_rcu_head_on_stack(...)
 #define	destroy_rcu_head_on_stack(...)
 
 #endif					/* _LINUX_RCUPDATE_H_ */
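
For orientation, this is how a LinuxKPI consumer typically drives the macros above. A minimal sketch only, not part of the change: "struct foo", "foo_ptr" and both functions are hypothetical, and the updater's argument is assumed to have been allocated with kmalloc() so the deferred kfree() is valid.

```c
struct foo {
	int value;
	struct rcu_head rcu;	/* member offset must be < LINUX_KFREE_RCU_OFFSET_MAX */
};

static struct foo *foo_ptr;	/* shared, RCU-protected pointer */

static int
reader(void)
{
	struct foo *p;
	int v;

	rcu_read_lock();	/* maps to linux_rcu_read_lock(RCU_TYPE_REGULAR) */
	p = rcu_dereference(foo_ptr);
	v = (p != NULL) ? p->value : -1;
	rcu_read_unlock();
	return (v);
}

static void
updater(struct foo *nfoo)	/* "nfoo" assumed kmalloc()'d */
{
	struct foo *old;

	old = rcu_dereference_protected(foo_ptr, 1);
	rcu_assign_pointer(foo_ptr, nfoo);	/* release-store publication */
	if (old != NULL)
		kfree_rcu(old, rcu);	/* deferred free after a grace period */
}
```
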
Index: stable/12/sys/compat/linuxkpi/common/src/linux_rcu.c
===================================================================
--- stable/12/sys/compat/linuxkpi/common/src/linux_rcu.c	(revision 359956)
+++ stable/12/sys/compat/linuxkpi/common/src/linux_rcu.c	(revision 359957)
@@ -1,398 +1,417 @@
 /*-
  * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
- * Copyright (c) 2017 Hans Petter Selasky (hselasky@freebsd.org)
+ * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice unmodified, this list of conditions, and the following
  *    disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
 #include <sys/types.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/queue.h>
 #include <sys/taskqueue.h>
 #include <sys/kdb.h>
 
 #include <ck_epoch.h>
 
 #include <linux/rcupdate.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/compat.h>
 
 /*
  * By defining CONFIG_NO_RCU_SKIP LinuxKPI RCU locks and asserts will
  * not be skipped during panic().
  */
 #ifdef CONFIG_NO_RCU_SKIP
 #define	RCU_SKIP(void) 0
 #else
 #define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
 #endif
 
 struct callback_head {
 	STAILQ_ENTRY(callback_head) entry;
 	rcu_callback_t func;
 };
 
 struct linux_epoch_head {
 	STAILQ_HEAD(, callback_head) cb_head;
 	struct mtx lock;
 	struct task task;
 } __aligned(CACHE_LINE_SIZE);
 
 struct linux_epoch_record {
 	ck_epoch_record_t epoch_record;
 	TAILQ_HEAD(, task_struct) ts_head;
 	int cpuid;
 } __aligned(CACHE_LINE_SIZE);
 
 /*
  * Verify that "struct rcu_head" is big enough to hold "struct
  * callback_head". This has been done to avoid having to add special
  * compile flags for including ck_epoch.h to all clients of the
  * LinuxKPI.
  */
 CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));
 
 /*
  * Verify that "epoch_record" is at beginning of "struct
  * linux_epoch_record":
  */
 CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);
 
-static ck_epoch_t linux_epoch;
-static struct linux_epoch_head linux_epoch_head;
-DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record);
+static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
+static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
+DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);
 
 static void linux_rcu_cleaner_func(void *, int);
 
 static void
 linux_rcu_runtime_init(void *arg __unused)
 {
 	struct linux_epoch_head *head;
 	int i;
+	int j;
 
-	ck_epoch_init(&linux_epoch);
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		ck_epoch_init(&linux_epoch[j]);
 
-	head = &linux_epoch_head;
+		head = &linux_epoch_head[j];
 
-	mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
-	TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, NULL);
-	STAILQ_INIT(&head->cb_head);
+		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
+		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
+		STAILQ_INIT(&head->cb_head);
 
-	CPU_FOREACH(i) {
-		struct linux_epoch_record *record;
+		CPU_FOREACH(i) {
+			struct linux_epoch_record *record;
 
-		record = &DPCPU_ID_GET(i, linux_epoch_record);
+			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);
 
-		record->cpuid = i;
-		ck_epoch_register(&linux_epoch, &record->epoch_record, NULL);
-		TAILQ_INIT(&record->ts_head);
+			record->cpuid = i;
+			ck_epoch_register(&linux_epoch[j],
+			    &record->epoch_record, NULL);
+			TAILQ_INIT(&record->ts_head);
+		}
 	}
 }
 SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);
 
 static void
 linux_rcu_runtime_uninit(void *arg __unused)
 {
 	struct linux_epoch_head *head;
+	int j;
 
-	head = &linux_epoch_head;
+	for (j = 0; j != RCU_TYPE_MAX; j++) {
+		head = &linux_epoch_head[j];
 
-	/* destroy head lock */
-	mtx_destroy(&head->lock);
+		mtx_destroy(&head->lock);
+	}
 }
 SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);
 
 static void
-linux_rcu_cleaner_func(void *context __unused, int pending __unused)
+linux_rcu_cleaner_func(void *context, int pending __unused)
 {
 	struct linux_epoch_head *head;
 	struct callback_head *rcu;
 	STAILQ_HEAD(, callback_head) tmp_head;
+	uintptr_t offset;
 
 	linux_set_current(curthread);
 
-	head = &linux_epoch_head;
+	head = context;
 
 	/* move current callbacks into own queue */
 	mtx_lock(&head->lock);
 	STAILQ_INIT(&tmp_head);
 	STAILQ_CONCAT(&tmp_head, &head->cb_head);
 	mtx_unlock(&head->lock);
 
 	/* synchronize */
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(head - linux_epoch_head);
 
 	/* dispatch all callbacks, if any */
 	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
-		uintptr_t offset;
 
 		STAILQ_REMOVE_HEAD(&tmp_head, entry);
 
 		offset = (uintptr_t)rcu->func;
 
 		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
 			kfree((char *)rcu - offset);
 		else
 			rcu->func((struct rcu_head *)rcu);
 	}
 }
 
 void
-linux_rcu_read_lock(void)
+linux_rcu_read_lock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
 	/*
 	 * Pin thread to current CPU so that the unlock code gets the
 	 * same per-CPU epoch record:
 	 */
 	sched_pin();
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
 	 * Use a critical section to prevent recursion inside
 	 * ck_epoch_begin(). Else this function supports recursion.
 	 */
 	critical_enter();
 	ck_epoch_begin(&record->epoch_record, NULL);
 	ts->rcu_recurse++;
 	if (ts->rcu_recurse == 1)
 		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry);
 	critical_exit();
 }
 
 void
-linux_rcu_read_unlock(void)
+linux_rcu_read_unlock(unsigned type)
 {
 	struct linux_epoch_record *record;
 	struct task_struct *ts;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
-	record = &DPCPU_GET(linux_epoch_record);
+	record = &DPCPU_GET(linux_epoch_record[type]);
 	ts = current;
 
 	/*
 	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Else this function supports recursion.
 	 */
 	critical_enter();
 	ck_epoch_end(&record->epoch_record, NULL);
 	ts->rcu_recurse--;
 	if (ts->rcu_recurse == 0)
 		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry);
 	critical_exit();
 
 	sched_unpin();
 }
 
 static void
 linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused,
     ck_epoch_record_t *epoch_record, void *arg __unused)
 {
 	struct linux_epoch_record *record =
 	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
 	struct thread *td = curthread;
 	struct task_struct *ts;
 
 	/* check if blocked on the current CPU */
 	if (record->cpuid == PCPU_GET(cpuid)) {
 		bool is_sleeping = 0;
 		u_char prio = 0;
 
 		/*
 		 * Find the lowest priority or sleeping thread which
 		 * is blocking synchronization on this CPU core. All
 		 * the threads in the queue are CPU-pinned and cannot
 		 * go anywhere while the current thread is locked.
 		 */
 		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry) {
 			if (ts->task_thread->td_priority > prio)
 				prio = ts->task_thread->td_priority;
 			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
 		}
 
 		if (is_sleeping) {
 			thread_unlock(td);
 			pause("W", 1);
 			thread_lock(td);
 		} else {
 			/* set new thread priority */
 			sched_prio(td, prio);
 			/* task switch */
 			mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
 
 			/*
 			 * Release the thread lock while yielding to
 			 * allow other threads to acquire the lock
 			 * pointed to by TDQ_LOCKPTR(td). Else a
 			 * deadlock like situation might happen.
 			 */
 			thread_unlock(td);
 			thread_lock(td);
 		}
 	} else {
 		/*
 		 * To avoid spinning move execution to the other CPU
 		 * which is blocking synchronization. Set highest
 		 * thread priority so that code gets run. The thread
 		 * priority will be restored later.
 		 */
 		sched_prio(td, 0);
 		sched_bind(td, record->cpuid);
 	}
 }
 
 void
-linux_synchronize_rcu(void)
+linux_synchronize_rcu(unsigned type)
 {
 	struct thread *td;
 	int was_bound;
 	int old_cpu;
 	int old_pinned;
 	u_char old_prio;
 
+	MPASS(type < RCU_TYPE_MAX);
+
 	if (RCU_SKIP())
 		return;
 
 	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 	    "linux_synchronize_rcu() can sleep");
 
 	td = curthread;
 	DROP_GIANT();
 
 	/*
 	 * Synchronizing RCU might change the CPU core this function
 	 * is running on. Save current values:
 	 */
 	thread_lock(td);
 
 	old_cpu = PCPU_GET(cpuid);
 	old_pinned = td->td_pinned;
 	old_prio = td->td_priority;
 	was_bound = sched_is_bound(td);
 	sched_unbind(td);
 	td->td_pinned = 0;
 	sched_bind(td, old_cpu);
 
-	ck_epoch_synchronize_wait(&linux_epoch,
+	ck_epoch_synchronize_wait(&linux_epoch[type],
 	    &linux_synchronize_rcu_cb, NULL);
 
 	/* restore CPU binding, if any */
 	if (was_bound != 0) {
 		sched_bind(td, old_cpu);
 	} else {
 		/* get thread back to initial CPU, if any */
 		if (old_pinned != 0)
 			sched_bind(td, old_cpu);
 		sched_unbind(td);
 	}
 	/* restore pinned after bind */
 	td->td_pinned = old_pinned;
 
 	/* restore thread priority */
 	sched_prio(td, old_prio);
 	thread_unlock(td);
 
 	PICKUP_GIANT();
 }
 
 void
-linux_rcu_barrier(void)
+linux_rcu_barrier(unsigned type)
 {
 	struct linux_epoch_head *head;
 
-	linux_synchronize_rcu();
+	MPASS(type < RCU_TYPE_MAX);
 
-	head = &linux_epoch_head;
+	linux_synchronize_rcu(type);
 
+	head = &linux_epoch_head[type];
+
 	/* wait for callbacks to complete */
 	taskqueue_drain(taskqueue_fast, &head->task);
 }
 
 void
-linux_call_rcu(struct rcu_head *context, rcu_callback_t func)
+linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
 {
-	struct callback_head *rcu = (struct callback_head *)context;
-	struct linux_epoch_head *head = &linux_epoch_head;
+	struct callback_head *rcu;
+	struct linux_epoch_head *head;
 
+	MPASS(type < RCU_TYPE_MAX);
+
+	rcu = (struct callback_head *)context;
+	head = &linux_epoch_head[type];
+
 	mtx_lock(&head->lock);
 	rcu->func = func;
 	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
 	taskqueue_enqueue(taskqueue_fast, &head->task);
 	mtx_unlock(&head->lock);
 }
 
 int
 init_srcu_struct(struct srcu_struct *srcu)
 {
 	return (0);
 }
 
 void
 cleanup_srcu_struct(struct srcu_struct *srcu)
 {
 }
 
 int
 srcu_read_lock(struct srcu_struct *srcu)
 {
-	linux_rcu_read_lock();
+	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
 	return (0);
 }
 
 void
 srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
 {
-	linux_rcu_read_unlock();
+	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
 }
 
 void
 synchronize_srcu(struct srcu_struct *srcu)
 {
-	linux_synchronize_rcu();
+	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
 }
 
 void
 srcu_barrier(struct srcu_struct *srcu)
 {
-	linux_rcu_barrier();
+	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
 }
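
The heart of the change is that every piece of global epoch state becomes an array indexed by RCU type, so regular RCU and sleepable RCU (SRCU) run on fully separate ck_epoch instances; linux_rcu_cleaner_func() even recovers its type index by plain pointer arithmetic, head - linux_epoch_head. A condensed model of the indexing; the two helper functions are hypothetical, the real code indexes the arrays directly:

```c
static inline ck_epoch_t *
rcu_type_to_epoch(unsigned type)
{
	MPASS(type < RCU_TYPE_MAX);	/* RCU_TYPE_REGULAR or RCU_TYPE_SLEEPABLE */
	return (&linux_epoch[type]);
}

static inline struct linux_epoch_record *
rcu_type_to_record(unsigned type)
{
	/* one record per CPU per type, via the DPCPU array defined above */
	return (&DPCPU_GET(linux_epoch_record[type]));
}
```
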
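The rcu_recurse counting in linux_rcu_read_lock()/linux_rcu_read_unlock() makes the read side re-entrant: only the outermost lock queues the task on the per-CPU ts_head list that linux_synchronize_rcu_cb() scans. A hypothetical nesting, annotated with the counter transitions:

```c
static void
inner_lookup(void)
{
	rcu_read_lock();	/* rcu_recurse 1 -> 2: no queue insertion */
	/* ... reads ... */
	rcu_read_unlock();	/* rcu_recurse 2 -> 1: no queue removal */
}

static void
outer_lookup(void)
{
	rcu_read_lock();	/* rcu_recurse 0 -> 1: task added to ts_head */
	inner_lookup();		/* nesting is safe */
	rcu_read_unlock();	/* rcu_recurse 1 -> 0: task removed */
}
```
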
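Note also how the dispatch loop in linux_rcu_cleaner_func() doubles as the back end of kfree_rcu(): the macro smuggles the byte offset of the rcu_head member through the func pointer, and any "callback" value below LINUX_KFREE_RCU_OFFSET_MAX (4096) is interpreted as an offset rather than called. A sketch of the encoding, with a hypothetical struct:

```c
struct foo {
	char name[16];
	struct rcu_head rcu;	/* offsetof(struct foo, rcu) == 16 */
};

static void
foo_free_deferred(struct foo *p)	/* "p" assumed kmalloc()'d */
{
	/*
	 * Stores (rcu_callback_t)16 in rcu->func. The cleaner sees
	 * 16 < LINUX_KFREE_RCU_OFFSET_MAX, reconstructs the base
	 * address as (char *)rcu - 16 and passes it to kfree() --
	 * no real callback is ever invoked.
	 */
	kfree_rcu(p, rcu);
}
```
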
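With SRCU mapped onto its own epoch, a sleepable read section no longer extends the grace period observed by plain synchronize_rcu(), and vice versa. A minimal usage sketch; "ctx_srcu" and the surrounding functions are hypothetical:

```c
static struct srcu_struct ctx_srcu;

static void
setup(void)
{
	(void)init_srcu_struct(&ctx_srcu);	/* currently a no-op, returns 0 */
}

static void
slow_reader(void)
{
	int key;

	key = srcu_read_lock(&ctx_srcu);	/* enters the SLEEPABLE epoch */
	pause("srcu", hz / 10);			/* sleeping is permitted here */
	srcu_read_unlock(&ctx_srcu, key);	/* key is unused internally */
}

static void
writer(void)
{
	/* waits only for SLEEPABLE readers such as slow_reader() */
	synchronize_srcu(&ctx_srcu);
}
```
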
Index: stable/12/sys/sys/param.h
===================================================================
--- stable/12/sys/sys/param.h	(revision 359956)
+++ stable/12/sys/sys/param.h	(revision 359957)
@@ -58,7 +58,7 @@
  *	X.0-CURRENT before releng/X.0 is created, otherwise 'R' is
  *	in the range 5 to 9.
  */
 #undef __FreeBSD_version
-#define __FreeBSD_version 1201512	/* Master, propagated to newvers */
+#define __FreeBSD_version 1201513	/* Master, propagated to newvers */
 
 /*
  * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
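
The version bump gives out-of-tree LinuxKPI consumers a compile-time handle on the new RCU ABI; a typical (hypothetical) guard:

```c
#include <sys/param.h>

#if __FreeBSD_version >= 1201513
/* stable/12 with typed RCU: linux_call_rcu(type, ptr, func) and friends */
#else
/* older stable/12: linux_call_rcu(ptr, func) */
#endif
```
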
Index: stable/12
===================================================================
--- stable/12	(revision 359956)
+++ stable/12	(revision 359957)

Property changes on: stable/12
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r359727