Index: stable/12/sys/kern/genoffset.c =================================================================== --- stable/12/sys/kern/genoffset.c (revision 349762) +++ stable/12/sys/kern/genoffset.c (revision 349763) @@ -1,44 +1,43 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef OFFSET_TEST #define GENOFFSET #endif #include __FBSDID("$FreeBSD$"); #include #include #include -OFFSYM(td_pre_epoch_prio, thread, u_char); OFFSYM(td_priority, thread, u_char); OFFSYM(td_epochnest, thread, u_char); OFFSYM(td_critnest, thread, u_int); OFFSYM(td_pinned, thread, int); OFFSYM(td_owepreempt, thread, u_char); Index: stable/12/sys/kern/subr_epoch.c =================================================================== --- stable/12/sys/kern/subr_epoch.c (revision 349762) +++ stable/12/sys/kern/subr_epoch.c (revision 349763) @@ -1,555 +1,658 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_EPOCH, "epoch", "epoch based reclamation"); +#ifdef __amd64__ +#define EPOCH_ALIGN CACHE_LINE_SIZE*2 +#else +#define EPOCH_ALIGN CACHE_LINE_SIZE +#endif + +TAILQ_HEAD (epoch_tdlist, epoch_tracker); +typedef struct epoch_record { + ck_epoch_record_t er_record; + volatile struct epoch_tdlist er_tdlist; + volatile uint32_t er_gen; + uint32_t er_cpuid; +} __aligned(EPOCH_ALIGN) *epoch_record_t; + +struct epoch { + struct ck_epoch e_epoch __aligned(EPOCH_ALIGN); + epoch_record_t e_pcpu_record; + int e_idx; + int e_flags; +}; + /* arbitrary --- needs benchmarking */ #define MAX_ADAPTIVE_SPIN 100 #define MAX_EPOCHS 64 CTASSERT(sizeof(ck_epoch_entry_t) == sizeof(struct epoch_context)); SYSCTL_NODE(_kern, OID_AUTO, epoch, CTLFLAG_RW, 0, "epoch information"); SYSCTL_NODE(_kern_epoch, OID_AUTO, stats, CTLFLAG_RW, 0, "epoch stats"); /* Stats. */ static counter_u64_t block_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, nblocked, CTLFLAG_RW, &block_count, "# of times a thread was in an epoch when epoch_wait was called"); static counter_u64_t migrate_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, migrations, CTLFLAG_RW, &migrate_count, "# of times thread was migrated to another CPU in epoch_wait"); static counter_u64_t turnstile_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, ncontended, CTLFLAG_RW, &turnstile_count, "# of times a thread was blocked on a lock in an epoch during an epoch_wait"); static counter_u64_t switch_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, switches, CTLFLAG_RW, &switch_count, "# of times a thread voluntarily context switched in epoch_wait"); static counter_u64_t epoch_call_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_calls, CTLFLAG_RW, &epoch_call_count, "# of times a callback was deferred"); static counter_u64_t epoch_call_task_count; SYSCTL_COUNTER_U64(_kern_epoch_stats, OID_AUTO, epoch_call_tasks, CTLFLAG_RW, &epoch_call_task_count, "# of times a callback task was run"); TAILQ_HEAD (threadlist, thread); CK_STACK_CONTAINER(struct ck_epoch_entry, stack_entry, ck_epoch_entry_container) epoch_t allepochs[MAX_EPOCHS]; DPCPU_DEFINE(struct grouptask, epoch_cb_task); DPCPU_DEFINE(int, epoch_cb_count); static __read_mostly int inited; static __read_mostly int epoch_count; __read_mostly epoch_t global_epoch; __read_mostly epoch_t global_epoch_preempt; static void epoch_call_task(void *context __unused); static uma_zone_t pcpu_zone_record; static void epoch_init(void *arg __unused) { int cpu; block_count = counter_u64_alloc(M_WAITOK); migrate_count = counter_u64_alloc(M_WAITOK); turnstile_count = counter_u64_alloc(M_WAITOK); switch_count = counter_u64_alloc(M_WAITOK); epoch_call_count = counter_u64_alloc(M_WAITOK); epoch_call_task_count = counter_u64_alloc(M_WAITOK); - pcpu_zone_record = uma_zcreate("epoch_record pcpu", sizeof(struct epoch_record), - NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU); + pcpu_zone_record = uma_zcreate("epoch_record pcpu", + sizeof(struct epoch_record), NULL, NULL, NULL, NULL, + UMA_ALIGN_PTR, UMA_ZONE_PCPU); CPU_FOREACH(cpu) { - GROUPTASK_INIT(DPCPU_ID_PTR(cpu, epoch_cb_task), 0, epoch_call_task, NULL); - taskqgroup_attach_cpu(qgroup_softirq, DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, "epoch call task"); + GROUPTASK_INIT(DPCPU_ID_PTR(cpu, 
epoch_cb_task), 0, + epoch_call_task, NULL); + taskqgroup_attach_cpu(qgroup_softirq, + DPCPU_ID_PTR(cpu, epoch_cb_task), NULL, cpu, -1, + "epoch call task"); } inited = 1; global_epoch = epoch_alloc(0); global_epoch_preempt = epoch_alloc(EPOCH_PREEMPT); } SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_FIRST, epoch_init, NULL); #if !defined(EARLY_AP_STARTUP) static void epoch_init_smp(void *dummy __unused) { inited = 2; } SYSINIT(epoch_smp, SI_SUB_SMP + 1, SI_ORDER_FIRST, epoch_init_smp, NULL); #endif static void epoch_ctor(epoch_t epoch) { epoch_record_t er; int cpu; epoch->e_pcpu_record = uma_zalloc_pcpu(pcpu_zone_record, M_WAITOK); CPU_FOREACH(cpu) { er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); bzero(er, sizeof(*er)); ck_epoch_register(&epoch->e_epoch, &er->er_record, NULL); TAILQ_INIT((struct threadlist *)(uintptr_t)&er->er_tdlist); er->er_cpuid = cpu; } } +static void +epoch_adjust_prio(struct thread *td, u_char prio) +{ + + thread_lock(td); + sched_prio(td, prio); + thread_unlock(td); +} + epoch_t epoch_alloc(int flags) { epoch_t epoch; if (__predict_false(!inited)) panic("%s called too early in boot", __func__); epoch = malloc(sizeof(struct epoch), M_EPOCH, M_ZERO | M_WAITOK); ck_epoch_init(&epoch->e_epoch); epoch_ctor(epoch); MPASS(epoch_count < MAX_EPOCHS - 2); epoch->e_flags = flags; epoch->e_idx = epoch_count; allepochs[epoch_count++] = epoch; return (epoch); } void epoch_free(epoch_t epoch) { #ifdef INVARIANTS struct epoch_record *er; int cpu; CPU_FOREACH(cpu) { er = zpcpu_get_cpu(epoch->e_pcpu_record, cpu); MPASS(TAILQ_EMPTY(&er->er_tdlist)); } #endif allepochs[epoch->e_idx] = NULL; epoch_wait(global_epoch); uma_zfree_pcpu(pcpu_zone_record, epoch->e_pcpu_record); free(epoch, M_EPOCH); } +static epoch_record_t +epoch_currecord(epoch_t epoch) +{ + + return (zpcpu_get_cpu(epoch->e_pcpu_record, curcpu)); +} + +#define INIT_CHECK(epoch) \ + do { \ + if (__predict_false((epoch) == NULL)) \ + return; \ + } while (0) + void -epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et) +epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et) { + struct epoch_record *er; + struct thread *td; - epoch_enter_preempt(epoch, et); + MPASS(cold || epoch != NULL); + INIT_CHECK(epoch); + MPASS(epoch->e_flags & EPOCH_PREEMPT); +#ifdef EPOCH_TRACKER_DEBUG + et->et_magic_pre = EPOCH_MAGIC0; + et->et_magic_post = EPOCH_MAGIC1; +#endif + td = curthread; + et->et_td = td; + td->td_epochnest++; + critical_enter(); + sched_pin(); + + td->td_pre_epoch_prio = td->td_priority; + er = epoch_currecord(epoch); + TAILQ_INSERT_TAIL(&er->er_tdlist, et, et_link); + ck_epoch_begin(&er->er_record, &et->et_section); + critical_exit(); } void -epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et) +epoch_enter(epoch_t epoch) { + struct thread *td; + epoch_record_t er; - epoch_exit_preempt(epoch, et); + MPASS(cold || epoch != NULL); + INIT_CHECK(epoch); + td = curthread; + + td->td_epochnest++; + critical_enter(); + er = epoch_currecord(epoch); + ck_epoch_begin(&er->er_record, NULL); } void -epoch_enter_KBI(epoch_t epoch) +epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et) { + struct epoch_record *er; + struct thread *td; - epoch_enter(epoch); + INIT_CHECK(epoch); + td = curthread; + critical_enter(); + sched_unpin(); + MPASS(td->td_epochnest); + td->td_epochnest--; + er = epoch_currecord(epoch); + MPASS(epoch->e_flags & EPOCH_PREEMPT); + MPASS(et != NULL); + MPASS(et->et_td == td); +#ifdef EPOCH_TRACKER_DEBUG + MPASS(et->et_magic_pre == EPOCH_MAGIC0); + MPASS(et->et_magic_post == EPOCH_MAGIC1); + et->et_magic_pre = 0; 
+ et->et_magic_post = 0; +#endif +#ifdef INVARIANTS + et->et_td = (void*)0xDEADBEEF; +#endif + ck_epoch_end(&er->er_record, &et->et_section); + TAILQ_REMOVE(&er->er_tdlist, et, et_link); + er->er_gen++; + if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) + epoch_adjust_prio(td, td->td_pre_epoch_prio); + critical_exit(); } void -epoch_exit_KBI(epoch_t epoch) +epoch_exit(epoch_t epoch) { + struct thread *td; + epoch_record_t er; - epoch_exit(epoch); + INIT_CHECK(epoch); + td = curthread; + MPASS(td->td_epochnest); + td->td_epochnest--; + er = epoch_currecord(epoch); + ck_epoch_end(&er->er_record, NULL); + critical_exit(); } /* - * epoch_block_handler_preempt is a callback from the ck code when another thread is - * currently in an epoch section. + * epoch_block_handler_preempt() is a callback from the CK code when another + * thread is currently in an epoch section. */ static void -epoch_block_handler_preempt(struct ck_epoch *global __unused, ck_epoch_record_t *cr, - void *arg __unused) +epoch_block_handler_preempt(struct ck_epoch *global __unused, + ck_epoch_record_t *cr, void *arg __unused) { epoch_record_t record; struct thread *td, *owner, *curwaittd; - struct epoch_thread *tdwait; + struct epoch_tracker *tdwait; struct turnstile *ts; struct lock_object *lock; int spincount, gen; int locksheld __unused; record = __containerof(cr, struct epoch_record, er_record); td = curthread; locksheld = td->td_locks; spincount = 0; counter_u64_add(block_count, 1); /* * We lost a race and there's no longer any threads * on the CPU in an epoch section. */ if (TAILQ_EMPTY(&record->er_tdlist)) return; if (record->er_cpuid != curcpu) { /* * If the head of the list is running, we can wait for it * to remove itself from the list and thus save us the * overhead of a migration */ gen = record->er_gen; thread_unlock(td); /* * We can't actually check if the waiting thread is running * so we simply poll for it to exit before giving up and * migrating. */ do { cpu_spinwait(); } while (!TAILQ_EMPTY(&record->er_tdlist) && gen == record->er_gen && spincount++ < MAX_ADAPTIVE_SPIN); thread_lock(td); /* * If the generation has changed we can poll again * otherwise we need to migrate. */ if (gen != record->er_gen) return; /* * Being on the same CPU as that of the record on which * we need to wait allows us access to the thread * list associated with that CPU. We can then examine the * oldest thread in the queue and wait on its turnstile * until it resumes and so on until a grace period * elapses. * */ counter_u64_add(migrate_count, 1); sched_bind(td, record->er_cpuid); /* * At this point we need to return to the ck code * to scan to see if a grace period has elapsed. * We can't move on to check the thread list, because * in the meantime new threads may have arrived that * in fact belong to a different epoch. */ return; } /* * Try to find a thread in an epoch section on this CPU * waiting on a turnstile. Otherwise find the lowest * priority thread (highest prio value) and drop our priority * to match to allow it to run. */ TAILQ_FOREACH(tdwait, &record->er_tdlist, et_link) { /* * Propagate our priority to any other waiters to prevent us * from starving them. They will have their original priority * restore on exit from epoch_wait(). 
*/ curwaittd = tdwait->et_td; if (!TD_IS_INHIBITED(curwaittd) && curwaittd->td_priority > td->td_priority) { critical_enter(); thread_unlock(td); thread_lock(curwaittd); sched_prio(curwaittd, td->td_priority); thread_unlock(curwaittd); thread_lock(td); critical_exit(); } if (TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd) && ((ts = curwaittd->td_blocked) != NULL)) { /* - * We unlock td to allow turnstile_wait to reacquire the - * the thread lock. Before unlocking it we enter a critical - * section to prevent preemption after we reenable interrupts - * by dropping the thread lock in order to prevent curwaittd - * from getting to run. + * We unlock td to allow turnstile_wait to reacquire + * the thread lock. Before unlocking it we enter a + * critical section to prevent preemption after we + * reenable interrupts by dropping the thread lock in + * order to prevent curwaittd from getting to run. */ critical_enter(); thread_unlock(td); owner = turnstile_lock(ts, &lock); /* - * The owner pointer indicates that the lock succeeded. Only - * in case we hold the lock and the turnstile we locked is still - * the one that curwaittd is blocked on can we continue. Otherwise - * The turnstile pointer has been changed out from underneath - * us, as in the case where the lock holder has signalled curwaittd, + * The owner pointer indicates that the lock succeeded. + * Only in case we hold the lock and the turnstile we + * locked is still the one that curwaittd is blocked on + * can we continue. Otherwise the turnstile pointer has + * been changed out from underneath us, as in the case + * where the lock holder has signalled curwaittd, * and we need to continue. */ if (owner != NULL && ts == curwaittd->td_blocked) { - MPASS(TD_IS_INHIBITED(curwaittd) && TD_ON_LOCK(curwaittd)); + MPASS(TD_IS_INHIBITED(curwaittd) && + TD_ON_LOCK(curwaittd)); critical_exit(); turnstile_wait(ts, owner, curwaittd->td_tsqueue); counter_u64_add(turnstile_count, 1); thread_lock(td); return; } else if (owner != NULL) turnstile_unlock(ts, lock); thread_lock(td); critical_exit(); KASSERT(td->td_locks == locksheld, ("%d extra locks held", td->td_locks - locksheld)); } } /* * We didn't find any threads actually blocked on a lock * so we have nothing to do except context switch away. */ counter_u64_add(switch_count, 1); mi_switch(SW_VOL | SWT_RELINQUISH, NULL); /* * Release the thread lock while yielding to * allow other threads to acquire the lock * pointed to by TDQ_LOCKPTR(td). Else a * deadlock like situation might happen. 
(HPS) */ thread_unlock(td); thread_lock(td); } void epoch_wait_preempt(epoch_t epoch) { struct thread *td; int was_bound; int old_cpu; int old_pinned; u_char old_prio; int locks __unused; MPASS(cold || epoch != NULL); INIT_CHECK(epoch); td = curthread; #ifdef INVARIANTS locks = curthread->td_locks; MPASS(epoch->e_flags & EPOCH_PREEMPT); if ((epoch->e_flags & EPOCH_LOCKED) == 0) WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "epoch_wait() can be long running"); - KASSERT(!in_epoch(epoch), - ("epoch_wait_preempt() called in the middle " - "of an epoch section of the same epoch")); + KASSERT(!in_epoch(epoch), ("epoch_wait_preempt() called in the middle " + "of an epoch section of the same epoch")); #endif thread_lock(td); DROP_GIANT(); old_cpu = PCPU_GET(cpuid); old_pinned = td->td_pinned; old_prio = td->td_priority; was_bound = sched_is_bound(td); sched_unbind(td); td->td_pinned = 0; sched_bind(td, old_cpu); - ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, NULL); + ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler_preempt, + NULL); /* restore CPU binding, if any */ if (was_bound != 0) { sched_bind(td, old_cpu); } else { /* get thread back to initial CPU, if any */ if (old_pinned != 0) sched_bind(td, old_cpu); sched_unbind(td); } /* restore pinned after bind */ td->td_pinned = old_pinned; /* restore thread priority */ sched_prio(td, old_prio); thread_unlock(td); PICKUP_GIANT(); KASSERT(td->td_locks == locks, ("%d residual locks held", td->td_locks - locks)); } static void epoch_block_handler(struct ck_epoch *g __unused, ck_epoch_record_t *c __unused, void *arg __unused) { cpu_spinwait(); } void epoch_wait(epoch_t epoch) { MPASS(cold || epoch != NULL); INIT_CHECK(epoch); MPASS(epoch->e_flags == 0); critical_enter(); ck_epoch_synchronize_wait(&epoch->e_epoch, epoch_block_handler, NULL); critical_exit(); } void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t)) { epoch_record_t er; ck_epoch_entry_t *cb; cb = (void *)ctx; MPASS(callback); /* too early in boot to have epoch set up */ if (__predict_false(epoch == NULL)) goto boottime; #if !defined(EARLY_AP_STARTUP) if (__predict_false(inited < 2)) goto boottime; #endif critical_enter(); *DPCPU_PTR(epoch_cb_count) += 1; er = epoch_currecord(epoch); ck_epoch_call(&er->er_record, cb, (ck_epoch_cb_t *)callback); critical_exit(); return; boottime: callback(ctx); } static void epoch_call_task(void *arg __unused) { ck_stack_entry_t *cursor, *head, *next; ck_epoch_record_t *record; epoch_record_t er; epoch_t epoch; ck_stack_t cb_stack; int i, npending, total; ck_stack_init(&cb_stack); critical_enter(); epoch_enter(global_epoch); for (total = i = 0; i < epoch_count; i++) { if (__predict_false((epoch = allepochs[i]) == NULL)) continue; er = epoch_currecord(epoch); record = &er->er_record; if ((npending = record->n_pending) == 0) continue; ck_epoch_poll_deferred(record, &cb_stack); total += npending - record->n_pending; } epoch_exit(global_epoch); *DPCPU_PTR(epoch_cb_count) -= total; critical_exit(); counter_u64_add(epoch_call_count, total); counter_u64_add(epoch_call_task_count, 1); head = ck_stack_batch_pop_npsc(&cb_stack); for (cursor = head; cursor != NULL; cursor = next) { struct ck_epoch_entry *entry = - ck_epoch_entry_container(cursor); + ck_epoch_entry_container(cursor); next = CK_STACK_NEXT(cursor); entry->function(entry); } } int in_epoch_verbose(epoch_t epoch, int dump_onfail) { - struct epoch_thread *tdwait; + struct epoch_tracker *tdwait; struct thread *td; epoch_record_t er; 
td = curthread; if (td->td_epochnest == 0) return (0); if (__predict_false((epoch) == NULL)) return (0); critical_enter(); er = epoch_currecord(epoch); TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) if (tdwait->et_td == td) { critical_exit(); return (1); } #ifdef INVARIANTS if (dump_onfail) { MPASS(td->td_pinned); printf("cpu: %d id: %d\n", curcpu, td->td_tid); TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) printf("td_tid: %d ", tdwait->et_td->td_tid); printf("\n"); } #endif critical_exit(); return (0); } int in_epoch(epoch_t epoch) { return (in_epoch_verbose(epoch, 0)); -} - -void -epoch_adjust_prio(struct thread *td, u_char prio) -{ - thread_lock(td); - sched_prio(td, prio); - thread_unlock(td); } Index: stable/12/sys/sys/epoch_private.h =================================================================== --- stable/12/sys/sys/epoch_private.h (revision 349762) +++ stable/12/sys/sys/epoch_private.h (nonexistent) @@ -1,210 +0,0 @@ -/*- - * SPDX-License-Identifier: BSD-2-Clause-FreeBSD - * - * Copyright (c) 2018, Matthew Macy - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef _SYS_EPOCH_PRIVATE_H_ -#define _SYS_EPOCH_PRIVATE_H_ -#ifndef _KERNEL -#error "no user serviceable parts" -#else -#include -#include - -#include - -extern void epoch_adjust_prio(struct thread *td, u_char prio); -#ifndef _SYS_SYSTM_H_ -extern void critical_exit_preempt(void); -#endif - -#ifdef __amd64__ -#define EPOCH_ALIGN CACHE_LINE_SIZE*2 -#else -#define EPOCH_ALIGN CACHE_LINE_SIZE -#endif - -/* - * Standalone (_sa) routines for thread state manipulation - */ -static __inline void -critical_enter_sa(void *tdarg) -{ - struct thread_lite *td; - - td = tdarg; - td->td_critnest++; - __compiler_membar(); -} - -static __inline void -critical_exit_sa(void *tdarg) -{ - struct thread_lite *td; - - td = tdarg; - MPASS(td->td_critnest > 0); - __compiler_membar(); - td->td_critnest--; - __compiler_membar(); - if (__predict_false(td->td_owepreempt != 0)) - critical_exit_preempt(); -} - -typedef struct epoch_thread { -#ifdef EPOCH_TRACKER_DEBUG - uint64_t et_magic_pre; -#endif - TAILQ_ENTRY(epoch_thread) et_link; /* Epoch queue. 
*/ - struct thread *et_td; /* pointer to thread in section */ - ck_epoch_section_t et_section; /* epoch section object */ -#ifdef EPOCH_TRACKER_DEBUG - uint64_t et_magic_post; -#endif -} *epoch_thread_t; -TAILQ_HEAD (epoch_tdlist, epoch_thread); - -typedef struct epoch_record { - ck_epoch_record_t er_record; - volatile struct epoch_tdlist er_tdlist; - volatile uint32_t er_gen; - uint32_t er_cpuid; -} __aligned(EPOCH_ALIGN) *epoch_record_t; - -struct epoch { - struct ck_epoch e_epoch __aligned(EPOCH_ALIGN); - epoch_record_t e_pcpu_record; - int e_idx; - int e_flags; -}; - -static epoch_record_t -epoch_currecord(epoch_t epoch) -{ - return zpcpu_get_cpu(epoch->e_pcpu_record, curcpu); -} - -#define INIT_CHECK(epoch) \ - do { \ - if (__predict_false((epoch) == NULL)) \ - return; \ - } while (0) - -static __inline void -epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et) -{ - struct epoch_record *er; - struct epoch_thread *etd; - struct thread_lite *td; - - MPASS(cold || epoch != NULL); - INIT_CHECK(epoch); - etd = (void *)et; - MPASS(epoch->e_flags & EPOCH_PREEMPT); -#ifdef EPOCH_TRACKER_DEBUG - etd->et_magic_pre = EPOCH_MAGIC0; - etd->et_magic_post = EPOCH_MAGIC1; -#endif - td = (struct thread_lite *)curthread; - etd->et_td = (void*)td; - td->td_epochnest++; - critical_enter_sa(td); - sched_pin_lite(td); - - td->td_pre_epoch_prio = td->td_priority; - er = epoch_currecord(epoch); - TAILQ_INSERT_TAIL(&er->er_tdlist, etd, et_link); - ck_epoch_begin(&er->er_record, (ck_epoch_section_t *)&etd->et_section); - critical_exit_sa(td); -} - -static __inline void -epoch_enter(epoch_t epoch) -{ - struct thread_lite *td; - epoch_record_t er; - - MPASS(cold || epoch != NULL); - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - - td->td_epochnest++; - critical_enter_sa(td); - er = epoch_currecord(epoch); - ck_epoch_begin(&er->er_record, NULL); -} - -static __inline void -epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et) -{ - struct epoch_record *er; - struct epoch_thread *etd; - struct thread_lite *td; - - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - critical_enter_sa(td); - sched_unpin_lite(td); - MPASS(td->td_epochnest); - td->td_epochnest--; - er = epoch_currecord(epoch); - MPASS(epoch->e_flags & EPOCH_PREEMPT); - etd = (void *)et; - MPASS(etd != NULL); - MPASS(etd->et_td == (struct thread *)td); -#ifdef EPOCH_TRACKER_DEBUG - MPASS(etd->et_magic_pre == EPOCH_MAGIC0); - MPASS(etd->et_magic_post == EPOCH_MAGIC1); - etd->et_magic_pre = 0; - etd->et_magic_post = 0; -#endif - etd->et_td = (void*)0xDEADBEEF; - ck_epoch_end(&er->er_record, - (ck_epoch_section_t *)&etd->et_section); - TAILQ_REMOVE(&er->er_tdlist, etd, et_link); - er->er_gen++; - if (__predict_false(td->td_pre_epoch_prio != td->td_priority)) - epoch_adjust_prio((struct thread *)td, td->td_pre_epoch_prio); - critical_exit_sa(td); -} - -static __inline void -epoch_exit(epoch_t epoch) -{ - struct thread_lite *td; - epoch_record_t er; - - INIT_CHECK(epoch); - td = (struct thread_lite *)curthread; - MPASS(td->td_epochnest); - td->td_epochnest--; - er = epoch_currecord(epoch); - ck_epoch_end(&er->er_record, NULL); - critical_exit_sa(td); -} -#endif /* _KERNEL */ -#endif /* _SYS_EPOCH_PRIVATE_H_ */ Property changes on: stable/12/sys/sys/epoch_private.h ___________________________________________________________________ Deleted: svn:eol-style ## -1 +0,0 ## -native \ No newline at end of property Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Deleted: svn:mime-type ## -1 +0,0 ## -text/plain 
\ No newline at end of property Index: stable/12/sys/sys/epoch.h =================================================================== --- stable/12/sys/sys/epoch.h (revision 349762) +++ stable/12/sys/sys/epoch.h (revision 349763) @@ -1,93 +1,86 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2018, Matthew Macy * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_EPOCH_H_ #define _SYS_EPOCH_H_ + +struct epoch_context { + void *data[2]; +} __aligned(sizeof(void *)); + +typedef struct epoch_context *epoch_context_t; + #ifdef _KERNEL #include #include -#endif +#include struct epoch; typedef struct epoch *epoch_t; #define EPOCH_PREEMPT 0x1 #define EPOCH_LOCKED 0x2 extern epoch_t global_epoch; extern epoch_t global_epoch_preempt; -struct epoch_context { - void *data[2]; -} __aligned(sizeof(void *)); - -typedef struct epoch_context *epoch_context_t; - - struct epoch_tracker { - void *datap[3]; -#ifdef EPOCH_TRACKER_DEBUG - int datai[5]; -#else - int datai[1]; +#ifdef EPOCH_TRACKER_DEBUG +#define EPOCH_MAGIC0 0xFADECAFEF00DD00D +#define EPOCH_MAGIC1 0xBADDBABEDEEDFEED + uint64_t et_magic_pre; #endif + TAILQ_ENTRY(epoch_tracker) et_link; + struct thread *et_td; + ck_epoch_section_t et_section; +#ifdef EPOCH_TRACKER_DEBUG + uint64_t et_magic_post; +#endif } __aligned(sizeof(void *)); - typedef struct epoch_tracker *epoch_tracker_t; epoch_t epoch_alloc(int flags); void epoch_free(epoch_t epoch); void epoch_wait(epoch_t epoch); void epoch_wait_preempt(epoch_t epoch); void epoch_call(epoch_t epoch, epoch_context_t ctx, void (*callback) (epoch_context_t)); int in_epoch(epoch_t epoch); int in_epoch_verbose(epoch_t epoch, int dump_onfail); -#ifdef _KERNEL DPCPU_DECLARE(int, epoch_cb_count); DPCPU_DECLARE(struct grouptask, epoch_cb_task); #define EPOCH_MAGIC0 0xFADECAFEF00DD00D #define EPOCH_MAGIC1 0xBADDBABEDEEDFEED -void epoch_enter_preempt_KBI(epoch_t epoch, epoch_tracker_t et); -void epoch_exit_preempt_KBI(epoch_t epoch, epoch_tracker_t et); -void epoch_enter_KBI(epoch_t epoch); -void epoch_exit_KBI(epoch_t epoch); +void epoch_enter_preempt(epoch_t epoch, epoch_tracker_t et); +void epoch_exit_preempt(epoch_t epoch, epoch_tracker_t et); +void epoch_enter(epoch_t epoch); +void epoch_exit(epoch_t epoch); - -#if defined(KLD_MODULE) && !defined(KLD_TIED) -#define 
epoch_enter_preempt(e, t) epoch_enter_preempt_KBI((e), (t)) -#define epoch_exit_preempt(e, t) epoch_exit_preempt_KBI((e), (t)) -#define epoch_enter(e) epoch_enter_KBI((e)) -#define epoch_exit(e) epoch_exit_KBI((e)) -#else -#include -#endif /* KLD_MODULE */ - -#endif /* _KERNEL */ -#endif +#endif /* _KERNEL */ +#endif /* _SYS_EPOCH_H_ */ Index: stable/12/sys/sys/param.h =================================================================== --- stable/12/sys/sys/param.h (revision 349762) +++ stable/12/sys/sys/param.h (revision 349763) @@ -1,365 +1,365 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here in the doc/ repository: * * head/en_US.ISO8859-1/books/porters-handbook/versions/chapter.xml * * scheme is: Rxx * 'R' is in the range 0 to 4 if this is a release branch or * X.0-CURRENT before releng/X.0 is created, otherwise 'R' is * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1200512 /* Master, propagated to newvers */ +#define __FreeBSD_version 1200513 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, * which by definition is always true on FreeBSD. This macro is also defined * on other systems that use the kernel of FreeBSD, such as GNU/kFreeBSD. 
* * It is tempting to use this macro in userland code when we want to enable * kernel-specific routines, and in fact it's fine to do this in code that * is part of FreeBSD itself. However, be aware that as presence of this * macro is still not widespread (e.g. older FreeBSD versions, 3rd party * compilers, etc), it is STRONGLY DISCOURAGED to check for this macro in * external applications without also checking for __FreeBSD__ as an * alternative. */ #undef __FreeBSD_kernel__ #define __FreeBSD_kernel__ #if defined(_KERNEL) || defined(IN_RTLD) #define P_OSREL_SIGWAIT 700000 #define P_OSREL_SIGSEGV 700004 #define P_OSREL_MAP_ANON 800104 #define P_OSREL_MAP_FSTRICT 1100036 #define P_OSREL_SHUTDOWN_ENOTCONN 1100077 #define P_OSREL_MAP_GUARD 1200035 #define P_OSREL_WRFSBASE 1200041 #define P_OSREL_CK_CYLGRP 1200046 #define P_OSREL_VMTOTAL64 1200054 #define P_OSREL_MAJOR(x) ((x) / 100000) #endif #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. * * MAXCOMLEN should be >= sizeof(ac_comm) (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP PATH_MAX /* max interpreter file name length */ #define MAXLOGNAME 33 /* max login name length (incl. NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS (NGROUPS_MAX+1) /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 63 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #ifndef LOCORE #include #include #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #endif #ifndef _KERNEL /* Signals. */ #include #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without effecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. 
* * MAXBCACHEBUF - Maximum size of a buffer in the buffer cache. This must * be >= MAXBSIZE and can be set differently for different * architectures by defining it in <machine/param.h>. * Making this larger allows NFS to do larger reads/writes. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). * The default value here can be overridden on a per-architecture * basis by defining it in <machine/param.h>. * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. */ #define MAXBSIZE 65536 /* must be power of 2 */ #ifndef MAXBCACHEBUF #define MAXBCACHEBUF MAXBSIZE /* must be a power of 2 >= MAXBSIZE */ #endif #ifndef BKVASIZE #define BKVASIZE 16384 /* must be power of 2 */ #endif #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define nitems(x) (sizeof((x)) / sizeof((x)[0])) #define rounddown(x, y) (((x)/(y))*(y)) #define rounddown2(x, y) ((x)&(~((y)-1))) /* if y is power of two */ #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). 
* * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<<FSHIFT) #define dbtoc(db) /* calculates devblks to pages */ \ ((db + (ctodb(1) - 1)) >> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) /* * Old spelling of __containerof(). */ #define member2struct(s, m, x) \ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m))) /* * Access a variable length array that has been declared as a fixed * length array. */ #define __PAST_END(array, offset) (((__typeof__(*(array)) *)(array))[offset]) #endif /* _SYS_PARAM_H_ */ Index: stable/12 =================================================================== --- stable/12 (revision 349762) +++ stable/12 (revision 349763) Property changes on: stable/12 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r340404,340415,340417,340419-340420
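
For readers tracking the API change above: epoch_enter_preempt() and epoch_exit_preempt() are now real exported functions that take a caller-allocated struct epoch_tracker (its fields are visible in sys/epoch.h), replacing the _KBI wrappers and the inlines that lived in the deleted epoch_private.h. What follows is a minimal consumer-side sketch of the resulting usage, not part of this commit: foo, foo_list and foo_epoch are hypothetical names, foo_epoch is assumed to have been allocated with epoch_alloc(EPOCH_PREEMPT) after epoch_init has run (epoch_alloc panics if called too early in boot), and a production lockless list would use the CK_ list macros rather than plain SLIST.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/epoch.h>

struct foo {
	SLIST_ENTRY(foo)	f_link;
	int			f_key;
	int			f_value;
	struct epoch_context	f_ctx;	/* embedded reclamation context */
};

static SLIST_HEAD(, foo) foo_list = SLIST_HEAD_INITIALIZER(foo_list);
static epoch_t foo_epoch;		/* epoch_alloc(EPOCH_PREEMPT) */

static int
foo_lookup_value(int key, int *valp)
{
	struct epoch_tracker et;	/* caller-supplied; the stack is fine */
	struct foo *f;
	int found;

	found = 0;
	epoch_enter_preempt(foo_epoch, &et);
	SLIST_FOREACH(f, &foo_list, f_link) {
		if (f->f_key == key) {
			*valp = f->f_value;	/* copy out while protected */
			found = 1;
			break;
		}
	}
	epoch_exit_preempt(foo_epoch, &et);
	return (found);
}

The section may be preempted (the block handler above propagates priority and parks waiters on turnstiles), but it must not sleep, and pointers into protected structures must not be used after the section is exited, which is why the value is copied out.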
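
Deferred reclamation pairs with that read path: once a writer unlinks an object so that no new reader can find it, epoch_call() queues a callback that the per-CPU epoch_cb_task runs after every section that might still see the object has ended. Continuing the same hypothetical sketch (writer-side serialization, e.g. a mutex around list updates, is assumed but omitted):

static void
foo_free_cb(epoch_context_t ctx)
{
	struct foo *f;

	/* Recover the object from its embedded epoch_context. */
	f = __containerof(ctx, struct foo, f_ctx);
	free(f, M_TEMP);
}

static void
foo_remove_deferred(struct foo *f)
{
	SLIST_REMOVE(&foo_list, f, foo, f_link);	/* unlink first */
	epoch_call(foo_epoch, &f->f_ctx, foo_free_cb);
}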
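
The synchronous alternative is epoch_wait_preempt(), which blocks the caller until every reader that was in a section at the time of the call has left it. As the WITNESS_WARN and KASSERT in the diff indicate, it can be long-running, may sleep, and must never be called from within a section of the same epoch. Again hypothetical:

static void
foo_remove_sync(struct foo *f)
{
	SLIST_REMOVE(&foo_list, f, foo, f_link);
	epoch_wait_preempt(foo_epoch);	/* wait out the grace period */
	free(f, M_TEMP);		/* now no reader can hold f */
}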
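
Finally, the non-preemptible epoch_enter()/epoch_exit() pair needs no tracker because the whole section runs inside a critical section, and in_epoch() can back assertions; since in_epoch_verbose() finds threads by walking the per-CPU tracker list, such assertions are only meaningful for preemptible sections. A last sketch under the same assumptions:

static epoch_t foo_np_epoch;	/* epoch_alloc(0): non-preemptible */

static int
foo_peek(struct foo *f)
{
	int v;

	epoch_enter(foo_np_epoch);	/* critical section: no sleeping or
					 * blocking locks inside */
	v = f->f_value;
	epoch_exit(foo_np_epoch);
	return (v);
}

static int
foo_value_protected(struct foo *f)
{

	MPASS(in_epoch(foo_epoch));	/* caller holds a preemptible section */
	return (f->f_value);
}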