Index: head/sys/amd64/include/pcpu.h =================================================================== --- head/sys/amd64/include/pcpu.h (revision 313192) +++ head/sys/amd64/include/pcpu.h (revision 313193) @@ -1,262 +1,252 @@ /*- * Copyright (c) Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PCPU_H_ #define _MACHINE_PCPU_H_ #ifndef _SYS_CDEFS_H_ #error "sys/cdefs.h is a prerequisite for this file" #endif /* * The SMP parts are setup in pmap.c and locore.s for the BSP, and * mp_machdep.c sets up the data for the AP's to "see" when they awake. 
* The reason for doing it via a struct is so that an array of pointers * to each CPU's data can be set up for things like "check curproc on all * other processors" */ #define PCPU_MD_FIELDS \ char pc_monitorbuf[128] __aligned(128); /* cache line */ \ struct pcpu *pc_prvspace; /* Self-reference */ \ struct pmap *pc_curpmap; \ struct amd64tss *pc_tssp; /* TSS segment active on CPU */ \ struct amd64tss *pc_commontssp;/* Common TSS for the CPU */ \ register_t pc_rsp0; \ register_t pc_scratch_rsp; /* User %rsp in syscall */ \ u_int pc_apic_id; \ u_int pc_acpi_id; /* ACPI CPU id */ \ /* Pointer to the CPU %fs descriptor */ \ struct user_segment_descriptor *pc_fs32p; \ /* Pointer to the CPU %gs descriptor */ \ struct user_segment_descriptor *pc_gs32p; \ /* Pointer to the CPU LDT descriptor */ \ struct system_segment_descriptor *pc_ldt; \ /* Pointer to the CPU TSS descriptor */ \ struct system_segment_descriptor *pc_tss; \ uint64_t pc_pm_save_cnt; \ u_int pc_cmci_mask; /* MCx banks for CMCI */ \ uint64_t pc_dbreg[16]; /* ddb debugging regs */ \ int pc_dbreg_cmd; /* ddb debugging reg cmd */ \ u_int pc_vcpu_id; /* Xen vCPU ID */ \ uint32_t pc_pcid_next; \ uint32_t pc_pcid_gen; \ uint32_t pc_smp_tlb_done; /* TLB op acknowledgement */ \ char __pad[145] /* be divisor of PAGE_SIZE \ after cache alignment */ #define PC_DBREG_CMD_NONE 0 #define PC_DBREG_CMD_LOAD 1 #ifdef _KERNEL #ifdef lint extern struct pcpu *pcpup; -#define get_pcpu() (pcpup) #define PCPU_GET(member) (pcpup->pc_ ## member) #define PCPU_ADD(member, val) (pcpup->pc_ ## member += (val)) #define PCPU_INC(member) PCPU_ADD(member, 1) #define PCPU_PTR(member) (&pcpup->pc_ ## member) #define PCPU_SET(member, val) (pcpup->pc_ ## member = (val)) #elif defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) /* * Evaluates to the byte offset of the per-cpu variable name. */ #define __pcpu_offset(name) \ __offsetof(struct pcpu, name) /* * Evaluates to the type of the per-cpu variable name. */ #define __pcpu_type(name) \ __typeof(((struct pcpu *)0)->name) /* * Evaluates to the address of the per-cpu variable name. */ #define __PCPU_PTR(name) __extension__ ({ \ __pcpu_type(name) *__p; \ \ __asm __volatile("movq %%gs:%1,%0; addq %2,%0" \ : "=r" (__p) \ : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace))), \ "i" (__pcpu_offset(name))); \ \ __p; \ }) /* * Evaluates to the value of the per-cpu variable name. */ #define __PCPU_GET(name) __extension__ ({ \ __pcpu_type(name) __res; \ struct __s { \ u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \ } __s; \ \ if (sizeof(__res) == 1 || sizeof(__res) == 2 || \ sizeof(__res) == 4 || sizeof(__res) == 8) { \ __asm __volatile("mov %%gs:%1,%0" \ : "=r" (__s) \ : "m" (*(struct __s *)(__pcpu_offset(name)))); \ *(struct __s *)(void *)&__res = __s; \ } else { \ __res = *__PCPU_PTR(name); \ } \ __res; \ }) /* * Adds the value to the per-cpu counter name. The implementation * must be atomic with respect to interrupts. */ #define __PCPU_ADD(name, val) do { \ __pcpu_type(name) __val; \ struct __s { \ u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \ } __s; \ \ __val = (val); \ if (sizeof(__val) == 1 || sizeof(__val) == 2 || \ sizeof(__val) == 4 || sizeof(__val) == 8) { \ __s = *(struct __s *)(void *)&__val; \ __asm __volatile("add %1,%%gs:%0" \ : "=m" (*(struct __s *)(__pcpu_offset(name))) \ : "r" (__s)); \ } else \ *__PCPU_PTR(name) += __val; \ } while (0) /* * Increments the value of the per-cpu counter name. The implementation * must be atomic with respect to interrupts. 
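An editorial sketch, not part of this patch: consumers of this header do not open-code the %gs-relative accesses; they use the PCPU_GET()/PCPU_SET() wrappers defined below, and only hold a PCPU_PTR() while pinned or inside a critical section so the thread cannot migrate between accesses. The helper example_touch_pcpu() is hypothetical.

static __inline void
example_touch_pcpu(void)
{
	u_int apic;

	critical_enter();			/* stay on one CPU for both accesses */
	apic = PCPU_GET(apic_id);		/* one %gs-relative load of pc_apic_id */
	PCPU_SET(dbreg_cmd, PC_DBREG_CMD_NONE);	/* %gs-relative store of pc_dbreg_cmd */
	critical_exit();
	(void)apic;
}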
*/ #define __PCPU_INC(name) do { \ CTASSERT(sizeof(__pcpu_type(name)) == 1 || \ sizeof(__pcpu_type(name)) == 2 || \ sizeof(__pcpu_type(name)) == 4 || \ sizeof(__pcpu_type(name)) == 8); \ if (sizeof(__pcpu_type(name)) == 1) { \ __asm __volatile("incb %%gs:%0" \ : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\ : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\ } else if (sizeof(__pcpu_type(name)) == 2) { \ __asm __volatile("incw %%gs:%0" \ : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\ : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\ } else if (sizeof(__pcpu_type(name)) == 4) { \ __asm __volatile("incl %%gs:%0" \ : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\ : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\ } else if (sizeof(__pcpu_type(name)) == 8) { \ __asm __volatile("incq %%gs:%0" \ : "=m" (*(__pcpu_type(name) *)(__pcpu_offset(name)))\ : "m" (*(__pcpu_type(name) *)(__pcpu_offset(name))));\ } \ } while (0) /* * Sets the value of the per-cpu variable name to value val. */ #define __PCPU_SET(name, val) { \ __pcpu_type(name) __val; \ struct __s { \ u_char __b[MIN(sizeof(__pcpu_type(name)), 8)]; \ } __s; \ \ __val = (val); \ if (sizeof(__val) == 1 || sizeof(__val) == 2 || \ sizeof(__val) == 4 || sizeof(__val) == 8) { \ __s = *(struct __s *)(void *)&__val; \ __asm __volatile("mov %1,%%gs:%0" \ : "=m" (*(struct __s *)(__pcpu_offset(name))) \ : "r" (__s)); \ } else { \ *__PCPU_PTR(name) = __val; \ } \ } - -#define get_pcpu() __extension__ ({ \ - struct pcpu *__pc; \ - \ - __asm __volatile("movq %%gs:%1,%0" \ - : "=r" (__pc) \ - : "m" (*(struct pcpu *)(__pcpu_offset(pc_prvspace)))); \ - __pc; \ -}) #define PCPU_GET(member) __PCPU_GET(pc_ ## member) #define PCPU_ADD(member, val) __PCPU_ADD(pc_ ## member, val) #define PCPU_INC(member) __PCPU_INC(pc_ ## member) #define PCPU_PTR(member) __PCPU_PTR(pc_ ## member) #define PCPU_SET(member, val) __PCPU_SET(pc_ ## member, val) #define OFFSETOF_CURTHREAD 0 #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wnull-dereference" #endif static __inline __pure2 struct thread * __curthread(void) { struct thread *td; __asm("movq %%gs:%1,%0" : "=r" (td) : "m" (*(char *)OFFSETOF_CURTHREAD)); return (td); } #ifdef __clang__ #pragma clang diagnostic pop #endif #define curthread (__curthread()) #define OFFSETOF_CURPCB 32 static __inline __pure2 struct pcb * __curpcb(void) { struct pcb *pcb; __asm("movq %%gs:%1,%0" : "=r" (pcb) : "m" (*(char *)OFFSETOF_CURPCB)); return (pcb); } #define curpcb (__curpcb()) #define IS_BSP() (PCPU_GET(cpuid) == 0) #else /* !lint || defined(__GNUCLIKE_ASM) && defined(__GNUCLIKE___TYPEOF) */ #error "this file needs to be ported to your compiler" #endif /* lint, etc. */ #endif /* _KERNEL */ #endif /* !_MACHINE_PCPU_H_ */ Index: head/sys/kern/kern_rmlock.c =================================================================== --- head/sys/kern/kern_rmlock.c (revision 313192) +++ head/sys/kern/kern_rmlock.c (revision 313193) @@ -1,860 +1,860 @@ /*- * Copyright (c) 2007 Stephan Uphoff * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Machine independent bits of reader/writer lock implementation. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif /* * A cookie to mark destroyed rmlocks. This is stored in the head of * rm_activeReaders. */ #define RM_DESTROYED ((void *)0xdead) #define rm_destroyed(rm) \ (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED) #define RMPF_ONQUEUE 1 #define RMPF_SIGNAL 2 #ifndef INVARIANTS #define _rm_assert(c, what, file, line) #endif static void assert_rm(const struct lock_object *lock, int what); #ifdef DDB static void db_show_rm(const struct lock_object *lock); #endif static void lock_rm(struct lock_object *lock, uintptr_t how); #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner); #endif static uintptr_t unlock_rm(struct lock_object *lock); struct lock_class lock_class_rm = { .lc_name = "rm", .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; struct lock_class lock_class_rm_sleepable = { .lc_name = "sleepable rm", .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE, .lc_assert = assert_rm, #ifdef DDB .lc_ddb_show = db_show_rm, #endif .lc_lock = lock_rm, .lc_unlock = unlock_rm, #ifdef KDTRACE_HOOKS .lc_owner = owner_rm, #endif }; static void assert_rm(const struct lock_object *lock, int what) { rm_assert((const struct rmlock *)lock, what); } static void lock_rm(struct lock_object *lock, uintptr_t how) { struct rmlock *rm; struct rm_priotracker *tracker; rm = (struct rmlock *)lock; if (how == 0) rm_wlock(rm); else { tracker = (struct rm_priotracker *)how; rm_rlock(rm, tracker); } } static uintptr_t unlock_rm(struct lock_object *lock) { struct thread *td; struct pcpu *pc; struct rmlock *rm; struct rm_queue *queue; struct rm_priotracker *tracker; uintptr_t how; rm = (struct rmlock *)lock; tracker = NULL; how = 0; rm_assert(rm, RA_LOCKED | RA_NOTRECURSED); if (rm_wowned(rm)) rm_wunlock(rm); else { /* * Find the right rm_priotracker structure for curthread. * The guarantee about its uniqueness is given by the fact * we already asserted the lock wasn't recursively acquired. 
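An editorial sketch, not part of this revision, of the pattern the hunks below fall back to: with get_pcpu() removed from the MD headers, MI code enters a critical section and looks up the current CPU's pcpu area with pcpu_find(curcpu). The helper example_cpu_needs_token() is hypothetical; pcpu_find(), curcpu, pc_cpuid and rm_writecpus are the existing interfaces this file uses.

static int
example_cpu_needs_token(struct rmlock *rm)
{
	struct pcpu *pc;
	int need;

	critical_enter();		/* curcpu must not change during the lookup */
	pc = pcpu_find(curcpu);		/* MI replacement for the removed get_pcpu() */
	need = CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus);
	critical_exit();
	return (need);
}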
*/ critical_enter(); td = curthread; - pc = get_pcpu(); + pc = pcpu_find(curcpu); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) { how = (uintptr_t)tracker; break; } } KASSERT(tracker != NULL, ("rm_priotracker is non-NULL when lock held in read mode")); critical_exit(); rm_runlock(rm, tracker); } return (how); } #ifdef KDTRACE_HOOKS static int owner_rm(const struct lock_object *lock, struct thread **owner) { const struct rmlock *rm; struct lock_class *lc; rm = (const struct rmlock *)lock; lc = LOCK_CLASS(&rm->rm_wlock_object); return (lc->lc_owner(&rm->rm_wlock_object, owner)); } #endif static struct mtx rm_spinlock; MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN); /* * Add or remove tracker from per-cpu list. * * The per-cpu list can be traversed at any time in forward direction from an * interrupt on the *local* cpu. */ static void inline rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next; /* Initialize all tracker pointers */ tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; next = pc->pc_rm_queue.rmq_next; tracker->rmp_cpuQueue.rmq_next = next; /* rmq_prev is not used during froward traversal. */ next->rmq_prev = &tracker->rmp_cpuQueue; /* Update pointer to first element. */ pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; } /* * Return a count of the number of trackers the thread 'td' already * has on this CPU for the lock 'rm'. */ static int rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm, const struct thread *td) { struct rm_queue *queue; struct rm_priotracker *tracker; int count; count = 0; for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) count++; } return (count); } static void inline rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker) { struct rm_queue *next, *prev; next = tracker->rmp_cpuQueue.rmq_next; prev = tracker->rmp_cpuQueue.rmq_prev; /* Not used during forward traversal. */ next->rmq_prev = prev; /* Remove from list. */ prev->rmq_next = next; } static void rm_cleanIPI(void *arg) { struct pcpu *pc; struct rmlock *rm = arg; struct rm_priotracker *tracker; struct rm_queue *queue; - pc = get_pcpu(); + pc = pcpu_find(curcpu); for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tracker = (struct rm_priotracker *)queue; if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { tracker->rmp_flags = RMPF_ONQUEUE; mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); mtx_unlock_spin(&rm_spinlock); } } } void rm_init_flags(struct rmlock *rm, const char *name, int opts) { struct lock_class *lc; int liflags, xflags; liflags = 0; if (!(opts & RM_NOWITNESS)) liflags |= LO_WITNESS; if (opts & RM_RECURSE) liflags |= LO_RECURSABLE; if (opts & RM_NEW) liflags |= LO_NEW; rm->rm_writecpus = all_cpus; LIST_INIT(&rm->rm_activeReaders); if (opts & RM_SLEEPABLE) { liflags |= LO_SLEEPABLE; lc = &lock_class_rm_sleepable; xflags = (opts & RM_NEW ? SX_NEW : 0); sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", xflags | SX_NOWITNESS); } else { lc = &lock_class_rm; xflags = (opts & RM_NEW ? 
MTX_NEW : 0); mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", xflags | MTX_NOWITNESS); } lock_init(&rm->lock_object, lc, name, NULL, liflags); } void rm_init(struct rmlock *rm, const char *name) { rm_init_flags(rm, name, 0); } void rm_destroy(struct rmlock *rm) { rm_assert(rm, RA_UNLOCKED); LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_destroy(&rm->rm_lock_sx); else mtx_destroy(&rm->rm_lock_mtx); lock_destroy(&rm->lock_object); } int rm_wowned(const struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) return (sx_xlocked(&rm->rm_lock_sx)); else return (mtx_owned(&rm->rm_lock_mtx)); } void rm_sysinit(void *arg) { struct rm_args *args = arg; rm_init(args->ra_rm, args->ra_desc); } void rm_sysinit_flags(void *arg) { struct rm_args_flags *args = arg; rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts); } static int _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct pcpu *pc; critical_enter(); - pc = get_pcpu(); + pc = pcpu_find(curcpu); /* Check if we just need to do a proper critical_exit. */ if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) { critical_exit(); return (1); } /* Remove our tracker from the per-cpu list. */ rm_tracker_remove(pc, tracker); /* Check to see if the IPI granted us the lock after all. */ if (tracker->rmp_flags) { /* Just add back tracker - we hold the lock. */ rm_tracker_add(pc, tracker); critical_exit(); return (1); } /* * We allow readers to acquire a lock even if a writer is blocked if * the lock is recursive and the reader already holds the lock. */ if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { /* * Just grant the lock if this thread already has a tracker * for this lock on the per-cpu queue. */ if (rm_trackers_present(pc, rm, curthread) != 0) { mtx_lock_spin(&rm_spinlock); LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, rmp_qentry); tracker->rmp_flags = RMPF_ONQUEUE; mtx_unlock_spin(&rm_spinlock); rm_tracker_add(pc, tracker); critical_exit(); return (1); } } sched_unpin(); critical_exit(); if (trylock) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { if (!sx_try_xlock(&rm->rm_lock_sx)) return (0); } else { if (!mtx_trylock(&rm->rm_lock_mtx)) return (0); } } else { if (rm->lock_object.lo_flags & LO_SLEEPABLE) { THREAD_SLEEPING_OK(); sx_xlock(&rm->rm_lock_sx); THREAD_NO_SLEEPING(); } else mtx_lock(&rm->rm_lock_mtx); } critical_enter(); - pc = get_pcpu(); + pc = pcpu_find(curcpu); CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus); rm_tracker_add(pc, tracker); sched_pin(); critical_exit(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); return (1); } int _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) { struct thread *td = curthread; struct pcpu *pc; if (SCHEDULER_STOPPED()) return (1); tracker->rmp_flags = 0; tracker->rmp_thread = td; tracker->rmp_rmlock = rm; if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_NO_SLEEPING(); td->td_critnest++; /* critical_enter(); */ __compiler_membar(); pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_add(pc, tracker); sched_pin(); __compiler_membar(); td->td_critnest--; /* * Fast path to combine two common conditions into a single * conditional jump. */ if (0 == (td->td_owepreempt | CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))) return (1); /* We do not have a read token and need to acquire one. 
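An editorial sketch, not part of the diff, of the consumer-facing rmlock API whose slow path is implemented here; the rm_rlock()/rm_runlock()/rm_wlock() wrappers come from sys/rmlock.h. The names example_lock and example_data are hypothetical.

static struct rmlock example_lock;
static int example_data;

static void
example_setup(void)
{

	rm_init(&example_lock, "example");
}

static int
example_read(void)
{
	struct rm_priotracker tracker;	/* lives on the reader's stack */
	int v;

	rm_rlock(&example_lock, &tracker);
	v = example_data;
	rm_runlock(&example_lock, &tracker);
	return (v);
}

static void
example_write(int v)
{

	rm_wlock(&example_lock);	/* takes the backing mtx/sx, revokes read tokens */
	example_data = v;
	rm_wunlock(&example_lock);
}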
*/ return _rm_rlock_hard(rm, tracker, trylock); } static void _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker) { if (td->td_owepreempt) { td->td_critnest++; critical_exit(); } if (!tracker->rmp_flags) return; mtx_lock_spin(&rm_spinlock); LIST_REMOVE(tracker, rmp_qentry); if (tracker->rmp_flags & RMPF_SIGNAL) { struct rmlock *rm; struct turnstile *ts; rm = tracker->rmp_rmlock; turnstile_chain_lock(&rm->lock_object); mtx_unlock_spin(&rm_spinlock); ts = turnstile_lookup(&rm->lock_object); turnstile_signal(ts, TS_EXCLUSIVE_QUEUE); turnstile_unpend(ts, TS_EXCLUSIVE_LOCK); turnstile_chain_unlock(&rm->lock_object); } else mtx_unlock_spin(&rm_spinlock); } void _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker) { struct pcpu *pc; struct thread *td = tracker->rmp_thread; if (SCHEDULER_STOPPED()) return; td->td_critnest++; /* critical_enter(); */ pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */ rm_tracker_remove(pc, tracker); td->td_critnest--; sched_unpin(); if (rm->lock_object.lo_flags & LO_SLEEPABLE) THREAD_SLEEPING_OK(); if (0 == (td->td_owepreempt | tracker->rmp_flags)) return; _rm_unlock_hard(td, tracker); } void _rm_wlock(struct rmlock *rm) { struct rm_priotracker *prio; struct turnstile *ts; cpuset_t readcpus; if (SCHEDULER_STOPPED()) return; if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xlock(&rm->rm_lock_sx); else mtx_lock(&rm->rm_lock_mtx); if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) { /* Get all read tokens back */ readcpus = all_cpus; CPU_NAND(&readcpus, &rm->rm_writecpus); rm->rm_writecpus = all_cpus; /* * Assumes rm->rm_writecpus update is visible on other CPUs * before rm_cleanIPI is called. */ #ifdef SMP smp_rendezvous_cpus(readcpus, smp_no_rendevous_barrier, rm_cleanIPI, smp_no_rendevous_barrier, rm); #else rm_cleanIPI(rm); #endif mtx_lock_spin(&rm_spinlock); while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { ts = turnstile_trywait(&rm->lock_object); prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; mtx_unlock_spin(&rm_spinlock); turnstile_wait(ts, prio->rmp_thread, TS_EXCLUSIVE_QUEUE); mtx_lock_spin(&rm_spinlock); } mtx_unlock_spin(&rm_spinlock); } } void _rm_wunlock(struct rmlock *rm) { if (rm->lock_object.lo_flags & LO_SLEEPABLE) sx_xunlock(&rm->rm_lock_sx); else mtx_unlock(&rm->rm_lock_mtx); } #if LOCK_DEBUG > 0 void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_wlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_UNLOCKED, file, line); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL); _rm_wlock(rm); LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); TD_LOCKS_INC(curthread); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_WLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_wunlock(rm); TD_LOCKS_DEC(curthread); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { if (SCHEDULER_STOPPED()) return (1); #ifdef INVARIANTS if (!(rm->lock_object.lo_flags & 
LO_RECURSABLE) && !trylock) { critical_enter(); - KASSERT(rm_trackers_present(get_pcpu(), rm, + KASSERT(rm_trackers_present(pcpu_find(curcpu), rm, curthread) == 0, ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n", rm->lock_object.lo_name, file, line)); critical_exit(); } #endif KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d", curthread, rm->lock_object.lo_name, file, line)); KASSERT(!rm_destroyed(rm), ("rm_rlock() of destroyed rmlock @ %s:%d", file, line)); if (!trylock) { KASSERT(!rm_wowned(rm), ("rm_rlock: wlock already held for %s @ %s:%d", rm->lock_object.lo_name, file, line)); WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line, NULL); } if (_rm_rlock(rm, tracker, trylock)) { if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, line); else LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, line); WITNESS_LOCK(&rm->lock_object, 0, file, line); TD_LOCKS_INC(curthread); return (1); } else if (trylock) LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line); return (0); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { if (SCHEDULER_STOPPED()) return; KASSERT(!rm_destroyed(rm), ("rm_runlock() of destroyed rmlock @ %s:%d", file, line)); _rm_assert(rm, RA_RLOCKED, file, line); WITNESS_UNLOCK(&rm->lock_object, 0, file, line); LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); _rm_runlock(rm, tracker); TD_LOCKS_DEC(curthread); } #else /* * Just strip out file and line arguments if no lock debugging is enabled in * the kernel - we are called from a kernel module. */ void _rm_wlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wlock(rm); } void _rm_wunlock_debug(struct rmlock *rm, const char *file, int line) { _rm_wunlock(rm); } int _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, int trylock, const char *file, int line) { return _rm_rlock(rm, tracker, trylock); } void _rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker, const char *file, int line) { _rm_runlock(rm, tracker); } #endif #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS #undef _rm_assert #endif /* * Note that this does not need to use witness_assert() for read lock * assertions since an exact count of read locks held by this thread * is computable. */ void _rm_assert(const struct rmlock *rm, int what, const char *file, int line) { int count; if (panicstr != NULL) return; switch (what) { case RA_LOCKED: case RA_LOCKED | RA_RECURSED: case RA_LOCKED | RA_NOTRECURSED: case RA_RLOCKED: case RA_RLOCKED | RA_RECURSED: case RA_RLOCKED | RA_NOTRECURSED: /* * Handle the write-locked case. Unlike other * primitives, writers can never recurse. */ if (rm_wowned(rm)) { if (what & RA_RLOCKED) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; } critical_enter(); - count = rm_trackers_present(get_pcpu(), rm, curthread); + count = rm_trackers_present(pcpu_find(curcpu), rm, curthread); critical_exit(); if (count == 0) panic("Lock %s not %slocked @ %s:%d\n", rm->lock_object.lo_name, (what & RA_RLOCKED) ? 
"read " : "", file, line); if (count > 1) { if (what & RA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); } else if (what & RA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_WLOCKED: if (!rm_wowned(rm)) panic("Lock %s not exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; case RA_UNLOCKED: if (rm_wowned(rm)) panic("Lock %s exclusively locked @ %s:%d\n", rm->lock_object.lo_name, file, line); critical_enter(); - count = rm_trackers_present(get_pcpu(), rm, curthread); + count = rm_trackers_present(pcpu_find(curcpu), rm, curthread); critical_exit(); if (count != 0) panic("Lock %s read locked @ %s:%d\n", rm->lock_object.lo_name, file, line); break; default: panic("Unknown rm lock assertion: %d @ %s:%d", what, file, line); } } #endif /* INVARIANT_SUPPORT */ #ifdef DDB static void print_tracker(struct rm_priotracker *tr) { struct thread *td; td = tr->rmp_thread; db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, td->td_proc->p_pid, td->td_name); if (tr->rmp_flags & RMPF_ONQUEUE) { db_printf("ONQUEUE"); if (tr->rmp_flags & RMPF_SIGNAL) db_printf(",SIGNAL"); } else db_printf("0"); db_printf("}\n"); } static void db_show_rm(const struct lock_object *lock) { struct rm_priotracker *tr; struct rm_queue *queue; const struct rmlock *rm; struct lock_class *lc; struct pcpu *pc; rm = (const struct rmlock *)lock; db_printf(" writecpus: "); ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); db_printf("\n"); db_printf(" per-CPU readers:\n"); STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; queue = queue->rmq_next) { tr = (struct rm_priotracker *)queue; if (tr->rmp_rmlock == rm) print_tracker(tr); } db_printf(" active readers:\n"); LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) print_tracker(tr); lc = LOCK_CLASS(&rm->rm_wlock_object); db_printf("Backing write-lock (%s):\n", lc->lc_name); lc->lc_ddb_show(&rm->rm_wlock_object); } #endif Index: head/sys/mips/include/pcpu.h =================================================================== --- head/sys/mips/include/pcpu.h (revision 313192) +++ head/sys/mips/include/pcpu.h (revision 313193) @@ -1,85 +1,84 @@ /*- * Copyright (c) 1999 Luoqi Chen * Copyright (c) Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: src/sys/alpha/include/pcpu.h,v 1.15 2004/11/05 19:16:44 jhb * $FreeBSD$ */ #ifndef _MACHINE_PCPU_H_ #define _MACHINE_PCPU_H_ #include #include #define PCPU_MD_COMMON_FIELDS \ pd_entry_t *pc_segbase; /* curthread segbase */ \ struct pmap *pc_curpmap; /* pmap of curthread */ \ u_int32_t pc_next_asid; /* next ASID to alloc */ \ u_int32_t pc_asid_generation; /* current ASID generation */ \ u_int pc_pending_ipis; /* IPIs pending to this CPU */ #ifdef __mips_n64 #define PCPU_MD_MIPS64_FIELDS \ PCPU_MD_COMMON_FIELDS \ char __pad[61] #else #define PCPU_MD_MIPS32_FIELDS \ PCPU_MD_COMMON_FIELDS \ char __pad[193] #endif #ifdef __mips_n64 #define PCPU_MD_FIELDS PCPU_MD_MIPS64_FIELDS #else #define PCPU_MD_FIELDS PCPU_MD_MIPS32_FIELDS #endif #ifdef _KERNEL extern char pcpu_space[MAXCPU][PAGE_SIZE * 2]; #define PCPU_ADDR(cpu) (struct pcpu *)(pcpu_space[(cpu)]) extern struct pcpu *pcpup; #define PCPUP pcpup -#define get_pcpu() (pcpup) #define PCPU_ADD(member, value) (PCPUP->pc_ ## member += (value)) #define PCPU_GET(member) (PCPUP->pc_ ## member) #define PCPU_INC(member) PCPU_ADD(member, 1) #define PCPU_PTR(member) (&PCPUP->pc_ ## member) #define PCPU_SET(member,value) (PCPUP->pc_ ## member = (value)) #define PCPU_LAZY_INC(member) (++PCPUP->pc_ ## member) #ifdef SMP /* * Instantiate the wired TLB entry at PCPU_TLB_ENTRY to map 'pcpu' at 'pcpup'. */ void mips_pcpu_tlb_init(struct pcpu *pcpu); #endif #endif /* _KERNEL */ #endif /* !_MACHINE_PCPU_H_ */ Index: head/sys/net/netisr.c =================================================================== --- head/sys/net/netisr.c (revision 313192) +++ head/sys/net/netisr.c (revision 313193) @@ -1,1531 +1,1532 @@ /*- * Copyright (c) 2007-2009 Robert N. M. Watson * Copyright (c) 2010-2011 Juniper Networks, Inc. * All rights reserved. * * This software was developed by Robert N. M. Watson under contract * to Juniper Networks, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * netisr is a packet dispatch service, allowing synchronous (directly * dispatched) and asynchronous (deferred dispatch) processing of packets by * registered protocol handlers. Callers pass a protocol identifier and * packet to netisr, along with a direct dispatch hint, and work will either * be immediately processed by the registered handler, or passed to a * software interrupt (SWI) thread for deferred dispatch. Callers will * generally select one or the other based on: * * - Whether directly dispatching a netisr handler lead to code reentrance or * lock recursion, such as entering the socket code from the socket code. * - Whether directly dispatching a netisr handler lead to recursive * processing, such as when decapsulating several wrapped layers of tunnel * information (IPSEC within IPSEC within ...). * * Maintaining ordering for protocol streams is a critical design concern. * Enforcing ordering limits the opportunity for concurrency, but maintains * the strong ordering requirements found in some protocols, such as TCP. Of * related concern is CPU affinity--it is desirable to process all data * associated with a particular stream on the same CPU over time in order to * avoid acquiring locks associated with the connection on different CPUs, * keep connection data in one cache, and to generally encourage associated * user threads to live on the same CPU as the stream. It's also desirable * to avoid lock migration and contention where locks are associated with * more than one flow. * * netisr supports several policy variations, represented by the * NETISR_POLICY_* constants, allowing protocols to play various roles in * identifying flows, assigning work to CPUs, etc. These are described in * netisr.h. */ #include "opt_ddb.h" #include "opt_device_polling.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DDB #include #endif #define _WANT_NETISR_INTERNAL /* Enable definitions from netisr_internal.h */ #include #include #include #include #include /*- * Synchronize use and modification of the registered netisr data structures; * acquire a read lock while modifying the set of registered protocols to * prevent partially registered or unregistered protocols from being run. * * The following data structures and fields are protected by this lock: * * - The netisr_proto array, including all fields of struct netisr_proto. * - The nws array, including all fields of struct netisr_worker. * - The nws_array array. * * Note: the NETISR_LOCKING define controls whether read locks are acquired * in packet processing paths requiring netisr registration stability. This * is disabled by default as it can lead to measurable performance * degradation even with rmlocks (3%-6% for loopback ping-pong traffic), and * because netisr registration and unregistration is extremely rare at * runtime. If it becomes more common, this decision should be revisited. 
* * XXXRW: rmlocks don't support assertions. */ static struct rmlock netisr_rmlock; #define NETISR_LOCK_INIT() rm_init_flags(&netisr_rmlock, "netisr", \ RM_NOWITNESS) #define NETISR_LOCK_ASSERT() #define NETISR_RLOCK(tracker) rm_rlock(&netisr_rmlock, (tracker)) #define NETISR_RUNLOCK(tracker) rm_runlock(&netisr_rmlock, (tracker)) #define NETISR_WLOCK() rm_wlock(&netisr_rmlock) #define NETISR_WUNLOCK() rm_wunlock(&netisr_rmlock) /* #define NETISR_LOCKING */ static SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr"); /*- * Three global direct dispatch policies are supported: * * NETISR_DISPATCH_DEFERRED: All work is deferred for a netisr, regardless of * context (may be overriden by protocols). * * NETISR_DISPATCH_HYBRID: If the executing context allows direct dispatch, * and we're running on the CPU the work would be performed on, then direct * dispatch it if it wouldn't violate ordering constraints on the workstream. * * NETISR_DISPATCH_DIRECT: If the executing context allows direct dispatch, * always direct dispatch. (The default.) * * Notice that changing the global policy could lead to short periods of * misordered processing, but this is considered acceptable as compared to * the complexity of enforcing ordering during policy changes. Protocols can * override the global policy (when they're not doing that, they select * NETISR_DISPATCH_DEFAULT). */ #define NETISR_DISPATCH_POLICY_DEFAULT NETISR_DISPATCH_DIRECT #define NETISR_DISPATCH_POLICY_MAXSTR 20 /* Used for temporary buffers. */ static u_int netisr_dispatch_policy = NETISR_DISPATCH_POLICY_DEFAULT; static int sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_net_isr, OID_AUTO, dispatch, CTLTYPE_STRING | CTLFLAG_RWTUN, 0, 0, sysctl_netisr_dispatch_policy, "A", "netisr dispatch policy"); /* * Allow the administrator to limit the number of threads (CPUs) to use for * netisr. We don't check netisr_maxthreads before creating the thread for * CPU 0. This must be set at boot. We will create at most one thread per CPU. * By default we initialize this to 1 which would assign just 1 cpu (cpu0) and * therefore only 1 workstream. If set to -1, netisr would use all cpus * (mp_ncpus) and therefore would have those many workstreams. One workstream * per thread (CPU). */ static int netisr_maxthreads = 1; /* Max number of threads. */ SYSCTL_INT(_net_isr, OID_AUTO, maxthreads, CTLFLAG_RDTUN, &netisr_maxthreads, 0, "Use at most this many CPUs for netisr processing"); static int netisr_bindthreads = 0; /* Bind threads to CPUs. */ SYSCTL_INT(_net_isr, OID_AUTO, bindthreads, CTLFLAG_RDTUN, &netisr_bindthreads, 0, "Bind netisr threads to CPUs."); /* * Limit per-workstream mbuf queue limits s to at most net.isr.maxqlimit, * both for initial configuration and later modification using * netisr_setqlimit(). */ #define NETISR_DEFAULT_MAXQLIMIT 10240 static u_int netisr_maxqlimit = NETISR_DEFAULT_MAXQLIMIT; SYSCTL_UINT(_net_isr, OID_AUTO, maxqlimit, CTLFLAG_RDTUN, &netisr_maxqlimit, 0, "Maximum netisr per-protocol, per-CPU queue depth."); /* * The default per-workstream mbuf queue limit for protocols that don't * initialize the nh_qlimit field of their struct netisr_handler. If this is * set above netisr_maxqlimit, we truncate it to the maximum during boot. 
*/ #define NETISR_DEFAULT_DEFAULTQLIMIT 256 static u_int netisr_defaultqlimit = NETISR_DEFAULT_DEFAULTQLIMIT; SYSCTL_UINT(_net_isr, OID_AUTO, defaultqlimit, CTLFLAG_RDTUN, &netisr_defaultqlimit, 0, "Default netisr per-protocol, per-CPU queue limit if not set by protocol"); /* * Store and export the compile-time constant NETISR_MAXPROT limit on the * number of protocols that can register with netisr at a time. This is * required for crashdump analysis, as it sizes netisr_proto[]. */ static u_int netisr_maxprot = NETISR_MAXPROT; SYSCTL_UINT(_net_isr, OID_AUTO, maxprot, CTLFLAG_RD, &netisr_maxprot, 0, "Compile-time limit on the number of protocols supported by netisr."); /* * The netisr_proto array describes all registered protocols, indexed by * protocol number. See netisr_internal.h for more details. */ static struct netisr_proto netisr_proto[NETISR_MAXPROT]; #ifdef VIMAGE /* * The netisr_enable array describes a per-VNET flag for registered * protocols on whether this netisr is active in this VNET or not. * netisr_register() will automatically enable the netisr for the * default VNET and all currently active instances. * netisr_unregister() will disable all active VNETs, including vnet0. * Individual network stack instances can be enabled/disabled by the * netisr_(un)register _vnet() functions. * With this we keep the one netisr_proto per protocol but add a * mechanism to stop netisr processing for vnet teardown. * Apart from that we expect a VNET to always be enabled. */ static VNET_DEFINE(u_int, netisr_enable[NETISR_MAXPROT]); #define V_netisr_enable VNET(netisr_enable) #endif /* * Per-CPU workstream data. See netisr_internal.h for more details. */ DPCPU_DEFINE(struct netisr_workstream, nws); /* * Map contiguous values between 0 and nws_count into CPU IDs appropriate for * accessing workstreams. This allows constructions of the form * DPCPU_ID_GET(nws_array[arbitraryvalue % nws_count], nws). */ static u_int nws_array[MAXCPU]; /* * Number of registered workstreams. Will be at most the number of running * CPUs once fully started. */ static u_int nws_count; SYSCTL_UINT(_net_isr, OID_AUTO, numthreads, CTLFLAG_RD, &nws_count, 0, "Number of extant netisr threads."); /* * Synchronization for each workstream: a mutex protects all mutable fields * in each stream, including per-protocol state (mbuf queues). The SWI is * woken up if asynchronous dispatch is required. */ #define NWS_LOCK(s) mtx_lock(&(s)->nws_mtx) #define NWS_LOCK_ASSERT(s) mtx_assert(&(s)->nws_mtx, MA_OWNED) #define NWS_UNLOCK(s) mtx_unlock(&(s)->nws_mtx) #define NWS_SIGNAL(s) swi_sched((s)->nws_swi_cookie, 0) /* * Utility routines for protocols that implement their own mapping of flows * to CPUs. */ u_int netisr_get_cpucount(void) { return (nws_count); } u_int netisr_get_cpuid(u_int cpunumber) { return (nws_array[cpunumber % nws_count]); } /* * The default implementation of flow -> CPU ID mapping. * * Non-static so that protocols can use it to map their own work to specific * CPUs in a manner consistent to netisr for affinity purposes. */ u_int netisr_default_flow2cpu(u_int flowid) { return (nws_array[flowid % nws_count]); } /* * Dispatch tunable and sysctl configuration. 
*/ struct netisr_dispatch_table_entry { u_int ndte_policy; const char *ndte_policy_str; }; static const struct netisr_dispatch_table_entry netisr_dispatch_table[] = { { NETISR_DISPATCH_DEFAULT, "default" }, { NETISR_DISPATCH_DEFERRED, "deferred" }, { NETISR_DISPATCH_HYBRID, "hybrid" }, { NETISR_DISPATCH_DIRECT, "direct" }, }; static void netisr_dispatch_policy_to_str(u_int dispatch_policy, char *buffer, u_int buflen) { const struct netisr_dispatch_table_entry *ndtep; const char *str; u_int i; str = "unknown"; for (i = 0; i < nitems(netisr_dispatch_table); i++) { ndtep = &netisr_dispatch_table[i]; if (ndtep->ndte_policy == dispatch_policy) { str = ndtep->ndte_policy_str; break; } } snprintf(buffer, buflen, "%s", str); } static int netisr_dispatch_policy_from_str(const char *str, u_int *dispatch_policyp) { const struct netisr_dispatch_table_entry *ndtep; u_int i; for (i = 0; i < nitems(netisr_dispatch_table); i++) { ndtep = &netisr_dispatch_table[i]; if (strcmp(ndtep->ndte_policy_str, str) == 0) { *dispatch_policyp = ndtep->ndte_policy; return (0); } } return (EINVAL); } static int sysctl_netisr_dispatch_policy(SYSCTL_HANDLER_ARGS) { char tmp[NETISR_DISPATCH_POLICY_MAXSTR]; u_int dispatch_policy; int error; netisr_dispatch_policy_to_str(netisr_dispatch_policy, tmp, sizeof(tmp)); error = sysctl_handle_string(oidp, tmp, sizeof(tmp), req); if (error == 0 && req->newptr != NULL) { error = netisr_dispatch_policy_from_str(tmp, &dispatch_policy); if (error == 0 && dispatch_policy == NETISR_DISPATCH_DEFAULT) error = EINVAL; if (error == 0) netisr_dispatch_policy = dispatch_policy; } return (error); } /* * Register a new netisr handler, which requires initializing per-protocol * fields for each workstream. All netisr work is briefly suspended while * the protocol is installed. */ void netisr_register(const struct netisr_handler *nhp) { VNET_ITERATOR_DECL(vnet_iter); struct netisr_work *npwp; const char *name; u_int i, proto; proto = nhp->nh_proto; name = nhp->nh_name; /* * Test that the requested registration is valid. */ KASSERT(nhp->nh_name != NULL, ("%s: nh_name NULL for %u", __func__, proto)); KASSERT(nhp->nh_handler != NULL, ("%s: nh_handler NULL for %s", __func__, name)); KASSERT(nhp->nh_policy == NETISR_POLICY_SOURCE || nhp->nh_policy == NETISR_POLICY_FLOW || nhp->nh_policy == NETISR_POLICY_CPU, ("%s: unsupported nh_policy %u for %s", __func__, nhp->nh_policy, name)); KASSERT(nhp->nh_policy == NETISR_POLICY_FLOW || nhp->nh_m2flow == NULL, ("%s: nh_policy != FLOW but m2flow defined for %s", __func__, name)); KASSERT(nhp->nh_policy == NETISR_POLICY_CPU || nhp->nh_m2cpuid == NULL, ("%s: nh_policy != CPU but m2cpuid defined for %s", __func__, name)); KASSERT(nhp->nh_policy != NETISR_POLICY_CPU || nhp->nh_m2cpuid != NULL, ("%s: nh_policy == CPU but m2cpuid not defined for %s", __func__, name)); KASSERT(nhp->nh_dispatch == NETISR_DISPATCH_DEFAULT || nhp->nh_dispatch == NETISR_DISPATCH_DEFERRED || nhp->nh_dispatch == NETISR_DISPATCH_HYBRID || nhp->nh_dispatch == NETISR_DISPATCH_DIRECT, ("%s: invalid nh_dispatch (%u)", __func__, nhp->nh_dispatch)); KASSERT(proto < NETISR_MAXPROT, ("%s(%u, %s): protocol too big", __func__, proto, name)); /* * Test that no existing registration exists for this protocol. 
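An editorial sketch, not part of the diff, of how a protocol would use the queue-limit helpers implemented further below (netisr_getqlimit() and netisr_setqlimit()); example_bump_qlimit() is a hypothetical caller.

static void
example_bump_qlimit(const struct netisr_handler *nhp)
{
	u_int qlim;

	netisr_getqlimit(nhp, &qlim);
	if (qlim < 2048)
		(void)netisr_setqlimit(nhp, 2048);	/* EINVAL if above net.isr.maxqlimit */
}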
*/ NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_name == NULL, ("%s(%u, %s): name present", __func__, proto, name)); KASSERT(netisr_proto[proto].np_handler == NULL, ("%s(%u, %s): handler present", __func__, proto, name)); netisr_proto[proto].np_name = name; netisr_proto[proto].np_handler = nhp->nh_handler; netisr_proto[proto].np_m2flow = nhp->nh_m2flow; netisr_proto[proto].np_m2cpuid = nhp->nh_m2cpuid; netisr_proto[proto].np_drainedcpu = nhp->nh_drainedcpu; if (nhp->nh_qlimit == 0) netisr_proto[proto].np_qlimit = netisr_defaultqlimit; else if (nhp->nh_qlimit > netisr_maxqlimit) { printf("%s: %s requested queue limit %u capped to " "net.isr.maxqlimit %u\n", __func__, name, nhp->nh_qlimit, netisr_maxqlimit); netisr_proto[proto].np_qlimit = netisr_maxqlimit; } else netisr_proto[proto].np_qlimit = nhp->nh_qlimit; netisr_proto[proto].np_policy = nhp->nh_policy; netisr_proto[proto].np_dispatch = nhp->nh_dispatch; CPU_FOREACH(i) { npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; bzero(npwp, sizeof(*npwp)); npwp->nw_qlimit = netisr_proto[proto].np_qlimit; } #ifdef VIMAGE /* * Test that we are in vnet0 and have a curvnet set. */ KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); KASSERT(IS_DEFAULT_VNET(curvnet), ("%s: curvnet %p is not vnet0 %p", __func__, curvnet, vnet0)); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); V_netisr_enable[proto] = 1; CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); #endif NETISR_WUNLOCK(); } /* * Clear drop counters across all workstreams for a protocol. */ void netisr_clearqdrops(const struct netisr_handler *nhp) { struct netisr_work *npwp; #ifdef INVARIANTS const char *name; #endif u_int i, proto; proto = nhp->nh_proto; #ifdef INVARIANTS name = nhp->nh_name; #endif KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, name)); NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, name)); CPU_FOREACH(i) { npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; npwp->nw_qdrops = 0; } NETISR_WUNLOCK(); } /* * Query current drop counters across all workstreams for a protocol. */ void netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp) { struct netisr_work *npwp; struct rm_priotracker tracker; #ifdef INVARIANTS const char *name; #endif u_int i, proto; *qdropp = 0; proto = nhp->nh_proto; #ifdef INVARIANTS name = nhp->nh_name; #endif KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, name)); NETISR_RLOCK(&tracker); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, name)); CPU_FOREACH(i) { npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; *qdropp += npwp->nw_qdrops; } NETISR_RUNLOCK(&tracker); } /* * Query current per-workstream queue limit for a protocol. */ void netisr_getqlimit(const struct netisr_handler *nhp, u_int *qlimitp) { struct rm_priotracker tracker; #ifdef INVARIANTS const char *name; #endif u_int proto; proto = nhp->nh_proto; #ifdef INVARIANTS name = nhp->nh_name; #endif KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, name)); NETISR_RLOCK(&tracker); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, name)); *qlimitp = netisr_proto[proto].np_qlimit; NETISR_RUNLOCK(&tracker); } /* * Update the queue limit across per-workstream queues for a protocol. 
We * simply change the limits, and don't drain overflowed packets as they will * (hopefully) take care of themselves shortly. */ int netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit) { struct netisr_work *npwp; #ifdef INVARIANTS const char *name; #endif u_int i, proto; if (qlimit > netisr_maxqlimit) return (EINVAL); proto = nhp->nh_proto; #ifdef INVARIANTS name = nhp->nh_name; #endif KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, name)); NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, name)); netisr_proto[proto].np_qlimit = qlimit; CPU_FOREACH(i) { npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; npwp->nw_qlimit = qlimit; } NETISR_WUNLOCK(); return (0); } /* * Drain all packets currently held in a particular protocol work queue. */ static void netisr_drain_proto(struct netisr_work *npwp) { struct mbuf *m; /* * We would assert the lock on the workstream but it's not passed in. */ while ((m = npwp->nw_head) != NULL) { npwp->nw_head = m->m_nextpkt; m->m_nextpkt = NULL; if (npwp->nw_head == NULL) npwp->nw_tail = NULL; npwp->nw_len--; m_freem(m); } KASSERT(npwp->nw_tail == NULL, ("%s: tail", __func__)); KASSERT(npwp->nw_len == 0, ("%s: len", __func__)); } /* * Remove the registration of a network protocol, which requires clearing * per-protocol fields across all workstreams, including freeing all mbufs in * the queues at time of unregister. All work in netisr is briefly suspended * while this takes place. */ void netisr_unregister(const struct netisr_handler *nhp) { VNET_ITERATOR_DECL(vnet_iter); struct netisr_work *npwp; #ifdef INVARIANTS const char *name; #endif u_int i, proto; proto = nhp->nh_proto; #ifdef INVARIANTS name = nhp->nh_name; #endif KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, name)); NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, name)); #ifdef VIMAGE VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); V_netisr_enable[proto] = 0; CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); #endif netisr_proto[proto].np_name = NULL; netisr_proto[proto].np_handler = NULL; netisr_proto[proto].np_m2flow = NULL; netisr_proto[proto].np_m2cpuid = NULL; netisr_proto[proto].np_qlimit = 0; netisr_proto[proto].np_policy = 0; CPU_FOREACH(i) { npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto]; netisr_drain_proto(npwp); bzero(npwp, sizeof(*npwp)); } NETISR_WUNLOCK(); } #ifdef VIMAGE void netisr_register_vnet(const struct netisr_handler *nhp) { u_int proto; proto = nhp->nh_proto; KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, nhp->nh_name)); V_netisr_enable[proto] = 1; NETISR_WUNLOCK(); } static void netisr_drain_proto_vnet(struct vnet *vnet, u_int proto) { struct netisr_workstream *nwsp; struct netisr_work *npwp; struct mbuf *m, *mp, *n, *ne; u_int i; KASSERT(vnet != NULL, ("%s: vnet is NULL", __func__)); NETISR_LOCK_ASSERT(); CPU_FOREACH(i) { nwsp = DPCPU_ID_PTR(i, nws); if (nwsp->nws_intr_event == NULL) continue; npwp = &nwsp->nws_work[proto]; NWS_LOCK(nwsp); /* * Rather than dissecting and removing mbufs from the middle * of the chain, we build a new chain if the packet stays and * update the head and 
tail pointers at the end. All packets * matching the given vnet are freed. */ m = npwp->nw_head; n = ne = NULL; while (m != NULL) { mp = m; m = m->m_nextpkt; mp->m_nextpkt = NULL; if (mp->m_pkthdr.rcvif->if_vnet != vnet) { if (n == NULL) { n = ne = mp; } else { ne->m_nextpkt = mp; ne = mp; } continue; } /* This is a packet in the selected vnet. Free it. */ npwp->nw_len--; m_freem(mp); } npwp->nw_head = n; npwp->nw_tail = ne; NWS_UNLOCK(nwsp); } } void netisr_unregister_vnet(const struct netisr_handler *nhp) { u_int proto; proto = nhp->nh_proto; KASSERT(curvnet != NULL, ("%s: curvnet is NULL", __func__)); KASSERT(proto < NETISR_MAXPROT, ("%s(%u): protocol too big for %s", __func__, proto, nhp->nh_name)); NETISR_WLOCK(); KASSERT(netisr_proto[proto].np_handler != NULL, ("%s(%u): protocol not registered for %s", __func__, proto, nhp->nh_name)); V_netisr_enable[proto] = 0; netisr_drain_proto_vnet(curvnet, proto); NETISR_WUNLOCK(); } #endif /* * Compose the global and per-protocol policies on dispatch, and return the * dispatch policy to use. */ static u_int netisr_get_dispatch(struct netisr_proto *npp) { /* * Protocol-specific configuration overrides the global default. */ if (npp->np_dispatch != NETISR_DISPATCH_DEFAULT) return (npp->np_dispatch); return (netisr_dispatch_policy); } /* * Look up the workstream given a packet and source identifier. Do this by * checking the protocol's policy, and optionally call out to the protocol * for assistance if required. */ static struct mbuf * netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy, uintptr_t source, struct mbuf *m, u_int *cpuidp) { struct ifnet *ifp; u_int policy; NETISR_LOCK_ASSERT(); /* * In the event we have only one worker, shortcut and deliver to it * without further ado. */ if (nws_count == 1) { *cpuidp = nws_array[0]; return (m); } /* * What happens next depends on the policy selected by the protocol. * If we want to support per-interface policies, we should do that * here first. */ policy = npp->np_policy; if (policy == NETISR_POLICY_CPU) { m = npp->np_m2cpuid(m, source, cpuidp); if (m == NULL) return (NULL); /* * It's possible for a protocol not to have a good idea about * where to process a packet, in which case we fall back on * the netisr code to decide. In the hybrid case, return the * current CPU ID, which will force an immediate direct * dispatch. In the queued case, fall back on the SOURCE * policy. */ if (*cpuidp != NETISR_CPUID_NONE) { *cpuidp = netisr_get_cpuid(*cpuidp); return (m); } if (dispatch_policy == NETISR_DISPATCH_HYBRID) { *cpuidp = netisr_get_cpuid(curcpu); return (m); } policy = NETISR_POLICY_SOURCE; } if (policy == NETISR_POLICY_FLOW) { if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE && npp->np_m2flow != NULL) { m = npp->np_m2flow(m, source); if (m == NULL) return (NULL); } if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) { *cpuidp = netisr_default_flow2cpu(m->m_pkthdr.flowid); return (m); } policy = NETISR_POLICY_SOURCE; } KASSERT(policy == NETISR_POLICY_SOURCE, ("%s: invalid policy %u for %s", __func__, npp->np_policy, npp->np_name)); ifp = m->m_pkthdr.rcvif; if (ifp != NULL) *cpuidp = nws_array[(ifp->if_index + source) % nws_count]; else *cpuidp = nws_array[source % nws_count]; return (m); } /* * Process packets associated with a workstream and protocol. For reasons of * fairness, we process up to one complete netisr queue at a time, moving the * queue to a stack-local queue for processing, but do not loop refreshing * from the global queue. 
The caller is responsible for deciding whether to * loop, and for setting the NWS_RUNNING flag. The passed workstream will be * locked on entry and relocked before return, but will be released while * processing. The number of packets processed is returned. */ static u_int netisr_process_workstream_proto(struct netisr_workstream *nwsp, u_int proto) { struct netisr_work local_npw, *npwp; u_int handled; struct mbuf *m; NETISR_LOCK_ASSERT(); NWS_LOCK_ASSERT(nwsp); KASSERT(nwsp->nws_flags & NWS_RUNNING, ("%s(%u): not running", __func__, proto)); KASSERT(proto >= 0 && proto < NETISR_MAXPROT, ("%s(%u): invalid proto\n", __func__, proto)); npwp = &nwsp->nws_work[proto]; if (npwp->nw_len == 0) return (0); /* * Move the global work queue to a thread-local work queue. * * Notice that this means the effective maximum length of the queue * is actually twice that of the maximum queue length specified in * the protocol registration call. */ handled = npwp->nw_len; local_npw = *npwp; npwp->nw_head = NULL; npwp->nw_tail = NULL; npwp->nw_len = 0; nwsp->nws_pendingbits &= ~(1 << proto); NWS_UNLOCK(nwsp); while ((m = local_npw.nw_head) != NULL) { local_npw.nw_head = m->m_nextpkt; m->m_nextpkt = NULL; if (local_npw.nw_head == NULL) local_npw.nw_tail = NULL; local_npw.nw_len--; VNET_ASSERT(m->m_pkthdr.rcvif != NULL, ("%s:%d rcvif == NULL: m=%p", __func__, __LINE__, m)); CURVNET_SET(m->m_pkthdr.rcvif->if_vnet); netisr_proto[proto].np_handler(m); CURVNET_RESTORE(); } KASSERT(local_npw.nw_len == 0, ("%s(%u): len %u", __func__, proto, local_npw.nw_len)); if (netisr_proto[proto].np_drainedcpu) netisr_proto[proto].np_drainedcpu(nwsp->nws_cpu); NWS_LOCK(nwsp); npwp->nw_handled += handled; return (handled); } /* * SWI handler for netisr -- processes packets in a set of workstreams that * it owns, woken up by calls to NWS_SIGNAL(). If this workstream is already * being direct dispatched, go back to sleep and wait for the dispatching * thread to wake us up again. */ static void swi_net(void *arg) { #ifdef NETISR_LOCKING struct rm_priotracker tracker; #endif struct netisr_workstream *nwsp; u_int bits, prot; nwsp = arg; #ifdef DEVICE_POLLING KASSERT(nws_count == 1, ("%s: device_polling but nws_count != 1", __func__)); netisr_poll(); #endif #ifdef NETISR_LOCKING NETISR_RLOCK(&tracker); #endif NWS_LOCK(nwsp); KASSERT(!(nwsp->nws_flags & NWS_RUNNING), ("swi_net: running")); if (nwsp->nws_flags & NWS_DISPATCHING) goto out; nwsp->nws_flags |= NWS_RUNNING; nwsp->nws_flags &= ~NWS_SCHEDULED; while ((bits = nwsp->nws_pendingbits) != 0) { while ((prot = ffs(bits)) != 0) { prot--; bits &= ~(1 << prot); (void)netisr_process_workstream_proto(nwsp, prot); } } nwsp->nws_flags &= ~NWS_RUNNING; out: NWS_UNLOCK(nwsp); #ifdef NETISR_LOCKING NETISR_RUNLOCK(&tracker); #endif #ifdef DEVICE_POLLING netisr_pollmore(); #endif } static int netisr_queue_workstream(struct netisr_workstream *nwsp, u_int proto, struct netisr_work *npwp, struct mbuf *m, int *dosignalp) { NWS_LOCK_ASSERT(nwsp); *dosignalp = 0; if (npwp->nw_len < npwp->nw_qlimit) { m->m_nextpkt = NULL; if (npwp->nw_head == NULL) { npwp->nw_head = m; npwp->nw_tail = m; } else { npwp->nw_tail->m_nextpkt = m; npwp->nw_tail = m; } npwp->nw_len++; if (npwp->nw_len > npwp->nw_watermark) npwp->nw_watermark = npwp->nw_len; /* * We must set the bit regardless of NWS_RUNNING, so that * swi_net() keeps calling netisr_process_workstream_proto(). 
*/ nwsp->nws_pendingbits |= (1 << proto); if (!(nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED))) { nwsp->nws_flags |= NWS_SCHEDULED; *dosignalp = 1; /* Defer until unlocked. */ } npwp->nw_queued++; return (0); } else { m_freem(m); npwp->nw_qdrops++; return (ENOBUFS); } } static int netisr_queue_internal(u_int proto, struct mbuf *m, u_int cpuid) { struct netisr_workstream *nwsp; struct netisr_work *npwp; int dosignal, error; #ifdef NETISR_LOCKING NETISR_LOCK_ASSERT(); #endif KASSERT(cpuid <= mp_maxid, ("%s: cpuid too big (%u, %u)", __func__, cpuid, mp_maxid)); KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); dosignal = 0; error = 0; nwsp = DPCPU_ID_PTR(cpuid, nws); npwp = &nwsp->nws_work[proto]; NWS_LOCK(nwsp); error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal); NWS_UNLOCK(nwsp); if (dosignal) NWS_SIGNAL(nwsp); return (error); } int netisr_queue_src(u_int proto, uintptr_t source, struct mbuf *m) { #ifdef NETISR_LOCKING struct rm_priotracker tracker; #endif u_int cpuid; int error; KASSERT(proto < NETISR_MAXPROT, ("%s: invalid proto %u", __func__, proto)); #ifdef NETISR_LOCKING NETISR_RLOCK(&tracker); #endif KASSERT(netisr_proto[proto].np_handler != NULL, ("%s: invalid proto %u", __func__, proto)); #ifdef VIMAGE if (V_netisr_enable[proto] == 0) { m_freem(m); return (ENOPROTOOPT); } #endif m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_DEFERRED, source, m, &cpuid); if (m != NULL) { KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); error = netisr_queue_internal(proto, m, cpuid); } else error = ENOBUFS; #ifdef NETISR_LOCKING NETISR_RUNLOCK(&tracker); #endif return (error); } int netisr_queue(u_int proto, struct mbuf *m) { return (netisr_queue_src(proto, 0, m)); } /* * Dispatch a packet for netisr processing; direct dispatch is permitted by * calling context. */ int netisr_dispatch_src(u_int proto, uintptr_t source, struct mbuf *m) { #ifdef NETISR_LOCKING struct rm_priotracker tracker; #endif struct netisr_workstream *nwsp; struct netisr_proto *npp; struct netisr_work *npwp; int dosignal, error; u_int cpuid, dispatch_policy; KASSERT(proto < NETISR_MAXPROT, ("%s: invalid proto %u", __func__, proto)); #ifdef NETISR_LOCKING NETISR_RLOCK(&tracker); #endif npp = &netisr_proto[proto]; KASSERT(npp->np_handler != NULL, ("%s: invalid proto %u", __func__, proto)); #ifdef VIMAGE if (V_netisr_enable[proto] == 0) { m_freem(m); return (ENOPROTOOPT); } #endif dispatch_policy = netisr_get_dispatch(npp); if (dispatch_policy == NETISR_DISPATCH_DEFERRED) return (netisr_queue_src(proto, source, m)); /* * If direct dispatch is forced, then unconditionally dispatch * without a formal CPU selection. Borrow the current CPU's stats, * even if there's no worker on it. In this case we don't update * nws_flags because all netisr processing will be source ordered due * to always being forced to directly dispatch. */ if (dispatch_policy == NETISR_DISPATCH_DIRECT) { nwsp = DPCPU_PTR(nws); npwp = &nwsp->nws_work[proto]; npwp->nw_dispatched++; npwp->nw_handled++; netisr_proto[proto].np_handler(m); error = 0; goto out_unlock; } KASSERT(dispatch_policy == NETISR_DISPATCH_HYBRID, ("%s: unknown dispatch policy (%u)", __func__, dispatch_policy)); /* * Otherwise, we execute in a hybrid mode where we will try to direct * dispatch if we're on the right CPU and the netisr worker isn't * already running. 
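	 *
	 * In outline -- a sketch of the code below, not additional
	 * behaviour:
	 *
	 *	sched_pin();
	 *	m = netisr_select_cpuid(npp, NETISR_DISPATCH_HYBRID, source,
	 *	    m, &cpuid);
	 *	if (cpuid == curcpu && the workstream is idle)
	 *		run np_handler(m) inline under NWS_DISPATCHING;
	 *	else
	 *		fall back to queueing the packet for that CPU;
	 *	sched_unpin();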
*/ sched_pin(); m = netisr_select_cpuid(&netisr_proto[proto], NETISR_DISPATCH_HYBRID, source, m, &cpuid); if (m == NULL) { error = ENOBUFS; goto out_unpin; } KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); if (cpuid != curcpu) goto queue_fallback; nwsp = DPCPU_PTR(nws); npwp = &nwsp->nws_work[proto]; /*- * We are willing to direct dispatch only if three conditions hold: * * (1) The netisr worker isn't already running, * (2) Another thread isn't already directly dispatching, and * (3) The netisr hasn't already been woken up. */ NWS_LOCK(nwsp); if (nwsp->nws_flags & (NWS_RUNNING | NWS_DISPATCHING | NWS_SCHEDULED)) { error = netisr_queue_workstream(nwsp, proto, npwp, m, &dosignal); NWS_UNLOCK(nwsp); if (dosignal) NWS_SIGNAL(nwsp); goto out_unpin; } /* * The current thread is now effectively the netisr worker, so set * the dispatching flag to prevent concurrent processing of the * stream from another thread (even the netisr worker), which could * otherwise lead to effective misordering of the stream. */ nwsp->nws_flags |= NWS_DISPATCHING; NWS_UNLOCK(nwsp); netisr_proto[proto].np_handler(m); NWS_LOCK(nwsp); nwsp->nws_flags &= ~NWS_DISPATCHING; npwp->nw_handled++; npwp->nw_hybrid_dispatched++; /* * If other work was enqueued by another thread while we were direct * dispatching, we need to signal the netisr worker to do that work. * In the future, we might want to do some of that work in the * current thread, rather than trigger further context switches. If * so, we'll want to establish a reasonable bound on the work done in * the "borrowed" context. */ if (nwsp->nws_pendingbits != 0) { nwsp->nws_flags |= NWS_SCHEDULED; dosignal = 1; } else dosignal = 0; NWS_UNLOCK(nwsp); if (dosignal) NWS_SIGNAL(nwsp); error = 0; goto out_unpin; queue_fallback: error = netisr_queue_internal(proto, m, cpuid); out_unpin: sched_unpin(); out_unlock: #ifdef NETISR_LOCKING NETISR_RUNLOCK(&tracker); #endif return (error); } int netisr_dispatch(u_int proto, struct mbuf *m) { return (netisr_dispatch_src(proto, 0, m)); } #ifdef DEVICE_POLLING /* * Kernel polling borrows a netisr thread to run interface polling in; this * function allows kernel polling to request that the netisr thread be * scheduled even if no packets are pending for protocols. */ void netisr_sched_poll(void) { struct netisr_workstream *nwsp; nwsp = DPCPU_ID_PTR(nws_array[0], nws); NWS_SIGNAL(nwsp); } #endif static void netisr_start_swi(u_int cpuid, struct pcpu *pc) { char swiname[12]; struct netisr_workstream *nwsp; int error; KASSERT(!CPU_ABSENT(cpuid), ("%s: CPU %u absent", __func__, cpuid)); nwsp = DPCPU_ID_PTR(cpuid, nws); mtx_init(&nwsp->nws_mtx, "netisr_mtx", NULL, MTX_DEF); nwsp->nws_cpu = cpuid; snprintf(swiname, sizeof(swiname), "netisr %u", cpuid); error = swi_add(&nwsp->nws_intr_event, swiname, swi_net, nwsp, SWI_NET, INTR_MPSAFE, &nwsp->nws_swi_cookie); if (error) panic("%s: swi_add %d", __func__, error); pc->pc_netisr = nwsp->nws_intr_event; if (netisr_bindthreads) { error = intr_event_bind(nwsp->nws_intr_event, cpuid); if (error != 0) printf("%s: cpu %u: intr_event_bind: %d", __func__, cpuid, error); } NETISR_WLOCK(); nws_array[nws_count] = nwsp->nws_cpu; nws_count++; NETISR_WUNLOCK(); } /* * Initialize the netisr subsystem. We rely on BSS and static initialization * of most fields in global data structures. * * Start a worker thread for the boot CPU so that we can support network * traffic immediately in case the network stack is used before additional * CPUs are started (for example, diskless boot). 
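 *
 * The worker count and binding applied below come from loader tunables;
 * for example (illustrative settings, not defaults):
 *
 *	net.isr.maxthreads="-1"		# one worker per CPU
 *	net.isr.bindthreads="1"		# bind each worker to its CPU
 *	net.isr.defaultqlimit="1024"	# per-protocol queue limit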
*/ static void netisr_init(void *arg) { +#ifdef EARLY_AP_STARTUP struct pcpu *pc; +#endif NETISR_LOCK_INIT(); if (netisr_maxthreads == 0 || netisr_maxthreads < -1 ) netisr_maxthreads = 1; /* default behavior */ else if (netisr_maxthreads == -1) netisr_maxthreads = mp_ncpus; /* use max cpus */ if (netisr_maxthreads > mp_ncpus) { printf("netisr_init: forcing maxthreads from %d to %d\n", netisr_maxthreads, mp_ncpus); netisr_maxthreads = mp_ncpus; } if (netisr_defaultqlimit > netisr_maxqlimit) { printf("netisr_init: forcing defaultqlimit from %d to %d\n", netisr_defaultqlimit, netisr_maxqlimit); netisr_defaultqlimit = netisr_maxqlimit; } #ifdef DEVICE_POLLING /* * The device polling code is not yet aware of how to deal with * multiple netisr threads, so for the time being compiling in device * polling disables parallel netisr workers. */ if (netisr_maxthreads != 1 || netisr_bindthreads != 0) { printf("netisr_init: forcing maxthreads to 1 and " "bindthreads to 0 for device polling\n"); netisr_maxthreads = 1; netisr_bindthreads = 0; } #endif #ifdef EARLY_AP_STARTUP STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (nws_count >= netisr_maxthreads) break; netisr_start_swi(pc->pc_cpuid, pc); } #else - pc = get_pcpu(); - netisr_start_swi(pc->pc_cpuid, pc); + netisr_start_swi(curcpu, pcpu_find(curcpu)); #endif } SYSINIT(netisr_init, SI_SUB_SOFTINTR, SI_ORDER_FIRST, netisr_init, NULL); #ifndef EARLY_AP_STARTUP /* * Start worker threads for additional CPUs. No attempt to gracefully handle * work reassignment, we don't yet support dynamic reconfiguration. */ static void netisr_start(void *arg) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (nws_count >= netisr_maxthreads) break; /* Worker will already be present for boot CPU. */ if (pc->pc_netisr != NULL) continue; netisr_start_swi(pc->pc_cpuid, pc); } } SYSINIT(netisr_start, SI_SUB_SMP, SI_ORDER_MIDDLE, netisr_start, NULL); #endif /* * Sysctl monitoring for netisr: query a list of registered protocols. */ static int sysctl_netisr_proto(SYSCTL_HANDLER_ARGS) { struct rm_priotracker tracker; struct sysctl_netisr_proto *snpp, *snp_array; struct netisr_proto *npp; u_int counter, proto; int error; if (req->newptr != NULL) return (EINVAL); snp_array = malloc(sizeof(*snp_array) * NETISR_MAXPROT, M_TEMP, M_ZERO | M_WAITOK); counter = 0; NETISR_RLOCK(&tracker); for (proto = 0; proto < NETISR_MAXPROT; proto++) { npp = &netisr_proto[proto]; if (npp->np_name == NULL) continue; snpp = &snp_array[counter]; snpp->snp_version = sizeof(*snpp); strlcpy(snpp->snp_name, npp->np_name, NETISR_NAMEMAXLEN); snpp->snp_proto = proto; snpp->snp_qlimit = npp->np_qlimit; snpp->snp_policy = npp->np_policy; snpp->snp_dispatch = npp->np_dispatch; if (npp->np_m2flow != NULL) snpp->snp_flags |= NETISR_SNP_FLAGS_M2FLOW; if (npp->np_m2cpuid != NULL) snpp->snp_flags |= NETISR_SNP_FLAGS_M2CPUID; if (npp->np_drainedcpu != NULL) snpp->snp_flags |= NETISR_SNP_FLAGS_DRAINEDCPU; counter++; } NETISR_RUNLOCK(&tracker); KASSERT(counter <= NETISR_MAXPROT, ("sysctl_netisr_proto: counter too big (%d)", counter)); error = SYSCTL_OUT(req, snp_array, sizeof(*snp_array) * counter); free(snp_array, M_TEMP); return (error); } SYSCTL_PROC(_net_isr, OID_AUTO, proto, CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_proto, "S,sysctl_netisr_proto", "Return list of protocols registered with netisr"); /* * Sysctl monitoring for netisr: query a list of workstreams. 
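 *
 * These oids back userspace tools such as netstat -Q.  A minimal reader
 * (illustrative only, error handling omitted) would look like:
 *
 *	size_t len = 0;
 *	sysctlbyname("net.isr.workstream", NULL, &len, NULL, 0);
 *	struct sysctl_netisr_workstream *snws = malloc(len);
 *	sysctlbyname("net.isr.workstream", snws, &len, NULL, 0);
 *	... len / sizeof(*snws) entries are now valid ...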
*/ static int sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS) { struct rm_priotracker tracker; struct sysctl_netisr_workstream *snwsp, *snws_array; struct netisr_workstream *nwsp; u_int counter, cpuid; int error; if (req->newptr != NULL) return (EINVAL); snws_array = malloc(sizeof(*snws_array) * MAXCPU, M_TEMP, M_ZERO | M_WAITOK); counter = 0; NETISR_RLOCK(&tracker); CPU_FOREACH(cpuid) { nwsp = DPCPU_ID_PTR(cpuid, nws); if (nwsp->nws_intr_event == NULL) continue; NWS_LOCK(nwsp); snwsp = &snws_array[counter]; snwsp->snws_version = sizeof(*snwsp); /* * For now, we equate workstream IDs and CPU IDs in the * kernel, but expose them independently to userspace in case * that assumption changes in the future. */ snwsp->snws_wsid = cpuid; snwsp->snws_cpu = cpuid; if (nwsp->nws_intr_event != NULL) snwsp->snws_flags |= NETISR_SNWS_FLAGS_INTR; NWS_UNLOCK(nwsp); counter++; } NETISR_RUNLOCK(&tracker); KASSERT(counter <= MAXCPU, ("sysctl_netisr_workstream: counter too big (%d)", counter)); error = SYSCTL_OUT(req, snws_array, sizeof(*snws_array) * counter); free(snws_array, M_TEMP); return (error); } SYSCTL_PROC(_net_isr, OID_AUTO, workstream, CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_workstream, "S,sysctl_netisr_workstream", "Return list of workstreams implemented by netisr"); /* * Sysctl monitoring for netisr: query per-protocol data across all * workstreams. */ static int sysctl_netisr_work(SYSCTL_HANDLER_ARGS) { struct rm_priotracker tracker; struct sysctl_netisr_work *snwp, *snw_array; struct netisr_workstream *nwsp; struct netisr_proto *npp; struct netisr_work *nwp; u_int counter, cpuid, proto; int error; if (req->newptr != NULL) return (EINVAL); snw_array = malloc(sizeof(*snw_array) * MAXCPU * NETISR_MAXPROT, M_TEMP, M_ZERO | M_WAITOK); counter = 0; NETISR_RLOCK(&tracker); CPU_FOREACH(cpuid) { nwsp = DPCPU_ID_PTR(cpuid, nws); if (nwsp->nws_intr_event == NULL) continue; NWS_LOCK(nwsp); for (proto = 0; proto < NETISR_MAXPROT; proto++) { npp = &netisr_proto[proto]; if (npp->np_name == NULL) continue; nwp = &nwsp->nws_work[proto]; snwp = &snw_array[counter]; snwp->snw_version = sizeof(*snwp); snwp->snw_wsid = cpuid; /* See comment above. 
*/ snwp->snw_proto = proto; snwp->snw_len = nwp->nw_len; snwp->snw_watermark = nwp->nw_watermark; snwp->snw_dispatched = nwp->nw_dispatched; snwp->snw_hybrid_dispatched = nwp->nw_hybrid_dispatched; snwp->snw_qdrops = nwp->nw_qdrops; snwp->snw_queued = nwp->nw_queued; snwp->snw_handled = nwp->nw_handled; counter++; } NWS_UNLOCK(nwsp); } KASSERT(counter <= MAXCPU * NETISR_MAXPROT, ("sysctl_netisr_work: counter too big (%d)", counter)); NETISR_RUNLOCK(&tracker); error = SYSCTL_OUT(req, snw_array, sizeof(*snw_array) * counter); free(snw_array, M_TEMP); return (error); } SYSCTL_PROC(_net_isr, OID_AUTO, work, CTLFLAG_RD|CTLTYPE_STRUCT|CTLFLAG_MPSAFE, 0, 0, sysctl_netisr_work, "S,sysctl_netisr_work", "Return list of per-workstream, per-protocol work in netisr"); #ifdef DDB DB_SHOW_COMMAND(netisr, db_show_netisr) { struct netisr_workstream *nwsp; struct netisr_work *nwp; int first, proto; u_int cpuid; db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto", "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue"); CPU_FOREACH(cpuid) { nwsp = DPCPU_ID_PTR(cpuid, nws); if (nwsp->nws_intr_event == NULL) continue; first = 1; for (proto = 0; proto < NETISR_MAXPROT; proto++) { if (netisr_proto[proto].np_handler == NULL) continue; nwp = &nwsp->nws_work[proto]; if (first) { db_printf("%3d ", cpuid); first = 0; } else db_printf("%3s ", ""); db_printf( "%6s %5d %5d %5d %8ju %8ju %8ju %8ju\n", netisr_proto[proto].np_name, nwp->nw_len, nwp->nw_watermark, nwp->nw_qlimit, nwp->nw_dispatched, nwp->nw_hybrid_dispatched, nwp->nw_qdrops, nwp->nw_queued); } } } #endif Index: head/sys/powerpc/include/cpufunc.h =================================================================== --- head/sys/powerpc/include/cpufunc.h (revision 313192) +++ head/sys/powerpc/include/cpufunc.h (revision 313193) @@ -1,215 +1,215 @@ /*- * Copyright (c) 1998 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _MACHINE_CPUFUNC_H_ #define _MACHINE_CPUFUNC_H_ #ifdef _KERNEL #include #include #include struct thread; #ifdef KDB void breakpoint(void); #else static __inline void breakpoint(void) { return; } #endif /* CPU register mangling inlines */ static __inline void mtmsr(register_t value) { __asm __volatile ("mtmsr %0; isync" :: "r"(value)); } #ifdef __powerpc64__ static __inline void mtmsrd(register_t value) { __asm __volatile ("mtmsrd %0; isync" :: "r"(value)); } #endif static __inline register_t mfmsr(void) { register_t value; __asm __volatile ("mfmsr %0" : "=r"(value)); return (value); } #ifndef __powerpc64__ static __inline void mtsrin(vm_offset_t va, register_t value) { __asm __volatile ("mtsrin %0,%1; isync" :: "r"(value), "r"(va)); } static __inline register_t mfsrin(vm_offset_t va) { register_t value; __asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va)); return (value); } #endif static __inline register_t mfctrl(void) { register_t value; __asm __volatile ("mfspr %0,136" : "=r"(value)); return (value); } static __inline void mtdec(register_t value) { __asm __volatile ("mtdec %0" :: "r"(value)); } static __inline register_t mfdec(void) { register_t value; __asm __volatile ("mfdec %0" : "=r"(value)); return (value); } static __inline register_t mfpvr(void) { register_t value; __asm __volatile ("mfpvr %0" : "=r"(value)); return (value); } static __inline u_quad_t mftb(void) { u_quad_t tb; #ifdef __powerpc64__ __asm __volatile ("mftb %0" : "=r"(tb)); #else uint32_t *tbup = (uint32_t *)&tb; uint32_t *tblp = tbup + 1; do { *tbup = mfspr(TBR_TBU); *tblp = mfspr(TBR_TBL); } while (*tbup != mfspr(TBR_TBU)); #endif return (tb); } static __inline void mttb(u_quad_t time) { mtspr(TBR_TBWL, 0); mtspr(TBR_TBWU, (uint32_t)(time >> 32)); mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff)); } static __inline void eieio(void) { __asm __volatile ("eieio" : : : "memory"); } static __inline void isync(void) { __asm __volatile ("isync" : : : "memory"); } static __inline void powerpc_sync(void) { __asm __volatile ("sync" : : : "memory"); } static __inline register_t intr_disable(void) { register_t msr; msr = mfmsr(); mtmsr(msr & ~PSL_EE); return (msr); } static __inline void intr_restore(register_t msr) { mtmsr(msr); } static __inline struct pcpu * -get_pcpu(void) +powerpc_get_pcpup(void) { struct pcpu *ret; __asm __volatile("mfsprg %0, 0" : "=r"(ret)); return (ret); } #endif /* _KERNEL */ #endif /* !_MACHINE_CPUFUNC_H_ */ Index: head/sys/powerpc/include/pcpu.h =================================================================== --- head/sys/powerpc/include/pcpu.h (revision 313192) +++ head/sys/powerpc/include/pcpu.h (revision 313193) @@ -1,173 +1,173 @@ /*- * Copyright (c) 1999 Luoqi Chen * Copyright (c) Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_PCPU_H_ #define _MACHINE_PCPU_H_ #include #include #include struct pmap; struct pvo_entry; #define CPUSAVE_LEN 9 #define PCPU_MD_COMMON_FIELDS \ int pc_inside_intr; \ struct pmap *pc_curpmap; /* current pmap */ \ struct thread *pc_fputhread; /* current fpu user */ \ struct thread *pc_vecthread; /* current vec user */ \ uintptr_t pc_hwref; \ uint32_t pc_pir; \ int pc_bsp; \ volatile int pc_awake; \ uint32_t pc_ipimask; \ register_t pc_tempsave[CPUSAVE_LEN]; \ register_t pc_disisave[CPUSAVE_LEN]; \ register_t pc_dbsave[CPUSAVE_LEN]; \ void *pc_restore; #define PCPU_MD_AIM32_FIELDS \ vm_offset_t pc_qmap_addr; \ struct pvo_entry *pc_qmap_pvo; \ struct mtx pc_qmap_lock; \ /* char __pad[0] */ #define PCPU_MD_AIM64_FIELDS \ struct slb pc_slb[64]; \ struct slb **pc_userslb; \ register_t pc_slbsave[18]; \ uint8_t pc_slbstack[1024]; \ vm_offset_t pc_qmap_addr; \ struct pvo_entry *pc_qmap_pvo; \ struct mtx pc_qmap_lock; \ char __pad[1121 - sizeof(struct mtx)] #ifdef __powerpc64__ #define PCPU_MD_AIM_FIELDS PCPU_MD_AIM64_FIELDS #else #define PCPU_MD_AIM_FIELDS PCPU_MD_AIM32_FIELDS #endif #define BOOKE_CRITSAVE_LEN (CPUSAVE_LEN + 2) #define BOOKE_TLB_MAXNEST 3 #define BOOKE_TLB_SAVELEN 16 #define BOOKE_TLBSAVE_LEN (BOOKE_TLB_SAVELEN * BOOKE_TLB_MAXNEST) #define PCPU_MD_BOOKE_FIELDS \ register_t pc_booke_critsave[BOOKE_CRITSAVE_LEN]; \ register_t pc_booke_mchksave[CPUSAVE_LEN]; \ register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \ register_t pc_booke_tlb_level; \ vm_offset_t pc_qmap_addr; \ uint32_t *pc_booke_tlb_lock; \ int pc_tid_next; \ char __pad[173] /* Definitions for register offsets within the exception tmp save areas */ #define CPUSAVE_R27 0 /* where r27 gets saved */ #define CPUSAVE_R28 1 /* where r28 gets saved */ #define CPUSAVE_R29 2 /* where r29 gets saved */ #define CPUSAVE_R30 3 /* where r30 gets saved */ #define CPUSAVE_R31 4 /* where r31 gets saved */ #define CPUSAVE_AIM_DAR 5 /* where SPR_DAR gets saved */ #define CPUSAVE_AIM_DSISR 6 /* where SPR_DSISR gets saved */ #define CPUSAVE_BOOKE_DEAR 5 /* where SPR_DEAR gets saved */ #define CPUSAVE_BOOKE_ESR 6 /* where SPR_ESR gets saved */ #define CPUSAVE_SRR0 7 /* where SRR0 gets saved */ #define CPUSAVE_SRR1 8 /* where SRR1 gets saved */ /* Book-E TLBSAVE is more elaborate */ #define TLBSAVE_BOOKE_LR 0 #define TLBSAVE_BOOKE_CR 1 #define TLBSAVE_BOOKE_SRR0 2 #define TLBSAVE_BOOKE_SRR1 3 #define TLBSAVE_BOOKE_R20 4 #define TLBSAVE_BOOKE_R21 5 #define TLBSAVE_BOOKE_R22 6 #define TLBSAVE_BOOKE_R23 7 #define TLBSAVE_BOOKE_R24 8 #define TLBSAVE_BOOKE_R25 9 #define TLBSAVE_BOOKE_R26 10 #define TLBSAVE_BOOKE_R27 11 #define TLBSAVE_BOOKE_R28 12 #define TLBSAVE_BOOKE_R29 13 #define TLBSAVE_BOOKE_R30 14 #define TLBSAVE_BOOKE_R31 15 #ifdef AIM #define PCPU_MD_FIELDS \ PCPU_MD_COMMON_FIELDS \ PCPU_MD_AIM_FIELDS #endif #if defined(BOOKE) #define PCPU_MD_FIELDS \ PCPU_MD_COMMON_FIELDS \ PCPU_MD_BOOKE_FIELDS #endif /* * Catch-all for ports (e.g. 
lsof, used by gtop) */ #ifndef PCPU_MD_FIELDS #define PCPU_MD_FIELDS \ int pc_md_placeholder[32] #endif #ifdef _KERNEL -#define pcpup (get_pcpu()) +#define pcpup ((struct pcpu *) powerpc_get_pcpup()) static __inline __pure2 struct thread * __curthread(void) { struct thread *td; #ifdef __powerpc64__ __asm __volatile("mr %0,13" : "=r"(td)); #else __asm __volatile("mr %0,2" : "=r"(td)); #endif return (td); } #define curthread (__curthread()) #define PCPU_GET(member) (pcpup->pc_ ## member) /* * XXX The implementation of this operation should be made atomic * with respect to preemption. */ #define PCPU_ADD(member, value) (pcpup->pc_ ## member += (value)) #define PCPU_INC(member) PCPU_ADD(member, 1) #define PCPU_PTR(member) (&pcpup->pc_ ## member) #define PCPU_SET(member,value) (pcpup->pc_ ## member = (value)) #endif /* _KERNEL */ #endif /* !_MACHINE_PCPU_H_ */ Index: head/sys/sparc64/include/pcpu.h =================================================================== --- head/sys/sparc64/include/pcpu.h (revision 313192) +++ head/sys/sparc64/include/pcpu.h (revision 313193) @@ -1,101 +1,100 @@ /*- * Copyright (c) 1999 Luoqi Chen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: FreeBSD: src/sys/i386/include/globaldata.h,v 1.27 2001/04/27 * $FreeBSD$ */ #ifndef _MACHINE_PCPU_H_ #define _MACHINE_PCPU_H_ #include #include #include #include #define ALT_STACK_SIZE 128 struct pmap; /* * Inside the kernel, the globally reserved register g7 is used to * point at the globaldata structure. 
 */
#define	PCPU_MD_FIELDS							\
	struct	cacheinfo pc_cache;					\
	struct	intr_request pc_irpool[IR_FREE];			\
	struct	intr_request *pc_irhead;				\
	struct	intr_request **pc_irtail;				\
	struct	intr_request *pc_irfree;				\
	struct	pmap *pc_pmap;						\
	vm_offset_t pc_addr;						\
	vm_offset_t pc_qmap_addr;					\
	u_long	pc_tickref;						\
	u_long	pc_tickadj;						\
	u_long	pc_tickincrement;					\
	u_int	pc_clock;						\
	u_int	pc_impl;						\
	u_int	pc_mid;							\
	u_int	pc_node;						\
	u_int	pc_tlb_ctx;						\
	u_int	pc_tlb_ctx_max;						\
	u_int	pc_tlb_ctx_min;						\
	char	__pad[397]

#ifdef _KERNEL

extern void *dpcpu0;

struct	pcb;
struct	pcpu;

register struct pcb *curpcb __asm__(__XSTRING(PCB_REG));
register struct pcpu *pcpup __asm__(__XSTRING(PCPU_REG));

-#define	get_pcpu()	(pcpup)
#define	PCPU_GET(member)	(pcpup->pc_ ## member)

static __inline __pure2 struct thread *
__curthread(void)
{
	struct thread *td;

	__asm("ldx [%" __XSTRING(PCPU_REG) "], %0" : "=r" (td));
	return (td);
}
#define	curthread	(__curthread())

/*
 * XXX The implementation of this operation should be made atomic
 * with respect to preemption.
 */
#define	PCPU_ADD(member, value)	(pcpup->pc_ ## member += (value))
#define	PCPU_INC(member)	PCPU_ADD(member, 1)
#define	PCPU_PTR(member)	(&pcpup->pc_ ## member)
#define	PCPU_SET(member,value)	(pcpup->pc_ ## member = (value))

#endif	/* _KERNEL */

#endif	/* !_MACHINE_PCPU_H_ */
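
For illustration, a minimal sketch of how machine-independent code can reach per-CPU state through the interfaces used above (pcpu_find() and curcpu, as in netisr_init()); example_show_cpu() is a hypothetical helper, not part of the tree:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pcpu.h>

/* Hypothetical helper: look up this CPU's pcpu area the MI way. */
static void
example_show_cpu(void)
{
	struct pcpu *pc;

	pc = pcpu_find(curcpu);	/* MI lookup by CPU id, as netisr_init() does */
	printf("running on CPU %u (pcpu at %p)\n", PCPU_GET(cpuid), (void *)pc);
}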