Changeset View
Standalone View
sys/kern/kern_rmlock.c
| Show First 20 Lines • Show All 443 Lines • ▼ Show 20 Lines | _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock) | ||||
| tracker->rmp_flags = 0; | tracker->rmp_flags = 0; | ||||
| tracker->rmp_thread = td; | tracker->rmp_thread = td; | ||||
| tracker->rmp_rmlock = rm; | tracker->rmp_rmlock = rm; | ||||
| if (rm->lock_object.lo_flags & LO_SLEEPABLE) | if (rm->lock_object.lo_flags & LO_SLEEPABLE) | ||||
| THREAD_NO_SLEEPING(); | THREAD_NO_SLEEPING(); | ||||
| td->td_critnest++; /* critical_enter(); */ | td->td_critnest++; /* critical_enter(); */ | ||||
| atomic_interrupt_fence(); | atomic_interrupt_fence(); | ||||
| pc = get_pcpu(); | pc = get_pcpu(); | ||||
| rm_tracker_add(pc, tracker); | rm_tracker_add(pc, tracker); | ||||
| sched_pin(); | sched_pin(); | ||||
| atomic_interrupt_fence(); | atomic_interrupt_fence(); | ||||
| td->td_critnest--; | td->td_critnest--; | ||||
| /* | /* | ||||
| * Fast path to combine two common conditions into a single | * Fast path to combine two common conditions into a single | ||||
| * conditional jump. | * conditional jump. | ||||
| */ | */ | ||||
| if (__predict_true(0 == (td->td_owepreempt | | if (__predict_true(0 == (td->td_owepreempt | | ||||
| CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))) | CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))) | ||||
| ▲ Show 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | |||||
| { | { | ||||
| struct pcpu *pc; | struct pcpu *pc; | ||||
| struct thread *td = tracker->rmp_thread; | struct thread *td = tracker->rmp_thread; | ||||
| if (SCHEDULER_STOPPED()) | if (SCHEDULER_STOPPED()) | ||||
| return; | return; | ||||
| td->td_critnest++; /* critical_enter(); */ | td->td_critnest++; /* critical_enter(); */ | ||||
| atomic_interrupt_fence(); | |||||
jah: Why not just use critical_enter()/critical_exit() here? Are we in a spot where the pending… | |||||
Not Done Inline ActionsSee the if statement below; the preemption check is merged with another one to save a branch. mjg: See the if statement below; the preemption check is merged with another one to save a branch. | |||||
Done Inline ActionsAnother reason is that critical_enter()/_exit() were historically not inline functions; that changed only in the past few years. So we could certainly use critical_enter() here, but switching to critical_exit() in _rm_rlock() results in worse-looking code: https://reviews.freebsd.org/P540 I suspect it's worth keeping this micro-optimization. I guess we could at least use critical_enter(), but that on its own doesn't represent much of an improvement to me. markj: Another reason is that critical_enter()/_exit() were historically not inline functions; that… | |||||
Not Done Inline ActionsI have to note critical_exit() is already suboptimal by having to test for preemption using a different variable. Instead, both preemption and the flag could be in one int (if not short). It does not even have to be per-thread; rather, it can be moved per-CPU. I don't know how feasible it is outside of amd64 though. mjg: I have to note critical_exit is already suboptimal by having to test for preemption using a… | |||||
| pc = get_pcpu(); | pc = get_pcpu(); | ||||
| rm_tracker_remove(pc, tracker); | rm_tracker_remove(pc, tracker); | ||||
| atomic_interrupt_fence(); | |||||
| td->td_critnest--; | td->td_critnest--; | ||||
| sched_unpin(); | sched_unpin(); | ||||
| if (rm->lock_object.lo_flags & LO_SLEEPABLE) | if (rm->lock_object.lo_flags & LO_SLEEPABLE) | ||||
| THREAD_SLEEPING_OK(); | THREAD_SLEEPING_OK(); | ||||
| if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags))) | if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags))) | ||||
| return; | return; | ||||
| ▲ Show 20 Lines • Show All 730 Lines • Show Last 20 Lines | |||||
Why not just use critical_enter()/critical_exit() here? Are we in a spot where the pending preemption check in critical_exit() can't be tolerated?