Page MenuHomeFreeBSD

D21740.id62522.diff
No OneTemporary

D21740.id62522.diff

Index: sys/kern/subr_lock.c
===================================================================
--- sys/kern/subr_lock.c
+++ sys/kern/subr_lock.c
@@ -308,6 +308,32 @@
sched_relinquish(curthread);
}
+static void
+lock_prof_reset_cpu_wait(int cpu)
+{
+ struct thread *td, *newtd;
+ struct pcpu *pcpu;
+
+ MPASS(curthread->td_critnest == 0); /* we must be preemptible ourselves */
+
+ /*
+ * Observe all CPUs not executing in critical section.
+ * We are not in one so the check for us is safe. If the found
+ * thread changes to something else we know the section was
+ * exited as well.
+ */
+ pcpu = cpuid_to_pcpu[cpu];
+ td = pcpu->pc_curthread;
+ for (;;) {
+ if (td->td_critnest == 0)
+ break;
+ cpu_spinwait();
+ newtd = (struct thread *)atomic_load_acq_ptr((u_long *)&pcpu->pc_curthread);
+ if (td != newtd)
+ break;
+ }
+}
+
static void
lock_prof_reset(void)
{
@@ -324,7 +350,12 @@
atomic_store_rel_int(&lock_prof_resetting, 1);
enabled = lock_prof_enable;
lock_prof_enable = 0;
- quiesce_all_cpus("profreset", 0);
+ /*
+ * This both publishes lock_prof_enable as disabled and forces every
+ * other CPU to observe the new value if it has not already passed
+ * the check. We wait for the rest down below.
+ */
+ cpus_fence_seq_cst();
/*
* Some objects may have migrated between CPUs. Clear all links
* before we zero the structures. Some items may still be linked
@@ -343,6 +374,9 @@
lock_prof_init_type(&lpc->lpc_types[0]);
lock_prof_init_type(&lpc->lpc_types[1]);
}
+ /*
+ * Paired with the fence from cpus_fence_seq_cst()
+ */
atomic_store_rel_int(&lock_prof_resetting, 0);
lock_prof_enable = enabled;
}
@@ -433,12 +467,19 @@
"max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
enabled = lock_prof_enable;
lock_prof_enable = 0;
- quiesce_all_cpus("profstat", 0);
+ /*
+ * See the comment in lock_prof_reset
+ */
+ cpus_fence_seq_cst();
+ CPU_FOREACH(cpu) {
+ lock_prof_reset_cpu_wait(cpu);
+ }
t = ticks;
CPU_FOREACH(cpu) {
lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[0], sb, 0, t);
lock_prof_type_stats(&LP_CPU(cpu)->lpc_types[1], sb, 1, t);
}
+ atomic_thread_fence_rel();
lock_prof_enable = enabled;
error = sbuf_finish(sb);
@@ -591,6 +632,10 @@
else
l->lpo_waittime = 0;
out:
+ /*
+ * Paired with cpus_fence_seq_cst().
+ */
+ atomic_thread_fence_rel();
critical_exit();
}
@@ -677,6 +722,10 @@
type = &LP_CPU_SELF->lpc_types[spin];
LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
+ /*
+ * Paired with cpus_fence_seq_cst().
+ */
+ atomic_thread_fence_rel();
critical_exit();
}
Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c
+++ sys/kern/subr_smp.c
@@ -929,6 +929,29 @@
return quiesce_cpus(all_cpus, wmesg, prio);
}
+static void
+cpus_fence_seq_cst_issue(void *arg __unused)
+{
+
+ atomic_thread_fence_seq_cst();
+}
+
+void
+cpus_fence_seq_cst(void)
+{
+
+#ifdef SMP
+ smp_rendezvous(
+ smp_no_rendezvous_barrier,
+ cpus_fence_seq_cst_issue,
+ smp_no_rendezvous_barrier,
+ NULL
+ );
+#else
+ cpus_fence_seq_cst_issue(NULL);
+#endif
+}
+
/* Extra care is taken with this sysctl because the data type is volatile */
static int
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
Index: sys/sys/smp.h
===================================================================
--- sys/sys/smp.h
+++ sys/sys/smp.h
@@ -264,6 +264,7 @@
int quiesce_all_cpus(const char *, int);
int quiesce_cpus(cpuset_t, const char *, int);
+void cpus_fence_seq_cst(void);
void smp_no_rendezvous_barrier(void *);
void smp_rendezvous(void (*)(void *),
void (*)(void *),

File Metadata

Mime Type
text/plain
Expires
Sun, Mar 8, 10:21 AM (49 m, 37 s)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29402960
Default Alt Text
D21740.id62522.diff (3 KB)

Event Timeline