Page Menu · Home · FreeBSD

D22823.id65683.diff
No One · Temporary

D22823.id65683.diff

Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c
+++ sys/kern/kern_rmlock.c
@@ -53,6 +53,7 @@
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>
+#include <vm/uma.h>
#ifdef DDB
#include <ddb/ddb.h>
@@ -853,3 +854,111 @@
lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif
+
+void
+rms_init(struct rmslock *rms, const char *name)
+{
+
+ rms->writer = 0;
+ rms->readers = 0;
+ mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
+ rms->readers_pcpu = uma_zalloc_pcpu(pcpu_zone_int, M_WAITOK | M_ZERO);
+}
+
+void
+rms_destroy(struct rmslock *rms)
+{
+
+ mtx_destroy(&rms->mtx);
+ uma_zfree_pcpu(pcpu_zone_int, rms->readers_pcpu);
+}
+
+static void __noinline
+rms_rlock_fallback(struct rmslock *rms)
+{
+
+ critical_exit();
+
+ mtx_lock(&rms->mtx);
+ while (rms->writer)
+ msleep(&rms->readers, &rms->mtx, PUSER - 1, "rms wait", 0);
+ (*(int *)zpcpu_get(rms->readers_pcpu))++;
+ mtx_unlock(&rms->mtx);
+}
+
+void
+rms_rlock(struct rmslock *rms)
+{
+
+ critical_enter();
+ if (__predict_false(rms->writer)) {
+ rms_rlock_fallback(rms);
+ return;
+ }
+ (*(int *)zpcpu_get(rms->readers_pcpu))++;
+ atomic_thread_fence_rel();
+ critical_exit();
+}
+
+static void __noinline
+rms_runlock_fallback(struct rmslock *rms)
+{
+
+ critical_exit();
+
+ MPASS(rms->writer == 1);
+ mtx_lock(&rms->mtx);
+ MPASS(*(int *)zpcpu_get(rms->readers_pcpu) == 0);
+ MPASS(rms->readers > 0);
+ rms->readers--;
+ if (rms->readers == 0)
+ wakeup(&rms->writer);
+ mtx_unlock(&rms->mtx);
+}
+
+void
+rms_runlock(struct rmslock *rms)
+{
+
+ critical_enter();
+ if (__predict_false(rms->writer)) {
+ rms_runlock_fallback(rms);
+ return;
+ }
+ (*(int *)zpcpu_get(rms->readers_pcpu))--;
+ atomic_thread_fence_rel();
+ critical_exit();
+}
+
+void
+rms_wlock(struct rmslock *rms)
+{
+ int cpu;
+
+ mtx_lock(&rms->mtx);
+ MPASS(rms->writer == 0);
+ MPASS(rms->readers == 0);
+ rms->writer = 1;
+ cpus_fence_seq_cst();
+ quiesce_all_critical();
+ CPU_FOREACH(cpu) {
+ rms->readers += zpcpu_replace_cpu(rms->readers_pcpu, 0, cpu);
+ }
+ if (rms->readers)
+ msleep(&rms->writer, &rms->mtx, PUSER - 1 | PDROP, "rms writer", 0);
+ else
+ mtx_unlock(&rms->mtx);
+ MPASS(rms->readers == 0);
+}
+
+void
+rms_wunlock(struct rmslock *rms)
+{
+
+ mtx_lock(&rms->mtx);
+ MPASS(rms->writer == 1);
+ MPASS(rms->readers == 0);
+ rms->writer = 0;
+ wakeup(&rms->readers);
+ mtx_unlock(&rms->mtx);
+}
Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c
+++ sys/kern/subr_smp.c
@@ -934,6 +934,9 @@
* We are not in one so the check for us is safe. If the found
* thread changes to something else we know the section was
* exited as well.
+ *
+ * It is allowed to call the routine while in critical section, the
+ * caller is explicitly skipped.
*/
void
quiesce_all_critical(void)
@@ -942,14 +945,14 @@
struct pcpu *pcpu;
int cpu;
- MPASS(curthread->td_critnest == 0);
-
CPU_FOREACH(cpu) {
pcpu = cpuid_to_pcpu[cpu];
td = pcpu->pc_curthread;
for (;;) {
if (td->td_critnest == 0)
break;
+ if (td == curthread)
+ break;
cpu_spinwait();
newtd = (struct thread *)
atomic_load_acq_ptr((void *)pcpu->pc_curthread);
Index: sys/security/mac/mac_framework.c
===================================================================
--- sys/security/mac/mac_framework.c
+++ sys/security/mac/mac_framework.c
@@ -176,6 +176,7 @@
#ifndef MAC_STATIC
static struct rmlock mac_policy_rm; /* Non-sleeping entry points. */
static struct sx mac_policy_sx; /* Sleeping entry points. */
+static struct rmslock mac_policy_rms;
#endif
struct mac_policy_list_head mac_policy_list;
@@ -209,7 +210,7 @@
if (!mac_late)
return;
- sx_slock(&mac_policy_sx);
+ rms_rlock(&mac_policy_rms);
#endif
}
@@ -233,7 +234,7 @@
if (!mac_late)
return;
- sx_sunlock(&mac_policy_sx);
+ rms_runlock(&mac_policy_rms);
#endif
}
@@ -249,6 +250,7 @@
return;
sx_xlock(&mac_policy_sx);
+ rms_wlock(&mac_policy_rms);
rm_wlock(&mac_policy_rm);
#endif
}
@@ -262,6 +264,7 @@
return;
rm_wunlock(&mac_policy_rm);
+ rms_wunlock(&mac_policy_rms);
sx_xunlock(&mac_policy_sx);
#endif
}
@@ -294,6 +297,7 @@
rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
RM_RECURSE);
sx_init_flags(&mac_policy_sx, "mac_policy_sx", SX_NOWITNESS);
+ rms_init(&mac_policy_rms, "mac_policy_rms");
#endif
}
Index: sys/sys/rmlock.h
===================================================================
--- sys/sys/rmlock.h
+++ sys/sys/rmlock.h
@@ -133,5 +133,19 @@
#define rm_assert(rm, what)
#endif
+struct rmslock {
+ struct mtx mtx;
+ int writer;
+ int readers;
+ int *readers_pcpu;
+};
+
+void rms_init(struct rmslock *rms, const char *name);
+void rms_destroy(struct rmslock *rms);
+void rms_rlock(struct rmslock *rms);
+void rms_runlock(struct rmslock *rms);
+void rms_wlock(struct rmslock *rms);
+void rms_wunlock(struct rmslock *rms);
+
#endif /* _KERNEL */
#endif /* !_SYS_RMLOCK_H_ */

File Metadata

Mime Type
text/plain
Expires
Tue, Apr 21, 9:44 AM (14 h, 25 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31902983
Default Alt Text
D22823.id65683.diff (4 KB)

Event Timeline