D23463.id67646.diff

Index: sys/kern/subr_smr.c
===================================================================
--- sys/kern/subr_smr.c
+++ sys/kern/subr_smr.c
@@ -35,6 +35,8 @@
 #include <sys/proc.h>
 #include <sys/smp.h>
 #include <sys/smr.h>
+#include <sys/sysctl.h>
+#include <sys/counter.h>
 
 #include <vm/uma.h>
 
@@ -162,6 +164,17 @@
 #define SMR_SEQ_MAX_ADVANCE SMR_SEQ_MAX_DELTA / 2
 #endif
 
+static SYSCTL_NODE(_debug, OID_AUTO, smr, CTLFLAG_RW, NULL, "SMR Stats");
+static counter_u64_t advance;
+SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance, CTLFLAG_RD, &advance, "");
+static counter_u64_t advance_wait;
+SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, advance_wait, CTLFLAG_RD, &advance_wait, "");
+static counter_u64_t poll_fast;
+SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_fast, CTLFLAG_RD, &poll_fast, "");
+static counter_u64_t poll_scan;
+SYSCTL_COUNTER_U64(_debug_smr, OID_AUTO, poll_scan, CTLFLAG_RD, &poll_scan, "");
+
+
 /*
  * Advance the write sequence and return the new value for use as the
  * wait goal. This guarantees that any changes made by the calling
@@ -197,14 +210,17 @@
 	 */
 	s = zpcpu_get(smr)->c_shared;
 	goal = atomic_fetchadd_int(&s->s_wr_seq, SMR_SEQ_INCR) + SMR_SEQ_INCR;
+	counter_u64_add(advance, 1);
 
 	/*
 	 * Force a synchronization here if the goal is getting too
 	 * far ahead of the read sequence number. This keeps the
 	 * wrap detecting arithmetic working in pathological cases.
 	 */
-	if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA)
+	if (goal - atomic_load_int(&s->s_rd_seq) >= SMR_SEQ_MAX_DELTA) {
+		counter_u64_add(advance_wait, 1);
 		smr_wait(smr, goal - SMR_SEQ_MAX_ADVANCE);
+	}
 
 	return (goal);
 }
@@ -296,14 +312,17 @@
 	 * A valid goal must be greater than s_rd_seq or we have not verified
 	 * that it has been observed and must fall through to polling.
 	 */
-	if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal))
+	if (SMR_SEQ_GEQ(s_rd_seq, goal) || SMR_SEQ_LT(s_wr_seq, goal)) {
+		counter_u64_add(poll_fast, 1);
 		goto out;
+	}
 
 	/*
 	 * Loop until all cores have observed the goal sequence or have
 	 * gone inactive. Keep track of the oldest sequence currently
 	 * active as rd_seq.
 	 */
+	counter_u64_add(poll_scan, 1);
 	rd_seq = s_wr_seq;
 	CPU_FOREACH(i) {
 		c = zpcpu_get_cpu(smr, i);
@@ -364,7 +383,7 @@
 	s_rd_seq = atomic_load_int(&s->s_rd_seq);
 	do {
 		if (SMR_SEQ_LEQ(rd_seq, s_rd_seq))
-			break;
+			goto out;
 	} while (atomic_fcmpset_int(&s->s_rd_seq, &s_rd_seq, rd_seq) == 0);
 
 out:
@@ -424,3 +443,14 @@
 	smr_zone = uma_zcreate("SMR CPU", sizeof(struct smr),
 	    NULL, NULL, NULL, NULL, (CACHE_LINE_SIZE * 2) - 1, UMA_ZONE_PCPU);
 }
+
+static void
+smr_init_counters(void *unused)
+{
+
+	advance = counter_u64_alloc(M_WAITOK);
+	advance_wait = counter_u64_alloc(M_WAITOK);
+	poll_fast = counter_u64_alloc(M_WAITOK);
+	poll_scan = counter_u64_alloc(M_WAITOK);
+}
+SYSINIT(smr_counters, SI_SUB_CPU, SI_ORDER_ANY, smr_init_counters, NULL);
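
For context: the diff instruments SMR with counter(9) counters, whose per-CPU backing storage keeps the counter_u64_add() calls in smr_advance() and smr_poll() cheap enough for hot paths; the SYSINIT runs at SI_SUB_CPU, presumably so the allocations happen once per-CPU memory is set up. On a kernel built with this change, the four statistics surface as read-only uint64_t sysctls under debug.smr. The following userland sketch is not part of the patch; it only assumes the OID names implied by the SYSCTL_COUNTER_U64 declarations above, and polls them with sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* OID names follow from the SYSCTL_COUNTER_U64 declarations. */
	const char *oids[] = {
		"debug.smr.advance",
		"debug.smr.advance_wait",
		"debug.smr.poll_fast",
		"debug.smr.poll_scan",
	};
	uint64_t val;
	size_t len;
	unsigned int i;

	for (i = 0; i < sizeof(oids) / sizeof(oids[0]); i++) {
		len = sizeof(val);
		/* Counter sysctls export a plain uint64_t value. */
		if (sysctlbyname(oids[i], &val, &len, NULL, 0) != 0) {
			perror(oids[i]);
			continue;
		}
		printf("%s: %ju\n", oids[i], (uintmax_t)val);
	}
	return (0);
}

With the patched kernel booted, "sysctl debug.smr" from the shell should print the same four values.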
