Page MenuHomeFreeBSD

D38853.diff
No OneTemporary

D38853.diff

diff --git a/share/man/man9/ratecheck.9 b/share/man/man9/ratecheck.9
--- a/share/man/man9/ratecheck.9
+++ b/share/man/man9/ratecheck.9
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd March 11, 2022
+.Dd March 2, 2023
.Dt RATECHECK 9
.Os
.Sh NAME
@@ -82,6 +82,22 @@
or
.Fa maxpps
is negative.
+.Sh NOTES
+The
+.Nm ratecheck
+function requires external locking to ensure correct operation on
+multi-processor systems.
+The
+.Nm ppsratecheck
+function uses atomic operations to ensure correct operation on multi-processor
+systems.
+.Pp
+The
+.Fn counter_ratecheck
+function may be more appropriate in certain use cases.
+See
+.Xr counter 9
+for more details on that function.
.Sh SEE ALSO
.Xr counter 9
.Sh HISTORY
diff --git a/sys/kern/kern_time.c b/sys/kern/kern_time.c
--- a/sys/kern/kern_time.c
+++ b/sys/kern/kern_time.c
@@ -1073,6 +1073,8 @@
/*
* ratecheck(): simple time-based rate-limit checking.
+ *
+ * Note that this is not multi-processor safe without external locking.
*/
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
@@ -1114,22 +1116,51 @@
int
eventratecheck(struct timeval *lasttime, int *cureps, int maxeps)
{
- int now;
+ time_t last;
+ int count, now;
+
+ /*
+ * Work around different time_t sizes. The compile-time asserts
+ * catch cases where the time_t size assumption has changed.
+ */
+#if (!defined(__LP64__) && (defined(__i386__) || defined(__amd64__)))
+#define atomic_cmpset_acq_time atomic_cmpset_acq_32
+ _Static_assert(sizeof(time_t) == sizeof(int32_t), "sizeof(time_t) not expected size");
+#else
+#define atomic_cmpset_acq_time atomic_cmpset_acq_64
+ _Static_assert(sizeof(time_t) == sizeof(int64_t), "sizeof(time_t) not expected size");
+#endif
/*
* Reset the last time and counter if this is the first call
* or more than a second has passed since the last update of
* lasttime.
+ *
+ * If we are racing with another thread to update lasttime
+ * and lose the race, we simply fall through and check the count.
*/
now = ticks;
- if (lasttime->tv_sec == 0 || (u_int)(now - lasttime->tv_sec) >= hz) {
- lasttime->tv_sec = now;
- *cureps = 1;
+ last = lasttime->tv_sec;
+ if ((last == 0 || (u_int)(now - last) >= hz) &&
+ atomic_cmpset_acq_time(&lasttime->tv_sec, last, now)) {
+ atomic_set_rel_int(cureps, 1);
return (maxeps != 0);
- } else {
- (*cureps)++; /* NB: ignore potential overflow */
- return (maxeps < 0 || *cureps <= maxeps);
}
+
+ /*
+ * Do an unlocked read of *cureps to handle the negative case.
+ * Under high load, this avoids the cost of an atomic operation. It also
+ * keeps us from incrementing the counter so far that it wraps around
+ * to positive again. And, if we are that far over our rate counter,
+ * an unlocked read is probably sufficient.
+ */
+ if (*cureps < 0)
+ return (maxeps < 0);
+
+ /* Increment the counter and check whether we are within the rate. */
+ count = atomic_fetchadd_int(cureps, 1);
+ return (maxeps < 0 || (count >= 0 && count < maxeps));
+#undef atomic_cmpset_acq_time
}
static void

File Metadata

Mime Type
text/plain
Expires
Sat, Mar 21, 7:37 AM (12 h, 4 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
30070506
Default Alt Text
D38853.diff (2 KB)

Event Timeline