Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F153468828
D55045.id171115.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
6 KB
Referenced Files
None
Subscribers
None
D55045.id171115.diff
View Options
diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h
--- a/sys/x86/include/x86_var.h
+++ b/sys/x86/include/x86_var.h
@@ -162,7 +162,7 @@
uint64_t rdtsc_ordered(void);
/*
- * MSR ops for x86_msr_op()
+ * MSR ops for x86_msr_op().
*/
#define MSR_OP_ANDNOT 0x00000001
#define MSR_OP_OR 0x00000002
@@ -170,10 +170,9 @@
#define MSR_OP_READ 0x00000004
/*
- * Where and which execution mode
- *
- * All modes cause execution on the target CPU(s) with interrupts disabled.
+ * Where and which execution mode.
*/
+#define MSR_OP_SAFE 0x08000000
#define MSR_OP_LOCAL 0x10000000
#define MSR_OP_SCHED_ALL 0x20000000
#define MSR_OP_SCHED_ONE 0x30000000
@@ -181,7 +180,7 @@
#define MSR_OP_RENDEZVOUS_ONE 0x50000000
#define MSR_OP_CPUID(id) ((id) << 8)
-void x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res);
+int x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res);
#if defined(__i386__) && defined(INVARIANTS)
void trap_check_kstack(void);
diff --git a/sys/x86/x86/cpu_machdep.c b/sys/x86/x86/cpu_machdep.c
--- a/sys/x86/x86/cpu_machdep.c
+++ b/sys/x86/x86/cpu_machdep.c
@@ -118,23 +118,63 @@
int op;
uint64_t arg1;
uint64_t *res;
+ bool safe;
};
-static void
-x86_msr_op_one(void *argp)
+static int
+x86_msr_op_one_safe(struct msr_op_arg *a)
+{
+ uint64_t v;
+ int error;
+
+ error = 0;
+ switch (a->op) {
+ case MSR_OP_ANDNOT:
+ error = rdmsr_safe(a->msr, &v);
+ if (error != 0)
+ break;
+ if (a->res != NULL)
+ *(a->res) = v;
+ v &= ~a->arg1;
+ error = wrmsr_safe(a->msr, v);
+ break;
+ case MSR_OP_OR:
+ error = rdmsr_safe(a->msr, &v);
+ if (error != 0)
+ break;
+ if (a->res != NULL)
+ *(a->res) = v;
+ v |= a->arg1;
+ error = wrmsr_safe(a->msr, v);
+ break;
+ case MSR_OP_WRITE:
+ error = wrmsr_safe(a->msr, a->arg1);
+ break;
+ case MSR_OP_READ:
+ error = rdmsr_safe(a->msr, &v);
+ if (error == 0 && a->res != NULL)
+ *a->res = v;
+ break;
+ }
+ return (error);
+}
+
+static void
+x86_msr_op_one_unsafe(struct msr_op_arg *a)
{
- struct msr_op_arg *a;
uint64_t v;
- a = argp;
switch (a->op) {
case MSR_OP_ANDNOT:
v = rdmsr(a->msr);
+ if (a->res != NULL)
+ *(a->res) = v;
v &= ~a->arg1;
wrmsr(a->msr, v);
break;
case MSR_OP_OR:
v = rdmsr(a->msr);
+ if (a->res != NULL)
+ *(a->res) = v;
v |= a->arg1;
wrmsr(a->msr, v);
break;
@@ -143,18 +183,76 @@
break;
case MSR_OP_READ:
v = rdmsr(a->msr);
- *a->res = v;
+ if (a->res != NULL)
+ *(a->res) = v;
break;
default:
__assert_unreachable();
}
}
+static void
+x86_msr_op_one_unsafe_void(void *arg)
+{
+ x86_msr_op_one_unsafe(arg);
+}
+
+static int
+x86_msr_op_one(struct msr_op_arg *a)
+{
+ if (a->safe)
+ return (x86_msr_op_one_safe(a));
+ x86_msr_op_one_unsafe(a);
+ return (0);
+}
+
#define MSR_OP_EXMODE_MASK 0xf0000000
#define MSR_OP_OP_MASK 0x000000ff
#define MSR_OP_GET_CPUID(x) (((x) & ~MSR_OP_EXMODE_MASK) >> 8)
-void
+/*
+ * Utility function to wrap common MSR accesses.
+ *
+ * The msr argument specifies the MSR number to operate on.
+ * arg1 is an optional additional argument which is needed by
+ * modifying ops.
+ * res is the location where the value read from MSR is placed.
+ * It is the value that was initially read from the MSR, before
+ * applying the specified operation. Can be NULL if the value
+ * is not needed.
+ *
+ * op encoding combines the target/mode specification and the requested
+ * operation, all or-ed together.
+ *
+ * The following targets can be specified:
+ * MSR_OP_LOCAL execute on current CPU, with interrupts
+ * disabled around the op.
+ * MSR_OP_SCHED_ALL execute on all CPUs, by migrating the
+ * current thread to them in sequence.
+ * MSR_OP_SCHED_ALL | MSR_OP_SAFE execute on all CPUs by migrating,
+ * using safe MSR access.
+ * MSR_OP_SCHED_ONE execute on specified CPU, migrate
+ * curthread to it.
+ * MSR_OP_SCHED_ONE | MSR_OP_SAFE safely execute on specified CPU,
+ * migrate curthread to it.
+ * MSR_OP_RENDEZVOUS_ALL execute on all CPUs in interrupt
+ * context, interrupts are disabled
+ * around op.
+ * MSR_OP_RENDEZVOUS_ONE execute on specified CPU in interrupt
+ * context, interrupts are disabled
+ * around op.
+ * If a _ONE target is specified, bitwise-OR the op value with MSR_OP_CPUID(cpuid)
+ * to name the target CPU. _SAFE variants might return EFAULT if access to
+ * MSR faulted with #GP. Non-_SAFE variants most likely panic or reboot
+ * the machine if the MSR is not present or access is not tolerated by hw.
+ *
+ * The following operations can be specified:
+ * MSR_OP_ANDNOT *res = v = *msr; *msr = v & ~arg1
+ * MSR_OP_OR *res = v = *msr; *msr = v | arg1
+ * MSR_OP_READ *res = *msr
+ * MSR_OP_WRITE *msr = arg1
+ */
+int
x86_msr_op(u_int msr, u_int op, uint64_t arg1, uint64_t *res)
{
struct thread *td;
@@ -162,16 +260,23 @@
cpuset_t set;
register_t flags;
u_int exmode;
- int bound_cpu, cpu, i, is_bound;
+ int bound_cpu, cpu, error, i, is_bound;
exmode = op & MSR_OP_EXMODE_MASK;
a.op = op & MSR_OP_OP_MASK;
a.msr = msr;
+ a.safe = (op & MSR_OP_SAFE) != 0;
a.arg1 = arg1;
a.res = res;
+ error = 0;
+ /*
+ * MSR_OP_RENDEZVOUS and MSR_OP_LOCAL variants cannot be safe,
+ * and cannot return an error.
+ */
switch (exmode) {
case MSR_OP_LOCAL:
+ MPASS(!a.safe);
flags = intr_disable();
x86_msr_op_one(&a);
intr_restore(flags);
@@ -181,10 +286,12 @@
thread_lock(td);
is_bound = sched_is_bound(td);
bound_cpu = td->td_oncpu;
CPU_FOREACH(i) {
sched_bind(td, i);
- x86_msr_op_one(&a);
+ thread_unlock(td);
+ error = x86_msr_op_one(&a);
+ thread_lock(td);
}
if (is_bound)
sched_bind(td, bound_cpu);
else
@@ -199,7 +306,9 @@
bound_cpu = td->td_oncpu;
if (!is_bound || bound_cpu != cpu)
sched_bind(td, cpu);
- x86_msr_op_one(&a);
+ thread_unlock(td);
+ error = x86_msr_op_one(&a);
+ thread_lock(td);
if (is_bound) {
if (bound_cpu != cpu)
sched_bind(td, bound_cpu);
@@ -209,18 +318,21 @@
thread_unlock(td);
break;
case MSR_OP_RENDEZVOUS_ALL:
- smp_rendezvous(smp_no_rendezvous_barrier, x86_msr_op_one,
- smp_no_rendezvous_barrier, &a);
+ MPASS(!a.safe);
+ smp_rendezvous(smp_no_rendezvous_barrier,
+ x86_msr_op_one_unsafe_void, smp_no_rendezvous_barrier, &a);
break;
case MSR_OP_RENDEZVOUS_ONE:
+ MPASS(!a.safe);
cpu = MSR_OP_GET_CPUID(op);
CPU_SETOF(cpu, &set);
smp_rendezvous_cpus(set, smp_no_rendezvous_barrier,
- x86_msr_op_one, smp_no_rendezvous_barrier, &a);
+ x86_msr_op_one_unsafe_void, smp_no_rendezvous_barrier, &a);
break;
default:
__assert_unreachable();
}
+ return (error);
}
/*
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Wed, Apr 22, 8:29 AM (22 h, 28 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
31965028
Default Alt Text
D55045.id171115.diff (6 KB)
Attached To
Mode
D55045: x86: add a safe variant of MSR_OP_SCHED* operations
Attached
Detach File
Event Timeline
Log In to Comment