Page MenuHomeFreeBSD

D48441.id149175.diff
No OneTemporary

D48441.id149175.diff

Index: sys/conf/files.riscv
===================================================================
--- sys/conf/files.riscv
+++ sys/conf/files.riscv
@@ -86,6 +86,7 @@
riscv/vmm/vmm.c optional vmm
riscv/vmm/vmm_aplic.c optional vmm
riscv/vmm/vmm_dev_machdep.c optional vmm
+riscv/vmm/vmm_fence.c optional vmm
riscv/vmm/vmm_instruction_emul.c optional vmm
riscv/vmm/vmm_riscv.c optional vmm
riscv/vmm/vmm_sbi.c optional vmm
Index: sys/riscv/include/cpufunc.h
===================================================================
--- sys/riscv/include/cpufunc.h
+++ sys/riscv/include/cpufunc.h
@@ -104,6 +104,21 @@
__asm __volatile("sfence.vma %0" :: "r" (addr) : "memory");
}
+static __inline void
+sfence_vma_asid(uint64_t asid)
+{
+	/* Invalidate all TLB entries for the given ASID (rs1 = x0: every VA). */
+	__asm __volatile("sfence.vma x0, %0" :: "r" (asid) : "memory");
+}
+
+static __inline void
+sfence_vma_asid_page(uint64_t asid, uintptr_t addr)
+{
+	/* Invalidate TLB entries for one page (addr) within the given ASID. */
+	__asm __volatile("sfence.vma %0, %1" :: "r" (addr), "r" (asid)
+	    : "memory");
+}
+
#define rdcycle() csr_read64(cycle)
#define rdtime() csr_read64(time)
#define rdinstret() csr_read64(instret)
Index: sys/riscv/vmm/riscv.h
===================================================================
--- sys/riscv/vmm/riscv.h
+++ sys/riscv/vmm/riscv.h
@@ -67,6 +67,20 @@
uint64_t senvcfg;
};
+enum vmm_fence_type {
+	VMM_RISCV_FENCE_UNK = 0,	/* Also marks an empty queue slot. */
+	VMM_RISCV_FENCE_I,		/* Remote fence.i (instr. stream sync). */
+	VMM_RISCV_FENCE_VMA,		/* Remote sfence.vma over a VA range. */
+	VMM_RISCV_FENCE_VMA_ASID,	/* Remote sfence.vma for a given ASID. */
+};
+
+struct vmm_fence {
+	enum vmm_fence_type type;
+	size_t start;	/* First VA of the range to invalidate. */
+	size_t size;	/* Range length in bytes; start==0 && size==0: all. */
+	uint64_t asid;	/* Used by VMM_RISCV_FENCE_VMA_ASID only. */
+};
+
struct hypctx {
struct hypregs host_regs;
struct hypregs guest_regs;
@@ -82,6 +96,13 @@
int ipi_pending;
int interrupts_pending;
struct vtimer vtimer;
+
+	struct vmm_fence *fence_queue;	/* Ring of VMM_FENCE_QUEUE_SIZE slots. */
+	struct mtx fence_queue_mtx;	/* Spin mutex guarding head/tail/slots. */
+	int fence_queue_head;		/* Next slot to enqueue into. */
+	int fence_queue_tail;		/* Next slot to dequeue from. */
+	int fence_i_req;		/* Pending global fence.i request. */
+	int sfence_vma_req;		/* Pending global sfence.vma request. */
};
struct hyp {
Index: sys/riscv/vmm/vmm_fence.h
===================================================================
--- /dev/null
+++ sys/riscv/vmm/vmm_fence.h
@@ -0,0 +1,43 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory (Department of Computer Science and Technology) under Innovate
+ * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
+ * Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_FENCE_H_
+#define _VMM_FENCE_H_
+
+struct hypctx;
+
+#define VMM_FENCE_QUEUE_SIZE 128
+
+void vmm_fence_process(struct hypctx *hypctx);
+void vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence);
+
+#endif /* !_VMM_FENCE_H_ */
Index: sys/riscv/vmm/vmm_fence.c
===================================================================
--- /dev/null
+++ sys/riscv/vmm/vmm_fence.c
@@ -0,0 +1,209 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This software was developed by the University of Cambridge Computer
+ * Laboratory (Department of Computer Science and Technology) under Innovate
+ * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
+ * Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/smp.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/bus.h>
+
+#include "riscv.h"
+#include "vmm_fence.h"
+
+static int
+vmm_fence_dequeue(struct hypctx *hypctx, struct vmm_fence *new_fence)
+{
+	struct vmm_fence *queue;
+	struct vmm_fence *fence;
+	/* Pop the oldest fence from the vcpu ring into *new_fence; -1 if empty. */
+	mtx_lock_spin(&hypctx->fence_queue_mtx);
+	queue = hypctx->fence_queue;
+	fence = &queue[hypctx->fence_queue_tail];
+	if (fence->type != VMM_RISCV_FENCE_UNK) {
+		memcpy(new_fence, fence, sizeof(struct vmm_fence));
+		fence->type = VMM_RISCV_FENCE_UNK;	/* Mark the slot free. */
+		hypctx->fence_queue_tail += 1;
+		if (hypctx->fence_queue_tail == VMM_FENCE_QUEUE_SIZE)
+			hypctx->fence_queue_tail = 0;	/* Wrap around. */
+	} else {
+		mtx_unlock_spin(&hypctx->fence_queue_mtx);
+		return (-1);
+	}
+	mtx_unlock_spin(&hypctx->fence_queue_mtx);
+
+	return (0);
+}
+
+static int
+vmm_fence_enqueue(struct hypctx *hypctx, struct vmm_fence *new_fence)
+{
+	struct vmm_fence *queue;
+	struct vmm_fence *fence;
+	/* Push *new_fence onto the vcpu ring; -1 if the head slot is occupied. */
+	mtx_lock_spin(&hypctx->fence_queue_mtx);
+	queue = hypctx->fence_queue;
+	fence = &queue[hypctx->fence_queue_head];
+	if (fence->type == VMM_RISCV_FENCE_UNK) {
+		memcpy(fence, new_fence, sizeof(struct vmm_fence));
+		hypctx->fence_queue_head += 1;
+		if (hypctx->fence_queue_head == VMM_FENCE_QUEUE_SIZE)
+			hypctx->fence_queue_head = 0;	/* Wrap around. */
+	} else {
+		mtx_unlock_spin(&hypctx->fence_queue_mtx);
+		return (-1);
+	}
+	mtx_unlock_spin(&hypctx->fence_queue_mtx);
+
+	return (0);
+}
+
+static void
+vmm_fence_process_one(struct vmm_fence *fence)
+{
+	uint64_t offs;	/* Offset form avoids overflow of start + size. */
+	/* Apply one queued TLB invalidation on the current hart. */
+	KASSERT(fence->type == VMM_RISCV_FENCE_VMA ||
+	    fence->type == VMM_RISCV_FENCE_VMA_ASID,
+	    ("%s: wrong fence type %d", __func__, fence->type));
+
+	switch (fence->type) {
+	case VMM_RISCV_FENCE_VMA:
+		for (offs = 0; offs < fence->size;
+		    offs += PAGE_SIZE)
+			sfence_vma_page(fence->start + offs);
+		break;
+	case VMM_RISCV_FENCE_VMA_ASID:
+		if (fence->start == 0 && fence->size == 0)
+			sfence_vma_asid(fence->asid);
+		else {
+			for (offs = 0;
+			    offs < fence->size;
+			    offs += PAGE_SIZE)
+				sfence_vma_asid_page(fence->asid, fence->start + offs);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
+void
+vmm_fence_process(struct hypctx *hypctx)
+{
+	struct vmm_fence fence;
+	int pending;
+	/* Called on VM entry: apply all invalidations posted to this vcpu. */
+	pending = atomic_swap_32(&hypctx->fence_i_req, 0);
+	if (pending)
+		fence_i();	/* Global instruction-stream synchronization. */
+	/* Global sfence.vma; also the fallback set on fence-queue overflow. */
+	pending = atomic_swap_32(&hypctx->sfence_vma_req, 0);
+	if (pending)
+		sfence_vma();
+	/* Drain the ring of fine-grained VMA / VMA+ASID fences. */
+	while (vmm_fence_dequeue(hypctx, &fence) == 0)
+		vmm_fence_process_one(&fence);
+}
+
+void
+vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence)
+{
+	struct hypctx *hypctx;
+	cpuset_t running_vcpus;
+	struct vcpu *vcpu;
+	uint16_t maxcpus;
+	int error;
+	int state;
+	int enq;
+	int i;
+	/* Post the fence to every vcpu in 'cpus' and IPI the running ones. */
+	CPU_ZERO(&running_vcpus);
+
+	maxcpus = vm_get_maxcpus(vm);
+	for (i = 0; i < maxcpus; i++) {
+		if (!CPU_ISSET(i, cpus))
+			continue;
+		vcpu = vm_vcpu(vm, i);
+		hypctx = vcpu_get_cookie(vcpu);
+
+		enq = 0;
+
+		/* No need to enqueue fences i and vma global. */
+		switch (fence->type) {
+		case VMM_RISCV_FENCE_I:
+			atomic_set_32(&hypctx->fence_i_req, 1);
+			break;
+		case VMM_RISCV_FENCE_VMA:
+			if (fence->start == 0 && fence->size == 0)
+				atomic_set_32(&hypctx->sfence_vma_req, 1);
+			else
+				enq = 1;
+			break;
+		case VMM_RISCV_FENCE_VMA_ASID:
+			enq = 1;
+			break;
+		default:
+			KASSERT(0, ("%s: wrong fence type %d", __func__,
+			    fence->type));
+			break;
+		}
+
+		/*
+		 * Try to enqueue. In case of failure use more conservative
+		 * request.
+		 */
+		if (enq) {
+			error = vmm_fence_enqueue(hypctx, fence);
+			if (error)
+				atomic_set_32(&hypctx->sfence_vma_req, 1);
+		}
+
+		mb();	/* Publish the request before sampling vcpu state. */
+
+		state = vcpu_get_state(vcpu, NULL);
+		if (state == VCPU_RUNNING)
+			CPU_SET_ATOMIC(i, &running_vcpus);
+	}
+
+	/*
+	 * Interrupt other cores. On reception of IPI they will leave guest.
+	 * On entry back to the guest they will process fence request.
+	 *
+	 * If vcpu migrates to another cpu right here, it should process
+	 * all fences on entry to the guest as well.
+	 */
+	if (!CPU_EMPTY(&running_vcpus))
+		smp_rendezvous_cpus(running_vcpus, NULL, NULL, NULL, NULL);
+}
Index: sys/riscv/vmm/vmm_riscv.c
===================================================================
--- sys/riscv/vmm/vmm_riscv.c
+++ sys/riscv/vmm/vmm_riscv.c
@@ -68,6 +68,7 @@
#include "riscv.h"
#include "vmm_aplic.h"
+#include "vmm_fence.h"
#include "vmm_stat.h"
MALLOC_DEFINE(M_HYP, "RISC-V VMM HYP", "RISC-V VMM HYP");
@@ -212,6 +213,11 @@
hypctx->vcpu = vcpu1;
hypctx->guest_scounteren = HCOUNTEREN_CY | HCOUNTEREN_TM;
+ /* Fence queue. */
+ hypctx->fence_queue = mallocarray(VMM_FENCE_QUEUE_SIZE,
+ sizeof(struct vmm_fence), M_HYP, M_WAITOK | M_ZERO);
+ mtx_init(&hypctx->fence_queue_mtx, "fence queue", NULL, MTX_SPIN);
+
/* sstatus */
hypctx->guest_regs.hyp_sstatus = SSTATUS_SPP | SSTATUS_SPIE;
hypctx->guest_regs.hyp_sstatus |= SSTATUS_FS_INITIAL;
@@ -659,6 +665,7 @@
riscv_set_active_vcpu(hypctx);
aplic_flush_hwstate(hypctx);
riscv_sync_interrupts(hypctx);
+ vmm_fence_process(hypctx);
dprintf("%s: Entering guest VM, vsatp %lx, ss %lx hs %lx\n",
__func__, csr_read(vsatp), hypctx->guest_regs.hyp_sstatus,
@@ -740,6 +747,7 @@
aplic_cpucleanup(hypctx);
+ free(hypctx->fence_queue, M_HYP);
free(hypctx, M_HYP);
}
Index: sys/riscv/vmm/vmm_sbi.c
===================================================================
--- sys/riscv/vmm/vmm_sbi.c
+++ sys/riscv/vmm/vmm_sbi.c
@@ -58,39 +58,72 @@
#include <machine/sbi.h>
#include "riscv.h"
+#include "vmm_fence.h"
static int
vmm_sbi_handle_rfnc(struct vcpu *vcpu, struct hypctx *hypctx)
{
-	uint64_t hart_mask __unused;
-	uint64_t start __unused;
-	uint64_t size __unused;
-	uint64_t asid __unused;
+	struct vmm_fence fence;
+	uint64_t hart_mask;
+	uint64_t hart_mask_base;
	uint64_t func_id;
+	struct hyp *hyp;
+	uint16_t maxcpus;
+	cpuset_t cpus;
+	int vcpu_id;
+	int ret;
+	int i;

	func_id = hypctx->guest_regs.hyp_a[6];
	hart_mask = hypctx->guest_regs.hyp_a[0];
-	start = hypctx->guest_regs.hyp_a[2];
-	size = hypctx->guest_regs.hyp_a[3];
-	asid = hypctx->guest_regs.hyp_a[4];
+	hart_mask_base = hypctx->guest_regs.hyp_a[1];
+
+	/* Construct vma_fence from guest-supplied a2 (start), a3, a4. */
-	dprintf("%s: %ld hart_mask %lx start %lx size %lx\n", __func__,
-	    func_id, hart_mask, start, size);
+	fence.start = hypctx->guest_regs.hyp_a[2];
+	fence.size = hypctx->guest_regs.hyp_a[3];
+	fence.asid = hypctx->guest_regs.hyp_a[4];

-	/* TODO: implement remote sfence. */
+	ret = 0;
	switch (func_id) {
	case SBI_RFNC_REMOTE_FENCE_I:
+		fence.type = VMM_RISCV_FENCE_I;
		break;
	case SBI_RFNC_REMOTE_SFENCE_VMA:
+		fence.type = VMM_RISCV_FENCE_VMA;
		break;
	case SBI_RFNC_REMOTE_SFENCE_VMA_ASID:
+		fence.type = VMM_RISCV_FENCE_VMA_ASID;
		break;
	default:
-		break;
+		ret = -1;	/* Unsupported rfence function. */
+		goto done;
	}

-	hypctx->guest_regs.hyp_a[0] = 0;
+	/* Construct cpuset_t from the mask supplied. */
+
+	CPU_ZERO(&cpus);
+	hyp = hypctx->hyp;
+	maxcpus = vm_get_maxcpus(hyp->vm);
+	for (i = 0; i < maxcpus; i++) {
+		vcpu = vm_vcpu(hyp->vm, i);
+		if (vcpu == NULL)
+			continue;
+		vcpu_id = vcpu_vcpuid(vcpu);
+		if (hart_mask_base != -1UL) {	/* -1UL selects all harts. */
+			if (vcpu_id < hart_mask_base)
+				continue;
+			if (vcpu_id - hart_mask_base >= 64 || !(hart_mask & (1UL << (vcpu_id - hart_mask_base))))
+				continue;
+		}
+		CPU_SET_ATOMIC(i, &cpus);
+	}
+
+	vmm_fence_add(hyp->vm, &cpus, &fence);
+
+done:
+	hypctx->guest_regs.hyp_a[0] = ret;
	return (0);
}

File Metadata

Mime Type
text/plain
Expires
Sun, Mar 15, 5:11 PM (6 h, 59 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
29724553
Default Alt Text
D48441.id149175.diff (13 KB)

Event Timeline