diff --git a/sys/dev/hwt/hwt_ioctl.c b/sys/dev/hwt/hwt_ioctl.c
index 04133838d292..592db4931bb4 100644
--- a/sys/dev/hwt/hwt_ioctl.c
+++ b/sys/dev/hwt/hwt_ioctl.c
@@ -1,443 +1,445 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023-2025 Ruslan Bukin
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Hardware Trace (HWT) framework. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define HWT_IOCTL_DEBUG
#undef HWT_IOCTL_DEBUG
#ifdef HWT_IOCTL_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
/* No real reason for these limitations; just sanity checks. */
#define HWT_MAXBUFSIZE (32UL * 1024 * 1024 * 1024) /* 32 GB */
static MALLOC_DEFINE(M_HWT_IOCTL, "hwt_ioctl", "Hardware Trace");
/*
* Check if owner process *o can trace target process *t.
*/
static int
hwt_priv_check(struct proc *o, struct proc *t)
{
struct ucred *oc, *tc;
int error;
int i;
PROC_LOCK(o);
oc = o->p_ucred;
crhold(oc);
PROC_UNLOCK(o);
PROC_LOCK_ASSERT(t, MA_OWNED);
tc = t->p_ucred;
crhold(tc);
error = 0;
/*
* The effective uid of the HWT owner should match at least one
* of the effective / real / saved uids of the target process.
*/
if (oc->cr_uid != tc->cr_uid &&
oc->cr_uid != tc->cr_svuid &&
oc->cr_uid != tc->cr_ruid) {
error = EPERM;
goto done;
}
/*
* Every one of the target's group ids must be in the owner's
* group list.
*/
for (i = 0; i < tc->cr_ngroups; i++)
if (!groupmember(tc->cr_groups[i], oc)) {
error = EPERM;
goto done;
}
/* Check the real and saved GIDs too. */
if (!groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc)) {
error = EPERM;
goto done;
}
done:
crfree(tc);
crfree(oc);
return (error);
}
static int
hwt_ioctl_alloc_mode_thread(struct thread *td, struct hwt_owner *ho,
struct hwt_backend *backend, struct hwt_alloc *halloc)
{
struct thread **threads, *td1;
struct hwt_record_entry *entry;
struct hwt_context *ctx, *ctx1;
struct hwt_thread *thr;
char path[MAXPATHLEN];
struct proc *p;
int thread_id;
int error;
int cnt;
int i;
/* Check if the owner has this pid configured already. */
ctx = hwt_owner_lookup_ctx(ho, halloc->pid);
if (ctx)
return (EEXIST);
/* Allocate a new HWT context. */
error = hwt_ctx_alloc(&ctx);
if (error)
return (error);
ctx->bufsize = halloc->bufsize;
ctx->pid = halloc->pid;
ctx->hwt_backend = backend;
ctx->hwt_owner = ho;
ctx->mode = HWT_MODE_THREAD;
ctx->hwt_td = td;
ctx->kqueue_fd = halloc->kqueue_fd;
error = copyout(&ctx->ident, halloc->ident, sizeof(int));
if (error) {
hwt_ctx_free(ctx);
return (error);
}
/* Now get the victim proc. */
p = pfind(halloc->pid);
if (p == NULL) {
hwt_ctx_free(ctx);
return (ENXIO);
}
/* Ensure we can trace it. */
error = hwt_priv_check(td->td_proc, p);
if (error) {
PROC_UNLOCK(p);
hwt_ctx_free(ctx);
return (error);
}
/* Ensure it is not being traced already. */
ctx1 = hwt_contexthash_lookup(p);
if (ctx1) {
refcount_release(&ctx1->refcnt);
PROC_UNLOCK(p);
hwt_ctx_free(ctx);
return (EEXIST);
}
/* Allocate hwt threads and buffers. */
cnt = 0;
FOREACH_THREAD_IN_PROC(p, td1) {
cnt += 1;
}
KASSERT(cnt > 0, ("no threads"));
threads = malloc(sizeof(struct thread *) * cnt, M_HWT_IOCTL,
M_NOWAIT | M_ZERO);
if (threads == NULL) {
PROC_UNLOCK(p);
hwt_ctx_free(ctx);
return (ENOMEM);
}
i = 0;
FOREACH_THREAD_IN_PROC(p, td1) {
threads[i++] = td1;
}
ctx->proc = p;
PROC_UNLOCK(p);
for (i = 0; i < cnt; i++) {
thread_id = atomic_fetchadd_int(&ctx->thread_counter, 1);
sprintf(path, "hwt_%d_%d", ctx->ident, thread_id);
error = hwt_thread_alloc(&thr, path, ctx->bufsize,
ctx->hwt_backend->kva_req);
if (error) {
free(threads, M_HWT_IOCTL);
hwt_ctx_free(ctx);
return (error);
}
/* Allocate backend-specific thread data. */
error = hwt_backend_thread_alloc(ctx, thr);
if (error != 0) {
dprintf("%s: failed to allocate thread backend data\n",
__func__);
free(threads, M_HWT_IOCTL);
hwt_ctx_free(ctx);
return (error);
}
/*
* Insert a THREAD_CREATE record so userspace picks up
* the thread's tracing buffers.
*/
entry = hwt_record_entry_alloc();
entry->record_type = HWT_RECORD_THREAD_CREATE;
entry->thread_id = thread_id;
thr->vm->ctx = ctx;
thr->td = threads[i];
thr->ctx = ctx;
thr->backend = ctx->hwt_backend;
thr->thread_id = thread_id;
HWT_CTX_LOCK(ctx);
hwt_thread_insert(ctx, thr, entry);
HWT_CTX_UNLOCK(ctx);
}
free(threads, M_HWT_IOCTL);
error = hwt_backend_init(ctx);
if (error) {
hwt_ctx_free(ctx);
return (error);
}
/* hwt_owner_insert_ctx? */
mtx_lock(&ho->mtx);
LIST_INSERT_HEAD(&ho->hwts, ctx, next_hwts);
mtx_unlock(&ho->mtx);
/*
* Hooks are in action after this point, but the ctx is not yet in the
* RUNNING state.
*/
hwt_contexthash_insert(ctx);
p = pfind(halloc->pid);
if (p) {
p->p_flag2 |= P2_HWT;
PROC_UNLOCK(p);
}
return (0);
}
static int
hwt_ioctl_alloc_mode_cpu(struct thread *td, struct hwt_owner *ho,
struct hwt_backend *backend, struct hwt_alloc *halloc)
{
struct hwt_context *ctx;
struct hwt_cpu *cpu;
struct hwt_vm *vm;
char path[MAXPATHLEN];
size_t cpusetsize;
cpuset_t cpu_map;
int cpu_count = 0;
int cpu_id;
int error;
CPU_ZERO(&cpu_map);
cpusetsize = min(halloc->cpusetsize, sizeof(cpuset_t));
error = copyin(halloc->cpu_map, &cpu_map, cpusetsize);
if (error)
return (error);
CPU_FOREACH_ISSET(cpu_id, &cpu_map) {
+#ifdef SMP
/* Ensure CPU is not halted. */
if (CPU_ISSET(cpu_id, &hlt_cpus_mask))
return (ENXIO);
+#endif
#if 0
/* TODO: Check if the owner has this cpu configured already. */
ctx = hwt_owner_lookup_ctx_by_cpu(ho, halloc->cpu);
if (ctx)
return (EEXIST);
#endif
cpu_count++;
}
if (cpu_count == 0)
return (ENODEV);
/* Allocate a new HWT context. */
error = hwt_ctx_alloc(&ctx);
if (error)
return (error);
ctx->bufsize = halloc->bufsize;
ctx->hwt_backend = backend;
ctx->hwt_owner = ho;
ctx->mode = HWT_MODE_CPU;
ctx->cpu_map = cpu_map;
ctx->hwt_td = td;
ctx->kqueue_fd = halloc->kqueue_fd;
error = copyout(&ctx->ident, halloc->ident, sizeof(int));
if (error) {
hwt_ctx_free(ctx);
return (error);
}
CPU_FOREACH_ISSET(cpu_id, &cpu_map) {
sprintf(path, "hwt_%d_%d", ctx->ident, cpu_id);
error = hwt_vm_alloc(ctx->bufsize, ctx->hwt_backend->kva_req,
path, &vm);
if (error) {
/* TODO: remove all allocated cpus. */
hwt_ctx_free(ctx);
return (error);
}
cpu = hwt_cpu_alloc();
cpu->cpu_id = cpu_id;
cpu->vm = vm;
vm->cpu = cpu;
vm->ctx = ctx;
HWT_CTX_LOCK(ctx);
hwt_cpu_insert(ctx, cpu);
HWT_CTX_UNLOCK(ctx);
}
error = hwt_backend_init(ctx);
if (error) {
/* TODO: remove all allocated cpus. */
hwt_ctx_free(ctx);
return (error);
}
/* hwt_owner_insert_ctx? */
mtx_lock(&ho->mtx);
LIST_INSERT_HEAD(&ho->hwts, ctx, next_hwts);
mtx_unlock(&ho->mtx);
hwt_record_kernel_objects(ctx);
return (0);
}
static int
hwt_ioctl_alloc(struct thread *td, struct hwt_alloc *halloc)
{
char backend_name[HWT_BACKEND_MAXNAMELEN];
struct hwt_backend *backend;
struct hwt_owner *ho;
int error;
if (halloc->bufsize > HWT_MAXBUFSIZE)
return (EINVAL);
if (halloc->bufsize % PAGE_SIZE)
return (EINVAL);
if (halloc->backend_name == NULL)
return (EINVAL);
error = copyinstr(halloc->backend_name, (void *)backend_name,
HWT_BACKEND_MAXNAMELEN, NULL);
if (error)
return (error);
backend = hwt_backend_lookup(backend_name);
if (backend == NULL)
return (ENODEV);
/* First get the owner. */
ho = hwt_ownerhash_lookup(td->td_proc);
if (ho == NULL) {
/* Create a new owner. */
ho = hwt_owner_alloc(td->td_proc);
if (ho == NULL)
return (ENOMEM);
hwt_ownerhash_insert(ho);
}
switch (halloc->mode) {
case HWT_MODE_THREAD:
error = hwt_ioctl_alloc_mode_thread(td, ho, backend, halloc);
break;
case HWT_MODE_CPU:
error = hwt_ioctl_alloc_mode_cpu(td, ho, backend, halloc);
break;
default:
error = ENXIO;
};
return (error);
}
int
hwt_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
struct thread *td)
{
int error;
switch (cmd) {
case HWT_IOC_ALLOC:
/* Allocate HWT context. */
error = hwt_ioctl_alloc(td, (struct hwt_alloc *)addr);
return (error);
default:
return (ENXIO);
};
}
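For reference, here is a minimal userspace sketch of the HWT_IOC_ALLOC path in CPU mode, the path touched by the new SMP guard above. Field names follow struct hwt_alloc as dereferenced in hwt_ioctl_alloc_mode_cpu(); the control device path /dev/hwt, the header location, the backend name, and the buffer size are assumptions for illustration only and are not part of this change.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/event.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

#include <dev/hwt/hwt.h>	/* assumed header for HWT_IOC_ALLOC, HWT_MODE_CPU, struct hwt_alloc */

int
main(void)
{
	struct hwt_alloc al = { 0 };
	cpuset_t cpus;
	int fd, ident;

	fd = open("/dev/hwt", O_RDWR);		/* assumed control device node */
	if (fd < 0)
		err(1, "open");

	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);			/* trace CPU 0 only */

	al.mode = HWT_MODE_CPU;
	al.bufsize = 16 * 1024 * 1024;		/* must be a multiple of PAGE_SIZE */
	al.cpu_map = &cpus;
	al.cpusetsize = sizeof(cpus);
	al.backend_name = "coresight";		/* assumed backend name */
	al.ident = &ident;			/* kernel copies ctx->ident out here */
	al.kqueue_fd = kqueue();		/* used for record notifications */

	if (ioctl(fd, HWT_IOC_ALLOC, &al) != 0)
		err(1, "HWT_IOC_ALLOC");

	/* Per-CPU buffers now show up as hwt_<ident>_<cpu_id> character devices. */
	return (0);
}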
diff --git a/sys/dev/hwt/hwt_vm.c b/sys/dev/hwt/hwt_vm.c
index a6799c30300d..6c55e218dcec 100644
--- a/sys/dev/hwt/hwt_vm.c
+++ b/sys/dev/hwt/hwt_vm.c
@@ -1,501 +1,503 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2023-2025 Ruslan Bukin
*
* This work was supported by Innovate UK project 105694, "Digital Security
* by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#define HWT_THREAD_DEBUG
#undef HWT_THREAD_DEBUG
#ifdef HWT_THREAD_DEBUG
#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif
static MALLOC_DEFINE(M_HWT_VM, "hwt_vm", "Hardware Trace");
static int
hwt_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
int prot, vm_page_t *mres)
{
return (0);
}
static int
hwt_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
*color = 0;
return (0);
}
static void
hwt_vm_dtor(void *handle)
{
}
static struct cdev_pager_ops hwt_vm_pager_ops = {
.cdev_pg_fault = hwt_vm_fault,
.cdev_pg_ctor = hwt_vm_ctor,
.cdev_pg_dtor = hwt_vm_dtor
};
static int
hwt_vm_alloc_pages(struct hwt_vm *vm, int kva_req)
{
vm_paddr_t low, high, boundary;
vm_memattr_t memattr;
#ifdef __aarch64__
uintptr_t va;
#endif
int alignment;
vm_page_t m;
int pflags;
int tries;
int i;
alignment = PAGE_SIZE;
low = 0;
high = -1UL;
boundary = 0;
pflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
memattr = VM_MEMATTR_DEVICE;
if (kva_req) {
vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE);
if (!vm->kvaddr)
return (ENOMEM);
}
vm->obj = cdev_pager_allocate(vm, OBJT_MGTDEVICE,
&hwt_vm_pager_ops, vm->npages * PAGE_SIZE, PROT_READ, 0,
curthread->td_ucred);
for (i = 0; i < vm->npages; i++) {
tries = 0;
retry:
m = vm_page_alloc_noobj_contig(pflags, 1, low, high,
alignment, boundary, memattr);
if (m == NULL) {
if (tries < 3) {
if (!vm_page_reclaim_contig(pflags, 1, low,
high, alignment, boundary))
vm_wait(NULL);
tries++;
goto retry;
}
return (ENOMEM);
}
#if 0
/* TODO: could not clean device memory on arm64. */
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
#endif
#ifdef __aarch64__
va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
cpu_dcache_wb_range((void *)va, PAGE_SIZE);
#endif
m->valid = VM_PAGE_BITS_ALL;
m->oflags &= ~VPO_UNMANAGED;
m->flags |= PG_FICTITIOUS;
vm->pages[i] = m;
VM_OBJECT_WLOCK(vm->obj);
vm_page_insert(m, vm->obj, i);
if (kva_req)
pmap_qenter(vm->kvaddr + i * PAGE_SIZE, &m, 1);
VM_OBJECT_WUNLOCK(vm->obj);
}
return (0);
}
static int
hwt_vm_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
dprintf("%s\n", __func__);
return (0);
}
static int
hwt_vm_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
vm_size_t mapsize, struct vm_object **objp, int nprot)
{
struct hwt_vm *vm;
vm = cdev->si_drv1;
if (nprot != PROT_READ || *offset != 0)
return (ENXIO);
vm_object_reference(vm->obj);
*objp = vm->obj;
return (0);
}
static void
hwt_vm_start_cpu_mode(struct hwt_context *ctx)
{
cpuset_t enable_cpus;
int cpu_id;
CPU_ZERO(&enable_cpus);
CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
+#ifdef SMP
/* Ensure CPU is not halted. */
if (CPU_ISSET(cpu_id, &hlt_cpus_mask))
continue;
+#endif
hwt_backend_configure(ctx, cpu_id, cpu_id);
CPU_SET(cpu_id, &enable_cpus);
}
if (ctx->hwt_backend->ops->hwt_backend_enable_smp == NULL) {
CPU_FOREACH_ISSET(cpu_id, &enable_cpus)
hwt_backend_enable(ctx, cpu_id);
} else {
/* Some backends require enabling all CPUs at once. */
hwt_backend_enable_smp(ctx);
}
}
static int
hwt_vm_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
struct thread *td)
{
struct hwt_record_get *rget;
struct hwt_set_config *sconf;
struct hwt_bufptr_get *ptr_get;
struct hwt_svc_buf *sbuf;
struct hwt_context *ctx;
struct hwt_vm *vm;
struct hwt_owner *ho;
vm_offset_t offset;
int ident;
int error;
uint64_t data = 0;
void *data2;
size_t data_size;
int data_version;
vm = dev->si_drv1;
KASSERT(vm != NULL, ("si_drv1 is NULL"));
ctx = vm->ctx;
/* Ensure process is registered owner of this HWT. */
ho = hwt_ownerhash_lookup(td->td_proc);
if (ho == NULL)
return (ENXIO);
if (ctx->hwt_owner != ho)
return (EPERM);
switch (cmd) {
case HWT_IOC_START:
dprintf("%s: start tracing\n", __func__);
HWT_CTX_LOCK(ctx);
if (ctx->state == CTX_STATE_RUNNING) {
/* Already running? */
HWT_CTX_UNLOCK(ctx);
return (ENXIO);
}
ctx->state = CTX_STATE_RUNNING;
HWT_CTX_UNLOCK(ctx);
if (ctx->mode == HWT_MODE_CPU)
hwt_vm_start_cpu_mode(ctx);
else {
/*
* Tracing backend will be configured and enabled
* during hook invocation. See hwt_hook.c.
*/
}
break;
case HWT_IOC_STOP:
if (ctx->state == CTX_STATE_STOPPED)
return (ENXIO);
hwt_backend_stop(ctx);
ctx->state = CTX_STATE_STOPPED;
break;
case HWT_IOC_RECORD_GET:
rget = (struct hwt_record_get *)addr;
error = hwt_record_send(ctx, rget);
if (error)
return (error);
break;
case HWT_IOC_SET_CONFIG:
if (ctx->state == CTX_STATE_RUNNING) {
return (ENXIO);
}
sconf = (struct hwt_set_config *)addr;
error = hwt_config_set(td, ctx, sconf);
if (error)
return (error);
ctx->pause_on_mmap = sconf->pause_on_mmap ? 1 : 0;
break;
case HWT_IOC_WAKEUP:
if (ctx->mode == HWT_MODE_CPU)
return (ENXIO);
KASSERT(vm->thr != NULL, ("thr is NULL"));
wakeup(vm->thr);
break;
case HWT_IOC_BUFPTR_GET:
ptr_get = (struct hwt_bufptr_get *)addr;
error = hwt_backend_read(ctx, vm, &ident, &offset, &data);
if (error)
return (error);
if (ptr_get->ident)
error = copyout(&ident, ptr_get->ident, sizeof(int));
if (error)
return (error);
if (ptr_get->offset)
error = copyout(&offset, ptr_get->offset,
sizeof(vm_offset_t));
if (error)
return (error);
if (ptr_get->data)
error = copyout(&data, ptr_get->data, sizeof(uint64_t));
if (error)
return (error);
break;
case HWT_IOC_SVC_BUF:
if (ctx->state == CTX_STATE_STOPPED) {
return (ENXIO);
}
sbuf = (struct hwt_svc_buf *)addr;
data_size = sbuf->data_size;
data_version = sbuf->data_version;
if (data_size == 0 || data_size > PAGE_SIZE)
return (EINVAL);
data2 = malloc(data_size, M_HWT_VM, M_WAITOK | M_ZERO);
error = copyin(sbuf->data, data2, data_size);
if (error) {
free(data2, M_HWT_VM);
return (error);
}
error = hwt_backend_svc_buf(ctx, data2, data_size, data_version);
if (error) {
free(data2, M_HWT_VM);
return (error);
}
free(data2, M_HWT_VM);
break;
default:
break;
}
return (0);
}
static struct cdevsw hwt_vm_cdevsw = {
.d_version = D_VERSION,
.d_name = "hwt",
.d_open = hwt_vm_open,
.d_mmap_single = hwt_vm_mmap_single,
.d_ioctl = hwt_vm_ioctl,
};
static int
hwt_vm_create_cdev(struct hwt_vm *vm, char *path)
{
struct make_dev_args args;
int error;
dprintf("%s: path %s\n", __func__, path);
make_dev_args_init(&args);
args.mda_devsw = &hwt_vm_cdevsw;
args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
args.mda_uid = UID_ROOT;
args.mda_gid = GID_WHEEL;
args.mda_mode = 0660;
args.mda_si_drv1 = vm;
error = make_dev_s(&args, &vm->cdev, "%s", path);
if (error != 0)
return (error);
return (0);
}
static int
hwt_vm_alloc_buffers(struct hwt_vm *vm, int kva_req)
{
int error;
vm->pages = malloc(sizeof(struct vm_page *) * vm->npages,
M_HWT_VM, M_WAITOK | M_ZERO);
error = hwt_vm_alloc_pages(vm, kva_req);
if (error) {
printf("%s: could not alloc pages\n", __func__);
return (error);
}
return (0);
}
static void
hwt_vm_destroy_buffers(struct hwt_vm *vm)
{
vm_page_t m;
int i;
if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) {
pmap_qremove(vm->kvaddr, vm->npages);
kva_free(vm->kvaddr, vm->npages * PAGE_SIZE);
}
VM_OBJECT_WLOCK(vm->obj);
for (i = 0; i < vm->npages; i++) {
m = vm->pages[i];
if (m == NULL)
break;
vm_page_busy_acquire(m, 0);
cdev_pager_free_page(vm->obj, m);
m->flags &= ~PG_FICTITIOUS;
vm_page_unwire_noq(m);
vm_page_free(m);
}
vm_pager_deallocate(vm->obj);
VM_OBJECT_WUNLOCK(vm->obj);
free(vm->pages, M_HWT_VM);
}
void
hwt_vm_free(struct hwt_vm *vm)
{
dprintf("%s\n", __func__);
if (vm->cdev)
destroy_dev_sched(vm->cdev);
hwt_vm_destroy_buffers(vm);
free(vm, M_HWT_VM);
}
int
hwt_vm_alloc(size_t bufsize, int kva_req, char *path, struct hwt_vm **vm0)
{
struct hwt_vm *vm;
int error;
vm = malloc(sizeof(struct hwt_vm), M_HWT_VM, M_WAITOK | M_ZERO);
vm->npages = bufsize / PAGE_SIZE;
error = hwt_vm_alloc_buffers(vm, kva_req);
if (error) {
free(vm, M_HWT_VM);
return (error);
}
error = hwt_vm_create_cdev(vm, path);
if (error) {
hwt_vm_free(vm);
return (error);
}
*vm0 = vm;
return (0);
}
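As a companion illustration, a hedged sketch of a consumer of the per-buffer character device created by hwt_vm_create_cdev(): hwt_vm_mmap_single() only accepts read-only mappings at offset 0, and HWT_IOC_BUFPTR_GET reports how far the backend has written. The node name follows the hwt_<ident>_<id> pattern used above; the concrete path, the header location, and the buffer size are assumptions.

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>

#include <dev/hwt/hwt.h>	/* assumed header for HWT_IOC_* and struct hwt_bufptr_get */

int
main(void)
{
	struct hwt_bufptr_get bget = { 0 };
	size_t bufsize = 16 * 1024 * 1024;	/* must match the HWT_IOC_ALLOC bufsize */
	vm_offset_t offset;
	uint64_t data;
	void *buf;
	int fd, ident;

	/* Assumed node name; hwt_vm_create_cdev() uses the hwt_<ident>_<id> pattern. */
	fd = open("/dev/hwt_0_0", O_RDONLY);
	if (fd < 0)
		err(1, "open");

	/* hwt_vm_mmap_single() only allows PROT_READ mappings at offset 0. */
	buf = mmap(NULL, bufsize, PROT_READ, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		err(1, "mmap");

	/* Tracing is started separately with HWT_IOC_START on this device (not shown). */

	bget.ident = &ident;
	bget.offset = &offset;
	bget.data = &data;
	if (ioctl(fd, HWT_IOC_BUFPTR_GET, &bget) != 0)
		err(1, "HWT_IOC_BUFPTR_GET");

	printf("buffer mapped at %p, backend offset %#lx\n", buf,
	    (unsigned long)offset);

	return (0);
}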