diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -1445,7 +1445,7 @@
 		exit(4);
 	}
 
-	init_mem();
+	init_mem(guest_ncpus);
 	init_inout();
 	kernemu_dev_init();
 	init_bootrom(ctx);
diff --git a/usr.sbin/bhyve/mem.h b/usr.sbin/bhyve/mem.h
--- a/usr.sbin/bhyve/mem.h
+++ b/usr.sbin/bhyve/mem.h
@@ -52,7 +52,7 @@
 #define	MEM_F_RW		0x3
 #define	MEM_F_IMMUTABLE		0x4	/* mem_range cannot be unregistered */
 
-void	init_mem(void);
+void	init_mem(int ncpu);
 int	emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie,
 	    struct vm_guest_paging *paging);
 
diff --git a/usr.sbin/bhyve/mem.c b/usr.sbin/bhyve/mem.c
--- a/usr.sbin/bhyve/mem.c
+++ b/usr.sbin/bhyve/mem.c
@@ -68,7 +68,8 @@
  * consecutive addresses in a range, it makes sense to cache the
  * result of a lookup.
  */
-static struct mmio_rb_range	*mmio_hint[VM_MAXCPU];
+static struct mmio_rb_range	**mmio_hint;
+static int mmio_ncpu;
 
 static pthread_rwlock_t mmio_rwlock;
 
@@ -352,7 +353,7 @@
 		RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry);
 
 		/* flush Per-vCPU cache */
-		for (i=0; i < VM_MAXCPU; i++) {
+		for (i = 0; i < mmio_ncpu; i++) {
 			if (mmio_hint[i] == entry)
 				mmio_hint[i] = NULL;
 		}
@@ -367,9 +368,11 @@
 }
 
 void
-init_mem(void)
+init_mem(int ncpu)
 {
 
+	mmio_ncpu = ncpu;
+	mmio_hint = calloc(ncpu, sizeof(*mmio_hint));
 	RB_INIT(&mmio_rb_root);
 	RB_INIT(&mmio_rb_fallback);
 	pthread_rwlock_init(&mmio_rwlock, NULL);
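
For context: the patch replaces the fixed-size per-vCPU MMIO hint array, dimensioned by the compile-time constant VM_MAXCPU, with an array allocated at startup from the guest's actual vCPU count (guest_ncpus), and replaces the VM_MAXCPU loop bound with the stored count. Note that init_mem() does not check the calloc() return value. Below is a minimal standalone sketch of the same pattern with a failure check added; the names (struct range, hint_cache, cache_ncpu, cache_init, cache_flush) are illustrative stand-ins, not bhyve identifiers.

#include <stdio.h>
#include <stdlib.h>

struct range;				/* stand-in for struct mmio_rb_range */

static struct range **hint_cache;	/* one cached lookup result per vCPU */
static int cache_ncpu;

/*
 * Size the per-vCPU hint cache from the guest's actual CPU count
 * instead of a compile-time maximum.
 */
static int
cache_init(int ncpu)
{

	cache_ncpu = ncpu;
	hint_cache = calloc(ncpu, sizeof(*hint_cache));
	return (hint_cache == NULL ? -1 : 0);
}

/* Invalidate any per-vCPU hint that points at a removed range. */
static void
cache_flush(struct range *entry)
{
	int i;

	for (i = 0; i < cache_ncpu; i++) {
		if (hint_cache[i] == entry)
			hint_cache[i] = NULL;
	}
}

int
main(void)
{

	if (cache_init(4) != 0) {
		fprintf(stderr, "cache_init: out of memory\n");
		return (1);
	}
	cache_flush(NULL);	/* no-op: all entries start out NULL */
	free(hint_cache);
	return (0);
}

Sizing the cache from guest_ncpus rather than VM_MAXCPU means the hint array no longer ties bhyve to a compile-time vCPU cap, and small guests no longer pay for VM_MAXCPU entries they can never use.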