Changeset View
Changeset View
Standalone View
Standalone View
head/usr.sbin/bhyve/mem.c
Show First 20 Lines • Show All 117 Lines • ▼ Show 20 Lines | #endif | ||||
return (0); | return (0); | ||||
} | } | ||||
#if 0
/*
 * Debug helper: print every range registered in 'rbt' (base, end, name).
 * Compiled out by default.
 *
 * Fix: check the return of pthread_rwlock_rdlock() as well — the unlock
 * path already asserts success, so the lock path should be consistent.
 */
static void
mmio_rb_dump(struct mmio_rb_tree *rbt)
{
	int perror;
	struct mmio_rb_range *np;

	perror = pthread_rwlock_rdlock(&mmio_rwlock);
	assert(perror == 0);
	RB_FOREACH(np, mmio_rb_tree, rbt) {
		printf(" %lx:%lx, %s\n", np->mr_base, np->mr_end,
		    np->mr_param.name);
	}
	perror = pthread_rwlock_unlock(&mmio_rwlock);
	assert(perror == 0);
}
#endif
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare); | RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare); | ||||
/*
 * Handler type invoked by access_memory() once a matching mem_range is
 * found for 'gpa'.  Called with 'mmio_rwlock' read-locked unless the
 * matched range is MEM_F_IMMUTABLE.  Returns 0 on success or an errno
 * value, which access_memory() propagates to its caller.
 */
typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
		       struct mem_range *mr, void *arg);
Show All 19 Lines | mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg) | ||||
return (error); | return (error); | ||||
} | } | ||||
static int | static int | ||||
access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb, | access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb, | ||||
void *arg) | void *arg) | ||||
{ | { | ||||
struct mmio_rb_range *entry; | struct mmio_rb_range *entry; | ||||
int err, immutable; | int err, perror, immutable; | ||||
pthread_rwlock_rdlock(&mmio_rwlock); | pthread_rwlock_rdlock(&mmio_rwlock); | ||||
/* | /* | ||||
* First check the per-vCPU cache | * First check the per-vCPU cache | ||||
*/ | */ | ||||
if (mmio_hint[vcpu] && | if (mmio_hint[vcpu] && | ||||
paddr >= mmio_hint[vcpu]->mr_base && | paddr >= mmio_hint[vcpu]->mr_base && | ||||
paddr <= mmio_hint[vcpu]->mr_end) { | paddr <= mmio_hint[vcpu]->mr_end) { | ||||
entry = mmio_hint[vcpu]; | entry = mmio_hint[vcpu]; | ||||
} else | } else | ||||
entry = NULL; | entry = NULL; | ||||
if (entry == NULL) { | if (entry == NULL) { | ||||
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) { | if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) { | ||||
/* Update the per-vCPU cache */ | /* Update the per-vCPU cache */ | ||||
mmio_hint[vcpu] = entry; | mmio_hint[vcpu] = entry; | ||||
} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) { | } else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) { | ||||
pthread_rwlock_unlock(&mmio_rwlock); | perror = pthread_rwlock_unlock(&mmio_rwlock); | ||||
assert(perror == 0); | |||||
return (ESRCH); | return (ESRCH); | ||||
} | } | ||||
} | } | ||||
assert(entry != NULL); | assert(entry != NULL); | ||||
/* | /* | ||||
* An 'immutable' memory range is guaranteed to be never removed | * An 'immutable' memory range is guaranteed to be never removed | ||||
* so there is no need to hold 'mmio_rwlock' while calling the | * so there is no need to hold 'mmio_rwlock' while calling the | ||||
* handler. | * handler. | ||||
* | * | ||||
* XXX writes to the PCIR_COMMAND register can cause register_mem() | * XXX writes to the PCIR_COMMAND register can cause register_mem() | ||||
* to be called. If the guest is using PCI extended config space | * to be called. If the guest is using PCI extended config space | ||||
* to modify the PCIR_COMMAND register then register_mem() can | * to modify the PCIR_COMMAND register then register_mem() can | ||||
* deadlock on 'mmio_rwlock'. However by registering the extended | * deadlock on 'mmio_rwlock'. However by registering the extended | ||||
* config space window as 'immutable' the deadlock can be avoided. | * config space window as 'immutable' the deadlock can be avoided. | ||||
*/ | */ | ||||
immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE); | immutable = (entry->mr_param.flags & MEM_F_IMMUTABLE); | ||||
if (immutable) | if (immutable) { | ||||
pthread_rwlock_unlock(&mmio_rwlock); | perror = pthread_rwlock_unlock(&mmio_rwlock); | ||||
assert(perror == 0); | |||||
} | |||||
err = cb(ctx, vcpu, paddr, &entry->mr_param, arg); | err = cb(ctx, vcpu, paddr, &entry->mr_param, arg); | ||||
if (!immutable) | if (!immutable) { | ||||
pthread_rwlock_unlock(&mmio_rwlock); | perror = pthread_rwlock_unlock(&mmio_rwlock); | ||||
assert(perror == 0); | |||||
} | |||||
return (err); | return (err); | ||||
} | } | ||||
/*
 * Argument bundle threaded through access_memory() for instruction
 * emulation: the decoded instruction ('vie') and the guest paging state
 * in effect when the access trapped.
 */
struct emulate_mem_args {
	struct vie *vie;		/* decoded instruction to emulate */
	struct vm_guest_paging *paging;	/* guest paging mode/state at fault */
};
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size) | ||||
rma.size = size; | rma.size = size; | ||||
return (access_memory(ctx, vcpu, gpa, read_mem_cb, &rma)); | return (access_memory(ctx, vcpu, gpa, read_mem_cb, &rma)); | ||||
} | } | ||||
static int | static int | ||||
register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp) | register_mem_int(struct mmio_rb_tree *rbt, struct mem_range *memp) | ||||
{ | { | ||||
struct mmio_rb_range *entry, *mrp; | struct mmio_rb_range *entry, *mrp; | ||||
int err; | int err, perror; | ||||
err = 0; | err = 0; | ||||
mrp = malloc(sizeof(struct mmio_rb_range)); | mrp = malloc(sizeof(struct mmio_rb_range)); | ||||
if (mrp != NULL) { | if (mrp != NULL) { | ||||
mrp->mr_param = *memp; | mrp->mr_param = *memp; | ||||
mrp->mr_base = memp->base; | mrp->mr_base = memp->base; | ||||
mrp->mr_end = memp->base + memp->size - 1; | mrp->mr_end = memp->base + memp->size - 1; | ||||
pthread_rwlock_wrlock(&mmio_rwlock); | pthread_rwlock_wrlock(&mmio_rwlock); | ||||
if (mmio_rb_lookup(rbt, memp->base, &entry) != 0) | if (mmio_rb_lookup(rbt, memp->base, &entry) != 0) | ||||
err = mmio_rb_add(rbt, mrp); | err = mmio_rb_add(rbt, mrp); | ||||
pthread_rwlock_unlock(&mmio_rwlock); | perror = pthread_rwlock_unlock(&mmio_rwlock); | ||||
assert(perror == 0); | |||||
if (err) | if (err) | ||||
free(mrp); | free(mrp); | ||||
} else | } else | ||||
err = ENOMEM; | err = ENOMEM; | ||||
return (err); | return (err); | ||||
} | } | ||||
Show All 11 Lines | register_mem_fallback(struct mem_range *memp) | ||||
return (register_mem_int(&mmio_rb_fallback, memp)); | return (register_mem_int(&mmio_rb_fallback, memp)); | ||||
} | } | ||||
int | int | ||||
unregister_mem(struct mem_range *memp) | unregister_mem(struct mem_range *memp) | ||||
{ | { | ||||
struct mem_range *mr; | struct mem_range *mr; | ||||
struct mmio_rb_range *entry = NULL; | struct mmio_rb_range *entry = NULL; | ||||
int err, i; | int err, perror, i; | ||||
pthread_rwlock_wrlock(&mmio_rwlock); | pthread_rwlock_wrlock(&mmio_rwlock); | ||||
err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry); | err = mmio_rb_lookup(&mmio_rb_root, memp->base, &entry); | ||||
if (err == 0) { | if (err == 0) { | ||||
mr = &entry->mr_param; | mr = &entry->mr_param; | ||||
assert(mr->name == memp->name); | assert(mr->name == memp->name); | ||||
assert(mr->base == memp->base && mr->size == memp->size); | assert(mr->base == memp->base && mr->size == memp->size); | ||||
assert((mr->flags & MEM_F_IMMUTABLE) == 0); | assert((mr->flags & MEM_F_IMMUTABLE) == 0); | ||||
RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry); | RB_REMOVE(mmio_rb_tree, &mmio_rb_root, entry); | ||||
/* flush Per-vCPU cache */ | /* flush Per-vCPU cache */ | ||||
for (i=0; i < VM_MAXCPU; i++) { | for (i=0; i < VM_MAXCPU; i++) { | ||||
if (mmio_hint[i] == entry) | if (mmio_hint[i] == entry) | ||||
mmio_hint[i] = NULL; | mmio_hint[i] = NULL; | ||||
} | } | ||||
} | } | ||||
pthread_rwlock_unlock(&mmio_rwlock); | perror = pthread_rwlock_unlock(&mmio_rwlock); | ||||
assert(perror == 0); | |||||
if (entry) | if (entry) | ||||
free(entry); | free(entry); | ||||
return (err); | return (err); | ||||
} | } | ||||
void | void | ||||
init_mem(void) | init_mem(void) | ||||
{ | { | ||||
RB_INIT(&mmio_rb_root); | RB_INIT(&mmio_rb_root); | ||||
RB_INIT(&mmio_rb_fallback); | RB_INIT(&mmio_rb_fallback); | ||||
pthread_rwlock_init(&mmio_rwlock, NULL); | pthread_rwlock_init(&mmio_rwlock, NULL); | ||||
} | } |