Changeset View
Changeset View
Standalone View
Standalone View
sys/arm/vmm/vgic.c
Property | Old Value | New Value |
---|---|---|
svn:eol-style | null | native \ No newline at end of property |
svn:keywords | null | FreeBSD=%H \ No newline at end of property |
svn:mime-type | null | text/plain \ No newline at end of property |
/* | |||||
* Copyright (C) 2017 Nicolae-Alexandru Ivan <alexnivan@gmail.com> | |||||
* Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com> | |||||
* All rights reserved. | |||||
* | |||||
* Redistribution and use in source and binary forms, with or without | |||||
* modification, are permitted provided that the following conditions | |||||
* are met: | |||||
* 1. Redistributions of source code must retain the above copyright | |||||
* notice, this list of conditions and the following disclaimer. | |||||
* 2. Redistributions in binary form must reproduce the above copyright | |||||
* notice, this list of conditions and the following disclaimer in the | |||||
* documentation and/or other materials provided with the distribution. | |||||
* | |||||
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND | |||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||||
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE | |||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |||||
* SUCH DAMAGE. | |||||
*/ | |||||
#include <sys/cdefs.h> | |||||
#include <sys/types.h> | |||||
#include <sys/errno.h> | |||||
#include <sys/systm.h> | |||||
#include <sys/bus.h> | |||||
#include <sys/rman.h> | |||||
#include <sys/malloc.h> | |||||
#include <sys/mutex.h> | |||||
#include <sys/smp.h> | |||||
#include <vm/vm.h> | |||||
#include <vm/pmap.h> | |||||
#include <dev/ofw/openfirm.h> | |||||
#include <machine/bus.h> | |||||
#include <machine/bitops.h> | |||||
#include <machine/param.h> | |||||
#include <machine/cpufunc.h> | |||||
#include <machine/pmap.h> | |||||
#include <machine/vmparam.h> | |||||
#include <machine/intr.h> | |||||
#include <machine/vmm.h> | |||||
#include <machine/vmm_instruction_emul.h> | |||||
#include <arm/arm/gic.h> | |||||
#include <arm/arm/gic_common.h> | |||||
#include "hyp.h" | |||||
#include "mmu.h" | |||||
#include "vgic.h" | |||||
#include "arm.h" | |||||
/* Cached softc of the host GIC driver (looked up in vgic_hyp_init()). */
static struct arm_gic_softc *gic_sc;

/* Host mapping of the GICH (Virtual Interface Control) register window. */
static uint64_t virtual_int_ctrl_vaddr;
static uint64_t virtual_int_ctrl_paddr;
static uint32_t virtual_int_ctrl_size;

/* Physical window of the GICV (Virtual CPU Interface) registers. */
static uint64_t virtual_cpu_int_paddr;
static uint32_t virtual_cpu_int_size;

/* Number of list registers implemented by the hardware (from GICH_VTR). */
static uint32_t lr_num;

/* Memory ranges 2/3 and IRQ 0 of the host GIC device node. */
static struct resource_spec arm_vgic_spec[] = {
	{ SYS_RES_MEMORY, 2, RF_ACTIVE }, /* Virtual Interface Control */
	{ SYS_RES_MEMORY, 3, RF_ACTIVE }, /* Virtual CPU interface */
	{ SYS_RES_IRQ, 0, RF_ACTIVE }, /* vGIC maintenance interrupt */
	{ -1, 0 }
};

/* Indices into arm_vgic_res[], matching arm_vgic_spec[] order. */
#define VIRTUAL_INTERFACE_CONTROL 0
#define VIRTUAL_CPU_INTERFACE 1
#define MAINTENANCE_INTR 2

static struct resource *arm_vgic_res[3];
/* Cookie returned by bus_setup_intr() for the maintenance interrupt. */
static void *arm_vgic_maintenance_intr_ihl[1];

/* Forward declarations. */
static void vgic_update_state(struct hyp *hyp);
static void vgic_retire_disabled_irqs(struct hypctx *hypctx);
static void vgic_dispatch_sgi(struct hypctx *hypctx);
/*
 * Expand a compressed 16-bit trigger-configuration word into GICD_ICFGR
 * layout: bit i of the input becomes the odd bit (2*i + 1) of the result,
 * i.e. the edge/level bit of the 2-bit per-IRQ config field.
 */
static uint32_t vgic_dist_conf_expand(uint16_t val)
{
	uint32_t out;
	int bit;

	out = 0;
	for (bit = 0; bit < 16; bit++) {
		if (val & (1u << bit))
			out |= 1u << (2 * bit + 1);
	}
	return out;
}
/*
 * Inverse of vgic_dist_conf_expand(): collapse a GICD_ICFGR-layout word
 * back to 16 bits by keeping only the odd (edge/level) bit of each 2-bit
 * per-IRQ field.
 */
static uint16_t vgic_dist_conf_compress(uint32_t val)
{
	uint32_t out;
	int bit;

	out = 0;
	for (bit = 0; bit < 16; bit++) {
		if (val & (1u << (2 * bit + 1)))
			out |= 1u << bit;
	}
	return out;
}
static int | |||||
vgic_dist_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size, | |||||
void *arg) | |||||
{ | |||||
uint64_t offset; | |||||
uint64_t base_offset; | |||||
uint64_t byte_offset; | |||||
uint64_t mask; | |||||
struct hyp *hyp; | |||||
struct vgic_distributor *dist; | |||||
hyp = vm_get_cookie(vm); | |||||
dist = &hyp->vgic_distributor; | |||||
/* offset of distributor register */ | |||||
offset = gpa - dist->distributor_base; | |||||
base_offset = offset - (offset & 3); | |||||
byte_offset = (offset - base_offset) * 8; | |||||
mask = (1 << size * 8) - 1; | |||||
if (base_offset >= GICD_CTLR && base_offset < GICD_TYPER) { | |||||
*rval = (dist->enabled >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_TYPER && base_offset < GICD_IIDR) { | |||||
*rval = (((VGIC_MAXCPU - 1) << 5) | ((VGIC_NR_IRQ / 32) - 1) >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_IIDR && base_offset < GICD_IGROUPR(0)) { | |||||
*rval = (0x0000043B >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_IGROUPR(0) && base_offset < GICD_ISENABLER(0)) { | |||||
/* irq group control is RAZ */ | |||||
*rval = 0; | |||||
} else if (base_offset >= GICD_ISENABLER(0) && base_offset < GICD_ISENABLER(1)) { | |||||
/* private set-enable irq */ | |||||
*rval = (dist->irq_enabled_prv[vcpuid][0] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ISENABLER(1) && base_offset < GICD_ICENABLER(0)) { | |||||
/* shared set-enable irq */ | |||||
*rval = (dist->irq_enabled_shr[(base_offset - GICD_ISENABLER(1)) / sizeof(uint32_t)] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ICENABLER(0) && base_offset < GICD_ICENABLER(1)) { | |||||
/* private clear-enable irq */ | |||||
*rval = (dist->irq_enabled_prv[vcpuid][0] >> byte_offset) & mask; | |||||
} else if (offset >= GICD_ICENABLER(1) && offset < GICD_ISPENDR(0)) { | |||||
/* shared clear-enable irq */ | |||||
*rval = (dist->irq_enabled_shr[(base_offset - GICD_ICENABLER(1)) / sizeof(uint32_t)] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ISPENDR(0) && base_offset < GICD_ISPENDR(1)) { | |||||
/* private set-pending irq */ | |||||
*rval = (dist->irq_state_prv[vcpuid][0] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ISPENDR(1) && base_offset < GICD_ICPENDR(0)) { | |||||
/* shared set-pending irq */ | |||||
*rval = (dist->irq_state_shr[(base_offset - GICD_ISPENDR(1)) / sizeof(uint32_t)] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ICPENDR(0) && base_offset < GICD_ICPENDR(1)) { | |||||
/* private clear-pending irq */ | |||||
*rval = (dist->irq_state_prv[vcpuid][0] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ICPENDR(1) && base_offset < GICD_ICACTIVER(0)) { | |||||
/* shared clear-pending irq */ | |||||
*rval = (dist->irq_state_shr[(base_offset - GICD_ICPENDR(1)) / sizeof(uint32_t)] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ISACTIVER(0) && base_offset < GICD_IPRIORITYR(0)) { | |||||
/* active irq is RAZ */ | |||||
*rval = 0; | |||||
} else if (base_offset >= GICD_ITARGETSR(0) && base_offset < GICD_ITARGETSR(8)) { | |||||
/* target for banked interrupts is read-only and returns the processor reading this register */ | |||||
*rval = (1 << vcpuid); | |||||
*rval |= *rval << 8; | |||||
*rval |= *rval << 16; | |||||
*rval = (*rval >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ITARGETSR(8) && base_offset < GICD_ICFGR(0)) { | |||||
/* target for shared irqs */ | |||||
*rval = (dist->irq_target_shr[(base_offset - GICD_ITARGETSR(8)) / sizeof(uint32_t)] >> byte_offset) & mask; | |||||
} else if (base_offset >= GICD_ICFGR(0) && base_offset < GICD_ICFGR(1)) { | |||||
/* private configure irq */ | |||||
if (offset & 2) { | |||||
*rval = (vgic_dist_conf_expand(dist->irq_conf_prv[vcpuid][0] >> 16) >> byte_offset) & mask; | |||||
} else { | |||||
*rval = (vgic_dist_conf_expand(dist->irq_conf_prv[vcpuid][0] & 0xffff) >> byte_offset) & mask; | |||||
} | |||||
} else if (base_offset >= GICD_ICFGR(1) && base_offset < GICD_SGIR(0)) { | |||||
/* shared configure irq */ | |||||
if (offset & 2) { | |||||
*rval = (vgic_dist_conf_expand(dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] >> 16) >> byte_offset) & mask; | |||||
} else { | |||||
*rval = (vgic_dist_conf_expand(dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] & 0xffff) >> byte_offset) & mask; | |||||
} | |||||
} | |||||
printf("%s on cpu: %d with gpa: %llx size: %x\n", __func__, vcpuid, gpa, size); | |||||
return (0); | |||||
} | |||||
static int | |||||
vgic_dist_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size, | |||||
void *arg) | |||||
{ | |||||
uint64_t offset; | |||||
uint64_t base_offset; | |||||
uint64_t byte_offset; | |||||
uint64_t mask; | |||||
struct hyp *hyp; | |||||
struct vgic_distributor *dist; | |||||
hyp = vm_get_cookie(vm); | |||||
dist = &hyp->vgic_distributor; | |||||
offset = gpa - dist->distributor_base; | |||||
base_offset = offset - (offset & 3); | |||||
byte_offset = (offset - base_offset) * 8; | |||||
mask = (1 << size * 8) - 1; | |||||
if (base_offset >= GICD_CTLR && base_offset < GICD_TYPER) { | |||||
dist->enabled = ((val & mask) << byte_offset) & 1; | |||||
} else if (base_offset >= GICD_IGROUPR(0) && base_offset < GICD_ISENABLER(0)) { | |||||
/* irq group control is WI */ | |||||
} else if (base_offset >= GICD_ISENABLER(0) && base_offset < GICD_ISENABLER(1)) { | |||||
/* private set-enable irq */ | |||||
dist->irq_enabled_prv[vcpuid][0] |= (val & mask) << byte_offset; | |||||
} else if (base_offset >= GICD_ISENABLER(1) && base_offset < GICD_ICENABLER(0)) { | |||||
/* shared set-enable irq */ | |||||
dist->irq_enabled_shr[(base_offset - GICD_ISENABLER(1)) / sizeof(uint32_t)] |= (val & mask) << byte_offset; | |||||
} else if (base_offset >= GICD_ICENABLER(0) && base_offset < GICD_ICENABLER(1)) { | |||||
/* private clear-enable irq */ | |||||
dist->irq_enabled_prv[vcpuid][0] &= ~((val & mask) << byte_offset); | |||||
vgic_retire_disabled_irqs(&hyp->ctx[vcpuid]); | |||||
} else if (offset >= GICD_ICENABLER(1) && offset < GICD_ISPENDR(0)) { | |||||
/* shared clear-enable irq */ | |||||
dist->irq_enabled_shr[(base_offset - GICD_ICENABLER(1)) / sizeof(uint32_t)] &= ~((val & mask) << byte_offset); | |||||
vgic_retire_disabled_irqs(&hyp->ctx[vcpuid]); | |||||
} else if (base_offset >= GICD_ISPENDR(0) && base_offset < GICD_ISPENDR(1)) { | |||||
/* private set-pending irq */ | |||||
dist->irq_state_prv[vcpuid][0] |= (val & mask) << byte_offset; | |||||
} else if (base_offset >= GICD_ISPENDR(1) && base_offset < GICD_ICPENDR(0)) { | |||||
/* shared set-pending irq */ | |||||
dist->irq_state_shr[(base_offset - GICD_ISPENDR(1)) / sizeof(uint32_t)] |= (val & mask) << byte_offset; | |||||
} else if (base_offset >= GICD_ICPENDR(0) && base_offset < GICD_ICPENDR(1)) { | |||||
/* private clear-pending irq */ | |||||
dist->irq_state_prv[vcpuid][0] &= ~((val & mask) << byte_offset); | |||||
} else if (base_offset >= GICD_ICPENDR(1) && base_offset < GICD_ICACTIVER(0)) { | |||||
/* shared clear-pending irq */ | |||||
dist->irq_state_shr[(base_offset - GICD_ICPENDR(1)) / sizeof(uint32_t)] &= ~((val & mask) << byte_offset); | |||||
} else if (base_offset >= GICD_ISACTIVER(0) && base_offset < GICD_IPRIORITYR(0)) { | |||||
/* active irq is WI */ | |||||
} else if (base_offset >= GICD_ITARGETSR(0) && base_offset < GICD_ITARGETSR(8)) { | |||||
/* target for banked interrupts is WI */ | |||||
} else if (base_offset >= GICD_ITARGETSR(8) && base_offset < GICD_ICFGR(0)) { | |||||
/* target for shared irqs */ | |||||
dist->irq_target_shr[(base_offset - GICD_ITARGETSR(8)) / sizeof(uint32_t)] = | |||||
(dist->irq_target_shr[(base_offset - GICD_ITARGETSR(8)) / sizeof(uint32_t)] & ~(mask << byte_offset)) | |||||
| ((val & mask) << byte_offset); | |||||
} else if (base_offset >= GICD_ICFGR(0) && base_offset < GICD_ICFGR(1)) { | |||||
/* private configure irq */ | |||||
if (offset < 4) { | |||||
dist->irq_conf_prv[vcpuid][0] |= ~0U; | |||||
goto end; | |||||
} | |||||
if (offset & 2) { | |||||
val = (vgic_dist_conf_expand(dist->irq_conf_prv[vcpuid][0] >> 16) & ~(mask << byte_offset)) | |||||
| ((val & mask) << byte_offset); | |||||
val = vgic_dist_conf_compress(val); | |||||
dist->irq_conf_prv[vcpuid][0] &= 0xffff; | |||||
dist->irq_conf_prv[vcpuid][0] |= val << 16; | |||||
} else { | |||||
val = (vgic_dist_conf_expand(dist->irq_conf_prv[vcpuid][0] & 0xffff) & ~(mask << byte_offset)) | |||||
| ((val & mask) << byte_offset); | |||||
val = vgic_dist_conf_compress(val); | |||||
dist->irq_conf_prv[vcpuid][0] &= 0xffff << 16; | |||||
dist->irq_conf_prv[vcpuid][0] |= val; | |||||
} | |||||
} else if (base_offset >= GICD_ICFGR(1) && base_offset < GICD_SGIR(0)) { | |||||
/* shared configure irq */ | |||||
if (offset < 4) { | |||||
dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] |= ~0U; | |||||
goto end; | |||||
} | |||||
if (offset & 2) { | |||||
val = (vgic_dist_conf_expand(dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] >> 16) & ~(mask << byte_offset)) | |||||
| ((val & mask) << byte_offset); | |||||
val = vgic_dist_conf_compress(val); | |||||
dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] &= 0xffff; | |||||
dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] |= val << 16; | |||||
} else { | |||||
val = (vgic_dist_conf_expand(dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] & 0xffff) & ~(mask << byte_offset)) | |||||
| ((val & mask) << byte_offset); | |||||
val = vgic_dist_conf_compress(val); | |||||
dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] &= 0xffff << 16; | |||||
dist->irq_conf_shr[(base_offset - GICD_ICFGR(1)) / sizeof(uint32_t) / 2] |= val; | |||||
} | |||||
} else if (base_offset >= GICD_SGIR(0) && base_offset < GICD_SGIR(1)) { | |||||
dist->sgir = (dist->sgir & ~(mask << byte_offset)) | ((val & mask) << byte_offset); | |||||
vgic_dispatch_sgi(&hyp->ctx[vcpuid]); | |||||
} | |||||
end: | |||||
vgic_update_state(hyp); | |||||
printf("%s on cpu: %d with gpa: %llx size: %x with val: %llx\n", __func__, vcpuid, gpa, size, val); | |||||
return (0); | |||||
} | |||||
int | |||||
vgic_emulate_distributor(void *arg, int vcpuid, struct vm_exit *vme, bool *retu) | |||||
{ | |||||
struct hyp *hyp; | |||||
int error; | |||||
hyp = arg; | |||||
if (vme->u.inst_emul.gpa < hyp->vgic_distributor.distributor_base || | |||||
vme->u.inst_emul.gpa > hyp->vgic_distributor.distributor_base + PAGE_SIZE || | |||||
!hyp->vgic_attached) { | |||||
*retu = true; | |||||
return (0); | |||||
} | |||||
*retu = false; | |||||
error = vmm_emulate_instruction(hyp->vm, vcpuid, vme->u.inst_emul.gpa, &vme->u.inst_emul.vie, | |||||
vgic_dist_mmio_read, vgic_dist_mmio_write, retu); | |||||
return (error); | |||||
} | |||||
int | |||||
vgic_attach(void *arg, uint64_t distributor_paddr, uint64_t cpu_int_paddr) | |||||
{ | |||||
struct hyp *hyp; | |||||
struct hypctx *hypctx; | |||||
int i; | |||||
hyp = arg; | |||||
/* | |||||
* Set the distributor address which will be | |||||
* emulated using the MMIO infrasctructure | |||||
* */ | |||||
hyp->vgic_distributor.distributor_base = distributor_paddr; | |||||
hyp->vgic_distributor.cpu_int_base = cpu_int_paddr; | |||||
hyp->vgic_attached = true; | |||||
/* | |||||
* Set the Virtual Interface Control address to | |||||
* save/restore registers at context switch. | |||||
* Also set the number of LRs | |||||
* */ | |||||
for (i = 0; i < VM_MAXCPU; i++) { | |||||
hypctx = &hyp->ctx[i]; | |||||
hypctx->vgic_cpu_int.virtual_int_ctrl = virtual_int_ctrl_vaddr; | |||||
hypctx->vgic_cpu_int.lr_num = lr_num; | |||||
hypctx->vgic_cpu_int.hcr = GICH_HCR_EN; | |||||
hypctx->vgic_cpu_int.vmcr = 0; | |||||
} | |||||
/* Map the CPU Interface over the Virtual CPU Interface */ | |||||
lpae_vmmmap_set(arg, | |||||
(lpae_vm_vaddr_t)cpu_int_paddr, | |||||
(lpae_vm_paddr_t)virtual_cpu_int_paddr, | |||||
virtual_cpu_int_size, | |||||
VM_PROT_READ | VM_PROT_WRITE); | |||||
return (0); | |||||
} | |||||
static int | |||||
vgic_bitmap_get_irq_val(uint32_t *irq_prv, uint32_t *irq_shr, int irq) | |||||
{ | |||||
if (irq < VGIC_NR_PRV_IRQ) | |||||
return test_bit(irq, irq_prv); | |||||
return test_bit(irq - VGIC_NR_PRV_IRQ, irq_shr); | |||||
} | |||||
static void | |||||
vgic_bitmap_set_irq_val(uint32_t *irq_prv, uint32_t *irq_shr, int irq, int val) | |||||
{ | |||||
uint32_t *reg; | |||||
if (irq < VGIC_NR_PRV_IRQ) { | |||||
reg = irq_prv; | |||||
} else { | |||||
reg = irq_shr; | |||||
irq -= VGIC_NR_PRV_IRQ; | |||||
} | |||||
if (val) | |||||
set_bit(irq, reg); | |||||
else | |||||
clear_bit(irq, reg); | |||||
} | |||||
#define VGIC_CFG_LEVEL 0 | |||||
#define VGIC_CFG_EDGE 1 | |||||
static bool | |||||
vgic_irq_is_edge(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
int irq_val; | |||||
irq_val = vgic_bitmap_get_irq_val(vgic_distributor->irq_conf_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_conf_shr, irq); | |||||
return irq_val == VGIC_CFG_EDGE; | |||||
} | |||||
static int | |||||
vgic_irq_is_enabled(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
return vgic_bitmap_get_irq_val(vgic_distributor->irq_enabled_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_enabled_shr, irq); | |||||
} | |||||
static int | |||||
vgic_irq_is_active(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
return vgic_bitmap_get_irq_val(vgic_distributor->irq_active_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_active_shr, irq); | |||||
} | |||||
static void | |||||
vgic_irq_set_active(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
vgic_bitmap_set_irq_val(vgic_distributor->irq_active_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_active_shr, irq, 1); | |||||
} | |||||
static void | |||||
vgic_irq_clear_active(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
vgic_bitmap_set_irq_val(vgic_distributor->irq_active_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_active_shr, irq, 0); | |||||
} | |||||
static int | |||||
vgic_dist_irq_is_pending(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
return vgic_bitmap_get_irq_val(vgic_distributor->irq_state_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_state_shr, irq); | |||||
} | |||||
static void | |||||
vgic_dist_irq_set(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
vgic_bitmap_set_irq_val(vgic_distributor->irq_state_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_state_shr, irq, 1); | |||||
} | |||||
static void | |||||
vgic_dist_irq_clear(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
vgic_bitmap_set_irq_val(vgic_distributor->irq_state_prv[hypctx->vcpu], | |||||
vgic_distributor->irq_state_shr, irq, 0); | |||||
} | |||||
static void | |||||
vgic_cpu_irq_set(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_cpu_int *vgic_cpu_int = &hypctx->vgic_cpu_int; | |||||
if (irq < VGIC_NR_PRV_IRQ) | |||||
set_bit(irq, vgic_cpu_int->pending_prv); | |||||
else | |||||
set_bit(irq - VGIC_NR_PRV_IRQ, vgic_cpu_int->pending_shr); | |||||
} | |||||
static void | |||||
vgic_cpu_irq_clear(struct hypctx *hypctx, int irq) | |||||
{ | |||||
struct vgic_cpu_int *vgic_cpu_int = &hypctx->vgic_cpu_int; | |||||
if (irq < VGIC_NR_PRV_IRQ) | |||||
clear_bit(irq, vgic_cpu_int->pending_prv); | |||||
else | |||||
clear_bit(irq - VGIC_NR_PRV_IRQ, vgic_cpu_int->pending_shr); | |||||
} | |||||
/*
 * Recompute this vCPU's pending-and-enabled bitmaps from the distributor
 * state. Returns non-zero when at least one private or shared interrupt
 * is deliverable.
 */
static int
compute_pending_for_cpu(struct hyp *hyp, int vcpu)
{
	struct vgic_distributor *vgic_distributor = &hyp->vgic_distributor;
	struct vgic_cpu_int *vgic_cpu_int = &hyp->ctx[vcpu].vgic_cpu_int;
	uint32_t *pending, *enabled, *pend_percpu, *pend_shared, *target;
	uint32_t pending_private, pending_shared;

	pend_percpu = vgic_cpu_int->pending_prv;
	pend_shared = vgic_cpu_int->pending_shr;

	/* Private interrupts (SGI/PPI): deliverable = pending & enabled. */
	pending = vgic_distributor->irq_state_prv[vcpu];
	enabled = vgic_distributor->irq_enabled_prv[vcpu];
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRV_IRQ);

	/* Shared interrupts (SPI): also mask by the target register. */
	pending = vgic_distributor->irq_state_shr;
	enabled = vgic_distributor->irq_enabled_shr;
	target = vgic_distributor->irq_target_shr;
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHR_IRQ);
	/*
	 * NOTE(review): irq_target_shr is written as per-IRQ CPU-target
	 * bytes in the MMIO handler, but is and-ed here as a plain bitmap —
	 * confirm the intended layout (a per-CPU target bitmap?).
	 */
	bitmap_and(pend_shared, pend_shared, target, VGIC_NR_SHR_IRQ);

	/* find_first_bit returns >= the bitmap length when no bit is set. */
	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRV_IRQ);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHR_IRQ);
	return (pending_private < VGIC_NR_PRV_IRQ ||
		pending_shared < VGIC_NR_SHR_IRQ);
}
static void | |||||
vgic_dispatch_sgi(struct hypctx *hypctx) | |||||
{ | |||||
struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
// TODO Get actual number of cpus on current machine | |||||
int vcpu_num = VM_MAXCPU; | |||||
int sgi, mode, cpu; | |||||
uint8_t targets; | |||||
sgi = vgic_distributor->sgir & 0xf; | |||||
targets = (vgic_distributor->sgir >> 16) & 0xff; | |||||
mode = (vgic_distributor->sgir >> 24) & 3; | |||||
switch (mode) { | |||||
case 0: | |||||
if (!targets) | |||||
return; | |||||
case 1: | |||||
targets = ((1 << vcpu_num) - 1) & ~(1 << hypctx->vcpu) & 0xff; | |||||
break; | |||||
case 2: | |||||
targets = 1 << hypctx->vcpu; | |||||
break; | |||||
} | |||||
for (cpu = 0; cpu < vcpu_num; ++cpu) { | |||||
if ((targets >> cpu) & 1) { | |||||
vgic_dist_irq_set(hypctx, sgi); | |||||
vgic_distributor->irq_sgi_source[cpu][sgi] |= 1 << hypctx->vcpu; | |||||
//printf("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c); | |||||
} | |||||
} | |||||
} | |||||
/*
 * Recompute irq_pending_on_cpu for every vCPU; called after each
 * distributor register write.
 */
static void
vgic_update_state(struct hyp *hyp)
{
	struct vgic_distributor *vgic_distributor = &hyp->vgic_distributor;
	int cpu;

	//mtx_lock_spin(&vgic_distributor->distributor_lock);

	if (!vgic_distributor->enabled) {
		/*
		 * NOTE(review): flags vCPU 0 as pending while the
		 * distributor is disabled — confirm this is intentional.
		 */
		set_bit(0, &vgic_distributor->irq_pending_on_cpu);
		goto end;
	}

	// TODO Get actual number of cpus on current machine
	for (cpu = 0; cpu < VM_MAXCPU; ++cpu) {
		if (compute_pending_for_cpu(hyp, cpu)) {
			printf("CPU%d has pending interrupts\n", cpu);
			set_bit(cpu, &vgic_distributor->irq_pending_on_cpu);
		}
	}

end:
	;//mtx_unlock_spin(&vgic_distributor->distributor_lock);
}
/* Extract the requesting-CPU field from a list register value. */
#define LR_CPUID(lr) \
	(((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
/* Build a pending list-register entry for (source CPU, virtual IRQ). */
#define MK_LR_PEND(src, irq) \
	(GICH_LR_PENDING | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
/*
 * Drop list-register entries whose interrupt has since been disabled in
 * the distributor: free the LR, clear its state bits and retire any
 * active state.
 */
static void
vgic_retire_disabled_irqs(struct hypctx *hypctx)
{
	struct vgic_cpu_int *vgic_cpu_int = &hypctx->vgic_cpu_int;
	int lr_idx;

	for_each_set_bit(lr_idx, (uint32_t *)vgic_cpu_int->lr_used,
			vgic_cpu_int->lr_num) {
		int irq = vgic_cpu_int->lr[lr_idx] & GICH_LR_VIRTID;

		if (!vgic_irq_is_enabled(hypctx, irq)) {
			vgic_cpu_int->irq_to_lr[irq] = LR_EMPTY;
			clear_bit(lr_idx, (uint32_t *)vgic_cpu_int->lr_used);
			/* Wipe the pending/active state bits in the LR. */
			vgic_cpu_int->lr[lr_idx] &= ~GICH_LR_STATE;
			if (vgic_irq_is_active(hypctx, irq))
				vgic_irq_clear_active(hypctx, irq);
		}
	}
}
/*
 * Insert (or merge) a pending interrupt into a list register.
 * sgi_source_cpu identifies the requesting CPU for SGIs (0 for hardware
 * interrupts). Returns false when no free list register is available.
 */
static bool
vgic_queue_irq(struct hypctx *hypctx, uint8_t sgi_source_cpu, int irq)
{
	struct vgic_cpu_int *vgic_cpu_int = &hypctx->vgic_cpu_int;
	int lr_idx;

	//printf("Queue IRQ%d\n", irq);

	lr_idx = vgic_cpu_int->irq_to_lr[irq];

	/* Already resident in an LR from the same source: just re-pend it. */
	if (lr_idx != LR_EMPTY &&
	    (LR_CPUID(vgic_cpu_int->lr[lr_idx]) == sgi_source_cpu)) {
		//printf("LR%d piggyback for IRQ%d %x\n", lr, irq, vgic_cpu->vgic_lr[lr]);
		vgic_cpu_int->lr[lr_idx] |= GICH_LR_PENDING;
		goto end;
	}

	/* Otherwise claim the first free list register, if any. */
	lr_idx = find_first_zero_bit((uint32_t *)vgic_cpu_int->lr_used,
			vgic_cpu_int->lr_num);
	if (lr_idx >= vgic_cpu_int->lr_num)
		return false;

	//printf("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu_int->lr[lr_idx] = MK_LR_PEND(sgi_source_cpu, irq);
	vgic_cpu_int->irq_to_lr[irq] = lr_idx;
	set_bit(lr_idx, (uint32_t *)vgic_cpu_int->lr_used);

end:
	/* Level-triggered interrupts need an EOI maintenance interrupt. */
	if (!vgic_irq_is_edge(hypctx, irq))
		vgic_cpu_int->lr[lr_idx] |= GICH_LR_EOI;
	return true;
}
/*
 * Queue a pending SGI, producing one list-register entry per recorded
 * source CPU. Returns true once every source has been delivered and the
 * pending state is dropped; false when some sources still await an LR.
 */
static bool
vgic_queue_sgi(struct hypctx *hypctx, int irq)
{
	struct vgic_distributor *vgic_distributor = &hypctx->hyp->vgic_distributor;
	uint8_t source, cpu;

	source = vgic_distributor->irq_sgi_source[hypctx->vcpu][irq];

	for_each_set_bit(cpu, (uint32_t *)&source, VGIC_MAXCPU) {
		if (vgic_queue_irq(hypctx, cpu, irq))
			clear_bit(cpu, (uint32_t *)&source);
	}

	/* Write back whichever sources could not be queued. */
	vgic_distributor->irq_sgi_source[hypctx->vcpu][irq] = source;

	if (!source) {
		vgic_dist_irq_clear(hypctx, irq);
		vgic_cpu_irq_clear(hypctx, irq);
		return true;
	}
	return false;
}
/*
 * Queue a pending hardware (non-SGI) interrupt into a list register.
 * Returns true when the interrupt is (or already was) queued; false when
 * no list register was free.
 */
static bool
vgic_queue_hwirq(struct hypctx *hypctx, int irq)
{
	/* Already active means it is already resident in an LR. */
	if (vgic_irq_is_active(hypctx, irq))
		return true;

	if (!vgic_queue_irq(hypctx, 0, irq))
		return false;

	if (vgic_irq_is_edge(hypctx, irq)) {
		/* Edge-triggered: the pending state is consumed now. */
		vgic_dist_irq_clear(hypctx, irq);
		vgic_cpu_irq_clear(hypctx, irq);
	} else {
		/* Level-triggered: stays active until the guest EOIs. */
		vgic_irq_set_active(hypctx, irq);
	}
	return true;
}
/*
 * Process maintenance-interrupt status after a guest exit. For every
 * list register whose EOI bit fired, retire the interrupt's active state
 * and re-flag it pending if the distributor still holds it. Returns true
 * when a level interrupt remains pending.
 */
static bool
vgic_process_maintenance(struct hypctx *hypctx)
{
	struct vgic_cpu_int *vgic_cpu_int = &hypctx->vgic_cpu_int;
	int lr_idx, irq;
	bool level_pending = false;

	//printf("MISR = %08x\n", vgic_cpu_int->misr);

	if (vgic_cpu_int->misr & GICH_MISR_EOI) {
		/* Walk the LRs flagged in the EOI status register. */
		for_each_set_bit(lr_idx, (uint32_t *)&vgic_cpu_int->eisr,
				vgic_cpu_int->lr_num) {
			irq = vgic_cpu_int->lr[lr_idx] & GICH_LR_VIRTID;

			vgic_irq_clear_active(hypctx, irq);
			vgic_cpu_int->lr[lr_idx] &= ~GICH_LR_EOI;

			if (vgic_dist_irq_is_pending(hypctx, irq)) {
				/* Source still asserted: keep it pending. */
				vgic_cpu_irq_set(hypctx, irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(hypctx, irq);
			}
		}
	}

	/* Underflow serviced: stop requesting underflow interrupts. */
	if (vgic_cpu_int->misr & GICH_MISR_U)
		vgic_cpu_int->hcr &= ~GICH_HCR_UIE;

	return level_pending;
}
/*
 * Flush distributor pending state into the list registers before
 * entering the guest on this vCPU. When the LRs overflow, request an
 * underflow maintenance interrupt so queuing can resume later.
 */
void
vgic_flush_hwstate(void *arg)
{
	struct hypctx *hypctx;
	struct vgic_cpu_int *vgic_cpu_int;
	struct vgic_distributor *vgic_distributor;
	int i, overflow = 0;

	hypctx = arg;
	vgic_cpu_int = &hypctx->vgic_cpu_int;
	vgic_distributor = &hypctx->hyp->vgic_distributor;

	//printf("vgic_flush_hwstate\n");

	//mtx_lock_spin(&vgic_distributor->distributor_lock);

	if (!vgic_vcpu_pending_irq(hypctx)) {
		//printf("CPU%d has no pending interrupt\n", hypctx->vcpu);
		goto end;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu_int->pending_prv, VGIC_NR_SGI) {
		if (!vgic_queue_sgi(hypctx, i))
			overflow = 1;
	}

	/* PPIs (continues from the SGI scan position) */
	for_each_set_bit_from(i, vgic_cpu_int->pending_prv, VGIC_NR_PRV_IRQ) {
		if (!vgic_queue_hwirq(hypctx, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu_int->pending_shr, VGIC_NR_SHR_IRQ) {
		if (!vgic_queue_hwirq(hypctx, i + VGIC_NR_PRV_IRQ))
			overflow = 1;
	}

end:
	if (overflow) {
		/* Out of list registers: ask for an underflow interrupt. */
		vgic_cpu_int->hcr |= GICH_HCR_UIE;
	} else {
		vgic_cpu_int->hcr &= ~GICH_HCR_UIE;
		clear_bit(hypctx->vcpu, &vgic_distributor->irq_pending_on_cpu);
	}
	//mtx_unlock_spin(&vgic_distributor->distributor_lock);
}
/*
 * Fold list-register state back into the distributor after a guest exit:
 * process maintenance status, recycle emptied LRs, and re-flag this vCPU
 * when interrupts remain outstanding.
 */
void
vgic_sync_hwstate(void *arg)
{
	struct hypctx *hypctx;
	struct vgic_cpu_int *vgic_cpu_int;
	struct vgic_distributor *vgic_distributor;
	int lr_idx, pending, irq;
	bool level_pending;

	hypctx = arg;
	vgic_cpu_int = &hypctx->vgic_cpu_int;
	vgic_distributor = &hypctx->hyp->vgic_distributor;

	//printf("vgic_sync_hwstate\n");

	level_pending = vgic_process_maintenance(hypctx);

	/* Release LRs the guest has fully consumed (set in ELSR). */
	for_each_set_bit(lr_idx, (uint32_t *)&vgic_cpu_int->elsr,
			vgic_cpu_int->lr_num) {
		if (!test_and_clear_bit(lr_idx, (uint32_t *)vgic_cpu_int->lr_used))
			continue;

		irq = vgic_cpu_int->lr[lr_idx] & GICH_LR_VIRTID;
		vgic_cpu_int->irq_to_lr[irq] = LR_EMPTY;
	}

	/* A zero bit in ELSR means some list register is still occupied. */
	pending = find_first_zero_bit((uint32_t *)&vgic_cpu_int->elsr,
			vgic_cpu_int->lr_num);
	if (level_pending || pending < vgic_cpu_int->lr_num)
		set_bit(hypctx->vcpu, &vgic_distributor->irq_pending_on_cpu);
}
int | |||||
vgic_vcpu_pending_irq(void *arg) | |||||
{ | |||||
struct hypctx *hypctx; | |||||
struct vgic_distributor *vgic_distributor; | |||||
hypctx = arg; | |||||
vgic_distributor = &hypctx->hyp->vgic_distributor; | |||||
return test_bit(hypctx->vcpu, &vgic_distributor->irq_pending_on_cpu); | |||||
} | |||||
static int | |||||
vgic_maintenance_intr(void *arg) | |||||
{ | |||||
static struct arm_gic_softc *sc; | |||||
int maintenance_intr; | |||||
sc = (struct arm_gic_softc *)arg; | |||||
maintenance_intr = gic_h_read_4(sc, GICH_MISR); | |||||
//printf("%s: %x\n",__func__, maintenance_intr); | |||||
return (FILTER_HANDLED); | |||||
} | |||||
int | |||||
vgic_hyp_init(void) | |||||
{ | |||||
int error; | |||||
if (!(gic_sc = get_arm_gic_sc())) { | |||||
//printf("vgic_hyp_init: GIC no present\n"); | |||||
return (ENXIO); | |||||
} | |||||
if (bus_alloc_resources(gic_sc->gic_dev, arm_vgic_spec, arm_vgic_res)) { | |||||
//printf("vgic_hyp_init: Could not allocate IRQ resource\n"); | |||||
return (ENXIO); | |||||
} | |||||
if (arm_vgic_res[VIRTUAL_INTERFACE_CONTROL] == NULL || | |||||
arm_vgic_res[VIRTUAL_CPU_INTERFACE] == NULL) { | |||||
printf("vgic_hyp_init: Virtual CPU interface control" | |||||
" and registers not present in DTS\n"); | |||||
return (ENXIO); | |||||
} | |||||
/* Virtual Interface Control */ | |||||
gic_sc->gic_h_bst = rman_get_bustag(arm_vgic_res[VIRTUAL_INTERFACE_CONTROL]); | |||||
gic_sc->gic_h_bsh = rman_get_bushandle(arm_vgic_res[VIRTUAL_INTERFACE_CONTROL]); | |||||
virtual_int_ctrl_vaddr = (uint64_t)rman_get_virtual(arm_vgic_res[VIRTUAL_INTERFACE_CONTROL]); | |||||
virtual_int_ctrl_paddr = (uint64_t)rman_get_start(arm_vgic_res[VIRTUAL_INTERFACE_CONTROL]); | |||||
virtual_int_ctrl_size = rman_get_size(arm_vgic_res[VIRTUAL_INTERFACE_CONTROL]); | |||||
andrew: These should be moved into the gic driver & `__BUS_ACCESSOR` functions should be added to read… | |||||
mihaiAuthorUnsubmitted Not Done Inline ActionsCan you give me an example of best practice or how should I do this? mihai: Can you give me an example of best practice or how should I do this? | |||||
andrewUnsubmitted Not Done Inline ActionsAdd new ivars to arm_gic_read_ivar in gic.c, then add a __BUS_ACCESSOR macro to gic_common.h. There are two examples there already. These will create static inline functions, so the hw_rev accessor creates gic_get_hw_rev and gic_set_hw_rev. The former returns a u_int, the latter takes one in, however as there is no ivar write function it doesn't write anything. andrew: Add new ivars to `arm_gic_read_ivar` in `gic.c`, then add a `__BUS_ACCESSOR` macro to… | |||||
/* Virtual CPU Interface */ | |||||
virtual_cpu_int_paddr = rman_get_start(arm_vgic_res[VIRTUAL_CPU_INTERFACE]); | |||||
virtual_cpu_int_size = rman_get_size(arm_vgic_res[VIRTUAL_CPU_INTERFACE]); | |||||
lr_num = (gic_h_read_4(gic_sc, GICH_VTR) & 0x3f) + 1; | |||||
/* Register the vGIC maintenance interrupt */ | |||||
error = bus_setup_intr(gic_sc->gic_dev, arm_vgic_res[MAINTENANCE_INTR], INTR_TYPE_CLK, | |||||
vgic_maintenance_intr, NULL, gic_sc, &arm_vgic_maintenance_intr_ihl[0]); | |||||
andrewUnsubmitted Not Done Inline ActionsIs this needed? The handler doesn't seem to do anything useful. andrew: Is this needed? The handler doesn't seem to do anything useful. | |||||
mihaiAuthorUnsubmitted Not Done Inline ActionsThe handler only takes the interrupt. In the VM enter/exit logic we verify the GICH_MISR to see if there was any maintenance interrupt and act accordingly. mihai: The handler only takes the interrupt. In the VM enter/exit logic we verify the GICH_MISR to see… | |||||
if (error) { | |||||
//printf("vgic_hyp_init: Unable to setup maintenance interrupt\n"); | |||||
return (ENXIO); | |||||
} | |||||
lpae_vmmmap_set(NULL, | |||||
(lpae_vm_vaddr_t)virtual_int_ctrl_vaddr, | |||||
(lpae_vm_paddr_t)virtual_int_ctrl_paddr, | |||||
virtual_int_ctrl_size, | |||||
VM_PROT_READ | VM_PROT_WRITE); | |||||
return (0); | |||||
} |
These should be moved into the gic driver & __BUS_ACCESSOR functions should be added to read them.