Index: sys/xen/interface/arch-arm.h =================================================================== --- sys/xen/interface/arch-arm.h +++ sys/xen/interface/arch-arm.h @@ -128,6 +128,8 @@ * * VCPUOP_register_vcpu_info * * VCPUOP_register_runstate_memory_area * + * HYPERVISOR_argo_op + * All generic sub-operations * * Other notes on the ARM ABI: * @@ -191,7 +193,7 @@ #define XEN_GUEST_HANDLE_PARAM(name) __guest_handle_ ## name #define set_xen_guest_handle_raw(hnd, val) \ do { \ - typeof(&(hnd)) _sxghr_tmp = &(hnd); \ + __typeof__(&(hnd)) _sxghr_tmp = &(hnd); \ _sxghr_tmp->q = 0; \ _sxghr_tmp->p = val; \ } while ( 0 ) @@ -267,10 +269,10 @@ /* Return address and mode */ __DECL_REG(pc64, pc32); /* ELR_EL2 */ - uint32_t cpsr; /* SPSR_EL2 */ + uint64_t cpsr; /* SPSR_EL2 */ union { - uint32_t spsr_el1; /* AArch64 */ + uint64_t spsr_el1; /* AArch64 */ uint32_t spsr_svc; /* AArch32 */ }; Index: sys/xen/interface/arch-x86/cpufeatureset.h =================================================================== --- sys/xen/interface/arch-x86/cpufeatureset.h +++ sys/xen/interface/arch-x86/cpufeatureset.h @@ -127,7 +127,7 @@ XEN_CPUFEATURE(DTES64, 1*32+ 2) /* 64-bit Debug Store */ XEN_CPUFEATURE(MONITOR, 1*32+ 3) /* Monitor/Mwait support */ XEN_CPUFEATURE(DSCPL, 1*32+ 4) /* CPL Qualified Debug Store */ -XEN_CPUFEATURE(VMX, 1*32+ 5) /*S Virtual Machine Extensions */ +XEN_CPUFEATURE(VMX, 1*32+ 5) /*h Virtual Machine Extensions */ XEN_CPUFEATURE(SMX, 1*32+ 6) /* Safer Mode Extensions */ XEN_CPUFEATURE(EIST, 1*32+ 7) /* Enhanced SpeedStep */ XEN_CPUFEATURE(TM2, 1*32+ 8) /* Thermal Monitor 2 */ @@ -166,7 +166,7 @@ /* AMD-defined CPU features, CPUID level 0x80000001.ecx, word 3 */ XEN_CPUFEATURE(LAHF_LM, 3*32+ 0) /*A LAHF/SAHF in long mode */ XEN_CPUFEATURE(CMP_LEGACY, 3*32+ 1) /*!A If yes HyperThreading not valid */ -XEN_CPUFEATURE(SVM, 3*32+ 2) /*S Secure virtual machine */ +XEN_CPUFEATURE(SVM, 3*32+ 2) /*h Secure virtual machine */ XEN_CPUFEATURE(EXTAPIC, 3*32+ 3) /* Extended APIC space */ XEN_CPUFEATURE(CR8_LEGACY, 3*32+ 4) /*S CR8 in 32-bit mode */ XEN_CPUFEATURE(ABM, 3*32+ 5) /*A Advanced bit manipulation */ @@ -197,14 +197,14 @@ XEN_CPUFEATURE(TSC_ADJUST, 5*32+ 1) /*S TSC_ADJUST MSR available */ XEN_CPUFEATURE(SGX, 5*32+ 2) /* Software Guard extensions */ XEN_CPUFEATURE(BMI1, 5*32+ 3) /*A 1st bit manipulation extensions */ -XEN_CPUFEATURE(HLE, 5*32+ 4) /*A Hardware Lock Elision */ +XEN_CPUFEATURE(HLE, 5*32+ 4) /*!a Hardware Lock Elision */ XEN_CPUFEATURE(AVX2, 5*32+ 5) /*A AVX2 instructions */ XEN_CPUFEATURE(FDP_EXCP_ONLY, 5*32+ 6) /*! x87 FDP only updated on exception. */ XEN_CPUFEATURE(SMEP, 5*32+ 7) /*S Supervisor Mode Execution Protection */ XEN_CPUFEATURE(BMI2, 5*32+ 8) /*A 2nd bit manipulation extensions */ XEN_CPUFEATURE(ERMS, 5*32+ 9) /*A Enhanced REP MOVSB/STOSB */ XEN_CPUFEATURE(INVPCID, 5*32+10) /*H Invalidate Process Context ID */ -XEN_CPUFEATURE(RTM, 5*32+11) /*A Restricted Transactional Memory */ +XEN_CPUFEATURE(RTM, 5*32+11) /*!A Restricted Transactional Memory */ XEN_CPUFEATURE(PQM, 5*32+12) /* Platform QoS Monitoring */ XEN_CPUFEATURE(NO_FPU_SEL, 5*32+13) /*! 
FPU CS/DS stored as zero */ XEN_CPUFEATURE(MPX, 5*32+14) /*s Memory Protection Extensions */ @@ -217,6 +217,7 @@ XEN_CPUFEATURE(AVX512_IFMA, 5*32+21) /*A AVX-512 Integer Fused Multiply Add */ XEN_CPUFEATURE(CLFLUSHOPT, 5*32+23) /*A CLFLUSHOPT instruction */ XEN_CPUFEATURE(CLWB, 5*32+24) /*A CLWB instruction */ +XEN_CPUFEATURE(PROC_TRACE, 5*32+25) /* Processor Trace */ XEN_CPUFEATURE(AVX512PF, 5*32+26) /*A AVX-512 Prefetch Instructions */ XEN_CPUFEATURE(AVX512ER, 5*32+27) /*A AVX-512 Exponent & Reciprocal Instrs */ XEN_CPUFEATURE(AVX512CD, 5*32+28) /*A AVX-512 Conflict Detection Instrs */ @@ -246,7 +247,7 @@ XEN_CPUFEATURE(ENQCMD, 6*32+29) /* ENQCMD{,S} instructions */ /* AMD-defined CPU features, CPUID level 0x80000007.edx, word 7 */ -XEN_CPUFEATURE(ITSC, 7*32+ 8) /* Invariant TSC */ +XEN_CPUFEATURE(ITSC, 7*32+ 8) /*a Invariant TSC */ XEN_CPUFEATURE(EFRO, 7*32+10) /* APERF/MPERF Read Only interface */ /* AMD-defined CPU features, CPUID level 0x80000008.ebx, word 8 */ @@ -254,13 +255,27 @@ XEN_CPUFEATURE(RSTR_FP_ERR_PTRS, 8*32+ 2) /*A (F)X{SAVE,RSTOR} always saves/restores FPU Error pointers */ XEN_CPUFEATURE(WBNOINVD, 8*32+ 9) /* WBNOINVD instruction */ XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by AMD) */ +XEN_CPUFEATURE(IBRS, 8*32+14) /* MSR_SPEC_CTRL.IBRS */ +XEN_CPUFEATURE(AMD_STIBP, 8*32+15) /* MSR_SPEC_CTRL.STIBP */ +XEN_CPUFEATURE(IBRS_ALWAYS, 8*32+16) /* IBRS preferred always on */ +XEN_CPUFEATURE(STIBP_ALWAYS, 8*32+17) /* STIBP preferred always on */ +XEN_CPUFEATURE(IBRS_FAST, 8*32+18) /* IBRS preferred over software options */ +XEN_CPUFEATURE(IBRS_SAME_MODE, 8*32+19) /* IBRS provides same-mode protection */ +XEN_CPUFEATURE(NO_LMSL, 8*32+20) /*S EFER.LMSLE no longer supported. */ XEN_CPUFEATURE(AMD_PPIN, 8*32+23) /* Protected Processor Inventory Number */ +XEN_CPUFEATURE(AMD_SSBD, 8*32+24) /* MSR_SPEC_CTRL.SSBD available */ +XEN_CPUFEATURE(VIRT_SSBD, 8*32+25) /* MSR_VIRT_SPEC_CTRL.SSBD */ +XEN_CPUFEATURE(SSB_NO, 8*32+26) /* Hardware not vulnerable to SSB */ +XEN_CPUFEATURE(PSFD, 8*32+28) /* MSR_SPEC_CTRL.PSFD */ /* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */ XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */ XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */ +XEN_CPUFEATURE(FSRM, 9*32+ 4) /*A Fast Short REP MOVS */ +XEN_CPUFEATURE(AVX512_VP2INTERSECT, 9*32+8) /*a VP2INTERSECT{D,Q} insns */ XEN_CPUFEATURE(SRBDS_CTRL, 9*32+ 9) /* MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */ XEN_CPUFEATURE(MD_CLEAR, 9*32+10) /*A VERW clears microarchitectural buffers */ +XEN_CPUFEATURE(RTM_ALWAYS_ABORT, 9*32+11) /*! June 2021 TSX defeaturing in microcode. 
*/ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */ XEN_CPUFEATURE(SERIALIZE, 9*32+14) /*a SERIALIZE insn */ XEN_CPUFEATURE(CET_IBT, 9*32+20) /* CET - Indirect Branch Tracking */ @@ -272,7 +287,15 @@ XEN_CPUFEATURE(SSBD, 9*32+31) /*A MSR_SPEC_CTRL.SSBD available */ /* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */ +XEN_CPUFEATURE(AVX_VNNI, 10*32+ 4) /*A AVX-VNNI Instructions */ XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /*A AVX512 BFloat16 Instructions */ +XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */ +XEN_CPUFEATURE(FSRS, 10*32+11) /*A Fast Short REP STOSB */ +XEN_CPUFEATURE(FSRCS, 10*32+12) /*A Fast Short REP CMPSB/SCASB */ + +/* AMD-defined CPU features, CPUID level 0x80000021.eax, word 11 */ +XEN_CPUFEATURE(LFENCE_DISPATCH, 11*32+ 2) /*A LFENCE always serializing */ +XEN_CPUFEATURE(NSCB, 11*32+ 6) /*A Null Selector Clears Base (and limit too) */ #endif /* XEN_CPUFEATURE */ Index: sys/xen/interface/arch-x86/cpuid.h =================================================================== --- sys/xen/interface/arch-x86/cpuid.h +++ sys/xen/interface/arch-x86/cpuid.h @@ -113,6 +113,10 @@ /* Max. address width in bits taking memory hotplug into account. */ #define XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK (0xffu << 0) -#define XEN_CPUID_MAX_NUM_LEAVES 5 +#define XEN_CPUID_MAX_PV_NUM_LEAVES 5 +#define XEN_CPUID_MAX_HVM_NUM_LEAVES 4 +#define XEN_CPUID_MAX_NUM_LEAVES \ + (XEN_CPUID_MAX_PV_NUM_LEAVES > XEN_CPUID_MAX_HVM_NUM_LEAVES ? \ + XEN_CPUID_MAX_PV_NUM_LEAVES : XEN_CPUID_MAX_HVM_NUM_LEAVES) #endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */ Index: sys/xen/interface/arch-x86/pmu.h =================================================================== --- sys/xen/interface/arch-x86/pmu.h +++ sys/xen/interface/arch-x86/pmu.h @@ -105,7 +105,7 @@ * Processor's registers at the time of interrupt. * WO for hypervisor, RO for guests. 
*/ - struct xen_pmu_regs regs; + xen_pmu_regs_t regs; /* Padding for adding new registers to xen_pmu_regs in the future */ #define XENPMU_REGS_PAD_SZ 64 uint8_t pad[XENPMU_REGS_PAD_SZ]; @@ -132,8 +132,8 @@ * hypervisor into hardware during XENPMU_flush */ union { - struct xen_pmu_amd_ctxt amd; - struct xen_pmu_intel_ctxt intel; + xen_pmu_amd_ctxt_t amd; + xen_pmu_intel_ctxt_t intel; /* * Padding for contexts (fixed parts only, does not include MSR banks Index: sys/xen/interface/arch-x86/xen-mca.h =================================================================== --- sys/xen/interface/arch-x86/xen-mca.h +++ sys/xen/interface/arch-x86/xen-mca.h @@ -112,7 +112,7 @@ uint16_t type; /* structure type */ uint16_t size; /* size of this struct in bytes */ }; - +typedef struct mcinfo_common xen_mcinfo_common_t; #define MC_FLAG_CORRECTABLE (1 << 0) #define MC_FLAG_UNCORRECTABLE (1 << 1) @@ -123,7 +123,7 @@ #define MC_FLAG_MCE (1 << 6) /* contains global x86 mc information */ struct mcinfo_global { - struct mcinfo_common common; + xen_mcinfo_common_t common; /* running domain at the time in error (most likely the impacted one) */ uint16_t mc_domid; @@ -138,7 +138,7 @@ /* contains bank local x86 mc information */ struct mcinfo_bank { - struct mcinfo_common common; + xen_mcinfo_common_t common; uint16_t mc_bank; /* bank nr */ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr on dom0 @@ -156,11 +156,12 @@ uint64_t reg; /* MSR */ uint64_t value; /* MSR value */ }; +typedef struct mcinfo_msr xen_mcinfo_msr_t; /* contains mc information from other * or additional mc MSRs */ struct mcinfo_extended { - struct mcinfo_common common; + xen_mcinfo_common_t common; /* You can fill up to five registers. * If you need more, then use this structure @@ -172,7 +173,7 @@ * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be * useful at present. So expand this array to 32 to leave room. */ - struct mcinfo_msr mc_msr[32]; + xen_mcinfo_msr_t mc_msr[32]; }; /* Recovery Action flags. 
Giving recovery result information to DOM0 */ @@ -208,6 +209,7 @@ uint64_t mfn; uint64_t status; }; +typedef struct page_offline_action xen_page_offline_action_t; struct cpu_offline_action { @@ -216,17 +218,18 @@ uint16_t mc_coreid; uint16_t mc_core_threadid; }; +typedef struct cpu_offline_action xen_cpu_offline_action_t; #define MAX_UNION_SIZE 16 struct mcinfo_recovery { - struct mcinfo_common common; + xen_mcinfo_common_t common; uint16_t mc_bank; /* bank nr */ uint8_t action_flags; uint8_t action_types; union { - struct page_offline_action page_retire; - struct cpu_offline_action cpu_offline; + xen_page_offline_action_t page_retire; + xen_cpu_offline_action_t cpu_offline; uint8_t pad[MAX_UNION_SIZE]; } action_info; }; @@ -279,7 +282,7 @@ uint32_t mc_cache_size; uint32_t mc_cache_alignment; int32_t mc_nmsrvals; - struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE]; + xen_mcinfo_msr_t mc_msrvalues[__MC_MSR_ARRAYSIZE]; }; typedef struct mcinfo_logical_cpu xen_mc_logical_cpu_t; DEFINE_XEN_GUEST_HANDLE(xen_mc_logical_cpu_t); @@ -388,6 +391,7 @@ /* OUT */ XEN_GUEST_HANDLE(xen_mc_logical_cpu_t) info; }; +typedef struct xen_mc_physcpuinfo xen_mc_physcpuinfo_t; #define XEN_MC_msrinject 4 #define MC_MSRINJ_MAXMSRS 8 @@ -399,8 +403,9 @@ domid_t mcinj_domid; /* valid only if MC_MSRINJ_F_GPADDR is present in mcinj_flags */ uint16_t _pad0; - struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS]; + xen_mcinfo_msr_t mcinj_msr[MC_MSRINJ_MAXMSRS]; }; +typedef struct xen_mc_msrinject xen_mc_msrinject_t; /* Flags for mcinj_flags above; bits 16-31 are reserved */ #define MC_MSRINJ_F_INTERPOSE 0x1 @@ -410,6 +415,7 @@ struct xen_mc_mceinject { unsigned int mceinj_cpunr; /* target processor id */ }; +typedef struct xen_mc_mceinject xen_mc_mceinject_t; #if defined(__XEN__) || defined(__XEN_TOOLS__) #define XEN_MC_inject_v2 6 @@ -422,21 +428,22 @@ struct xen_mc_inject_v2 { uint32_t flags; - struct xenctl_bitmap cpumap; + xenctl_bitmap_t cpumap; }; +typedef struct xen_mc_inject_v2 xen_mc_inject_v2_t; #endif struct xen_mc { uint32_t cmd; uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */ union { - struct xen_mc_fetch mc_fetch; - struct xen_mc_notifydomain mc_notifydomain; - struct xen_mc_physcpuinfo mc_physcpuinfo; - struct xen_mc_msrinject mc_msrinject; - struct xen_mc_mceinject mc_mceinject; + xen_mc_fetch_t mc_fetch; + xen_mc_notifydomain_t mc_notifydomain; + xen_mc_physcpuinfo_t mc_physcpuinfo; + xen_mc_msrinject_t mc_msrinject; + xen_mc_mceinject_t mc_mceinject; #if defined(__XEN__) || defined(__XEN_TOOLS__) - struct xen_mc_inject_v2 mc_inject_v2; + xen_mc_inject_v2_t mc_inject_v2; #endif } u; }; Index: sys/xen/interface/arch-x86/xen.h =================================================================== --- sys/xen/interface/arch-x86/xen.h +++ sys/xen/interface/arch-x86/xen.h @@ -143,6 +143,12 @@ * Level == 1: Kernel may enter * Level == 2: Kernel may enter * Level == 3: Everyone may enter + * + * Note: For compatibility with kernels not setting up exception handlers + * early enough, Xen will avoid trying to inject #GP (and hence crash + * the domain) when an RDMSR would require this, but no handler was + * set yet. The precise conditions are implementation specific, and + * new code may not rely on such behavior anyway. 
*/ #define TI_GET_DPL(_ti) ((_ti)->flags & 3) #define TI_GET_IF(_ti) ((_ti)->flags & 4) @@ -304,6 +310,14 @@ XEN_X86_EMU_PIT | XEN_X86_EMU_USE_PIRQ |\ XEN_X86_EMU_VPCI) uint32_t emulation_flags; + +/* + * Select whether to use a relaxed behavior for accesses to MSRs not explicitly + * handled by Xen instead of injecting a #GP to the guest. Note this option + * doesn't allow the guest to read or write to the underlying MSR. + */ +#define XEN_X86_MSR_RELAXED (1u << 0) + uint32_t misc_flags; }; /* Location of online VCPU bitmap. */ Index: sys/xen/interface/argo.h =================================================================== --- sys/xen/interface/argo.h +++ sys/xen/interface/argo.h @@ -67,8 +67,8 @@ typedef struct xen_argo_send_addr { - struct xen_argo_addr src; - struct xen_argo_addr dst; + xen_argo_addr_t src; + xen_argo_addr_t dst; } xen_argo_send_addr_t; typedef struct xen_argo_ring @@ -121,7 +121,7 @@ typedef struct xen_argo_ring_data_ent { - struct xen_argo_addr ring; + xen_argo_addr_t ring; uint16_t flags; uint16_t pad; uint32_t space_required; @@ -132,13 +132,13 @@ { uint32_t nent; uint32_t pad; - struct xen_argo_ring_data_ent data[XEN_FLEX_ARRAY_DIM]; + xen_argo_ring_data_ent_t data[XEN_FLEX_ARRAY_DIM]; } xen_argo_ring_data_t; struct xen_argo_ring_message_header { uint32_t len; - struct xen_argo_addr source; + xen_argo_addr_t source; uint32_t message_type; uint8_t data[XEN_FLEX_ARRAY_DIM]; }; Index: sys/xen/interface/domctl.h =================================================================== --- sys/xen/interface/domctl.h +++ sys/xen/interface/domctl.h @@ -38,7 +38,7 @@ #include "hvm/save.h" #include "memory.h" -#define XEN_DOMCTL_INTERFACE_VERSION 0x00000012 +#define XEN_DOMCTL_INTERFACE_VERSION 0x00000014 /* * NB. xen_domctl.domain is an IN/OUT parameter for this operation. @@ -68,9 +68,11 @@ /* Should this domain be permitted to use the IOMMU? */ #define _XEN_DOMCTL_CDF_iommu 5 #define XEN_DOMCTL_CDF_iommu (1U<<_XEN_DOMCTL_CDF_iommu) +#define _XEN_DOMCTL_CDF_nested_virt 6 +#define XEN_DOMCTL_CDF_nested_virt (1U << _XEN_DOMCTL_CDF_nested_virt) /* Max XEN_DOMCTL_CDF_* constant. Used for ABI checking. */ -#define XEN_DOMCTL_CDF_MAX XEN_DOMCTL_CDF_iommu +#define XEN_DOMCTL_CDF_MAX XEN_DOMCTL_CDF_nested_virt uint32_t flags; @@ -93,6 +95,9 @@ int32_t max_grant_frames; int32_t max_maptrack_frames; + /* Per-vCPU buffer size in bytes. 0 to disable. */ + uint32_t vmtrace_size; + struct xen_arch_domainconfig arch; }; @@ -436,8 +441,9 @@ /* XEN_DOMCTL_irq_permission */ struct xen_domctl_irq_permission { - uint8_t pirq; + uint32_t pirq; uint8_t allow_access; /* flag to specify enable/disable of IRQ access */ + uint8_t pad[3]; }; @@ -730,11 +736,6 @@ XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */ }; -/* XEN_DOMCTL_disable_migrate */ -struct xen_domctl_disable_migrate { - uint32_t disable; /* IN: 1: disable migration and restore */ -}; - /* XEN_DOMCTL_gettscinfo */ /* XEN_DOMCTL_settscinfo */ @@ -1135,6 +1136,39 @@ */ }; +/* XEN_DOMCTL_vmtrace_op: Perform VM tracing operations. */ +struct xen_domctl_vmtrace_op { + uint32_t cmd; /* IN */ + uint32_t vcpu; /* IN */ + uint64_aligned_t key; /* IN - @cmd specific data. */ + uint64_aligned_t value; /* IN/OUT - @cmd specific data. */ + + /* + * General enable/disable of tracing. + * + * XEN_DOMCTL_vmtrace_reset_and_enable is provided as optimisation for + * common usecases, which want to reset status and position information + * when turning tracing back on. 
+ */ +#define XEN_DOMCTL_vmtrace_enable 1 +#define XEN_DOMCTL_vmtrace_disable 2 +#define XEN_DOMCTL_vmtrace_reset_and_enable 3 + + /* Obtain the current output position within the buffer. Fills @value. */ +#define XEN_DOMCTL_vmtrace_output_position 4 + + /* + * Get/Set platform specific configuration. + * + * For Intel Processor Trace, @key/@value are interpreted as MSR + * reads/writes to MSR_RTIT_*, filtered to a safe subset. + */ +#define XEN_DOMCTL_vmtrace_get_option 5 +#define XEN_DOMCTL_vmtrace_set_option 6 +}; +typedef struct xen_domctl_vmtrace_op xen_domctl_vmtrace_op_t; +DEFINE_XEN_GUEST_HANDLE(xen_domctl_vmtrace_op_t); + struct xen_domctl { uint32_t cmd; #define XEN_DOMCTL_createdomain 1 @@ -1194,7 +1228,7 @@ #define XEN_DOMCTL_gethvmcontext_partial 55 #define XEN_DOMCTL_vm_event_op 56 #define XEN_DOMCTL_mem_sharing_op 57 -#define XEN_DOMCTL_disable_migrate 58 +/* #define XEN_DOMCTL_disable_migrate 58 - Obsolete */ #define XEN_DOMCTL_gettscinfo 59 #define XEN_DOMCTL_settscinfo 60 #define XEN_DOMCTL_getpageframeinfo3 61 @@ -1219,12 +1253,14 @@ #define XEN_DOMCTL_vuart_op 81 #define XEN_DOMCTL_get_cpu_policy 82 #define XEN_DOMCTL_set_cpu_policy 83 +#define XEN_DOMCTL_vmtrace_op 84 #define XEN_DOMCTL_gdbsx_guestmemio 1000 #define XEN_DOMCTL_gdbsx_pausevcpu 1001 #define XEN_DOMCTL_gdbsx_unpausevcpu 1002 #define XEN_DOMCTL_gdbsx_domstatus 1003 uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */ domid_t domain; + uint16_t _pad[3]; union { struct xen_domctl_createdomain createdomain; struct xen_domctl_getdomaininfo getdomaininfo; @@ -1244,7 +1280,6 @@ struct xen_domctl_ioport_permission ioport_permission; struct xen_domctl_hypercall_init hypercall_init; struct xen_domctl_settimeoffset settimeoffset; - struct xen_domctl_disable_migrate disable_migrate; struct xen_domctl_tsc_info tsc_info; struct xen_domctl_hvmcontext hvmcontext; struct xen_domctl_hvmcontext_partial hvmcontext_partial; @@ -1279,6 +1314,7 @@ struct xen_domctl_monitor_op monitor_op; struct xen_domctl_psr_alloc psr_alloc; struct xen_domctl_vuart_op vuart_op; + struct xen_domctl_vmtrace_op vmtrace_op; uint8_t pad[128]; } u; }; Index: sys/xen/interface/errno.h =================================================================== --- sys/xen/interface/errno.h +++ sys/xen/interface/errno.h @@ -78,6 +78,7 @@ XEN_ERRNO(EEXIST, 17) /* File exists */ XEN_ERRNO(EXDEV, 18) /* Cross-device link */ XEN_ERRNO(ENODEV, 19) /* No such device */ +XEN_ERRNO(ENOTDIR, 20) /* Not a directory */ XEN_ERRNO(EISDIR, 21) /* Is a directory */ XEN_ERRNO(EINVAL, 22) /* Invalid argument */ XEN_ERRNO(ENFILE, 23) /* File table overflow */ Index: sys/xen/interface/event_channel.h =================================================================== --- sys/xen/interface/event_channel.h +++ sys/xen/interface/event_channel.h @@ -324,16 +324,16 @@ struct evtchn_op { uint32_t cmd; /* enum event_channel_op */ union { - struct evtchn_alloc_unbound alloc_unbound; - struct evtchn_bind_interdomain bind_interdomain; - struct evtchn_bind_virq bind_virq; - struct evtchn_bind_pirq bind_pirq; - struct evtchn_bind_ipi bind_ipi; - struct evtchn_close close; - struct evtchn_send send; - struct evtchn_status status; - struct evtchn_bind_vcpu bind_vcpu; - struct evtchn_unmask unmask; + evtchn_alloc_unbound_t alloc_unbound; + evtchn_bind_interdomain_t bind_interdomain; + evtchn_bind_virq_t bind_virq; + evtchn_bind_pirq_t bind_pirq; + evtchn_bind_ipi_t bind_ipi; + evtchn_close_t close; + evtchn_send_t send; + evtchn_status_t status; + evtchn_bind_vcpu_t bind_vcpu; + 
evtchn_unmask_t unmask; } u; }; typedef struct evtchn_op evtchn_op_t; Index: sys/xen/interface/features.h =================================================================== --- sys/xen/interface/features.h +++ sys/xen/interface/features.h @@ -114,6 +114,20 @@ */ #define XENFEAT_linux_rsdp_unrestricted 15 +/* + * A direct-mapped (or 1:1 mapped) domain is a domain for which its + * local pages have gfn == mfn. If a domain is direct-mapped, + * XENFEAT_direct_mapped is set; otherwise XENFEAT_not_direct_mapped + * is set. + * + * If neither flag is set (e.g. older Xen releases) the assumptions are: + * - not auto_translated domains (x86 only) are always direct-mapped + * - on x86, auto_translated domains are not direct-mapped + * - on ARM, Dom0 is direct-mapped, DomUs are not + */ +#define XENFEAT_not_direct_mapped 16 +#define XENFEAT_direct_mapped 17 + #define XENFEAT_NR_SUBMAPS 1 #endif /* __XEN_PUBLIC_FEATURES_H__ */ Index: sys/xen/interface/gcov.h =================================================================== --- sys/xen/interface/gcov.h +++ /dev/null @@ -1,113 +0,0 @@ -/****************************************************************************** - * gcov.h - * - * Coverage structures exported by Xen. - * Structure is different from Gcc one. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Copyright (c) 2013, Citrix Systems R&D Ltd. - */ - -#ifndef __XEN_PUBLIC_GCOV_H__ -#define __XEN_PUBLIC_GCOV_H__ __XEN_PUBLIC_GCOV_H__ - -#define XENCOV_COUNTERS 5 -#define XENCOV_TAG_BASE 0x58544300u -#define XENCOV_TAG_FILE (XENCOV_TAG_BASE+0x46u) -#define XENCOV_TAG_FUNC (XENCOV_TAG_BASE+0x66u) -#define XENCOV_TAG_COUNTER(n) (XENCOV_TAG_BASE+0x30u+((n)&0xfu)) -#define XENCOV_TAG_END (XENCOV_TAG_BASE+0x2eu) -#define XENCOV_IS_TAG_COUNTER(n) \ - ((n) >= XENCOV_TAG_COUNTER(0) && (n) < XENCOV_TAG_COUNTER(XENCOV_COUNTERS)) -#define XENCOV_COUNTER_NUM(n) ((n)-XENCOV_TAG_COUNTER(0)) - -/* - * The main structure for the blob is - * BLOB := FILE.. END - * FILE := TAG_FILE VERSION STAMP FILENAME COUNTERS FUNCTIONS - * FILENAME := LEN characters - * characters are padded to 32 bit - * LEN := 32 bit value - * COUNTERS := TAG_COUNTER(n) NUM COUNTER.. - * NUM := 32 bit valie - * COUNTER := 64 bit value - * FUNCTIONS := TAG_FUNC NUM FUNCTION.. 
- * FUNCTION := IDENT CHECKSUM NUM_COUNTERS - * - * All tagged structures are aligned to 8 bytes - */ - -/** - * File information - * Prefixed with XENCOV_TAG_FILE and a string with filename - * Aligned to 8 bytes - */ -struct xencov_file -{ - uint32_t tag; /* XENCOV_TAG_FILE */ - uint32_t version; - uint32_t stamp; - uint32_t fn_len; - char filename[1]; -}; - -/** - * Counters information - * Prefixed with XENCOV_TAG_COUNTER(n) where n is 0..(XENCOV_COUNTERS-1) - * Aligned to 8 bytes - */ -struct xencov_counter -{ - uint32_t tag; /* XENCOV_TAG_COUNTER(n) */ - uint32_t num; - uint64_t values[1]; -}; - -/** - * Information for each function - * Number of counter is equal to the number of counter structures got before - */ -struct xencov_function -{ - uint32_t ident; - uint32_t checksum; - uint32_t num_counters[1]; -}; - -/** - * Information for all functions - * Aligned to 8 bytes - */ -struct xencov_functions -{ - uint32_t tag; /* XENCOV_TAG_FUNC */ - uint32_t num; - struct xencov_function xencov_function[1]; -}; - -/** - * Terminator - */ -struct xencov_end -{ - uint32_t tag; /* XENCOV_TAG_END */ -}; - -#endif /* __XEN_PUBLIC_GCOV_H__ */ Index: sys/xen/interface/grant_table.h =================================================================== --- sys/xen/interface/grant_table.h +++ sys/xen/interface/grant_table.h @@ -166,11 +166,13 @@ #define GTF_type_mask (3U<<0) /* - * Subflags for GTF_permit_access. + * Subflags for GTF_permit_access and GTF_transitive. * GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST] * GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN] * GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN] - * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST] + * Further subflags for GTF_permit_access only. + * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags to be used for + * mappings of the grant [GST] * GTF_sub_page: Grant access to only a subrange of the page. @domid * will only be allowed to copy from the grant, and not * map it. [GST] @@ -626,9 +628,6 @@ #define _GNTMAP_contains_pte (4) #define GNTMAP_contains_pte (1<<_GNTMAP_contains_pte) -#define _GNTMAP_can_fail (5) -#define GNTMAP_can_fail (1<<_GNTMAP_can_fail) - /* * Bits to be placed in guest kernel available PTE bits (architecture * dependent; only supported when XENFEAT_gnttab_map_avail_bits is set). @@ -653,6 +652,7 @@ #define GNTST_bad_copy_arg (-10) /* copy arguments cross page boundary. */ #define GNTST_address_too_big (-11) /* transfer page address too large. */ #define GNTST_eagain (-12) /* Operation not done; try again. */ +#define GNTST_no_space (-13) /* Out of space (handles etc). 
*/ /* ` } */ #define GNTTABOP_error_msgs { \ @@ -668,7 +668,8 @@ "bad page", \ "copy arguments cross page boundary", \ "page address size too large", \ - "operation not done; try again" \ + "operation not done; try again", \ + "out of space", \ } #endif /* __XEN_PUBLIC_GRANT_TABLE_H__ */ Index: sys/xen/interface/hvm/dm_op.h =================================================================== --- sys/xen/interface/hvm/dm_op.h +++ sys/xen/interface/hvm/dm_op.h @@ -25,9 +25,6 @@ #define __XEN_PUBLIC_HVM_DM_OP_H__ #include "../xen.h" - -#if defined(__XEN__) || defined(__XEN_TOOLS__) - #include "../event_channel.h" #ifndef uint64_aligned_t @@ -74,6 +71,7 @@ /* OUT - server id */ ioservid_t id; }; +typedef struct xen_dm_op_create_ioreq_server xen_dm_op_create_ioreq_server_t; /* * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to @@ -113,6 +111,7 @@ /* OUT - buffered ioreq gfn (see block comment above)*/ uint64_aligned_t bufioreq_gfn; }; +typedef struct xen_dm_op_get_ioreq_server_info xen_dm_op_get_ioreq_server_info_t; /* * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for @@ -148,6 +147,7 @@ /* IN - inclusive start and end of range */ uint64_aligned_t start, end; }; +typedef struct xen_dm_op_ioreq_server_range xen_dm_op_ioreq_server_range_t; #define XEN_DMOP_PCI_SBDF(s,b,d,f) \ ((((s) & 0xffff) << 16) | \ @@ -173,6 +173,7 @@ uint8_t enabled; uint8_t pad; }; +typedef struct xen_dm_op_set_ioreq_server_state xen_dm_op_set_ioreq_server_state_t; /* * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server . @@ -186,6 +187,7 @@ ioservid_t id; uint16_t pad; }; +typedef struct xen_dm_op_destroy_ioreq_server xen_dm_op_destroy_ioreq_server_t; /* * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn @@ -203,6 +205,7 @@ /* IN - first pfn to track */ uint64_aligned_t first_pfn; }; +typedef struct xen_dm_op_track_dirty_vram xen_dm_op_track_dirty_vram_t; /* * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's @@ -217,6 +220,7 @@ /* IN - Level: 0 -> deasserted, 1 -> asserted */ uint8_t level; }; +typedef struct xen_dm_op_set_pci_intx_level xen_dm_op_set_pci_intx_level_t; /* * XEN_DMOP_set_isa_irq_level: Set the logical level of a one of a domain's @@ -230,6 +234,7 @@ /* IN - Level: 0 -> deasserted, 1 -> asserted */ uint8_t level; }; +typedef struct xen_dm_op_set_isa_irq_level xen_dm_op_set_isa_irq_level_t; /* * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line. @@ -242,6 +247,7 @@ /* ISA IRQ (1-15) or 0 -> disable link */ uint8_t isa_irq; }; +typedef struct xen_dm_op_set_pci_link_route xen_dm_op_set_pci_link_route_t; /* * XEN_DMOP_modified_memory: Notify that a set of pages were modified by @@ -265,6 +271,7 @@ /* IN/OUT - Must be set to 0 */ uint32_t opaque; }; +typedef struct xen_dm_op_modified_memory xen_dm_op_modified_memory_t; struct xen_dm_op_modified_memory_extent { /* IN - number of contiguous pages modified */ @@ -294,6 +301,7 @@ /* IN - first pfn in region */ uint64_aligned_t first_pfn; }; +typedef struct xen_dm_op_set_mem_type xen_dm_op_set_mem_type_t; /* * XEN_DMOP_inject_event: Inject an event into a VCPU, which will @@ -327,6 +335,7 @@ /* IN - type-specific extra data (%cr2 for #PF, pending_dbg for #DB) */ uint64_aligned_t cr2; }; +typedef struct xen_dm_op_inject_event xen_dm_op_inject_event_t; /* * XEN_DMOP_inject_msi: Inject an MSI for an emulated device. 
@@ -340,6 +349,7 @@ /* IN - MSI address (0xfeexxxxx) */ uint64_aligned_t addr; }; +typedef struct xen_dm_op_inject_msi xen_dm_op_inject_msi_t; /* * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server @@ -366,6 +376,7 @@ uint64_t opaque; /* IN/OUT - only used for hypercall continuation, has to be set to zero by the caller */ }; +typedef struct xen_dm_op_map_mem_type_to_ioreq_server xen_dm_op_map_mem_type_to_ioreq_server_t; /* * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain @@ -377,6 +388,7 @@ uint32_t reason; /* SHUTDOWN_* => enum sched_shutdown_reason */ /* (Other reason values are not blocked) */ }; +typedef struct xen_dm_op_remote_shutdown xen_dm_op_remote_shutdown_t; /* * XEN_DMOP_relocate_memory : Relocate GFNs for the specified guest. @@ -395,6 +407,7 @@ /* Starting GFN where GFNs should be relocated. */ uint64_aligned_t dst_gfn; }; +typedef struct xen_dm_op_relocate_memory xen_dm_op_relocate_memory_t; /* * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space. @@ -416,35 +429,65 @@ uint32_t type; /* XEN_DMOP_MEM_CACHEATTR_* */ uint32_t pad; }; +typedef struct xen_dm_op_pin_memory_cacheattr xen_dm_op_pin_memory_cacheattr_t; + +/* + * XEN_DMOP_set_irq_level: Set the logical level of a one of a domain's + * IRQ lines (currently Arm only). + * Only SPIs are supported. + */ +#define XEN_DMOP_set_irq_level 19 + +struct xen_dm_op_set_irq_level { + uint32_t irq; + /* IN - Level: 0 -> deasserted, 1 -> asserted */ + uint8_t level; + uint8_t pad[3]; +}; +typedef struct xen_dm_op_set_irq_level xen_dm_op_set_irq_level_t; + +/* + * XEN_DMOP_nr_vcpus: Query the number of vCPUs a domain has. + * + * This is the number of vcpu objects allocated in Xen for the domain, and is + * fixed from creation time. This bound is applicable to e.g. the vcpuid + * parameter of XEN_DMOP_inject_event, or number of struct ioreq objects + * mapped via XENMEM_acquire_resource. 
+ */ +#define XEN_DMOP_nr_vcpus 20 + +struct xen_dm_op_nr_vcpus { + uint32_t vcpus; /* OUT */ +}; +typedef struct xen_dm_op_nr_vcpus xen_dm_op_nr_vcpus_t; struct xen_dm_op { uint32_t op; uint32_t pad; union { - struct xen_dm_op_create_ioreq_server create_ioreq_server; - struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info; - struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server; - struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server; - struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state; - struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server; - struct xen_dm_op_track_dirty_vram track_dirty_vram; - struct xen_dm_op_set_pci_intx_level set_pci_intx_level; - struct xen_dm_op_set_isa_irq_level set_isa_irq_level; - struct xen_dm_op_set_pci_link_route set_pci_link_route; - struct xen_dm_op_modified_memory modified_memory; - struct xen_dm_op_set_mem_type set_mem_type; - struct xen_dm_op_inject_event inject_event; - struct xen_dm_op_inject_msi inject_msi; - struct xen_dm_op_map_mem_type_to_ioreq_server - map_mem_type_to_ioreq_server; - struct xen_dm_op_remote_shutdown remote_shutdown; - struct xen_dm_op_relocate_memory relocate_memory; - struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr; + xen_dm_op_create_ioreq_server_t create_ioreq_server; + xen_dm_op_get_ioreq_server_info_t get_ioreq_server_info; + xen_dm_op_ioreq_server_range_t map_io_range_to_ioreq_server; + xen_dm_op_ioreq_server_range_t unmap_io_range_from_ioreq_server; + xen_dm_op_set_ioreq_server_state_t set_ioreq_server_state; + xen_dm_op_destroy_ioreq_server_t destroy_ioreq_server; + xen_dm_op_track_dirty_vram_t track_dirty_vram; + xen_dm_op_set_pci_intx_level_t set_pci_intx_level; + xen_dm_op_set_isa_irq_level_t set_isa_irq_level; + xen_dm_op_set_irq_level_t set_irq_level; + xen_dm_op_set_pci_link_route_t set_pci_link_route; + xen_dm_op_modified_memory_t modified_memory; + xen_dm_op_set_mem_type_t set_mem_type; + xen_dm_op_inject_event_t inject_event; + xen_dm_op_inject_msi_t inject_msi; + xen_dm_op_map_mem_type_to_ioreq_server_t map_mem_type_to_ioreq_server; + xen_dm_op_remote_shutdown_t remote_shutdown; + xen_dm_op_relocate_memory_t relocate_memory; + xen_dm_op_pin_memory_cacheattr_t pin_memory_cacheattr; + xen_dm_op_nr_vcpus_t nr_vcpus; } u; }; -#endif /* __XEN__ || __XEN_TOOLS__ */ - struct xen_dm_op_buf { XEN_GUEST_HANDLE(void) h; xen_ulong_t size; Index: sys/xen/interface/hvm/hvm_vcpu.h =================================================================== --- sys/xen/interface/hvm/hvm_vcpu.h +++ sys/xen/interface/hvm/hvm_vcpu.h @@ -69,6 +69,7 @@ uint16_t pad2[3]; }; +typedef struct vcpu_hvm_x86_32 xen_vcpu_hvm_x86_32_t; /* * The layout of the _ar fields of the segment registers is the @@ -114,6 +115,7 @@ * the 32-bit structure should be used instead. */ }; +typedef struct vcpu_hvm_x86_64 xen_vcpu_hvm_x86_64_t; struct vcpu_hvm_context { #define VCPU_HVM_MODE_32B 0 /* 32bit fields of the structure will be used. */ @@ -124,8 +126,8 @@ /* CPU registers. 
*/ union { - struct vcpu_hvm_x86_32 x86_32; - struct vcpu_hvm_x86_64 x86_64; + xen_vcpu_hvm_x86_32_t x86_32; + xen_vcpu_hvm_x86_64_t x86_64; } cpu_regs; }; typedef struct vcpu_hvm_context vcpu_hvm_context_t; Index: sys/xen/interface/hvm/params.h =================================================================== --- sys/xen/interface/hvm/params.h +++ sys/xen/interface/hvm/params.h @@ -34,6 +34,7 @@ #define HVM_PARAM_MEMORY_EVENT_CR3 21 #define HVM_PARAM_MEMORY_EVENT_CR4 22 #define HVM_PARAM_MEMORY_EVENT_INT3 23 +#define HVM_PARAM_NESTEDHVM 24 #define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP 25 #define HVM_PARAM_BUFIOREQ_EVTCHN 26 #define HVM_PARAM_MEMORY_EVENT_MSR 30 @@ -163,6 +164,18 @@ #define _HVMPV_hcall_ipi 9 #define HVMPV_hcall_ipi (1 << _HVMPV_hcall_ipi) +/* Enable ExProcessorMasks */ +#define _HVMPV_ex_processor_masks 10 +#define HVMPV_ex_processor_masks (1 << _HVMPV_ex_processor_masks) + +/* Allow more than 64 VPs */ +#define _HVMPV_no_vp_limit 11 +#define HVMPV_no_vp_limit (1 << _HVMPV_no_vp_limit) + +/* Enable vCPU hotplug */ +#define _HVMPV_cpu_hotplug 12 +#define HVMPV_cpu_hotplug (1 << _HVMPV_cpu_hotplug) + #define HVMPV_feature_mask \ (HVMPV_base_freq | \ HVMPV_no_freq | \ @@ -173,7 +186,10 @@ HVMPV_crash_ctl | \ HVMPV_synic | \ HVMPV_stimer | \ - HVMPV_hcall_ipi) + HVMPV_hcall_ipi | \ + HVMPV_ex_processor_masks | \ + HVMPV_no_vp_limit | \ + HVMPV_cpu_hotplug) #endif @@ -232,9 +248,6 @@ */ #define HVM_PARAM_ACPI_IOPORTS_LOCATION 19 -/* Boolean: Enable nestedhvm (hvm only) */ -#define HVM_PARAM_NESTEDHVM 24 - /* Params for the mem event rings */ #define HVM_PARAM_PAGING_RING_PFN 27 #define HVM_PARAM_MONITOR_RING_PFN 28 Index: sys/xen/interface/hvm/save.h =================================================================== --- sys/xen/interface/hvm/save.h +++ sys/xen/interface/hvm/save.h @@ -82,12 +82,12 @@ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; char cpt[1];} #endif -#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) +#define HVM_SAVE_TYPE(_x) __typeof__ (((struct __HVM_SAVE_TYPE_##_x *)(0))->t) #define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x))) #define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c)) #ifdef __XEN__ -# define HVM_SAVE_TYPE_COMPAT(_x) typeof (((struct __HVM_SAVE_TYPE_COMPAT_##_x *)(0))->t) +# define HVM_SAVE_TYPE_COMPAT(_x) __typeof__ (((struct __HVM_SAVE_TYPE_COMPAT_##_x *)(0))->t) # define HVM_SAVE_LENGTH_COMPAT(_x) (sizeof (HVM_SAVE_TYPE_COMPAT(_x))) # define HVM_SAVE_HAS_COMPAT(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->cpt)-1) Index: sys/xen/interface/hypfs.h =================================================================== --- sys/xen/interface/hypfs.h +++ sys/xen/interface/hypfs.h @@ -53,9 +53,10 @@ uint32_t content_len; /* Current length of data. */ uint32_t max_write_len; /* Max. length for writes (0 if read-only). */ }; +typedef struct xen_hypfs_direntry xen_hypfs_direntry_t; struct xen_hypfs_dirlistentry { - struct xen_hypfs_direntry e; + xen_hypfs_direntry_t e; /* Offset in bytes to next entry (0 == this is the last entry). */ uint16_t off_next; /* Zero terminated entry name, possibly with some padding for alignment. 
*/ Index: sys/xen/interface/io/displif.h =================================================================== --- sys/xen/interface/io/displif.h +++ sys/xen/interface/io/displif.h @@ -38,7 +38,8 @@ * Protocol version ****************************************************************************** */ -#define XENDISPL_PROTOCOL_VERSION "1" +#define XENDISPL_PROTOCOL_VERSION "2" +#define XENDISPL_PROTOCOL_VERSION_INT 2 /* ****************************************************************************** @@ -202,6 +203,9 @@ * Width and height of the connector in pixels separated by * XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the * display. + * If backend provides extended display identification data (EDID) with + * XENDISPL_OP_GET_EDID request then EDID values must take precedence + * over the resolutions defined here. * *------------------ Connector Request Transport Parameters ------------------- * @@ -349,6 +353,8 @@ #define XENDISPL_OP_FB_DETACH 0x13 #define XENDISPL_OP_SET_CONFIG 0x14 #define XENDISPL_OP_PG_FLIP 0x15 +/* The below command is available in protocol version 2 and above. */ +#define XENDISPL_OP_GET_EDID 0x16 /* ****************************************************************************** @@ -377,6 +383,10 @@ #define XENDISPL_FIELD_BE_ALLOC "be-alloc" #define XENDISPL_FIELD_UNIQUE_ID "unique-id" +#define XENDISPL_EDID_BLOCK_SIZE 128 +#define XENDISPL_EDID_BLOCK_COUNT 256 +#define XENDISPL_EDID_MAX_SIZE (XENDISPL_EDID_BLOCK_SIZE * XENDISPL_EDID_BLOCK_COUNT) + /* ****************************************************************************** * STATUS RETURN CODES @@ -451,7 +461,9 @@ * +----------------+----------------+----------------+----------------+ * | gref_directory | 40 * +----------------+----------------+----------------+----------------+ - * | reserved | 44 + * | data_ofs | 44 + * +----------------+----------------+----------------+----------------+ + * | reserved | 48 * +----------------+----------------+----------------+----------------+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| * +----------------+----------------+----------------+----------------+ @@ -494,6 +506,7 @@ * buffer size (buffer_sz) exceeds what can be addressed by this single page, * then reference to the next page must be supplied (see gref_dir_next_page * below) + * data_ofs - uint32_t, offset of the data in the buffer, octets */ #define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0) @@ -506,6 +519,7 @@ uint32_t buffer_sz; uint32_t flags; grant_ref_t gref_directory; + uint32_t data_ofs; }; /* @@ -731,6 +745,44 @@ uint64_t fb_cookie; }; +/* + * Request EDID - request EDID describing current connector: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | _OP_GET_EDID | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | buffer_sz | 8 + * +----------------+----------------+----------------+----------------+ + * | gref_directory | 12 + * +----------------+----------------+----------------+----------------+ + * | reserved | 16 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * Notes: + * - This command is not available in protocol version 1 and should be + * ignored. 
+ * - This request is optional and if not supported then visible area + * is defined by the relevant XenStore's "resolution" property. + * - Shared buffer, allocated for EDID storage, must not be less then + * XENDISPL_EDID_MAX_SIZE octets. + * + * buffer_sz - uint32_t, buffer size to be allocated, octets + * gref_directory - grant_ref_t, a reference to the first shared page + * describing EDID buffer references. See XENDISPL_OP_DBUF_CREATE for + * grant page directory structure (struct xendispl_page_directory). + * + * See response format for this request. + */ + +struct xendispl_get_edid_req { + uint32_t buffer_sz; + grant_ref_t gref_directory; +}; + /* *---------------------------------- Responses -------------------------------- * @@ -753,6 +805,35 @@ * id - uint16_t, private guest value, echoed from request * status - int32_t, response status, zero on success and -XEN_EXX on failure * + * + * Get EDID response - response for XENDISPL_OP_GET_EDID: + * 0 1 2 3 octet + * +----------------+----------------+----------------+----------------+ + * | id | operation | reserved | 4 + * +----------------+----------------+----------------+----------------+ + * | status | 8 + * +----------------+----------------+----------------+----------------+ + * | edid_sz | 12 + * +----------------+----------------+----------------+----------------+ + * | reserved | 16 + * +----------------+----------------+----------------+----------------+ + * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/| + * +----------------+----------------+----------------+----------------+ + * | reserved | 64 + * +----------------+----------------+----------------+----------------+ + * + * Notes: + * - This response is not available in protocol version 1 and should be + * ignored. + * + * edid_sz - uint32_t, size of the EDID, octets + */ + +struct xendispl_get_edid_resp { + uint32_t edid_sz; +}; + +/* *----------------------------------- Events ---------------------------------- * * Events are sent via a shared page allocated by the front and propagated by @@ -804,6 +885,7 @@ struct xendispl_fb_detach_req fb_detach; struct xendispl_set_config_req set_config; struct xendispl_page_flip_req pg_flip; + struct xendispl_get_edid_req get_edid; uint8_t reserved[56]; } op; }; @@ -813,7 +895,10 @@ uint8_t operation; uint8_t reserved; int32_t status; - uint8_t reserved1[56]; + union { + struct xendispl_get_edid_resp get_edid; + uint8_t reserved1[56]; + } op; }; struct xendispl_evt { Index: sys/xen/interface/io/netif.h =================================================================== --- sys/xen/interface/io/netif.h +++ sys/xen/interface/io/netif.h @@ -204,6 +204,18 @@ * present). */ +/* + * MTU + * === + * + * The toolstack may set a value of MTU for the frontend by setting the + * /local/domain//device/vif//mtu node with the MTU value in + * octets. If this node is absent the frontend should assume an MTU value + * of 1500 octets. A frontend is also at liberty to ignore this value so + * it is only suitable for informing the frontend that a packet payload + * >1500 octets is permitted. 
+ */ + /* * Hash types * ========== Index: sys/xen/interface/io/ring.h =================================================================== --- sys/xen/interface/io/ring.h +++ sys/xen/interface/io/ring.h @@ -231,22 +231,25 @@ #define RING_GET_REQUEST(_r, _idx) \ (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) +#define RING_GET_RESPONSE(_r, _idx) \ + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) + /* - * Get a local copy of a request. + * Get a local copy of a request/response. * - * Use this in preference to RING_GET_REQUEST() so all processing is + * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is * done on a local copy that cannot be modified by the other end. * * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this - * to be ineffective where _req is a struct which consists of only bitfields. + * to be ineffective where dest is a struct which consists of only bitfields. */ -#define RING_COPY_REQUEST(_r, _idx, _req) do { \ - /* Use volatile to force the copy into _req. */ \ - *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ +#define RING_COPY_(type, r, idx, dest) do { \ + /* Use volatile to force the copy into dest. */ \ + *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx); \ } while (0) -#define RING_GET_RESPONSE(_r, _idx) \ - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) +#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req) +#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp) /* Loop termination condition: Would the specified index overflow the ring? */ #define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ @@ -256,6 +259,10 @@ #define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \ (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r)) +/* Ill-behaved backend determination: Can there be this many responses? */ +#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \ + (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r)) + #define RING_PUSH_REQUESTS(_r) do { \ xen_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ Index: sys/xen/interface/memory.h =================================================================== --- sys/xen/interface/memory.h +++ sys/xen/interface/memory.h @@ -148,16 +148,23 @@ */ #define XENMEM_maximum_ram_page 2 +struct xen_memory_domain { + /* [IN] Domain information is being queried for. */ + domid_t domid; +}; + /* * Returns the current or maximum memory reservation, in pages, of the * specified domain (may be DOMID_SELF). Returns -ve errcode on failure. - * arg == addr of domid_t. + * arg == addr of struct xen_memory_domain. */ #define XENMEM_current_reservation 3 #define XENMEM_maximum_reservation 4 /* - * Returns the maximum GPFN in use by the guest, or -ve errcode on failure. + * Returns the maximum GFN in use by the specified domain (may be DOMID_SELF). + * Returns -ve errcode on failure. + * arg == addr of struct xen_memory_domain. 
*/ #define XENMEM_maximum_gpfn 14 @@ -604,7 +611,7 @@ XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer; /* IN */ union { - struct physdev_pci_device pci; + physdev_pci_device_t pci; } dev; }; typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t; @@ -625,6 +632,7 @@ #define XENMEM_resource_ioreq_server 0 #define XENMEM_resource_grant_table 1 +#define XENMEM_resource_vmtrace_buf 2 /* * IN - a type-specific resource identifier, which must be zero @@ -639,16 +647,26 @@ #define XENMEM_resource_grant_table_id_status 1 /* - * IN/OUT - As an IN parameter number of frames of the resource - * to be mapped. However, if the specified value is 0 and - * frame_list is NULL then this field will be set to the - * maximum value supported by the implementation on return. + * IN/OUT + * + * As an IN parameter number of frames of the resource to be mapped. + * This value may be updated over the course of the operation. + * + * When frame_list is NULL and nr_frames is 0, this is interpreted as a + * request for the size of the resource, which shall be returned in the + * nr_frames field. + * + * The size of a resource will never be zero, but a nonzero result doesn't + * guarantee that a subsequent mapping request will be successful. There + * are further type/id specific constraints which may change between the + * two calls. */ uint32_t nr_frames; uint32_t pad; /* * IN - the index of the initial frame to be mapped. This parameter - * is ignored if nr_frames is 0. + * is ignored if nr_frames is 0. This value may be updated + * over the course of the operation. */ uint64_t frame; @@ -664,7 +682,8 @@ * If -EIO is returned then the frame_list has only been * partially mapped and it is up to the caller to unmap all * the GFNs. - * This parameter may be NULL if nr_frames is 0. + * This parameter may be NULL if nr_frames is 0. This + * value may be updated over the course of the operation. */ XEN_GUEST_HANDLE(xen_pfn_t) frame_list; }; Index: sys/xen/interface/physdev.h =================================================================== --- sys/xen/interface/physdev.h +++ sys/xen/interface/physdev.h @@ -229,11 +229,11 @@ struct physdev_op { uint32_t cmd; union { - struct physdev_irq_status_query irq_status_query; - struct physdev_set_iopl set_iopl; - struct physdev_set_iobitmap set_iobitmap; - struct physdev_apic apic_op; - struct physdev_irq irq_op; + physdev_irq_status_query_t irq_status_query; + physdev_set_iopl_t set_iopl; + physdev_set_iobitmap_t set_iobitmap; + physdev_apic_t apic_op; + physdev_irq_t irq_op; } u; }; typedef struct physdev_op physdev_op_t; @@ -334,7 +334,7 @@ uint8_t op; uint8_t bus; union { - struct physdev_pci_device pci; + physdev_pci_device_t pci; } u; }; typedef struct physdev_dbgp_op physdev_dbgp_op_t; Index: sys/xen/interface/platform.h =================================================================== --- sys/xen/interface/platform.h +++ sys/xen/interface/platform.h @@ -42,6 +42,7 @@ uint32_t nsecs; uint64_t system_time; }; +typedef struct xenpf_settime32 xenpf_settime32_t; #define XENPF_settime64 62 struct xenpf_settime64 { /* IN variables. 
*/ @@ -50,6 +51,7 @@ uint32_t mbz; uint64_t system_time; }; +typedef struct xenpf_settime64 xenpf_settime64_t; #if __XEN_INTERFACE_VERSION__ < 0x00040600 #define XENPF_settime XENPF_settime32 #define xenpf_settime xenpf_settime32 @@ -529,6 +531,7 @@ uint32_t acpi_id; uint32_t pxm; }; +typedef struct xenpf_cpu_hotadd xenpf_cpu_hotadd_t; #define XENPF_mem_hotadd 59 struct xenpf_mem_hotadd @@ -538,6 +541,7 @@ uint32_t pxm; uint32_t flags; }; +typedef struct xenpf_mem_hotadd xenpf_mem_hotadd_t; #define XENPF_core_parking 60 @@ -622,29 +626,29 @@ uint32_t cmd; uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ union { - struct xenpf_settime settime; - struct xenpf_settime32 settime32; - struct xenpf_settime64 settime64; - struct xenpf_add_memtype add_memtype; - struct xenpf_del_memtype del_memtype; - struct xenpf_read_memtype read_memtype; - struct xenpf_microcode_update microcode; - struct xenpf_platform_quirk platform_quirk; - struct xenpf_efi_runtime_call efi_runtime_call; - struct xenpf_firmware_info firmware_info; - struct xenpf_enter_acpi_sleep enter_acpi_sleep; - struct xenpf_change_freq change_freq; - struct xenpf_getidletime getidletime; - struct xenpf_set_processor_pminfo set_pminfo; - struct xenpf_pcpuinfo pcpu_info; - struct xenpf_pcpu_version pcpu_version; - struct xenpf_cpu_ol cpu_ol; - struct xenpf_cpu_hotadd cpu_add; - struct xenpf_mem_hotadd mem_add; - struct xenpf_core_parking core_parking; - struct xenpf_resource_op resource_op; - struct xenpf_symdata symdata; - uint8_t pad[128]; + xenpf_settime_t settime; + xenpf_settime32_t settime32; + xenpf_settime64_t settime64; + xenpf_add_memtype_t add_memtype; + xenpf_del_memtype_t del_memtype; + xenpf_read_memtype_t read_memtype; + xenpf_microcode_update_t microcode; + xenpf_platform_quirk_t platform_quirk; + xenpf_efi_runtime_call_t efi_runtime_call; + xenpf_firmware_info_t firmware_info; + xenpf_enter_acpi_sleep_t enter_acpi_sleep; + xenpf_change_freq_t change_freq; + xenpf_getidletime_t getidletime; + xenpf_set_processor_pminfo_t set_pminfo; + xenpf_pcpuinfo_t pcpu_info; + xenpf_pcpu_version_t pcpu_version; + xenpf_cpu_ol_t cpu_ol; + xenpf_cpu_hotadd_t cpu_add; + xenpf_mem_hotadd_t mem_add; + xenpf_core_parking_t core_parking; + xenpf_resource_op_t resource_op; + xenpf_symdata_t symdata; + uint8_t pad[128]; } u; }; typedef struct xen_platform_op xen_platform_op_t; Index: sys/xen/interface/pmu.h =================================================================== --- sys/xen/interface/pmu.h +++ sys/xen/interface/pmu.h @@ -127,7 +127,7 @@ uint8_t pad[6]; /* Architecture-specific information */ - struct xen_pmu_arch pmu; + xen_pmu_arch_t pmu; }; #endif /* __XEN_PUBLIC_PMU_H__ */ Index: sys/xen/interface/sysctl.h =================================================================== --- sys/xen/interface/sysctl.h +++ sys/xen/interface/sysctl.h @@ -100,9 +100,10 @@ #define _XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share 5 #define XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share \ (1u << _XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share) +#define XEN_SYSCTL_PHYSCAP_vmtrace (1 << 6) /* Max XEN_SYSCTL_PHYSCAP_* constant. Used for ABI checking. 
*/ -#define XEN_SYSCTL_PHYSCAP_MAX XEN_SYSCTL_PHYSCAP_iommu_hap_pt_share +#define XEN_SYSCTL_PHYSCAP_MAX XEN_SYSCTL_PHYSCAP_vmtrace struct xen_sysctl_physinfo { uint32_t threads_per_core; Index: sys/xen/interface/vm_event.h =================================================================== --- sys/xen/interface/vm_event.h +++ sys/xen/interface/vm_event.h @@ -29,7 +29,7 @@ #include "xen.h" -#define VM_EVENT_INTERFACE_VERSION 0x00000006 +#define VM_EVENT_INTERFACE_VERSION 0x00000007 #if defined(__XEN__) || defined(__XEN_TOOLS__) @@ -119,6 +119,14 @@ * which singlestep gets automatically disabled. */ #define VM_EVENT_FLAG_FAST_SINGLESTEP (1 << 11) +/* + * Set if the event comes from a nested VM and thus npt_base is valid. + */ +#define VM_EVENT_FLAG_NESTED_P2M (1 << 12) +/* + * Reset the vmtrace buffer (if vmtrace is enabled) + */ +#define VM_EVENT_FLAG_RESET_VMTRACE (1 << 13) /* * Reasons for the vm event request @@ -208,6 +216,24 @@ uint64_t msr_star; uint64_t msr_lstar; uint64_t gdtr_base; + + /* + * When VM_EVENT_FLAG_NESTED_P2M is set, this event comes from a nested + * VM. npt_base is the guest physical address of the L1 hypervisors + * EPT/NPT tables for the nested guest. + * + * All bits outside of architectural address ranges are reserved for + * future metadata. + */ + uint64_t npt_base; + + /* + * Current position in the vmtrace buffer, or ~0 if vmtrace is not active. + * + * For Intel Processor Trace, it is the upper half of MSR_RTIT_OUTPUT_MASK. + */ + uint64_t vmtrace_pos; + uint32_t cs_base; uint32_t ss_base; uint32_t ds_base; @@ -240,8 +266,7 @@ uint64_t ttbr1; uint64_t ttbcr; uint64_t pc; - uint32_t cpsr; - uint32_t _pad; + uint64_t cpsr; }; /* Index: sys/xen/interface/xen.h =================================================================== --- sys/xen/interface/xen.h +++ sys/xen/interface/xen.h @@ -726,7 +726,7 @@ #endif /* XEN_HAVE_PV_UPCALL_MASK */ xen_ulong_t evtchn_pending_sel; struct arch_vcpu_info arch; - struct vcpu_time_info time; + vcpu_time_info_t time; }; /* 64 bytes (x86) */ #ifndef __XEN__ typedef struct vcpu_info vcpu_info_t; @@ -1031,6 +1031,7 @@ XEN_GUEST_HANDLE_64(uint8) bitmap; uint32_t nr_bits; }; +typedef struct xenctl_bitmap xenctl_bitmap_t; #endif #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */ Index: sys/xen/interface/xsm/flask_op.h =================================================================== --- sys/xen/interface/xsm/flask_op.h +++ sys/xen/interface/xsm/flask_op.h @@ -33,10 +33,12 @@ XEN_GUEST_HANDLE(char) buffer; uint32_t size; }; +typedef struct xen_flask_load xen_flask_load_t; struct xen_flask_setenforce { uint32_t enforcing; }; +typedef struct xen_flask_setenforce xen_flask_setenforce_t; struct xen_flask_sid_context { /* IN/OUT: sid to convert to/from string */ @@ -47,6 +49,7 @@ uint32_t size; XEN_GUEST_HANDLE(char) context; }; +typedef struct xen_flask_sid_context xen_flask_sid_context_t; struct xen_flask_access { /* IN: access request */ @@ -60,6 +63,7 @@ uint32_t audit_deny; uint32_t seqno; }; +typedef struct xen_flask_access xen_flask_access_t; struct xen_flask_transition { /* IN: transition SIDs and class */ @@ -69,6 +73,7 @@ /* OUT: new SID */ uint32_t newsid; }; +typedef struct xen_flask_transition xen_flask_transition_t; #if __XEN_INTERFACE_VERSION__ < 0x00040800 struct xen_flask_userlist { @@ -106,11 +111,13 @@ */ XEN_GUEST_HANDLE(char) name; }; +typedef struct xen_flask_boolean xen_flask_boolean_t; struct xen_flask_setavc_threshold { /* IN */ uint32_t threshold; }; +typedef struct xen_flask_setavc_threshold 
xen_flask_setavc_threshold_t; struct xen_flask_hash_stats { /* OUT */ @@ -119,6 +126,7 @@ uint32_t buckets_total; uint32_t max_chain_len; }; +typedef struct xen_flask_hash_stats xen_flask_hash_stats_t; struct xen_flask_cache_stats { /* IN */ @@ -131,6 +139,7 @@ uint32_t reclaims; uint32_t frees; }; +typedef struct xen_flask_cache_stats xen_flask_cache_stats_t; struct xen_flask_ocontext { /* IN */ @@ -138,6 +147,7 @@ uint32_t sid; uint64_t low, high; }; +typedef struct xen_flask_ocontext xen_flask_ocontext_t; struct xen_flask_peersid { /* IN */ @@ -145,12 +155,14 @@ /* OUT */ uint32_t sid; }; +typedef struct xen_flask_peersid xen_flask_peersid_t; struct xen_flask_relabel { /* IN */ uint32_t domid; uint32_t sid; }; +typedef struct xen_flask_relabel xen_flask_relabel_t; struct xen_flask_devicetree_label { /* IN */ @@ -158,6 +170,7 @@ uint32_t length; XEN_GUEST_HANDLE(char) path; }; +typedef struct xen_flask_devicetree_label xen_flask_devicetree_label_t; struct xen_flask_op { uint32_t cmd; @@ -188,26 +201,26 @@ #define FLASK_DEVICETREE_LABEL 25 uint32_t interface_version; /* XEN_FLASK_INTERFACE_VERSION */ union { - struct xen_flask_load load; - struct xen_flask_setenforce enforce; + xen_flask_load_t load; + xen_flask_setenforce_t enforce; /* FLASK_CONTEXT_TO_SID and FLASK_SID_TO_CONTEXT */ - struct xen_flask_sid_context sid_context; - struct xen_flask_access access; + xen_flask_sid_context_t sid_context; + xen_flask_access_t access; /* FLASK_CREATE, FLASK_RELABEL, FLASK_MEMBER */ - struct xen_flask_transition transition; + xen_flask_transition_t transition; #if __XEN_INTERFACE_VERSION__ < 0x00040800 struct xen_flask_userlist userlist; #endif /* FLASK_GETBOOL, FLASK_SETBOOL */ - struct xen_flask_boolean boolean; - struct xen_flask_setavc_threshold setavc_threshold; - struct xen_flask_hash_stats hash_stats; - struct xen_flask_cache_stats cache_stats; + xen_flask_boolean_t boolean; + xen_flask_setavc_threshold_t setavc_threshold; + xen_flask_hash_stats_t hash_stats; + xen_flask_cache_stats_t cache_stats; /* FLASK_ADD_OCONTEXT, FLASK_DEL_OCONTEXT */ - struct xen_flask_ocontext ocontext; - struct xen_flask_peersid peersid; - struct xen_flask_relabel relabel; - struct xen_flask_devicetree_label devicetree_label; + xen_flask_ocontext_t ocontext; + xen_flask_peersid_t peersid; + xen_flask_relabel_t relabel; + xen_flask_devicetree_label_t devicetree_label; } u; }; typedef struct xen_flask_op xen_flask_op_t;
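
A few illustrative sketches of the new interfaces follow; none of this is part of the patch itself. First, the two new per-domain knobs: vmtrace_size in xen_domctl_createdomain selects the size in bytes of each vCPU's trace buffer (0 disables tracing), and XEN_X86_MSR_RELAXED in xen_arch_domainconfig.misc_flags makes Xen tolerate guest accesses to MSRs it does not explicitly handle instead of injecting #GP. Only the fields relevant here are shown; the remaining createdomain parameters, and the do_domctl() wrapper used below, are assumptions standing in for the caller's usual domctl plumbing.

/*
 * Sketch only: the remaining createdomain fields (flags, max_vcpus,
 * grant/event-channel limits, ...) are elided; do_domctl() is a
 * hypothetical wrapper around the domctl hypercall.
 */
#include <xen/interface/domctl.h>

static int
create_traced_domain(domid_t *domid_out)
{
	struct xen_domctl domctl = {
		.cmd = XEN_DOMCTL_createdomain,
		.interface_version = XEN_DOMCTL_INTERFACE_VERSION,
		.u.createdomain = {
			.vmtrace_size = 64 * 1024,	/* per-vCPU buffer, bytes */
			.arch = {
				.misc_flags = XEN_X86_MSR_RELAXED,
			},
		},
	};
	int rc;

	rc = do_domctl(&domctl);
	if (rc == 0)
		*domid_out = domctl.domain;	/* domain is IN/OUT for this op */
	return (rc);
}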
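
Once the domain is running, XEN_DOMCTL_vmtrace_op drives the tracing itself: enable/disable/reset, querying the output position, and get/set of the platform options (filtered MSR_RTIT_* accesses for Intel Processor Trace). A sketch of reading the current output position for one vCPU, reusing the same hypothetical do_domctl() wrapper:

/*
 * Sketch only: reads the current trace output position via the new
 * XEN_DOMCTL_vmtrace_op; do_domctl() is not part of these headers
 * (libxc exposes the equivalent as xc_domctl()).
 */
#include <xen/interface/domctl.h>

static int
vmtrace_get_position(domid_t domid, uint32_t vcpu, uint64_t *pos)
{
	struct xen_domctl domctl = {
		.cmd = XEN_DOMCTL_vmtrace_op,
		.interface_version = XEN_DOMCTL_INTERFACE_VERSION,
		.domain = domid,
		.u.vmtrace_op = {
			.cmd  = XEN_DOMCTL_vmtrace_output_position,
			.vcpu = vcpu,
		},
	};
	int rc;

	rc = do_domctl(&domctl);
	if (rc == 0)
		*pos = domctl.u.vmtrace_op.value;	/* filled by Xen */
	return (rc);
}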
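
The trace buffer itself is obtained through the new XENMEM_resource_vmtrace_buf resource type. The memory.h hunk also documents that XENMEM_acquire_resource treats nr_frames == 0 together with a NULL frame_list as a pure size query, and that nr_frames/frame/frame_list may be updated while the operation is continued. A hedged sketch of the resulting two-call pattern; it assumes a privileged caller with HYPERVISOR_memory_op() and set_xen_guest_handle() available (userspace normally goes through libxenforeignmemory), and it assumes the resource id selects the vCPU for the vmtrace buffer type.

/*
 * Sketch only: query the size of a vCPU's vmtrace buffer, then acquire
 * its frames.  Error handling is minimal and the mapping step that
 * normally follows is elided.
 */
#include <xen/interface/memory.h>

static int
acquire_vmtrace_frames(domid_t domid, uint32_t vcpu,
    xen_pfn_t *frames, uint32_t *nr_frames)
{
	struct xen_memory_acquire_resource xmar = {
		.domid = domid,
		.type  = XENMEM_resource_vmtrace_buf,
		.id    = vcpu,		/* assumption: id selects the vCPU */
		/* nr_frames == 0 and frame_list == NULL: ask for the size. */
	};
	int rc;

	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar);
	if (rc != 0)
		return (rc);
	if (xmar.nr_frames > *nr_frames)
		return (-1);		/* caller's frames[] is too small */
	*nr_frames = xmar.nr_frames;

	/* Second call: hand the frame numbers back via frame_list. */
	set_xen_guest_handle(xmar.frame_list, frames);
	return (HYPERVISOR_memory_op(XENMEM_acquire_resource, &xmar));
}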
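
On the device-model side, XEN_DMOP_nr_vcpus lets an emulator size its per-vCPU state (for instance the number of ioreq slots it maps via XENMEM_acquire_resource) instead of guessing. The sketch below assumes a HYPERVISOR_dm_op(domid, nr_bufs, bufs) wrapper is available to the caller; userspace device models would use libxendevicemodel, which wraps the same hypercall.

/*
 * Sketch only: queries the number of vCPU objects a domain was created
 * with.  HYPERVISOR_dm_op() is assumed, not provided by these headers.
 */
#include <xen/interface/hvm/dm_op.h>

static int
get_nr_vcpus(domid_t domid, uint32_t *nr_vcpus)
{
	struct xen_dm_op op = {
		.op = XEN_DMOP_nr_vcpus,
	};
	struct xen_dm_op_buf buf;
	int rc;

	set_xen_guest_handle(buf.h, &op);
	buf.size = sizeof(op);

	rc = HYPERVISOR_dm_op(domid, 1, &buf);
	if (rc == 0)
		*nr_vcpus = op.u.nr_vcpus.vcpus;
	return (rc);
}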
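
A note on the io/ring.h change: RING_COPY_REQUEST() is generalised into RING_COPY_(), and RING_COPY_RESPONSE() plus RING_RESPONSE_PROD_OVERFLOW() are added so a frontend can snapshot and sanity-check responses the same way a backend already does for requests. A minimal sketch of a frontend completion loop using the new macros; it assumes a ring instantiated elsewhere (blkif is used here purely as an example of DEFINE_RING_TYPES output) and a hypothetical process_response() handler, with the usual xen_rmb() barrier plumbing in place.

/*
 * Sketch only: "ring" is a blkif front ring attached elsewhere;
 * process_response() is a placeholder completion handler.
 */
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/ring.h>

static void
consume_responses(blkif_front_ring_t *ring)
{
	struct blkif_response rsp;
	RING_IDX cons, prod;
	int more;

	do {
		prod = ring->sring->rsp_prod;
		xen_rmb();	/* read rsp_prod before the responses themselves */

		/* Reject an impossible producer index from a misbehaving backend. */
		if (RING_RESPONSE_PROD_OVERFLOW(ring, prod))
			return;

		for (cons = ring->rsp_cons; cons != prod; cons++) {
			/* Copy, so the backend cannot change the response under us. */
			RING_COPY_RESPONSE(ring, cons, &rsp);
			process_response(&rsp);
		}
		ring->rsp_cons = cons;

		RING_FINAL_CHECK_FOR_RESPONSES(ring, more);
	} while (more != 0);
}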
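
Finally, the XENFEAT_not_direct_mapped/XENFEAT_direct_mapped pair lets a guest ask Xen directly whether its pages are 1:1 mapped, with the documented fallbacks for hypervisors that predate the flags. A sketch of the resulting decision logic, assuming the usual cached xen_feature() lookup and using xen_initial_domain() as a stand-in for the port's Dom0 test:

/*
 * Sketch only: mirrors the fallback rules documented in features.h for
 * hypervisors that advertise neither flag.
 */
#include <sys/types.h>
#include <xen/features.h>

static bool
xen_is_direct_mapped(void)
{
	if (xen_feature(XENFEAT_direct_mapped))
		return (true);
	if (xen_feature(XENFEAT_not_direct_mapped))
		return (false);

	/* Older Xen: fall back to the historical assumptions. */
#if defined(__i386__) || defined(__amd64__)
	return (!xen_feature(XENFEAT_auto_translated_physmap));
#else
	return (xen_initial_domain());	/* Arm: only Dom0 is 1:1 mapped */
#endif
}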