diff --git a/sys/arm/arm/identcpu-v6.c b/sys/arm/arm/identcpu-v6.c
index 1302d24daf02..6293a5ccaceb 100644
--- a/sys/arm/arm/identcpu-v6.c
+++ b/sys/arm/arm/identcpu-v6.c
@@ -1,380 +1,380 @@
/* $NetBSD: cpu.c,v 1.55 2004/02/13 11:36:10 wiz Exp $ */

/*-
 * Copyright (c) 1995 Mark Brinicombe.
 * Copyright (c) 1995 Brini.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpu.c
 *
 * Probing and configuration for the master CPU
 *
 * Created : 10/10/95
 */

#include
#include
#include
#include
#include
#include
#include
#include

char machine[] = "arm";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD | CTLFLAG_CAPRD,
    machine, 0, "Machine class");

static char cpu_model[64];
-SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD,
+SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_CAPRD,
    cpu_model, sizeof(cpu_model), "Machine model");

static char hw_buf[81];
static int hw_buf_idx;
static bool hw_buf_newline;

enum cpu_class cpu_class = CPU_CLASS_NONE;

static struct {
	int	implementer;
	int	part_number;
	char	*impl_name;
	char	*core_name;
	enum cpu_class cpu_class;
} cpu_names[] = {
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_ARM1176,    "ARM", "ARM1176",    CPU_CLASS_ARM11J},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A5,  "ARM", "Cortex-A5",  CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A7,  "ARM", "Cortex-A7",  CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A8,  "ARM", "Cortex-A8",  CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A9,  "ARM", "Cortex-A9",  CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A12, "ARM", "Cortex-A12", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A15, "ARM", "Cortex-A15", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A17, "ARM", "Cortex-A17", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A53, "ARM", "Cortex-A53", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A57, "ARM", "Cortex-A57", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A72, "ARM", "Cortex-A72", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_ARM, CPU_ARCH_CORTEX_A73, "ARM", "Cortex-A73", CPU_CLASS_CORTEXA},
	{CPU_IMPLEMENTER_MRVL, CPU_ARCH_SHEEVA_581, "Marvell", "PJ4 v7", CPU_CLASS_MARVELL},
	{CPU_IMPLEMENTER_MRVL, CPU_ARCH_SHEEVA_584, "Marvell", "PJ4MP v7", CPU_CLASS_MARVELL},
	{CPU_IMPLEMENTER_QCOM, CPU_ARCH_KRAIT_300, "Qualcomm", "Krait 300", CPU_CLASS_KRAIT},
};

static void
print_v5_cache(void)
{
	uint32_t isize, dsize;
	uint32_t multiplier;
	int pcache_type;
	int pcache_unified;
	int picache_size;
	int picache_line_size;
	int picache_ways;
	int pdcache_size;
	int pdcache_line_size;
	int pdcache_ways;

	pcache_unified = 0;
	picache_size = 0;
	picache_line_size = 0;
	picache_ways = 0;
	pdcache_size = 0;
	pdcache_line_size = 0;
	pdcache_ways = 0;

	if ((cpuinfo.ctr & CPU_CT_S) == 0)
		pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
	pcache_type = CPU_CT_CTYPE(cpuinfo.ctr);

	if (pcache_unified == 0) {
		isize = CPU_CT_ISIZE(cpuinfo.ctr);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				picache_line_size = 0; /* not present */
			else
				picache_ways = 1;
		} else {
			picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
		}
		picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(cpuinfo.ctr);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			pdcache_line_size = 0; /* not present */
		else
			pdcache_ways = 1;
	} else {
		pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
	}
	pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	/* Print cache info.
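The functional change in this hunk adds CTLFLAG_CAPRD to the hw.model sysctl, matching the hw.machine node above, so the value stays readable after a process enters Capsicum capability mode. A minimal userland sketch of what the flag enables; illustrative only, not part of the diff:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/capsicum.h>

#include <stdio.h>

int
main(void)
{
	char model[64];
	size_t len = sizeof(model);

	/* After cap_enter() only CTLFLAG_CAPRD-marked sysctls may be read. */
	if (cap_enter() != 0) {
		perror("cap_enter");
		return (1);
	}

	/* Succeeds with this change; rejected if the node lacks CTLFLAG_CAPRD. */
	if (sysctlbyname("hw.model", model, &len, NULL, 0) != 0) {
		perror("sysctlbyname(hw.model)");
		return (1);
	}
	printf("hw.model: %s\n", model);
	return (0);
}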
	 */
	if (picache_line_size == 0 && pdcache_line_size == 0)
		return;

	if (pcache_unified) {
		printf(" %dKB/%dB %d-way %s unified cache\n",
		    pdcache_size / 1024, pdcache_line_size, pdcache_ways,
		    pcache_type == 0 ? "WT" : "WB");
	} else {
		printf(" %dKB/%dB %d-way instruction cache\n",
		    picache_size / 1024, picache_line_size, picache_ways);
		printf(" %dKB/%dB %d-way %s data cache\n",
		    pdcache_size / 1024, pdcache_line_size, pdcache_ways,
		    pcache_type == 0 ? "WT" : "WB");
	}
}

static void
print_v7_cache(void)
{
	uint32_t type, val, size, sets, ways, linesize;
	int i;

	printf("LoUU:%d LoC:%d LoUIS:%d \n",
	    CPU_CLIDR_LOUU(cpuinfo.clidr) + 1,
	    CPU_CLIDR_LOC(cpuinfo.clidr) + 1,
	    CPU_CLIDR_LOUIS(cpuinfo.clidr) + 1);

	for (i = 0; i < 7; i++) {
		type = CPU_CLIDR_CTYPE(cpuinfo.clidr, i);
		if (type == 0)
			break;
		printf("Cache level %d:\n", i + 1);
		if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
		    type == CACHE_SEP_CACHE) {
			cp15_csselr_set(i << 1);
			val = cp15_ccsidr_get();
			ways = CPUV7_CT_xSIZE_ASSOC(val) + 1;
			sets = CPUV7_CT_xSIZE_SET(val) + 1;
			linesize = 1 << (CPUV7_CT_xSIZE_LEN(val) + 4);
			size = (ways * sets * linesize) / 1024;

			if (type == CACHE_UNI_CACHE)
				printf(" %dKB/%dB %d-way unified cache",
				    size, linesize, ways);
			else
				printf(" %dKB/%dB %d-way data cache",
				    size, linesize, ways);
			if (val & CPUV7_CT_CTYPE_WT)
				printf(" WT");
			if (val & CPUV7_CT_CTYPE_WB)
				printf(" WB");
			if (val & CPUV7_CT_CTYPE_RA)
				printf(" Read-Alloc");
			if (val & CPUV7_CT_CTYPE_WA)
				printf(" Write-Alloc");
			printf("\n");
		}

		if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
			cp15_csselr_set(i << 1 | 1);
			val = cp15_ccsidr_get();
			ways = CPUV7_CT_xSIZE_ASSOC(val) + 1;
			sets = CPUV7_CT_xSIZE_SET(val) + 1;
			linesize = 1 << (CPUV7_CT_xSIZE_LEN(val) + 4);
			size = (ways * sets * linesize) / 1024;

			printf(" %dKB/%dB %d-way instruction cache",
			    size, linesize, ways);
			if (val & CPUV7_CT_CTYPE_WT)
				printf(" WT");
			if (val & CPUV7_CT_CTYPE_WB)
				printf(" WB");
			if (val & CPUV7_CT_CTYPE_RA)
				printf(" Read-Alloc");
			if (val & CPUV7_CT_CTYPE_WA)
				printf(" Write-Alloc");
			printf("\n");
		}
	}
	cp15_csselr_set(0);
}

static void
add_cap(char *cap)
{
	int len;

	len = strlen(cap);

	if ((hw_buf_idx + len + 2) >= 79) {
		printf("%s,\n", hw_buf);
		hw_buf_idx = 0;
		hw_buf_newline = true;
	}
	if (hw_buf_newline)
		hw_buf_idx += sprintf(hw_buf + hw_buf_idx, " ");
	else
		hw_buf_idx += sprintf(hw_buf + hw_buf_idx, ", ");
	hw_buf_newline = false;

	hw_buf_idx += sprintf(hw_buf + hw_buf_idx, "%s", cap);
}

void
identify_arm_cpu(void)
{
	int i;
	u_int val;

	/*
	 * CPU
	 */
	for (i = 0; i < nitems(cpu_names); i++) {
		if (cpu_names[i].implementer == cpuinfo.implementer &&
		    cpu_names[i].part_number == cpuinfo.part_number) {
			cpu_class = cpu_names[i].cpu_class;
			snprintf(cpu_model, sizeof(cpu_model),
			    "%s %s r%dp%d (ECO: 0x%08X)",
			    cpu_names[i].impl_name, cpu_names[i].core_name,
			    cpuinfo.revision, cpuinfo.patch,
			    cpuinfo.midr != cpuinfo.revidr ?
			    cpuinfo.revidr : 0);
			printf("CPU: %s\n", cpu_model);
			break;
		}
	}
	if (i >= nitems(cpu_names))
		printf("unknown CPU (ID = 0x%x)\n", cpuinfo.midr);

	printf("CPU Features: \n");
	hw_buf_idx = 0;
	hw_buf_newline = true;

	val = (cpuinfo.mpidr >> 4) & 0xF;
	if (cpuinfo.mpidr & (1 << 31U))
		add_cap("Multiprocessing");
	val = (cpuinfo.id_pfr0 >> 4) & 0xF;
	if (val == 1)
		add_cap("Thumb");
	else if (val == 3)
		add_cap("Thumb2");
	val = (cpuinfo.id_pfr1 >> 4) & 0xF;
	if (val == 1 || val == 2)
		add_cap("Security");
	val = (cpuinfo.id_pfr1 >> 12) & 0xF;
	if (val == 1)
		add_cap("Virtualization");
	val = (cpuinfo.id_pfr1 >> 16) & 0xF;
	if (val == 1)
		add_cap("Generic Timer");
	val = (cpuinfo.id_mmfr0 >> 0) & 0xF;
	if (val == 2) {
		add_cap("VMSAv6");
	} else if (val >= 3) {
		add_cap("VMSAv7");
		if (val >= 4)
			add_cap("PXN");
		if (val >= 5)
			add_cap("LPAE");
	}
	val = (cpuinfo.id_mmfr3 >> 20) & 0xF;
	if (val == 1)
		add_cap("Coherent Walk");
	if (hw_buf_idx != 0)
		printf("%s\n", hw_buf);

	printf("Optional instructions: \n");
	hw_buf_idx = 0;
	hw_buf_newline = true;
	val = (cpuinfo.id_isar0 >> 24) & 0xF;
	if (val == 1)
		add_cap("SDIV/UDIV (Thumb)");
	else if (val == 2)
		add_cap("SDIV/UDIV");
	val = (cpuinfo.id_isar2 >> 20) & 0xF;
	if (val == 1 || val == 2)
		add_cap("UMULL");
	val = (cpuinfo.id_isar2 >> 16) & 0xF;
	if (val == 1 || val == 2 || val == 3)
		add_cap("SMULL");
	val = (cpuinfo.id_isar2 >> 12) & 0xF;
	if (val == 1)
		add_cap("MLA");
	val = (cpuinfo.id_isar3 >> 4) & 0xF;
	if (val == 1)
		add_cap("SIMD");
	else if (val == 3)
		add_cap("SIMD(ext)");
	if (hw_buf_idx != 0)
		printf("%s\n", hw_buf);

	/*
	 * Cache
	 */
	if (CPU_CT_FORMAT(cpuinfo.ctr) == CPU_CT_ARMV7)
		print_v7_cache();
	else
		print_v5_cache();
}
diff --git a/sys/arm64/arm64/identcpu.c b/sys/arm64/arm64/identcpu.c
index fced2ffab258..698a98e3da1d 100644
--- a/sys/arm64/arm64/identcpu.c
+++ b/sys/arm64/arm64/identcpu.c
@@ -1,2798 +1,2798 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
* */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(M_IDENTCPU, "CPU ID", "arm64 CPU identification memory"); struct cpu_desc; static void print_cpu_midr(struct sbuf *sb, u_int cpu); static void print_cpu_features(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc); static void print_cpu_caches(struct sbuf *sb, struct cpu_desc *desc); #ifdef COMPAT_FREEBSD32 static u_long parse_cpu_features_hwcap32(void); #endif char machine[] = "arm64"; #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static SYSCTL_NODE(_machdep, OID_AUTO, cache, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Cache management tuning"); static int allow_dic = 1; SYSCTL_INT(_machdep_cache, OID_AUTO, allow_dic, CTLFLAG_RDTUN, &allow_dic, 0, "Allow optimizations based on the DIC cache bit"); static int allow_idc = 1; SYSCTL_INT(_machdep_cache, OID_AUTO, allow_idc, CTLFLAG_RDTUN, &allow_idc, 0, "Allow optimizations based on the IDC cache bit"); static void check_cpu_regs(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc); /* * The default implementation of I-cache sync assumes we have an * aliasing cache until we know otherwise. */ void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t) = &arm64_aliasing_icache_sync_range; static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "arm"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); static char cpu_model[64]; -SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, +SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_CAPRD, cpu_model, sizeof(cpu_model), "Machine model"); #define MAX_CACHES 8 /* Maximum number of caches supported architecturally. */ /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. * * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64isar2; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; #ifdef NOTYET uint64_t id_aa64mmfr3; uint64_t id_aa64mmfr4; #endif uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; #ifdef NOTYET uint64_t id_aa64pfr2; #endif uint64_t id_aa64zfr0; uint64_t ctr; #ifdef COMPAT_FREEBSD32 uint64_t id_isar5; uint64_t mvfr0; uint64_t mvfr1; #endif uint64_t clidr; uint32_t ccsidr[MAX_CACHES][2]; /* 2 possible types. */ bool have_sve; }; static struct cpu_desc cpu_desc0; static struct cpu_desc *cpu_desc; static struct cpu_desc kern_cpu_desc; static struct cpu_desc user_cpu_desc; static struct cpu_desc * get_cpu_desc(u_int cpu) { /* The cpu_desc for CPU 0 is used before the allocator is ready. 
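The comment above documents which MPIDR_EL1 affinity fields __cpu_affinity[] relies on and refers to the CPU_AFFn accessor macros. A standalone sketch of that architectural field layout, using macros local to the example rather than the kernel's own definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: architectural MPIDR_EL1 affinity field positions. */
#define	EX_MPIDR_AFF0(x)	((uint32_t)(((x) >> 0) & 0xff))		/* core within cluster */
#define	EX_MPIDR_AFF1(x)	((uint32_t)(((x) >> 8) & 0xff))		/* cluster */
#define	EX_MPIDR_AFF2(x)	((uint32_t)(((x) >> 16) & 0xff))
#define	EX_MPIDR_AFF3(x)	((uint32_t)(((x) >> 32) & 0xff))

int
main(void)
{
	/* Hypothetical value: cluster 1, core 2. */
	uint64_t mpidr = 0x0000000000000102ULL;

	printf("Aff3.Aff2.Aff1.Aff0 = %u.%u.%u.%u\n",
	    EX_MPIDR_AFF3(mpidr), EX_MPIDR_AFF2(mpidr),
	    EX_MPIDR_AFF1(mpidr), EX_MPIDR_AFF0(mpidr));
	return (0);
}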
*/ if (cpu == 0) return (&cpu_desc0); MPASS(cpu_desc != NULL); return (&cpu_desc[cpu - 1]); } struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, NULL } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, NULL, NULL } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_AEM_V8, "AEMv8" }, { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A34, "Cortex-A34" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A65, "Cortex-A65" }, { CPU_PART_CORTEX_A65AE, "Cortex-A65AE" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, { CPU_PART_CORTEX_A76, "Cortex-A76" }, { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" }, { CPU_PART_CORTEX_A77, "Cortex-A77" }, { CPU_PART_CORTEX_A78, "Cortex-A78" }, { CPU_PART_CORTEX_A78C, "Cortex-A78C" }, { CPU_PART_CORTEX_A510, "Cortex-A510" }, { CPU_PART_CORTEX_A710, "Cortex-A710" }, { CPU_PART_CORTEX_A715, "Cortex-A715" }, { CPU_PART_CORTEX_X1, "Cortex-X1" }, { CPU_PART_CORTEX_X1C, "Cortex-X1C" }, { CPU_PART_CORTEX_X2, "Cortex-X2" }, { CPU_PART_CORTEX_X3, "Cortex-X3" }, { CPU_PART_NEOVERSE_E1, "Neoverse-E1" }, { CPU_PART_NEOVERSE_N1, "Neoverse-N1" }, { CPU_PART_NEOVERSE_N2, "Neoverse-N2" }, { CPU_PART_NEOVERSE_V1, "Neoverse-V1" }, { CPU_PART_NEOVERSE_V2, "Neoverse-V2" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { { CPU_PART_THUNDERX, "ThunderX" }, { CPU_PART_THUNDERX2, "ThunderX2" }, CPU_PART_NONE, }; /* APM / Ampere */ static const struct cpu_parts cpu_parts_apm[] = { { CPU_PART_EMAG8180, "eMAG 8180" }, CPU_PART_NONE, }; /* Qualcomm */ static const struct cpu_parts cpu_parts_qcom[] = { { CPU_PART_KRYO400_GOLD, "Kryo 400 Gold" }, { CPU_PART_KRYO400_SILVER, "Kryo 400 Silver" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
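These per-implementer tables are keyed by the part-number field of MIDR_EL1; the CPU-naming code (print_cpu_midr(), declared earlier) presumably matches the implementer first and then searches that implementer's parts list. A self-contained sketch of that kind of lookup, with a hypothetical cut-down table and MIDR field macros local to the example:

#include <stdint.h>
#include <stdio.h>

/* Architectural MIDR_EL1 layout; macros are local to this example. */
#define	EX_MIDR_IMPLEMENTER(x)	(((x) >> 24) & 0xff)
#define	EX_MIDR_PARTNUM(x)	(((x) >> 4) & 0xfff)

struct ex_part {
	uint32_t	id;
	const char	*name;
};

/* Hypothetical cut-down table in the style of cpu_parts_arm[]. */
static const struct ex_part ex_parts_arm[] = {
	{ 0xd03, "Cortex-A53" },
	{ 0xd07, "Cortex-A57" },
	{ 0, NULL },
};

int
main(void)
{
	uint32_t midr = 0x410fd034;	/* hypothetical MIDR_EL1 value */
	const struct ex_part *p;

	if (EX_MIDR_IMPLEMENTER(midr) == 0x41) {	/* 'A': ARM Ltd. */
		for (p = ex_parts_arm; p->name != NULL; p++)
			if (p->id == EX_MIDR_PARTNUM(midr))
				printf("CPU: ARM %s\n", p->name);
	}
	return (0);
}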
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_AMPERE, "Ampere", cpu_parts_none }, { CPU_IMPL_APPLE, "Apple", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_apm }, { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_FUJITSU, "Fujitsu", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_qcom }, CPU_IMPLEMENTER_NONE, }; #define MRS_TYPE_MASK 0xf #define MRS_INVALID 0 #define MRS_EXACT 1 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4)) #define MRS_EXACT_FIELD(x) ((x) >> 4) #define MRS_LOWER 2 struct mrs_field_value { uint64_t value; const char *desc; }; #define MRS_FIELD_VALUE(_value, _desc) \ { \ .value = (_value), \ .desc = (_desc), \ } #define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field) #define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s") #define MRS_FIELD_VALUE_END { .desc = NULL } struct mrs_field_hwcap { u_long *hwcap; uint64_t min; u_long hwcap_val; }; #define MRS_HWCAP(_hwcap, _val, _min) \ { \ .hwcap = (_hwcap), \ .hwcap_val = (_val), \ .min = (_min), \ } #define MRS_HWCAP_END { .hwcap = NULL } struct mrs_field { const char *name; const struct mrs_field_value *values; const struct mrs_field_hwcap *hwcaps; uint64_t mask; bool sign; u_int type; u_int shift; }; #define MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, _hwcap) \ { \ .name = #_name, \ .sign = (_sign), \ .type = (_type), \ .shift = _register ## _ ## _name ## _SHIFT, \ .mask = _register ## _ ## _name ## _MASK, \ .values = (_values), \ .hwcaps = (_hwcap), \ } #define MRS_FIELD(_register, _name, _sign, _type, _values) \ MRS_FIELD_HWCAP(_register, _name, _sign, _type, _values, NULL) #define MRS_FIELD_END { .type = MRS_INVALID, } /* ID_AA64AFR0_EL1 */ static const struct mrs_field id_aa64afr0_fields[] = { MRS_FIELD_END, }; /* ID_AA64AFR1_EL1 */ static const struct mrs_field id_aa64afr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64DFR0_EL1 */ static 
const struct mrs_field_value id_aa64dfr0_hpmn0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, HPMN0, CONSTR, DEFINED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_brbe[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, BRBE, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64DFR0_BRBE_EL3, "BRBE EL3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_mtpmu[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, MTPMU, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64DFR0_MTPMU_NONE_MT_RES0, "MTPMU res0"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracebuffer[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64DFR0, TraceBuffer, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracefilt[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceFilt_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceFilt_8_4, "Trace v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_doublelock[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DoubleLock_IMPL, "DoubleLock"), MRS_FIELD_VALUE(ID_AA64DFR0_DoubleLock_NONE, ""), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_pmsver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE, "SPE"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_1, "SPEv1p1"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_2, "SPEv1p2"), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_SPE_1_3, "SPEv1p3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_ctx_cmps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_wrps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_brps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_pmuver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3p1"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_4, "PMUv3p4"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_5, "PMUv3p5"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_7, "PMUv3p7"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_8, "PMUv3p8"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_tracever[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64dfr0_debugver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8p2"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_4, "Debugv8p4"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_8, "Debugv8p8"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64dfr0_fields[] = { MRS_FIELD(ID_AA64DFR0, HPMN0, false, MRS_EXACT, id_aa64dfr0_hpmn0), MRS_FIELD(ID_AA64DFR0, BRBE, false, MRS_EXACT, id_aa64dfr0_brbe), MRS_FIELD(ID_AA64DFR0, MTPMU, true, MRS_EXACT, id_aa64dfr0_mtpmu), MRS_FIELD(ID_AA64DFR0, TraceBuffer, false, MRS_EXACT, id_aa64dfr0_tracebuffer), MRS_FIELD(ID_AA64DFR0, TraceFilt, false, MRS_EXACT, id_aa64dfr0_tracefilt), MRS_FIELD(ID_AA64DFR0, DoubleLock, false, MRS_EXACT, id_aa64dfr0_doublelock), MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver), MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT, id_aa64dfr0_ctx_cmps), 
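Each mrs_field entry above names a register field (shift and mask), a list of printable values, and optionally a list of hwcap mappings; an mrs_field_hwcap entry pairs a minimum field value with the elf_hwcap/elf_hwcap2 bit to set. A simplified, self-contained model of how such a table can be consumed, ignoring the sign flag and the MRS_EXACT/MRS_LOWER handling; all names here are local to the example, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel structures; not the kernel definitions. */
struct ex_field_hwcap {
	unsigned long	*hwcap;		/* hwcap word to update */
	uint64_t	min;		/* minimum field value required */
	unsigned long	hwcap_val;	/* bit to set when the minimum is met */
};

static unsigned long ex_elf_hwcap;
#define	EX_HWCAP_ATOMICS	(1UL << 8)	/* made-up bit number */

int
main(void)
{
	/* Pretend ID_AA64ISAR0_EL1 reports Atomic (bits 23:20) == 2. */
	uint64_t reg = (uint64_t)2 << 20;
	uint64_t mask = (uint64_t)0xf << 20;
	unsigned int shift = 20;
	struct ex_field_hwcap caps[] = {
		{ &ex_elf_hwcap, 2, EX_HWCAP_ATOMICS },
		{ NULL, 0, 0 },
	};
	uint64_t field = (reg & mask) >> shift;
	struct ex_field_hwcap *c;

	for (c = caps; c->hwcap != NULL; c++)
		if (field >= c->min)
			*c->hwcap |= c->hwcap_val;

	printf("hwcap = %#lx\n", ex_elf_hwcap);
	return (0);
}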
MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_LOWER, id_aa64dfr0_wrps), MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps), MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver), MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT, id_aa64dfr0_tracever), MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6), id_aa64dfr0_debugver), MRS_FIELD_END, }; /* ID_AA64DFR1_EL1 */ static const struct mrs_field id_aa64dfr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64ISAR0_EL1 */ static const struct mrs_field_value id_aa64isar0_rndr[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_RNDR_IMPL, "RNG"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_rndr_caps[] = { MRS_HWCAP(&elf_hwcap2, HWCAP2_RNG, ID_AA64ISAR0_RNDR_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_tlb[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOS, "TLBI-OS"), MRS_FIELD_VALUE(ID_AA64ISAR0_TLB_TLBIOSR, "TLBI-OSR"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar0_ts[] = { MRS_FIELD_VALUE(ID_AA64ISAR0_TS_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_4, "CondM-8.4"), MRS_FIELD_VALUE(ID_AA64ISAR0_TS_CondM_8_5, "CondM-8.5"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_ts_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_FLAGM, ID_AA64ISAR0_TS_CondM_8_4), MRS_HWCAP(&elf_hwcap2, HWCAP2_FLAGM2, ID_AA64ISAR0_TS_CondM_8_5), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_fhm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, FHM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_fhm_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_ASIMDFHM, ID_AA64ISAR0_FHM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_dp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_dp_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_ASIMDDP, ID_AA64ISAR0_DP_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sm4_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SM4, ID_AA64ISAR0_SM4_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sm3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sm3_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SM3, ID_AA64ISAR0_SM3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha3_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SHA3, ID_AA64ISAR0_SHA3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_rdm_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_ASIMDRDM, ID_AA64ISAR0_RDM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_tme[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, TME, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar0_atomic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_atomic_caps[] = { 
MRS_HWCAP(&elf_hwcap, HWCAP_ATOMICS, ID_AA64ISAR0_Atomic_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_crc32_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_CRC32, ID_AA64ISAR0_CRC32_BASE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha2_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SHA2, ID_AA64ISAR0_SHA2_BASE), MRS_HWCAP(&elf_hwcap, HWCAP_SHA512, ID_AA64ISAR0_SHA2_512), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_sha1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_sha1_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SHA1, ID_AA64ISAR0_SHA1_BASE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar0_aes_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_AES, ID_AA64ISAR0_AES_BASE), MRS_HWCAP(&elf_hwcap, HWCAP_PMULL, ID_AA64ISAR0_AES_PMULL), MRS_HWCAP_END }; static const struct mrs_field id_aa64isar0_fields[] = { MRS_FIELD_HWCAP(ID_AA64ISAR0, RNDR, false, MRS_LOWER, id_aa64isar0_rndr, id_aa64isar0_rndr_caps), MRS_FIELD(ID_AA64ISAR0, TLB, false, MRS_EXACT, id_aa64isar0_tlb), MRS_FIELD_HWCAP(ID_AA64ISAR0, TS, false, MRS_LOWER, id_aa64isar0_ts, id_aa64isar0_ts_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, FHM, false, MRS_LOWER, id_aa64isar0_fhm, id_aa64isar0_fhm_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp, id_aa64isar0_dp_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4, id_aa64isar0_sm4_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3, id_aa64isar0_sm3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3, id_aa64isar0_sha3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm, id_aa64isar0_rdm_caps), MRS_FIELD(ID_AA64ISAR0, TME, false, MRS_EXACT, id_aa64isar0_tme), MRS_FIELD_HWCAP(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic, id_aa64isar0_atomic_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32, id_aa64isar0_crc32_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2, id_aa64isar0_sha2_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1, id_aa64isar0_sha1_caps), MRS_FIELD_HWCAP(ID_AA64ISAR0, AES, false, MRS_LOWER, id_aa64isar0_aes, id_aa64isar0_aes_caps), MRS_FIELD_END, }; /* ID_AA64ISAR1_EL1 */ static const struct mrs_field_value id_aa64isar1_ls64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LS64, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64ISAR1_LS64_V, "LS64v"), MRS_FIELD_VALUE(ID_AA64ISAR1_LS64_ACCDATA, "LS64+ACCDATA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_xs[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, XS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_i8mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, I8MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_i8mm_caps[] = { MRS_HWCAP(&elf_hwcap2, 
HWCAP2_I8MM, ID_AA64ISAR1_I8MM_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_dgh[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DGH, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_dgh_caps[] = { MRS_HWCAP(&elf_hwcap2, HWCAP2_DGH, ID_AA64ISAR1_DGH_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_bf16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, BF16, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64ISAR1_BF16_EBF, "EBF16"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_bf16_caps[] = { MRS_HWCAP(&elf_hwcap2, HWCAP2_BF16, ID_AA64ISAR1_BF16_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_specres[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_SPECRES_IMPL, "PredInv"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar1_sb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, SB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_sb_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SB, ID_AA64ISAR1_SB_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_frintts[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FRINTTS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_frintts_caps[] = { MRS_HWCAP(&elf_hwcap2, HWCAP2_FRINT, ID_AA64ISAR1_FRINTTS_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_gpi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_gpi_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPI_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_gpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_gpa_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPA_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_lrcpc[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"), MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_4, "RCPC-8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_lrcpc_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_LRCPC, ID_AA64ISAR1_LRCPC_RCPC_8_3), MRS_HWCAP(&elf_hwcap, HWCAP_ILRCPC, ID_AA64ISAR1_LRCPC_RCPC_8_4), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_fcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_fcma_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_FCMA, ID_AA64ISAR1_FCMA_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_jscvt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_jscvt_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_JSCVT, ID_AA64ISAR1_JSCVT_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_api[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_API_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_API_PAC, "API PAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_EPAC, "API EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_EPAC2, "Impl PAuth+EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_FPAC, "Impl PAuth+FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_API_FPAC_COMBINED, "Impl PAuth+FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_api_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACA, 
ID_AA64ISAR1_API_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_apa[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_APA_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_PAC, "APA PAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_EPAC, "APA EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_EPAC2, "APA EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_FPAC, "APA FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR1_APA_FPAC_COMBINED, "APA FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_apa_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_APA_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar1_dpb[] = { MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"), MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVADP, "DCCVADP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar1_dpb_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_DCPOP, ID_AA64ISAR1_DPB_DCCVAP), MRS_HWCAP(&elf_hwcap2, HWCAP2_DCPODP, ID_AA64ISAR1_DPB_DCCVADP), MRS_HWCAP_END }; static const struct mrs_field id_aa64isar1_fields[] = { MRS_FIELD(ID_AA64ISAR1, LS64, false, MRS_EXACT, id_aa64isar1_ls64), MRS_FIELD(ID_AA64ISAR1, XS, false, MRS_EXACT, id_aa64isar1_xs), MRS_FIELD_HWCAP(ID_AA64ISAR1, I8MM, false, MRS_LOWER, id_aa64isar1_i8mm, id_aa64isar1_i8mm_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, DGH, false, MRS_LOWER, id_aa64isar1_dgh, id_aa64isar1_dgh_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, BF16, false, MRS_LOWER, id_aa64isar1_bf16, id_aa64isar1_bf16_caps), MRS_FIELD(ID_AA64ISAR1, SPECRES, false, MRS_EXACT, id_aa64isar1_specres), MRS_FIELD_HWCAP(ID_AA64ISAR1, SB, false, MRS_LOWER, id_aa64isar1_sb, id_aa64isar1_sb_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER, id_aa64isar1_frintts, id_aa64isar1_frintts_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi, id_aa64isar1_gpi_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa, id_aa64isar1_gpa_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma, id_aa64isar1_fcma_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api, id_aa64isar1_api_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa, id_aa64isar1_apa_caps), MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb, id_aa64isar1_dpb_caps), MRS_FIELD_END, }; /* ID_AA64ISAR2_EL1 */ static const struct mrs_field_value id_aa64isar2_pac_frac[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, PAC_frac, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_bc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, BC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_mops[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, MOPS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_apa3[] = { MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_NONE, ""), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_PAC, "APA3 PAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_EPAC, "APA3 EPAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_EPAC2, "APA3 EPAC2"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_FPAC, "APA3 FPAC"), MRS_FIELD_VALUE(ID_AA64ISAR2_APA3_FPAC_COMBINED, "APA3 FPAC+Combined"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar2_apa3_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACA, 
ID_AA64ISAR2_APA3_PAC), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar2_gpa3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, GPA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64isar2_gpa3_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR2_GPA3_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64isar2_rpres[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, RPRES, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64isar2_wfxt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR2, WFxT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64isar2_fields[] = { MRS_FIELD(ID_AA64ISAR2, PAC_frac, false, MRS_EXACT, id_aa64isar2_pac_frac), MRS_FIELD(ID_AA64ISAR2, BC, false, MRS_EXACT, id_aa64isar2_bc), MRS_FIELD(ID_AA64ISAR2, MOPS, false, MRS_EXACT, id_aa64isar2_mops), MRS_FIELD_HWCAP(ID_AA64ISAR2, APA3, false, MRS_EXACT, id_aa64isar2_apa3, id_aa64isar2_apa3_caps), MRS_FIELD_HWCAP(ID_AA64ISAR2, GPA3, false, MRS_EXACT, id_aa64isar2_gpa3, id_aa64isar2_gpa3_caps), MRS_FIELD(ID_AA64ISAR2, RPRES, false, MRS_EXACT, id_aa64isar2_rpres), MRS_FIELD(ID_AA64ISAR2, WFxT, false, MRS_EXACT, id_aa64isar2_wfxt), MRS_FIELD_END, }; /* ID_AA64MMFR0_EL1 */ static const struct mrs_field_value id_aa64mmfr0_ecv[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, ECV, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_ECV_CNTHCTL, "ECV+CNTHCTL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_fgt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, FGT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_exs[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, ExS, ALL, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran4_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_TGran4, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_NONE, "No S2 TGran4"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_IMPL, "S2 TGran4"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_2_LPA2, "S2 TGran4+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran64_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_TGran64, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_NONE, "No S2 TGran64"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran64_2_IMPL, "S2 TGran64"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran16_2[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_TGran16, ""), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_NONE, "No S2 TGran16"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_IMPL, "S2 TGran16"), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_2_LPA2, "S2 TGran16+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran4_LPA2, "TGran4+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_tgran16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR0_TGran16_LPA2, "TGran16+LPA2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_bigendel0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_snsmem[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value 
id_aa64mmfr0_bigend[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_asidbits[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"), MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr0_parange[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr0_fields[] = { MRS_FIELD(ID_AA64MMFR0, ECV, false, MRS_EXACT, id_aa64mmfr0_ecv), MRS_FIELD(ID_AA64MMFR0, FGT, false, MRS_EXACT, id_aa64mmfr0_fgt), MRS_FIELD(ID_AA64MMFR0, ExS, false, MRS_EXACT, id_aa64mmfr0_exs), MRS_FIELD(ID_AA64MMFR0, TGran4_2, false, MRS_EXACT, id_aa64mmfr0_tgran4_2), MRS_FIELD(ID_AA64MMFR0, TGran64_2, false, MRS_EXACT, id_aa64mmfr0_tgran64_2), MRS_FIELD(ID_AA64MMFR0, TGran16_2, false, MRS_EXACT, id_aa64mmfr0_tgran16_2), MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4), MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT, id_aa64mmfr0_tgran64), MRS_FIELD(ID_AA64MMFR0, TGran16, false, MRS_EXACT, id_aa64mmfr0_tgran16), MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT, id_aa64mmfr0_bigendel0), MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem), MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend), MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT, id_aa64mmfr0_asidbits), MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT, id_aa64mmfr0_parange), MRS_FIELD_END, }; /* ID_AA64MMFR1_EL1 */ static const struct mrs_field_value id_aa64mmfr1_cmovw[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, CMOVW, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_tidcp1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, TIDCP1, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_ntlbpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, nTLBPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_afp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, AFP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hcx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, HCX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_ets[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, ETS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_twed[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, TWED, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_xnx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_specsei[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_pan[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"), MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_EPAN, "EPAN"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_lo[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL), 
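The MRS_EXACT and MRS_LOWER types used throughout these tables appear to control how per-CPU register values are merged into the single user-visible view (user_cpu_desc): MRS_LOWER fields keep the lowest value seen on any CPU, while MRS_EXACT fields are pinned to a fixed value. A rough sketch of the MRS_LOWER merge for one unsigned field, under that assumption; everything here is local to the example:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumption for illustration: an MRS_LOWER field in the user-visible
 * register ends up as the lowest value reported by any CPU.
 */
static uint64_t
merge_lower_field(uint64_t merged, uint64_t percpu, uint64_t mask,
    unsigned int shift)
{
	uint64_t a = (merged & mask) >> shift;
	uint64_t b = (percpu & mask) >> shift;
	uint64_t lo = (b < a) ? b : a;

	return ((merged & ~mask) | (lo << shift));
}

int
main(void)
{
	/*
	 * Hypothetical: CPU 0 reports ID_AA64PFR0.EL0 = 2 (AArch64+AArch32),
	 * CPU 1 reports 1 (AArch64 only); the merged view keeps 1.
	 */
	uint64_t cpu0 = 0x2, cpu1 = 0x1;
	uint64_t user = merge_lower_field(cpu0, cpu1, 0xf, 0);

	printf("merged EL0 field = %#jx\n", (uintmax_t)user);
	return (0);
}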
MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hpds[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_vh[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_vmidbits[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"), MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr1_hafdbs[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr1_fields[] = { MRS_FIELD(ID_AA64MMFR1, CMOVW, false, MRS_EXACT, id_aa64mmfr1_cmovw), MRS_FIELD(ID_AA64MMFR1, TIDCP1, false, MRS_EXACT, id_aa64mmfr1_tidcp1), MRS_FIELD(ID_AA64MMFR1, nTLBPA, false, MRS_EXACT, id_aa64mmfr1_ntlbpa), MRS_FIELD(ID_AA64MMFR1, AFP, false, MRS_EXACT, id_aa64mmfr1_afp), MRS_FIELD(ID_AA64MMFR1, HCX, false, MRS_EXACT, id_aa64mmfr1_hcx), MRS_FIELD(ID_AA64MMFR1, ETS, false, MRS_EXACT, id_aa64mmfr1_ets), MRS_FIELD(ID_AA64MMFR1, TWED, false, MRS_EXACT, id_aa64mmfr1_twed), MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx), MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT, id_aa64mmfr1_specsei), MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan), MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo), MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds), MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh), MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT, id_aa64mmfr1_vmidbits), MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs), MRS_FIELD_END, }; /* ID_AA64MMFR2_EL1 */ static const struct mrs_field_value id_aa64mmfr2_e0pd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, E0PD, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_evt[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_2, "EVT-8.2"), MRS_FIELD_VALUE(ID_AA64MMFR2_EVT_8_5, "EVT-8.5"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_bbm[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL0, ""), MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL1, "BBM level 1"), MRS_FIELD_VALUE(ID_AA64MMFR2_BBM_LEVEL2, "BBM level 2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ttl[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, TTL, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_fwb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, FWB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ids[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IDS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_at[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, AT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64mmfr2_at_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_USCAT, ID_AA64MMFR2_AT_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64mmfr2_st[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, ST, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_nv[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, 8_3), 
MRS_FIELD_VALUE(ID_AA64MMFR2_NV_8_4, "NV v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_ccidx[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"), MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_varange[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"), MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_iesb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_lsm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_uao[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr2_cnp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr2_fields[] = { MRS_FIELD(ID_AA64MMFR2, E0PD, false, MRS_EXACT, id_aa64mmfr2_e0pd), MRS_FIELD(ID_AA64MMFR2, EVT, false, MRS_EXACT, id_aa64mmfr2_evt), MRS_FIELD(ID_AA64MMFR2, BBM, false, MRS_EXACT, id_aa64mmfr2_bbm), MRS_FIELD(ID_AA64MMFR2, TTL, false, MRS_EXACT, id_aa64mmfr2_ttl), MRS_FIELD(ID_AA64MMFR2, FWB, false, MRS_EXACT, id_aa64mmfr2_fwb), MRS_FIELD(ID_AA64MMFR2, IDS, false, MRS_EXACT, id_aa64mmfr2_ids), MRS_FIELD_HWCAP(ID_AA64MMFR2, AT, false, MRS_LOWER, id_aa64mmfr2_at, id_aa64mmfr2_at_caps), MRS_FIELD(ID_AA64MMFR2, ST, false, MRS_EXACT, id_aa64mmfr2_st), MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv), MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx), MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT, id_aa64mmfr2_varange), MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb), MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm), MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao), MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp), MRS_FIELD_END, }; #ifdef NOTYET /* ID_AA64MMFR2_EL1 */ static const struct mrs_field_value id_aa64mmfr3_spec_fpacc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, Spec_FPACC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_mec[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, MEC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_sctlrx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, SCTLRX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64mmfr3_tcrx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR3, TCRX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64mmfr3_fields[] = { MRS_FIELD(ID_AA64MMFR3, Spec_FPACC, false, MRS_EXACT, id_aa64mmfr3_spec_fpacc), MRS_FIELD(ID_AA64MMFR3, MEC, false, MRS_EXACT, id_aa64mmfr3_mec), MRS_FIELD(ID_AA64MMFR3, SCTLRX, false, MRS_EXACT, id_aa64mmfr3_sctlrx), MRS_FIELD(ID_AA64MMFR3, TCRX, false, MRS_EXACT, id_aa64mmfr3_tcrx), MRS_FIELD_END, }; /* ID_AA64MMFR4_EL1 */ static const struct mrs_field id_aa64mmfr4_fields[] = { MRS_FIELD_END, }; #endif /* ID_AA64PFR0_EL1 */ static const struct mrs_field_value id_aa64pfr0_csv3[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_csv2[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""), 
MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "CSV2_2"), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_3, "CSV2_3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_rme[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, RME, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_dit[] = { MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_dit_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_DIT, ID_AA64PFR0_DIT_PSTATE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_amu[] = { MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"), MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1_1, "AMUv1p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_mpam[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_sel2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_sve[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL), MRS_FIELD_VALUE_END, }; #if 0 /* Enable when we add SVE support */ static const struct mrs_field_hwcap id_aa64pfr0_sve_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SVE, ID_AA64PFR0_SVE_IMPL), MRS_HWCAP_END }; #endif static const struct mrs_field_value id_aa64pfr0_ras[] = { MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_IMPL, "RAS"), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_8_4, "RAS v8.4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_gic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_EN, "GIC"), MRS_FIELD_VALUE(ID_AA64PFR0_GIC_CPUIF_4_1, "GIC 4.1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_advsimd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_advsimd_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_ASIMD, ID_AA64PFR0_AdvSIMD_IMPL), MRS_HWCAP(&elf_hwcap, HWCAP_ASIMDHP, ID_AA64PFR0_AdvSIMD_HP), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_fp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr0_fp_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_FP, ID_AA64PFR0_FP_IMPL), MRS_HWCAP(&elf_hwcap, HWCAP_FPHP, ID_AA64PFR0_FP_HP), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr0_el3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el1[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"), MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr0_el0[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"), MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64pfr0_fields[] = { 
MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3), MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2), MRS_FIELD(ID_AA64PFR0, RME, false, MRS_EXACT, id_aa64pfr0_rme), MRS_FIELD_HWCAP(ID_AA64PFR0, DIT, false, MRS_LOWER, id_aa64pfr0_dit, id_aa64pfr0_dit_caps), MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu), MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam), MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2), MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve), MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras), MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic), MRS_FIELD_HWCAP(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd, id_aa64pfr0_advsimd_caps), MRS_FIELD_HWCAP(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp, id_aa64pfr0_fp_caps), MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3), MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2), MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1), MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0), MRS_FIELD_END, }; /* ID_AA64PFR1_EL1 */ static const struct mrs_field_value id_aa64pfr1_nmi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, NMI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_csv2_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p1, "CSV2 p1"), MRS_FIELD_VALUE(ID_AA64PFR1_CSV2_frac_p2, "CSV2 p2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_rndr_trap[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR1, RNDR_trap, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_sme[] = { MRS_FIELD_VALUE(ID_AA64PFR1_SME_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_SME_SME, "SME"), MRS_FIELD_VALUE(ID_AA64PFR1_SME_SME2, "SME2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_mpam_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_MPAM_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_MPAM_frac_p1, "MPAM p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_ras_frac[] = { MRS_FIELD_VALUE(ID_AA64PFR1_RAS_frac_p0, ""), MRS_FIELD_VALUE(ID_AA64PFR1_RAS_frac_p1, "RAS p1"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_mte[] = { MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE, "MTE"), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE2, "MTE2"), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_MTE3, "MTE3"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64pfr1_ssbs[] = { MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_aa64pfr1_ssbs_caps[] = { MRS_HWCAP(&elf_hwcap, HWCAP_SSBS, ID_AA64PFR1_SSBS_PSTATE), MRS_HWCAP_END }; static const struct mrs_field_value id_aa64pfr1_bt[] = { MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"), MRS_FIELD_VALUE_END, }; #if 0 /* Enable when we add BTI support */ static const struct mrs_field_hwcap id_aa64pfr1_bt_caps[] = { MRS_HWCAP(&elf_hwcap2, HWCAP2_BTI, ID_AA64PFR1_BT_IMPL), MRS_HWCAP_END }; #endif static const struct mrs_field id_aa64pfr1_fields[] = { MRS_FIELD(ID_AA64PFR1, NMI, false, MRS_EXACT, id_aa64pfr1_nmi), MRS_FIELD(ID_AA64PFR1, CSV2_frac, false, MRS_EXACT, id_aa64pfr1_csv2_frac), MRS_FIELD(ID_AA64PFR1, RNDR_trap, false, MRS_EXACT, 
id_aa64pfr1_rndr_trap), MRS_FIELD(ID_AA64PFR1, SME, false, MRS_EXACT, id_aa64pfr1_sme), MRS_FIELD(ID_AA64PFR1, MPAM_frac, false, MRS_EXACT, id_aa64pfr1_mpam_frac), MRS_FIELD(ID_AA64PFR1, RAS_frac, false, MRS_EXACT, id_aa64pfr1_ras_frac), MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte), MRS_FIELD_HWCAP(ID_AA64PFR1, SSBS, false, MRS_LOWER, id_aa64pfr1_ssbs, id_aa64pfr1_ssbs_caps), MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt), MRS_FIELD_END, }; #ifdef NOTYET /* ID_AA64PFR2_EL1 */ static const struct mrs_field id_aa64pfr2_fields[] = { MRS_FIELD_END, }; #endif /* ID_AA64ZFR0_EL1 */ static const struct mrs_field_value id_aa64zfr0_f64mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, F64MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_f32mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, F32MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_i8mm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, I8MM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_bf16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BF16, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ZFR0_BF16_EBF, "BF16+EBF"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_bitperm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, BitPerm, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ZFR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ZFR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_aa64zfr0_svever[] = { MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE1, "SVE1"), MRS_FIELD_VALUE(ID_AA64ZFR0_SVEver_SVE2, "SVE2"), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_aa64zfr0_fields[] = { MRS_FIELD(ID_AA64ZFR0, F64MM, false, MRS_EXACT, id_aa64zfr0_f64mm), MRS_FIELD(ID_AA64ZFR0, F32MM, false, MRS_EXACT, id_aa64zfr0_f32mm), MRS_FIELD(ID_AA64ZFR0, I8MM, false, MRS_EXACT, id_aa64zfr0_i8mm), MRS_FIELD(ID_AA64ZFR0, SM4, false, MRS_EXACT, id_aa64zfr0_sm4), MRS_FIELD(ID_AA64ZFR0, SHA3, false, MRS_EXACT, id_aa64zfr0_sha3), MRS_FIELD(ID_AA64ZFR0, BF16, false, MRS_EXACT, id_aa64zfr0_bf16), MRS_FIELD(ID_AA64ZFR0, BitPerm, false, MRS_EXACT, id_aa64zfr0_bitperm), MRS_FIELD(ID_AA64ZFR0, AES, false, MRS_EXACT, id_aa64zfr0_aes), MRS_FIELD(ID_AA64ZFR0, SVEver, false, MRS_EXACT, id_aa64zfr0_svever), MRS_FIELD_END, }; #ifdef COMPAT_FREEBSD32 /* ID_ISAR5_EL1 */ static const struct mrs_field_value id_isar5_vcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, VCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_isar5_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value id_isar5_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, CRC32, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_crc32_caps[] = { MRS_HWCAP(&elf32_hwcap2, HWCAP32_2_CRC32, ID_ISAR5_CRC32_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SHA2, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_sha2_caps[] = { MRS_HWCAP(&elf32_hwcap2, HWCAP32_2_SHA2, 
ID_ISAR5_SHA2_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sha1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SHA1, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_sha1_caps[] = { MRS_HWCAP(&elf32_hwcap2, HWCAP32_2_SHA1, ID_ISAR5_SHA1_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, AES, NONE, BASE), MRS_FIELD_VALUE(ID_ISAR5_AES_VMULL, "AES+VMULL"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap id_isar5_aes_caps[] = { MRS_HWCAP(&elf32_hwcap2, HWCAP32_2_AES, ID_ISAR5_AES_BASE), MRS_HWCAP(&elf32_hwcap2, HWCAP32_2_PMULL, ID_ISAR5_AES_VMULL), MRS_HWCAP_END }; static const struct mrs_field_value id_isar5_sevl[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_ISAR5, SEVL, NOP, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field id_isar5_fields[] = { MRS_FIELD(ID_ISAR5, VCMA, false, MRS_LOWER, id_isar5_vcma), MRS_FIELD(ID_ISAR5, RDM, false, MRS_LOWER, id_isar5_rdm), MRS_FIELD_HWCAP(ID_ISAR5, CRC32, false, MRS_LOWER, id_isar5_crc32, id_isar5_crc32_caps), MRS_FIELD_HWCAP(ID_ISAR5, SHA2, false, MRS_LOWER, id_isar5_sha2, id_isar5_sha2_caps), MRS_FIELD_HWCAP(ID_ISAR5, SHA1, false, MRS_LOWER, id_isar5_sha1, id_isar5_sha1_caps), MRS_FIELD_HWCAP(ID_ISAR5, AES, false, MRS_LOWER, id_isar5_aes, id_isar5_aes_caps), MRS_FIELD(ID_ISAR5, SEVL, false, MRS_LOWER, id_isar5_sevl), MRS_FIELD_END, }; /* MVFR0 */ static const struct mrs_field_value mvfr0_fpround[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPRound, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpsqrt[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPSqrt, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpdivide[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPDivide, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fptrap[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR0, FPTrap, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_fpdp[] = { MRS_FIELD_VALUE(MVFR0_FPDP_NONE, ""), MRS_FIELD_VALUE(MVFR0_FPDP_VFP_v2, "DP VFPv2"), MRS_FIELD_VALUE(MVFR0_FPDP_VFP_v3_v4, "DP VFPv3+v4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr0_fpdp_caps[] = { MRS_HWCAP(&elf32_hwcap, HWCAP32_VFP, MVFR0_FPDP_VFP_v2), MRS_HWCAP(&elf32_hwcap, HWCAP32_VFPv3, MVFR0_FPDP_VFP_v3_v4), MRS_HWCAP_END }; static const struct mrs_field_value mvfr0_fpsp[] = { MRS_FIELD_VALUE(MVFR0_FPSP_NONE, ""), MRS_FIELD_VALUE(MVFR0_FPSP_VFP_v2, "SP VFPv2"), MRS_FIELD_VALUE(MVFR0_FPSP_VFP_v3_v4, "SP VFPv3+v4"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr0_simdreg[] = { MRS_FIELD_VALUE(MVFR0_SIMDReg_NONE, ""), MRS_FIELD_VALUE(MVFR0_SIMDReg_FP, "FP 16x64"), MRS_FIELD_VALUE(MVFR0_SIMDReg_AdvSIMD, "AdvSIMD"), MRS_FIELD_VALUE_END, }; static const struct mrs_field mvfr0_fields[] = { MRS_FIELD(MVFR0, FPRound, false, MRS_LOWER, mvfr0_fpround), MRS_FIELD(MVFR0, FPSqrt, false, MRS_LOWER, mvfr0_fpsqrt), MRS_FIELD(MVFR0, FPDivide, false, MRS_LOWER, mvfr0_fpdivide), MRS_FIELD(MVFR0, FPTrap, false, MRS_LOWER, mvfr0_fptrap), MRS_FIELD_HWCAP(MVFR0, FPDP, false, MRS_LOWER, mvfr0_fpdp, mvfr0_fpdp_caps), MRS_FIELD(MVFR0, FPSP, false, MRS_LOWER, mvfr0_fpsp), MRS_FIELD(MVFR0, SIMDReg, false, MRS_LOWER, mvfr0_simdreg), MRS_FIELD_END, }; /* MVFR1 */ static const struct mrs_field_value mvfr1_simdfmac[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDFMAC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr1_simdfmac_caps[] = { MRS_HWCAP(&elf32_hwcap, HWCAP32_VFPv4,
MVFR1_SIMDFMAC_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value mvfr1_fphp[] = { MRS_FIELD_VALUE(MVFR1_FPHP_NONE, ""), MRS_FIELD_VALUE(MVFR1_FPHP_CONV_SP, "FPHP SP Conv"), MRS_FIELD_VALUE(MVFR1_FPHP_CONV_DP, "FPHP DP Conv"), MRS_FIELD_VALUE(MVFR1_FPHP_ARITH, "FPHP Arith"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdhp[] = { MRS_FIELD_VALUE(MVFR1_SIMDHP_NONE, ""), MRS_FIELD_VALUE(MVFR1_SIMDHP_CONV_SP, "SIMDHP SP Conv"), MRS_FIELD_VALUE(MVFR1_SIMDHP_ARITH, "SIMDHP Arith"), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdsp[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDSP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdint[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDInt, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_simdls[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, SIMDLS, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_hwcap mvfr1_simdls_caps[] = { MRS_HWCAP(&elf32_hwcap, HWCAP32_VFPv4, MVFR1_SIMDFMAC_IMPL), MRS_HWCAP_END }; static const struct mrs_field_value mvfr1_fpdnan[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, FPDNaN, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field_value mvfr1_fpftz[] = { MRS_FIELD_VALUE_NONE_IMPL(MVFR1, FPFtZ, NONE, IMPL), MRS_FIELD_VALUE_END, }; static const struct mrs_field mvfr1_fields[] = { MRS_FIELD_HWCAP(MVFR1, SIMDFMAC, false, MRS_LOWER, mvfr1_simdfmac, mvfr1_simdfmac_caps), MRS_FIELD(MVFR1, FPHP, false, MRS_LOWER, mvfr1_fphp), MRS_FIELD(MVFR1, SIMDHP, false, MRS_LOWER, mvfr1_simdhp), MRS_FIELD(MVFR1, SIMDSP, false, MRS_LOWER, mvfr1_simdsp), MRS_FIELD(MVFR1, SIMDInt, false, MRS_LOWER, mvfr1_simdint), MRS_FIELD_HWCAP(MVFR1, SIMDLS, false, MRS_LOWER, mvfr1_simdls, mvfr1_simdls_caps), MRS_FIELD(MVFR1, FPDNaN, false, MRS_LOWER, mvfr1_fpdnan), MRS_FIELD(MVFR1, FPFtZ, false, MRS_LOWER, mvfr1_fpftz), MRS_FIELD_END, }; #endif /* COMPAT_FREEBSD32 */ struct mrs_user_reg { u_int reg; u_int CRm; u_int Op2; size_t offset; const struct mrs_field *fields; }; #define USER_REG(name, field_name) \ { \ .reg = name, \ .CRm = name##_CRm, \ .Op2 = name##_op2, \ .offset = __offsetof(struct cpu_desc, field_name), \ .fields = field_name##_fields, \ } static const struct mrs_user_reg user_regs[] = { USER_REG(ID_AA64AFR0_EL1, id_aa64afr0), USER_REG(ID_AA64AFR1_EL1, id_aa64afr1), USER_REG(ID_AA64DFR0_EL1, id_aa64dfr0), USER_REG(ID_AA64DFR1_EL1, id_aa64dfr1), USER_REG(ID_AA64ISAR0_EL1, id_aa64isar0), USER_REG(ID_AA64ISAR1_EL1, id_aa64isar1), USER_REG(ID_AA64ISAR2_EL1, id_aa64isar2), USER_REG(ID_AA64MMFR0_EL1, id_aa64mmfr0), USER_REG(ID_AA64MMFR1_EL1, id_aa64mmfr1), USER_REG(ID_AA64MMFR2_EL1, id_aa64mmfr2), #ifdef NOTYET USER_REG(ID_AA64MMFR3_EL1, id_aa64mmfr3), USER_REG(ID_AA64MMFR4_EL1, id_aa64mmfr4), #endif USER_REG(ID_AA64PFR0_EL1, id_aa64pfr0), USER_REG(ID_AA64PFR1_EL1, id_aa64pfr1), #ifdef NOTYET USER_REG(ID_AA64PFR2_EL1, id_aa64pfr2), #endif USER_REG(ID_AA64ZFR0_EL1, id_aa64zfr0), #ifdef COMPAT_FREEBSD32 USER_REG(ID_ISAR5_EL1, id_isar5), USER_REG(MVFR0_EL1, mvfr0), USER_REG(MVFR1_EL1, mvfr1), #endif /* COMPAT_FREEBSD32 */ }; #define CPU_DESC_FIELD(desc, idx) \ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset) static int user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame, uint32_t esr) { uint64_t value; int CRm, Op2, i, reg; if ((insn & MRS_MASK) != MRS_VALUE) return (0); /* * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}. * These are in the EL1 CPU identification space. 
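 * As a concrete decode (field values per the ARM ARM, shown here only for
 * illustration): "mrs x1, id_aa64pfr0_el1" faults from EL0 into this
 * handler with Op0 == 3, Op1 == 0, CRn == 0, CRm == 4, Op2 == 0 and
 * Rt == 1; the loop below matches CRm/Op2 against user_regs[] and writes
 * the sanitised user_cpu_desc value, not the raw register, into x1.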
* CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1. * CRm == {4-7} holds the ID_AA64 registers. * * For full details see the ARMv8 ARM (ARM DDI 0487C.a) * Table D9-2 System instruction encodings for non-Debug System * register accesses. */ if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0) return (0); CRm = mrs_CRm(insn); if (CRm > 7 || (CRm < 4 && CRm != 0)) return (0); Op2 = mrs_Op2(insn); value = 0; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) { value = CPU_DESC_FIELD(user_cpu_desc, i); break; } } if (CRm == 0) { switch (Op2) { case 0: value = READ_SPECIALREG(midr_el1); break; case 5: value = READ_SPECIALREG(mpidr_el1); break; case 6: value = READ_SPECIALREG(revidr_el1); break; default: return (0); } } /* * We will handle this instruction, move to the next so we * don't trap here again. */ frame->tf_elr += INSN_SIZE; reg = MRS_REGISTER(insn); /* If reg is 31 then write to xzr, i.e. do nothing */ if (reg == 31) return (1); if (reg < nitems(frame->tf_x)) frame->tf_x[reg] = value; else if (reg == 30) frame->tf_lr = value; return (1); } /* * Compares two field values that may be signed or unsigned. * Returns: * < 0 when a is less than b * = 0 when a equals b * > 0 when a is greater than b */ static int mrs_field_cmp(uint64_t a, uint64_t b, u_int shift, int width, bool sign) { uint64_t mask; KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__, width)); mask = (1ul << width) - 1; /* Move the field to the lower bits */ a = (a >> shift) & mask; b = (b >> shift) & mask; if (sign) { /* * The field is signed. Toggle the upper bit so the comparison * works on unsigned values as this makes positive numbers, * i.e. those with a 0 bit, larger than negative numbers, * i.e. those with a 1 bit, in an unsigned comparison. */ a ^= 1ul << (width - 1); b ^= 1ul << (width - 1); } return (a - b); } static uint64_t update_lower_register(uint64_t val, uint64_t new_val, u_int shift, int width, bool sign) { uint64_t mask; KASSERT(width > 0 && width < 64, ("%s: Invalid width %d", __func__, width)); /* * If the new value is less than the existing value update it. */ if (mrs_field_cmp(new_val, val, shift, width, sign) < 0) { mask = (1ul << width) - 1; val &= ~(mask << shift); val |= new_val & (mask << shift); } return (val); } bool extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val) { uint64_t value; int i; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { value = CPU_DESC_FIELD(user_cpu_desc, i); *val = value >> field_shift; return (true); } } return (false); } bool get_kernel_reg(u_int reg, uint64_t *val) { int i; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { *val = CPU_DESC_FIELD(kern_cpu_desc, i); return (true); } } return (false); } /* * Fetch the specified register's value, ensuring that individual field values * do not exceed those in the mask. 
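 * For example (illustrative numbers only): if the caller's mask carries 0x8
 * in the DebugVer nibble and the kernel-wide ID_AA64DFR0_EL1 value reports
 * DebugVer == 0x9, the returned field stays at 0x8; if the kernel instead
 * reports 0x6, the field is lowered to 0x6.  Each 4-bit field is compared
 * with mrs_field_cmp() using the signedness recorded in the field table, so
 * 0xf in a signed field still ranks as the lowest value.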
*/ bool get_kernel_reg_masked(u_int reg, uint64_t *valp, uint64_t mask) { const struct mrs_field *fields; uint64_t val; for (int i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { val = CPU_DESC_FIELD(kern_cpu_desc, i); fields = user_regs[i].fields; for (int j = 0; fields[j].type != 0; j++) { mask = update_lower_register(mask, val, fields[j].shift, 4, fields[j].sign); } *valp = mask; return (true); } } return (false); } void update_special_regs(u_int cpu) { struct cpu_desc *desc; const struct mrs_field *fields; uint64_t user_reg, kern_reg, value; int i, j; if (cpu == 0) { /* Create a user visible cpu description with safe values */ memset(&user_cpu_desc, 0, sizeof(user_cpu_desc)); /* Safe values for these registers */ user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE | ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64; user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8; } desc = get_cpu_desc(cpu); for (i = 0; i < nitems(user_regs); i++) { value = CPU_DESC_FIELD(*desc, i); if (cpu == 0) { kern_reg = value; user_reg = value; } else { kern_reg = CPU_DESC_FIELD(kern_cpu_desc, i); user_reg = CPU_DESC_FIELD(user_cpu_desc, i); } fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { switch (fields[j].type & MRS_TYPE_MASK) { case MRS_EXACT: user_reg &= ~(0xful << fields[j].shift); user_reg |= (uint64_t)MRS_EXACT_FIELD(fields[j].type) << fields[j].shift; break; case MRS_LOWER: user_reg = update_lower_register(user_reg, value, fields[j].shift, 4, fields[j].sign); break; default: panic("Invalid field type: %d", fields[j].type); } kern_reg = update_lower_register(kern_reg, value, fields[j].shift, 4, fields[j].sign); } CPU_DESC_FIELD(kern_cpu_desc, i) = kern_reg; CPU_DESC_FIELD(user_cpu_desc, i) = user_reg; } } void cpu_desc_init(void) { if (mp_ncpus == 1) return; /* * Allocate memory for the non-boot CPUs to store their registers. * As this is indexed by CPU ID we need to allocate space for CPUs * 1 to mp_maxid. Because of this mp_maxid is already the correct * number of elements. */ cpu_desc = mallocarray(mp_maxid, sizeof(*cpu_desc), M_IDENTCPU, M_ZERO | M_WAITOK); } /* HWCAP */ bool __read_frequently lse_supported = false; bool __read_frequently icache_aliasing = false; bool __read_frequently icache_vmid = false; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ /* * Find the values to export to userspace as AT_HWCAP and AT_HWCAP2. 
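 * For instance, the FP row of id_aa64pfr0_fields carries id_aa64pfr0_fp_caps,
 * so once the user-visible ID_AA64PFR0_EL1.FP field is at least
 * ID_AA64PFR0_FP_IMPL the loop below ORs HWCAP_FP into elf_hwcap, and
 * HWCAP_FPHP as well once the field reaches ID_AA64PFR0_FP_HP.  The
 * comparison goes through mrs_field_cmp(), so signed fields such as FP rank
 * the 0xf "not implemented" encoding below every implemented value.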
*/ static void parse_cpu_features(void) { const struct mrs_field_hwcap *hwcaps; const struct mrs_field *fields; uint64_t min, reg; int i, j, k; for (i = 0; i < nitems(user_regs); i++) { reg = CPU_DESC_FIELD(user_cpu_desc, i); fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { hwcaps = fields[j].hwcaps; if (hwcaps == NULL) continue; for (k = 0; hwcaps[k].hwcap != NULL; k++) { min = hwcaps[k].min; /* * If the field is greater than the minimum * value we can set the hwcap; */ if (mrs_field_cmp(reg, min, fields[j].shift, 4, fields[j].sign) >= 0) { *hwcaps[k].hwcap |= hwcaps[k].hwcap_val; } } } } } static void identify_cpu_sysinit(void *dummy __unused) { struct cpu_desc *desc, *prev_desc; int cpu; bool dic, idc; dic = (allow_dic != 0); idc = (allow_idc != 0); prev_desc = NULL; CPU_FOREACH(cpu) { desc = get_cpu_desc(cpu); if (cpu != 0) { check_cpu_regs(cpu, desc, prev_desc); update_special_regs(cpu); } if (CTR_DIC_VAL(desc->ctr) == 0) dic = false; if (CTR_IDC_VAL(desc->ctr) == 0) idc = false; prev_desc = desc; } /* Find the values to export to userspace as AT_HWCAP and AT_HWCAP2 */ parse_cpu_features(); #ifdef COMPAT_FREEBSD32 /* Set the default caps and any that need to check multiple fields */ elf32_hwcap |= parse_cpu_features_hwcap32(); #endif if (dic && idc) { arm64_icache_sync_range = &arm64_dic_idc_icache_sync_range; if (bootverbose) printf("Enabling DIC & IDC ICache sync\n"); } else if (idc) { arm64_icache_sync_range = &arm64_idc_aliasing_icache_sync_range; if (bootverbose) printf("Enabling IDC ICache sync\n"); } if ((elf_hwcap & HWCAP_ATOMICS) != 0) { lse_supported = true; if (bootverbose) printf("Enabling LSE atomics in the kernel\n"); } #ifdef LSE_ATOMICS if (!lse_supported) panic("CPU does not support LSE atomic instructions"); #endif install_undef_handler(true, user_mrs_handler); } SYSINIT(identify_cpu, SI_SUB_CPU, SI_ORDER_MIDDLE, identify_cpu_sysinit, NULL); static void cpu_features_sysinit(void *dummy __unused) { struct sbuf sb; struct cpu_desc *desc, *prev_desc; u_int cpu; prev_desc = NULL; CPU_FOREACH(cpu) { desc = get_cpu_desc(cpu); print_cpu_features(cpu, desc, prev_desc); prev_desc = desc; } /* Fill in cpu_model for the hw.model sysctl */ sbuf_new(&sb, cpu_model, sizeof(cpu_model), SBUF_FIXEDLEN); print_cpu_midr(&sb, 0); sbuf_finish(&sb); sbuf_delete(&sb); free(cpu_desc, M_IDENTCPU); } /* Log features before APs are released and start printing to the dmesg. 
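 * (SI_SUB_SMP - 1 orders this after identify_cpu_sysinit() at SI_SUB_CPU and
 * just before the SI_SUB_SMP hooks that release the application processors,
 * so the per-CPU feature lines come out as one uninterrupted block.  This is
 * also the point where the cpu_desc array from cpu_desc_init() is freed, so
 * later code must not rely on the secondary CPUs' saved ID registers.)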
*/ SYSINIT(cpu_features, SI_SUB_SMP - 1, SI_ORDER_ANY, cpu_features_sysinit, NULL); #ifdef COMPAT_FREEBSD32 static u_long parse_cpu_features_hwcap32(void) { u_long hwcap = HWCAP32_DEFAULT; if ((MVFR1_SIMDLS_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDLS_IMPL) && (MVFR1_SIMDInt_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDInt_IMPL) && (MVFR1_SIMDSP_VAL(user_cpu_desc.mvfr1) >= MVFR1_SIMDSP_IMPL)) hwcap |= HWCAP32_NEON; return (hwcap); } #endif /* COMPAT_FREEBSD32 */ static void print_ctr_fields(struct sbuf *sb, uint64_t reg, const void *arg __unused) { sbuf_printf(sb, "%u byte D-cacheline,", CTR_DLINE_SIZE(reg)); sbuf_printf(sb, "%u byte I-cacheline,", CTR_ILINE_SIZE(reg)); reg &= ~(CTR_DLINE_MASK | CTR_ILINE_MASK); switch(CTR_L1IP_VAL(reg)) { case CTR_L1IP_VPIPT: sbuf_printf(sb, "VPIPT"); break; case CTR_L1IP_AIVIVT: sbuf_printf(sb, "AIVIVT"); break; case CTR_L1IP_VIPT: sbuf_printf(sb, "VIPT"); break; case CTR_L1IP_PIPT: sbuf_printf(sb, "PIPT"); break; } sbuf_printf(sb, " ICache,"); reg &= ~CTR_L1IP_MASK; sbuf_printf(sb, "%d byte ERG,", CTR_ERG_SIZE(reg)); sbuf_printf(sb, "%d byte CWG", CTR_CWG_SIZE(reg)); reg &= ~(CTR_ERG_MASK | CTR_CWG_MASK); if (CTR_IDC_VAL(reg) != 0) sbuf_printf(sb, ",IDC"); if (CTR_DIC_VAL(reg) != 0) sbuf_printf(sb, ",DIC"); reg &= ~(CTR_IDC_MASK | CTR_DIC_MASK); reg &= ~CTR_RES1; if (reg != 0) sbuf_printf(sb, ",%lx", reg); } static void print_register(struct sbuf *sb, const char *reg_name, uint64_t reg, void (*print_fields)(struct sbuf *, uint64_t, const void *), const void *arg) { sbuf_printf(sb, "%29s = <", reg_name); print_fields(sb, reg, arg); sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } static void print_id_fields(struct sbuf *sb, uint64_t reg, const void *arg) { const struct mrs_field *fields = arg; const struct mrs_field_value *fv; int field, i, j, printed; #define SEP_STR ((printed++) == 0) ? 
"" : "," printed = 0; for (i = 0; fields[i].type != 0; i++) { fv = fields[i].values; /* TODO: Handle with an unknown message */ if (fv == NULL) continue; field = (reg & fields[i].mask) >> fields[i].shift; for (j = 0; fv[j].desc != NULL; j++) { if ((fv[j].value >> fields[i].shift) != field) continue; if (fv[j].desc[0] != '\0') sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc); break; } if (fv[j].desc == NULL) sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR, fields[i].name, field); reg &= ~(0xful << fields[i].shift); } if (reg != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, reg); #undef SEP_STR } static void print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg, const struct mrs_field *fields) { print_register(sb, reg_name, reg, print_id_fields, fields); } static void print_cpu_midr(struct sbuf *sb, u_int cpu) { const struct cpu_parts *cpu_partsp; const char *cpu_impl_name; const char *cpu_part_name; u_int midr; u_int impl_id; u_int part_id; midr = pcpu_find(cpu)->pc_midr; cpu_impl_name = NULL; cpu_partsp = NULL; impl_id = CPU_IMPL(midr); for (int i = 0; cpu_implementers[i].impl_name != NULL; i++) { if (impl_id == cpu_implementers[i].impl_id) { cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } /* Unknown implementer, so unknown part */ if (cpu_impl_name == NULL) { sbuf_printf(sb, "Unknown Implementer (midr: %08x)", midr); return; } KASSERT(cpu_partsp != NULL, ("%s: No parts table for implementer %s", __func__, cpu_impl_name)); cpu_part_name = NULL; part_id = CPU_PART(midr); for (int i = 0; cpu_partsp[i].part_name != NULL; i++) { if (part_id == cpu_partsp[i].part_id) { cpu_part_name = cpu_partsp[i].part_name; break; } } /* Known Implementer, Unknown part */ if (cpu_part_name == NULL) { sbuf_printf(sb, "%s Unknown CPU r%dp%d (midr: %08x)", cpu_impl_name, CPU_VAR(midr), CPU_REV(midr), midr); return; } sbuf_printf(sb, "%s %s r%dp%d", cpu_impl_name, cpu_part_name, CPU_VAR(midr), CPU_REV(midr)); } static void print_cpu_cache(struct cpu_desc *desc, struct sbuf *sb, uint64_t ccs, bool icache, bool unified) { size_t cache_size; size_t line_size; /* LineSize is Log2(S) - 4. */ line_size = 1 << ((ccs & CCSIDR_LineSize_MASK) + 4); /* * Calculate cache size (sets * ways * line size). There are different * formats depending on the FEAT_CCIDX bit in ID_AA64MMFR2 feature * register. */ if ((desc->id_aa64mmfr2 & ID_AA64MMFR2_CCIDX_64)) cache_size = (CCSIDR_NSETS_64(ccs) + 1) * (CCSIDR_ASSOC_64(ccs) + 1); else cache_size = (CCSIDR_NSETS(ccs) + 1) * (CCSIDR_ASSOC(ccs) + 1); cache_size *= line_size; sbuf_printf(sb, "%zuKB (%s)", cache_size / 1024, icache ? "instruction" : unified ? "unified" : "data"); } static void print_cpu_caches(struct sbuf *sb, struct cpu_desc *desc) { /* Print out each cache combination */ uint64_t clidr; int i = 1; clidr = desc->clidr; for (i = 0; (clidr & CLIDR_CTYPE_MASK) != 0; i++, clidr >>= 3) { int j = 0; int ctype_m = (clidr & CLIDR_CTYPE_MASK); sbuf_printf(sb, " L%d cache: ", i + 1); if ((clidr & CLIDR_CTYPE_IO)) { print_cpu_cache(desc, sb, desc->ccsidr[i][j++], true, false); /* If there's more, add to the line. 
*/ if ((ctype_m & ~CLIDR_CTYPE_IO) != 0) sbuf_printf(sb, ", "); } if ((ctype_m & ~CLIDR_CTYPE_IO) != 0) { print_cpu_cache(desc, sb, desc->ccsidr[i][j], false, (clidr & CLIDR_CTYPE_UNIFIED)); } sbuf_printf(sb, "\n"); } sbuf_finish(sb); printf("%s", sbuf_data(sb)); } static void print_cpu_features(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc) { struct sbuf *sb; sb = sbuf_new_auto(); sbuf_printf(sb, "CPU%3u: ", cpu); print_cpu_midr(sb, cpu); sbuf_cat(sb, " affinity:"); switch(cpu_aff_levels) { default: case 4: sbuf_printf(sb, " %2d", CPU_AFF3(desc->mpidr)); /* FALLTHROUGH */ case 3: sbuf_printf(sb, " %2d", CPU_AFF2(desc->mpidr)); /* FALLTHROUGH */ case 2: sbuf_printf(sb, " %2d", CPU_AFF1(desc->mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ sbuf_printf(sb, " %2d", CPU_AFF0(desc->mpidr)); break; } sbuf_finish(sb); printf("%s\n", sbuf_data(sb)); sbuf_clear(sb); /* * There is a hardware errata where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive the * store-exclusive may return the wrong status. A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. */ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); #define SHOULD_PRINT_REG(_reg) \ (prev_desc == NULL || desc->_reg != prev_desc->_reg) /* Cache Type Register */ if (SHOULD_PRINT_REG(ctr)) { print_register(sb, "Cache Type", desc->ctr, print_ctr_fields, NULL); } /* AArch64 Instruction Set Attribute Register 0 */ if (SHOULD_PRINT_REG(id_aa64isar0)) print_id_register(sb, "Instruction Set Attributes 0", desc->id_aa64isar0, id_aa64isar0_fields); /* AArch64 Instruction Set Attribute Register 1 */ if (SHOULD_PRINT_REG(id_aa64isar1)) print_id_register(sb, "Instruction Set Attributes 1", desc->id_aa64isar1, id_aa64isar1_fields); /* AArch64 Instruction Set Attribute Register 2 */ if (SHOULD_PRINT_REG(id_aa64isar2)) print_id_register(sb, "Instruction Set Attributes 2", desc->id_aa64isar2, id_aa64isar2_fields); /* AArch64 Processor Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64pfr0)) print_id_register(sb, "Processor Features 0", desc->id_aa64pfr0, id_aa64pfr0_fields); /* AArch64 Processor Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64pfr1)) print_id_register(sb, "Processor Features 1", desc->id_aa64pfr1, id_aa64pfr1_fields); #ifdef NOTYET /* AArch64 Processor Feature Register 2 */ if (SHOULD_PRINT_REG(id_aa64pfr2)) print_id_register(sb, "Processor Features 2", desc->id_aa64pfr2, id_aa64pfr2_fields); #endif /* AArch64 Memory Model Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64mmfr0)) print_id_register(sb, "Memory Model Features 0", desc->id_aa64mmfr0, id_aa64mmfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64mmfr1)) print_id_register(sb, "Memory Model Features 1", desc->id_aa64mmfr1, id_aa64mmfr1_fields); /* AArch64 Memory Model Feature Register 2 */ if (SHOULD_PRINT_REG(id_aa64mmfr2)) print_id_register(sb, "Memory Model Features 2", desc->id_aa64mmfr2, id_aa64mmfr2_fields); 
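	/*
	 * The SHOULD_PRINT_REG() checks above and below keep boot output
	 * short on homogeneous systems: a register is printed again for a
	 * later CPU only when its value differs from the previous CPU's.
	 * The resulting dmesg lines look roughly like the following
	 * (illustrative; the feature list and its order depend on the part
	 * and on the field tables above):
	 *
	 *	Instruction Set Attributes 0 = <AES+PMULL,SHA1,SHA2,CRC32>
	 */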
#ifdef NOTYET /* AArch64 Memory Model Feature Register 3 */ if (SHOULD_PRINT_REG(id_aa64mmfr3)) print_id_register(sb, "Memory Model Features 3", desc->id_aa64mmfr3, id_aa64mmfr3_fields); /* AArch64 Memory Model Feature Register 4 */ if (SHOULD_PRINT_REG(id_aa64mmfr4)) print_id_register(sb, "Memory Model Features 4", desc->id_aa64mmfr4, id_aa64mmfr4_fields); #endif /* AArch64 Debug Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64dfr0)) print_id_register(sb, "Debug Features 0", desc->id_aa64dfr0, id_aa64dfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64dfr1)) print_id_register(sb, "Debug Features 1", desc->id_aa64dfr1, id_aa64dfr1_fields); /* AArch64 Auxiliary Feature Register 0 */ if (SHOULD_PRINT_REG(id_aa64afr0)) print_id_register(sb, "Auxiliary Features 0", desc->id_aa64afr0, id_aa64afr0_fields); /* AArch64 Auxiliary Feature Register 1 */ if (SHOULD_PRINT_REG(id_aa64afr1)) print_id_register(sb, "Auxiliary Features 1", desc->id_aa64afr1, id_aa64afr1_fields); /* AArch64 SVE Feature Register 0 */ if (desc->have_sve) { if (SHOULD_PRINT_REG(id_aa64zfr0) || !prev_desc->have_sve) { print_id_register(sb, "SVE Features 0", desc->id_aa64zfr0, id_aa64zfr0_fields); } } #ifdef COMPAT_FREEBSD32 /* AArch32 Instruction Set Attribute Register 5 */ if (SHOULD_PRINT_REG(id_isar5)) print_id_register(sb, "AArch32 Instruction Set Attributes 5", desc->id_isar5, id_isar5_fields); /* AArch32 Media and VFP Feature Register 0 */ if (SHOULD_PRINT_REG(mvfr0)) print_id_register(sb, "AArch32 Media and VFP Features 0", desc->mvfr0, mvfr0_fields); /* AArch32 Media and VFP Feature Register 1 */ if (SHOULD_PRINT_REG(mvfr1)) print_id_register(sb, "AArch32 Media and VFP Features 1", desc->mvfr1, mvfr1_fields); #endif if (bootverbose) print_cpu_caches(sb, desc); sbuf_delete(sb); sb = NULL; #undef SHOULD_PRINT_REG #undef SEP_STR } void identify_cache(uint64_t ctr) { /* Identify the L1 cache type */ switch (CTR_L1IP_VAL(ctr)) { case CTR_L1IP_PIPT: break; case CTR_L1IP_VPIPT: icache_vmid = true; break; default: case CTR_L1IP_VIPT: icache_aliasing = true; break; } if (dcache_line_size == 0) { KASSERT(icache_line_size == 0, ("%s: i-cacheline size set: %ld", __func__, icache_line_size)); /* Get the D cache line size */ dcache_line_size = CTR_DLINE_SIZE(ctr); /* And the same for the I cache */ icache_line_size = CTR_ILINE_SIZE(ctr); idcache_line_size = MIN(dcache_line_size, icache_line_size); } if (dcache_line_size != CTR_DLINE_SIZE(ctr)) { printf("WARNING: D-cacheline size mismatch %ld != %d\n", dcache_line_size, CTR_DLINE_SIZE(ctr)); } if (icache_line_size != CTR_ILINE_SIZE(ctr)) { printf("WARNING: I-cacheline size mismatch %ld != %d\n", icache_line_size, CTR_ILINE_SIZE(ctr)); } } void identify_cpu(u_int cpu) { struct cpu_desc *desc; uint64_t clidr; desc = get_cpu_desc(cpu); /* Save affinity for current CPU */ desc->mpidr = get_mpidr(); CPU_AFFINITY(cpu) = desc->mpidr & CPU_AFF_MASK; desc->ctr = READ_SPECIALREG(ctr_el0); desc->id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1); desc->id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1); desc->id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1); desc->id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1); desc->id_aa64isar2 = READ_SPECIALREG(id_aa64isar2_el1); desc->id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1); desc->id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); desc->id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1); #ifdef NOTYET desc->id_aa64mmfr3 = READ_SPECIALREG(id_aa64mmfr3_el1); desc->id_aa64mmfr4 = READ_SPECIALREG(id_aa64mmfr4_el1); #endif 
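	/*
	 * (A worked example for identify_cache() above, with illustrative
	 * CTR_EL0 values: DminLine == 4 encodes 2^4 == 16 words, i.e.
	 * 64-byte D-cache lines, and IminLine == 4 likewise gives 64-byte
	 * I-cache lines, so idcache_line_size ends up as 64.  The sizes are
	 * latched by the first CPU to get there; a later CPU reporting
	 * different line sizes only triggers the WARNING printfs, it does
	 * not shrink the recorded values.)
	 */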
desc->id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1); desc->id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1); #ifdef NOTYET desc->id_aa64pfr2 = READ_SPECIALREG(id_aa64pfr2_el1); #endif /* * ID_AA64ZFR0_EL1 is only valid when at least one of: * - ID_AA64PFR0_EL1.SVE is non-zero * - ID_AA64PFR1_EL1.SME is non-zero * In other cases it is zero, but still safe to read */ desc->have_sve = (ID_AA64PFR0_SVE_VAL(desc->id_aa64pfr0) != 0); desc->id_aa64zfr0 = READ_SPECIALREG(ID_AA64ZFR0_EL1_REG); desc->clidr = READ_SPECIALREG(clidr_el1); clidr = desc->clidr; for (int i = 0; (clidr & CLIDR_CTYPE_MASK) != 0; i++, clidr >>= 3) { int j = 0; if ((clidr & CLIDR_CTYPE_IO)) { WRITE_SPECIALREG(csselr_el1, CSSELR_Level(i) | CSSELR_InD); desc->ccsidr[i][j++] = READ_SPECIALREG(ccsidr_el1); } if ((clidr & ~CLIDR_CTYPE_IO) == 0) continue; WRITE_SPECIALREG(csselr_el1, CSSELR_Level(i)); desc->ccsidr[i][j] = READ_SPECIALREG(ccsidr_el1); } #ifdef COMPAT_FREEBSD32 /* Only read aarch32 SRs if EL0-32 is available */ if (ID_AA64PFR0_EL0_VAL(desc->id_aa64pfr0) == ID_AA64PFR0_EL0_64_32) { desc->id_isar5 = READ_SPECIALREG(id_isar5_el1); desc->mvfr0 = READ_SPECIALREG(mvfr0_el1); desc->mvfr1 = READ_SPECIALREG(mvfr1_el1); } #endif } static void check_cpu_regs(u_int cpu, struct cpu_desc *desc, struct cpu_desc *prev_desc) { switch (cpu_aff_levels) { case 0: if (CPU_AFF0(desc->mpidr) != CPU_AFF0(prev_desc->mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(desc->mpidr) != CPU_AFF1(prev_desc->mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(desc->mpidr) != CPU_AFF2(prev_desc->mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(desc->mpidr) != CPU_AFF3(prev_desc->mpidr)) cpu_aff_levels = 4; break; } if (desc->ctr != prev_desc->ctr) { /* * If the cache type register is different we may * have a different l1 cache type. */ identify_cache(desc->ctr); } } diff --git a/sys/powerpc/powerpc/cpu.c b/sys/powerpc/powerpc/cpu.c index 518623af9de4..19907e3973e7 100644 --- a/sys/powerpc/powerpc/cpu.c +++ b/sys/powerpc/powerpc/cpu.c @@ -1,846 +1,846 @@ /*- * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause * * Copyright (c) 2001 Matt Thomas. * Copyright (c) 2001 Tsubai Masanari. * Copyright (c) 1998, 1999, 2001 Internet Research Institute, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by * Internet Research Institute, Inc. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2003 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * from $NetBSD: cpu_subr.c,v 1.1 2003/02/03 17:10:09 matt Exp $ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void cpu_6xx_setup(int cpuid, uint16_t vers); static void cpu_970_setup(int cpuid, uint16_t vers); static void cpu_booke_setup(int cpuid, uint16_t vers); static void cpu_powerx_setup(int cpuid, uint16_t vers); int powerpc_pow_enabled; void (*cpu_idle_hook)(sbintime_t) = NULL; static void cpu_idle_60x(sbintime_t); static void cpu_idle_booke(sbintime_t); #ifdef BOOKE_E500 static void cpu_idle_e500mc(sbintime_t sbt); #endif #if defined(__powerpc64__) && defined(AIM) static void cpu_idle_powerx(sbintime_t); static void cpu_idle_power9(sbintime_t); #endif struct cputab { const char *name; uint16_t version; uint16_t revfmt; int features; /* Do not include PPC_FEATURE_32 or * PPC_FEATURE_HAS_MMU */ int features2; void (*cpu_setup)(int cpuid, uint16_t vers); }; #define REVFMT_MAJMIN 1 /* %u.%u */ #define REVFMT_HEX 2 /* 0x%04x */ #define REVFMT_DEC 3 /* %u */ static const struct cputab models[] = { { "Motorola PowerPC 601", MPC601, REVFMT_DEC, PPC_FEATURE_HAS_FPU | PPC_FEATURE_UNIFIED_CACHE, 0, cpu_6xx_setup }, { "Motorola PowerPC 602", MPC602, REVFMT_DEC, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603", MPC603, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603e", MPC603e, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 603ev", MPC603ev, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 604", MPC604, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 604ev", MPC604ev, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 620", MPC620, REVFMT_HEX, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU, 0, NULL }, { "Motorola PowerPC 750", MPC750, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "IBM PowerPC 750FX", IBM750FX, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "IBM PowerPC 970", IBM970, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970FX", IBM970FX, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970GX", IBM970GX, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM PowerPC 970MP", IBM970MP, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_970_setup }, { "IBM POWER4", IBMPOWER4, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4, 0, NULL }, { "IBM POWER4+", IBMPOWER4PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4, 0, NULL }, { "IBM POWER5", IBMPOWER5, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER4 | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP, 0, NULL }, { "IBM POWER5+", IBMPOWER5PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_POWER5_PLUS | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP, 0, NULL }, { "IBM POWER6", IBMPOWER6, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_TRUE_LE, 0, NULL }, { "IBM POWER7", IBMPOWER7, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | 
PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_DSCR, NULL }, { "IBM POWER7+", IBMPOWER7PLUS, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX, PPC_FEATURE2_DSCR, NULL }, { "IBM POWER8E", IBMPOWER8E, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER8NVL", IBMPOWER8NVL, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER8", IBMPOWER8, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO, cpu_powerx_setup }, { "IBM POWER9", IBMPOWER9, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06 | PPC_FEATURE_HAS_VSX | PPC_FEATURE_TRUE_LE, PPC_FEATURE2_ARCH_2_07 | PPC_FEATURE2_DSCR | PPC_FEATURE2_EBB | PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | PPC_FEATURE2_HAS_VEC_CRYPTO | PPC_FEATURE2_ARCH_3_00 | PPC_FEATURE2_HAS_IEEE128 | PPC_FEATURE2_DARN, cpu_powerx_setup }, { "Motorola PowerPC 7400", MPC7400, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7410", MPC7410, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7450", MPC7450, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7455", MPC7455, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7457", MPC7457, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7447A", MPC7447A, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 7448", MPC7448, REVFMT_MAJMIN, PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 8240", MPC8240, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Motorola PowerPC 8245", MPC8245, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU, 0, cpu_6xx_setup }, { "Freescale e500v1 core", FSL_E500v1, REVFMT_MAJMIN, PPC_FEATURE_HAS_SPE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_BOOKE, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e500v2 core", FSL_E500v2, REVFMT_MAJMIN, PPC_FEATURE_HAS_SPE | PPC_FEATURE_BOOKE | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e500mc core", FSL_E500mc, REVFMT_MAJMIN, PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e5500 core", FSL_E5500, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE 
| PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "Freescale e6500 core", FSL_E6500, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_BOOKE | PPC_FEATURE_ARCH_2_05 | PPC_FEATURE_ARCH_2_06, PPC_FEATURE2_ISEL, cpu_booke_setup }, { "IBM Cell Broadband Engine", IBMCELLBE, REVFMT_MAJMIN, PPC_FEATURE_64 | PPC_FEATURE_HAS_ALTIVEC | PPC_FEATURE_HAS_FPU | PPC_FEATURE_CELL | PPC_FEATURE_SMT, 0, NULL}, { "Unknown PowerPC CPU", 0, REVFMT_HEX, 0, 0, NULL }, }; static void cpu_6xx_print_cacheinfo(u_int, uint16_t); static int cpu_feature_bit(SYSCTL_HANDLER_ARGS); static char model[64]; -SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, ""); +SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_CAPRD, model, 0, ""); static const struct cputab *cput; u_long cpu_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU; u_long cpu_features2 = 0; SYSCTL_OPAQUE(_hw, OID_AUTO, cpu_features, CTLFLAG_RD, &cpu_features, sizeof(cpu_features), "LX", "PowerPC CPU features"); SYSCTL_OPAQUE(_hw, OID_AUTO, cpu_features2, CTLFLAG_RD, &cpu_features2, sizeof(cpu_features2), "LX", "PowerPC CPU features 2"); #ifdef __powerpc64__ register_t lpcr = LPCR_LPES; #endif /* Provide some user-friendly aliases for bits in cpu_features */ SYSCTL_PROC(_hw, OID_AUTO, floatingpoint, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, PPC_FEATURE_HAS_FPU, cpu_feature_bit, "I", "Floating point instructions executed in hardware"); SYSCTL_PROC(_hw, OID_AUTO, altivec, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, 0, PPC_FEATURE_HAS_ALTIVEC, cpu_feature_bit, "I", "CPU supports Altivec"); /* * Phase 1 (early) CPU setup. Setup the cpu_features/cpu_features2 variables, * so they can be used during platform and MMU bringup. */ void cpu_feature_setup(void) { u_int pvr; uint16_t vers; const struct cputab *cp; pvr = mfpvr(); vers = pvr >> 16; for (cp = models; cp->version != 0; cp++) { if (cp->version == vers) break; } cput = cp; cpu_features |= cp->features; cpu_features2 |= cp->features2; } void cpu_setup(u_int cpuid) { uint64_t cps; const char *name; u_int maj, min, pvr; uint16_t rev, revfmt, vers; pvr = mfpvr(); vers = pvr >> 16; rev = pvr; switch (vers) { case MPC7410: min = (pvr >> 0) & 0xff; maj = min <= 4 ? 1 : 2; break; case FSL_E500v1: case FSL_E500v2: case FSL_E500mc: case FSL_E5500: maj = (pvr >> 4) & 0xf; min = (pvr >> 0) & 0xf; break; default: maj = (pvr >> 8) & 0xf; min = (pvr >> 0) & 0xf; } revfmt = cput->revfmt; name = cput->name; if (rev == MPC750 && pvr == 15) { name = "Motorola MPC755"; revfmt = REVFMT_HEX; } strncpy(model, name, sizeof(model) - 1); printf("cpu%d: %s revision ", cpuid, name); switch (revfmt) { case REVFMT_MAJMIN: printf("%u.%u", maj, min); break; case REVFMT_HEX: printf("0x%04x", rev); break; case REVFMT_DEC: printf("%u", rev); break; } if (cpu_est_clockrate(0, &cps) == 0) printf(", %jd.%02jd MHz", cps / 1000000, (cps / 10000) % 100); printf("\n"); printf("cpu%d: Features %b\n", cpuid, (int)cpu_features, PPC_FEATURE_BITMASK); if (cpu_features2 != 0) printf("cpu%d: Features2 %b\n", cpuid, (int)cpu_features2, PPC_FEATURE2_BITMASK); /* * Configure CPU */ if (cput->cpu_setup != NULL) cput->cpu_setup(cpuid, vers); } /* Get current clock frequency for the given cpu id. 
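 * For the 750/74xx and 970 families the rate is measured directly (a sketch
 * of the technique used below): freeze the counters via MMCR0[FC], program
 * PMC1 to count processor cycles, busy-wait 1000us with DELAY(), then scale
 * the count by 1000 to get cycles per second; the +4999 compensates for
 * truncation when the caller later divides by 10000 to print hundredths of
 * a MHz.  Everything else falls back to the "ibm,extended-clock-frequency"
 * or "clock-frequency" property of the cpu node under /cpus in the OF tree.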
*/ int cpu_est_clockrate(int cpu_id, uint64_t *cps) { uint16_t vers; register_t msr; phandle_t cpu, dev, root; uint32_t freq32; int res = 0; char buf[8]; vers = mfpvr() >> 16; msr = mfmsr(); mtmsr(msr & ~PSL_EE); switch (vers) { case MPC7450: case MPC7455: case MPC7457: case MPC750: case IBM750FX: case MPC7400: case MPC7410: case MPC7447A: case MPC7448: mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC); mtspr(SPR_PMC1_74XX, 0); mtspr(SPR_MMCR0_74XX, SPR_MMCR0_74XX_PMC1SEL(PMCN_CYCLES)); DELAY(1000); *cps = (mfspr(SPR_PMC1_74XX) * 1000) + 4999; mtspr(SPR_MMCR0_74XX, SPR_MMCR0_FC); mtmsr(msr); return (0); case IBM970: case IBM970FX: case IBM970MP: isync(); mtspr(SPR_MMCR0, SPR_MMCR0_FC); isync(); mtspr(SPR_MMCR1, 0); mtspr(SPR_MMCRA, 0); mtspr(SPR_PMC1, 0); mtspr(SPR_MMCR0, SPR_MMCR0_PMC1SEL(PMC970N_CYCLES)); isync(); DELAY(1000); powerpc_sync(); mtspr(SPR_MMCR0, SPR_MMCR0_FC); *cps = (mfspr(SPR_PMC1) * 1000) + 4999; mtmsr(msr); return (0); default: root = OF_peer(0); if (root == 0) return (ENXIO); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } cpu = OF_child(dev); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); if (OF_getprop(cpu, "ibm,extended-clock-frequency", cps, sizeof(*cps)) >= 0) { *cps = be64toh(*cps); return (0); } else if (OF_getencprop(cpu, "clock-frequency", &freq32, sizeof(freq32)) >= 0) { *cps = freq32; return (0); } else { return (ENOENT); } } } void cpu_6xx_setup(int cpuid, uint16_t vers) { register_t hid0, pvr; const char *bitmask; hid0 = mfspr(SPR_HID0); pvr = mfpvr(); /* * Configure power-saving mode. */ switch (vers) { case MPC603: case MPC603e: case MPC603ev: case MPC604ev: case MPC750: case IBM750FX: case MPC7400: case MPC7410: case MPC8240: case MPC8245: /* Select DOZE mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_DOZE | HID0_DPM; powerpc_pow_enabled = 1; break; case MPC7448: case MPC7447A: case MPC7457: case MPC7455: case MPC7450: /* Enable the 7450 branch caches */ hid0 |= HID0_SGE | HID0_BTIC; hid0 |= HID0_LRSTK | HID0_FOLD | HID0_BHT; /* Disable BTIC on 7450 Rev 2.0 or earlier and on 7457 */ if (((pvr >> 16) == MPC7450 && (pvr & 0xFFFF) <= 0x0200) || (pvr >> 16) == MPC7457) hid0 &= ~HID0_BTIC; /* Select NAP mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_NAP | HID0_DPM; powerpc_pow_enabled = 1; break; default: /* No power-saving mode is available. */ ; } switch (vers) { case IBM750FX: case MPC750: hid0 &= ~HID0_DBP; /* XXX correct? */ hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; break; case MPC7400: case MPC7410: hid0 &= ~HID0_SPD; hid0 |= HID0_EMCP | HID0_BTIC | HID0_SGE | HID0_BHT; hid0 |= HID0_EIEC; break; } mtspr(SPR_HID0, hid0); if (bootverbose) cpu_6xx_print_cacheinfo(cpuid, vers); switch (vers) { case MPC7447A: case MPC7448: case MPC7450: case MPC7455: case MPC7457: bitmask = HID0_7450_BITMASK; break; default: bitmask = HID0_BITMASK; break; } printf("cpu%d: HID0 %b\n", cpuid, (int)hid0, bitmask); if (cpu_idle_hook == NULL) cpu_idle_hook = cpu_idle_60x; } static void cpu_6xx_print_cacheinfo(u_int cpuid, uint16_t vers) { register_t hid; hid = mfspr(SPR_HID0); printf("cpu%u: ", cpuid); printf("L1 I-cache %sabled, ", (hid & HID0_ICE) ? "en" : "dis"); printf("L1 D-cache %sabled\n", (hid & HID0_DCE) ? 
"en" : "dis"); printf("cpu%u: ", cpuid); if (mfspr(SPR_L2CR) & L2CR_L2E) { switch (vers) { case MPC7450: case MPC7455: case MPC7457: printf("256KB L2 cache, "); if (mfspr(SPR_L3CR) & L3CR_L3E) printf("%cMB L3 backside cache", mfspr(SPR_L3CR) & L3CR_L3SIZ ? '2' : '1'); else printf("L3 cache disabled"); printf("\n"); break; case IBM750FX: printf("512KB L2 cache\n"); break; default: switch (mfspr(SPR_L2CR) & L2CR_L2SIZ) { case L2SIZ_256K: printf("256KB "); break; case L2SIZ_512K: printf("512KB "); break; case L2SIZ_1M: printf("1MB "); break; } printf("write-%s", (mfspr(SPR_L2CR) & L2CR_L2WT) ? "through" : "back"); if (mfspr(SPR_L2CR) & L2CR_L2PE) printf(", with parity"); printf(" backside cache\n"); break; } } else printf("L2 cache disabled\n"); } static void cpu_booke_setup(int cpuid, uint16_t vers) { #ifdef BOOKE_E500 register_t hid0; const char *bitmask; hid0 = mfspr(SPR_HID0); switch (vers) { case FSL_E500mc: bitmask = HID0_E500MC_BITMASK; cpu_idle_hook = cpu_idle_e500mc; break; case FSL_E5500: case FSL_E6500: bitmask = HID0_E5500_BITMASK; cpu_idle_hook = cpu_idle_e500mc; break; case FSL_E500v1: case FSL_E500v2: /* Only e500v1/v2 support HID0 power management setup. */ /* Program power-management mode. */ hid0 &= ~(HID0_DOZE | HID0_NAP | HID0_SLEEP); hid0 |= HID0_DOZE; mtspr(SPR_HID0, hid0); default: bitmask = HID0_E500_BITMASK; break; } printf("cpu%d: HID0 %b\n", cpuid, (int)hid0, bitmask); #endif if (cpu_idle_hook == NULL) cpu_idle_hook = cpu_idle_booke; } static void cpu_970_setup(int cpuid, uint16_t vers) { #ifdef AIM uint32_t hid0_hi, hid0_lo; __asm __volatile ("mfspr %0,%2; clrldi %1,%0,32; srdi %0,%0,32;" : "=r" (hid0_hi), "=r" (hid0_lo) : "K" (SPR_HID0)); /* Configure power-saving mode */ switch (vers) { case IBM970MP: hid0_hi |= (HID0_DEEPNAP | HID0_NAP | HID0_DPM); hid0_hi &= ~HID0_DOZE; break; default: hid0_hi |= (HID0_NAP | HID0_DPM); hid0_hi &= ~(HID0_DOZE | HID0_DEEPNAP); break; } powerpc_pow_enabled = 1; __asm __volatile (" \ sync; isync; \ sldi %0,%0,32; or %0,%0,%1; \ mtspr %2, %0; \ mfspr %0, %2; mfspr %0, %2; mfspr %0, %2; \ mfspr %0, %2; mfspr %0, %2; mfspr %0, %2; \ sync; isync" :: "r" (hid0_hi), "r"(hid0_lo), "K" (SPR_HID0)); __asm __volatile ("mfspr %0,%1; srdi %0,%0,32;" : "=r" (hid0_hi) : "K" (SPR_HID0)); printf("cpu%d: HID0 %b\n", cpuid, (int)(hid0_hi), HID0_970_BITMASK); #endif cpu_idle_hook = cpu_idle_60x; } static void cpu_powerx_setup(int cpuid, uint16_t vers) { #if defined(__powerpc64__) && defined(AIM) if ((mfmsr() & PSL_HV) == 0) return; /* Nuke the FSCR, to disable all facilities. */ mtspr(SPR_FSCR, 0); /* Configure power-saving */ switch (vers) { case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: cpu_idle_hook = cpu_idle_powerx; mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_PECE_WAKESET); isync(); break; case IBMPOWER9: cpu_idle_hook = cpu_idle_power9; mtspr(SPR_LPCR, mfspr(SPR_LPCR) | LPCR_PECE_WAKESET); isync(); break; default: return; } #endif } static int cpu_feature_bit(SYSCTL_HANDLER_ARGS) { int result; result = (cpu_features & arg2) ? 
1 : 0; return (sysctl_handle_int(oidp, &result, 0, req)); } void cpu_idle(int busy) { sbintime_t sbt = -1; #ifdef INVARIANTS if ((mfmsr() & PSL_EE) != PSL_EE) { struct thread *td = curthread; printf("td msr %#lx\n", (u_long)td->td_md.md_saved_msr); panic("ints disabled in idleproc!"); } #endif CTR1(KTR_SPARE2, "cpu_idle(%d)", busy); if (cpu_idle_hook != NULL) { if (!busy) { critical_enter(); sbt = cpu_idleclock(); } cpu_idle_hook(sbt); if (!busy) { cpu_activeclock(); critical_exit(); } } CTR1(KTR_SPARE2, "cpu_idle(%d) done", busy); } static void cpu_idle_60x(sbintime_t sbt) { #ifdef AIM register_t msr; uint16_t vers; #endif if (!powerpc_pow_enabled) return; #ifdef AIM msr = mfmsr(); vers = mfpvr() >> 16; switch (vers) { case IBM970: case IBM970FX: case IBM970MP: case MPC7447A: case MPC7448: case MPC7450: case MPC7455: case MPC7457: /* 0x7e00066c: dssall */ __asm __volatile("\ .long 0x7e00066c; sync; mtmsr %0; isync" :: "r"(msr | PSL_POW)); break; default: powerpc_sync(); mtmsr(msr | PSL_POW); break; } #endif } #ifdef BOOKE_E500 static void cpu_idle_e500mc(sbintime_t sbt) { /* * Base binutils doesn't know what the 'wait' instruction is, so * use the opcode encoding here. */ __asm __volatile(".long 0x7c00007c"); } #endif static void cpu_idle_booke(sbintime_t sbt) { #ifdef BOOKE_E500 register_t msr; msr = mfmsr(); powerpc_sync(); mtmsr(msr | PSL_WE); #endif } #if defined(__powerpc64__) && defined(AIM) static void cpu_idle_powerx(sbintime_t sbt) { /* Sleeping when running on one cpu gives no advantages - avoid it */ if (smp_started == 0) return; spinlock_enter(); if (sched_runnable()) { spinlock_exit(); return; } if (can_wakeup == 0) can_wakeup = 1; mb(); enter_idle_powerx(); spinlock_exit(); } static void cpu_idle_power9(sbintime_t sbt) { register_t msr; msr = mfmsr(); /* Suspend external interrupts until stop instruction completes. */ mtmsr(msr & ~PSL_EE); /* Set the stop state to lowest latency, wake up to next instruction */ /* Set maximum transition level to 2, for deepest lossless sleep. */ mtspr(SPR_PSSCR, (2 << PSSCR_MTL_S) | (0 << PSSCR_RL_S)); /* "stop" instruction (PowerISA 3.0) */ __asm __volatile (".long 0x4c0002e4"); /* * Re-enable external interrupts to capture the interrupt that caused * the wake up. */ mtmsr(msr); } #endif int cpu_idle_wakeup(int cpu) { return (0); } diff --git a/sys/x86/x86/identcpu.c b/sys/x86/x86/identcpu.c index 7fa35bd50244..3163053a123f 100644 --- a/sys/x86/x86/identcpu.c +++ b/sys/x86/x86/identcpu.c @@ -1,2627 +1,2627 @@ /*- * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * Copyright (c) 1997 KATO Takenori. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Id: machdep.c,v 1.193 1996/06/18 01:22:04 bde Exp */ #include #include "opt_cpu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __i386__ #define IDENTBLUE_CYRIX486 0 #define IDENTBLUE_IBMCPU 1 #define IDENTBLUE_CYRIXM2 2 static void identifycyrix(void); static void print_transmeta_info(void); #endif static u_int find_cpu_vendor_id(void); static void print_AMD_info(void); static void print_INTEL_info(void); static void print_INTEL_TLB(u_int data); static void print_hypervisor_info(void); static void print_svm_info(void); static void print_via_padlock_info(void); static void print_vmx_info(void); #ifdef __i386__ int cpu; /* Are we 386, 386sx, 486, etc? */ int cpu_class; #endif u_int cpu_feature; /* Feature flags */ u_int cpu_feature2; /* Feature flags */ u_int amd_feature; /* AMD feature flags */ u_int amd_feature2; /* AMD feature flags */ u_int amd_rascap; /* AMD RAS capabilities */ u_int amd_pminfo; /* AMD advanced power management info */ u_int amd_extended_feature_extensions; u_int via_feature_rng; /* VIA RNG features */ u_int via_feature_xcrypt; /* VIA ACE features */ u_int cpu_high; /* Highest arg to CPUID */ u_int cpu_exthigh; /* Highest arg to extended CPUID */ u_int cpu_id; /* Stepping ID */ u_int cpu_procinfo; /* HyperThreading Info / Brand Index / CLFUSH */ u_int cpu_procinfo2; /* Multicore info */ char cpu_vendor[20]; /* CPU Origin code */ u_int cpu_vendor_id; /* CPU vendor ID */ u_int cpu_mxcsr_mask; /* Valid bits in mxcsr */ u_int cpu_clflush_line_size = 32; u_int cpu_stdext_feature; /* %ebx */ u_int cpu_stdext_feature2; /* %ecx */ u_int cpu_stdext_feature3; /* %edx */ uint64_t cpu_ia32_arch_caps; u_int cpu_max_ext_state_size; u_int cpu_mon_mwait_flags; /* MONITOR/MWAIT flags (CPUID.05H.ECX) */ u_int cpu_mon_min_size; /* MONITOR minimum range size, bytes */ u_int cpu_mon_max_size; /* MONITOR minimum range size, bytes */ u_int cpu_maxphyaddr; /* Max phys addr width in bits */ u_int cpu_power_eax; /* 06H: Power management leaf, %eax */ u_int cpu_power_ebx; /* 06H: Power management leaf, %ebx */ u_int cpu_power_ecx; /* 06H: Power management leaf, %ecx */ u_int cpu_power_edx; /* 06H: Power management leaf, %edx */ char machine[] = MACHINE; SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD, &via_feature_rng, 0, "VIA RNG feature available in CPU"); SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD, &via_feature_xcrypt, 0, "VIA xcrypt feature available in CPU"); #ifdef 
__amd64__ #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "i386"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); #else SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD | CTLFLAG_CAPRD, machine, 0, "Machine class"); #endif char cpu_model[128]; -SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_MPSAFE, +SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD | CTLFLAG_CAPRD | CTLFLAG_MPSAFE, cpu_model, 0, "Machine model"); static int hw_clockrate; SYSCTL_INT(_hw, OID_AUTO, clockrate, CTLFLAG_RD, &hw_clockrate, 0, "CPU instruction clock rate"); u_int hv_base; u_int hv_high; char hv_vendor[16]; SYSCTL_STRING(_hw, OID_AUTO, hv_vendor, CTLFLAG_RD | CTLFLAG_MPSAFE, hv_vendor, 0, "Hypervisor vendor"); static eventhandler_tag tsc_post_tag; static char cpu_brand[48]; #ifdef __i386__ #define MAX_BRAND_INDEX 8 static const char *cpu_brandtable[MAX_BRAND_INDEX + 1] = { NULL, /* No brand */ "Intel Celeron", "Intel Pentium III", "Intel Pentium III Xeon", NULL, NULL, NULL, NULL, "Intel Pentium 4" }; static struct { char *cpu_name; int cpu_class; } cpus[] = { { "Intel 80286", CPUCLASS_286 }, /* CPU_286 */ { "i386SX", CPUCLASS_386 }, /* CPU_386SX */ { "i386DX", CPUCLASS_386 }, /* CPU_386 */ { "i486SX", CPUCLASS_486 }, /* CPU_486SX */ { "i486DX", CPUCLASS_486 }, /* CPU_486 */ { "Pentium", CPUCLASS_586 }, /* CPU_586 */ { "Cyrix 486", CPUCLASS_486 }, /* CPU_486DLC */ { "Pentium Pro", CPUCLASS_686 }, /* CPU_686 */ { "Cyrix 5x86", CPUCLASS_486 }, /* CPU_M1SC */ { "Cyrix 6x86", CPUCLASS_486 }, /* CPU_M1 */ { "Blue Lightning", CPUCLASS_486 }, /* CPU_BLUE */ { "Cyrix 6x86MX", CPUCLASS_686 }, /* CPU_M2 */ { "NexGen 586", CPUCLASS_386 }, /* CPU_NX586 (XXX) */ { "Cyrix 486S/DX", CPUCLASS_486 }, /* CPU_CY486DX */ { "Pentium II", CPUCLASS_686 }, /* CPU_PII */ { "Pentium III", CPUCLASS_686 }, /* CPU_PIII */ { "Pentium 4", CPUCLASS_686 }, /* CPU_P4 */ }; #endif static struct { char *vendor; u_int vendor_id; } cpu_vendors[] = { { INTEL_VENDOR_ID, CPU_VENDOR_INTEL }, /* GenuineIntel */ { AMD_VENDOR_ID, CPU_VENDOR_AMD }, /* AuthenticAMD */ { HYGON_VENDOR_ID, CPU_VENDOR_HYGON }, /* HygonGenuine */ { CENTAUR_VENDOR_ID, CPU_VENDOR_CENTAUR }, /* CentaurHauls */ #ifdef __i386__ { NSC_VENDOR_ID, CPU_VENDOR_NSC }, /* Geode by NSC */ { CYRIX_VENDOR_ID, CPU_VENDOR_CYRIX }, /* CyrixInstead */ { TRANSMETA_VENDOR_ID, CPU_VENDOR_TRANSMETA }, /* GenuineTMx86 */ { SIS_VENDOR_ID, CPU_VENDOR_SIS }, /* SiS SiS SiS */ { UMC_VENDOR_ID, CPU_VENDOR_UMC }, /* UMC UMC UMC */ { NEXGEN_VENDOR_ID, CPU_VENDOR_NEXGEN }, /* NexGenDriven */ { RISE_VENDOR_ID, CPU_VENDOR_RISE }, /* RiseRiseRise */ #if 0 /* XXX CPUID 8000_0000h and 8086_0000h, not 0000_0000h */ { "TransmetaCPU", CPU_VENDOR_TRANSMETA }, #endif #endif }; void printcpuinfo(void) { u_int regs[4], i; char *brand; printf("CPU: "); #ifdef __i386__ cpu_class = cpus[cpu].cpu_class; strncpy(cpu_model, cpus[cpu].cpu_name, sizeof (cpu_model)); #else strncpy(cpu_model, "Hammer", sizeof (cpu_model)); #endif /* Check for extended CPUID information and a processor name. 
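/*
 * For context, a minimal userland sketch of how the hw.machine and
 * hw.model strings exported by the sysctls above can be consumed; the
 * program below is illustrative only and assumes a FreeBSD libc with
 * sysctlbyname(3).  A 32-bit binary on amd64 may be given "i386" for
 * hw.machine via the SCTL_MASK32 path above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	char buf[128];
	size_t len;

	len = sizeof(buf);
	if (sysctlbyname("hw.machine", buf, &len, NULL, 0) == 0)
		printf("hw.machine: %s\n", buf);
	len = sizeof(buf);
	if (sysctlbyname("hw.model", buf, &len, NULL, 0) == 0)
		printf("hw.model: %s\n", buf);	/* typically the CPUID brand string */
	return (0);
}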
*/ if (cpu_exthigh >= 0x80000004) { brand = cpu_brand; for (i = 0x80000002; i < 0x80000005; i++) { do_cpuid(i, regs); memcpy(brand, regs, sizeof(regs)); brand += sizeof(regs); } } switch (cpu_vendor_id) { case CPU_VENDOR_INTEL: #ifdef __i386__ if ((cpu_id & 0xf00) > 0x300) { u_int brand_index; cpu_model[0] = '\0'; switch (cpu_id & 0x3000) { case 0x1000: strcpy(cpu_model, "Overdrive "); break; case 0x2000: strcpy(cpu_model, "Dual "); break; } switch (cpu_id & 0xf00) { case 0x400: strcat(cpu_model, "i486 "); /* Check the particular flavor of 486 */ switch (cpu_id & 0xf0) { case 0x00: case 0x10: strcat(cpu_model, "DX"); break; case 0x20: strcat(cpu_model, "SX"); break; case 0x30: strcat(cpu_model, "DX2"); break; case 0x40: strcat(cpu_model, "SL"); break; case 0x50: strcat(cpu_model, "SX2"); break; case 0x70: strcat(cpu_model, "DX2 Write-Back Enhanced"); break; case 0x80: strcat(cpu_model, "DX4"); break; } break; case 0x500: /* Check the particular flavor of 586 */ strcat(cpu_model, "Pentium"); switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, " A-step"); break; case 0x10: strcat(cpu_model, "/P5"); break; case 0x20: strcat(cpu_model, "/P54C"); break; case 0x30: strcat(cpu_model, "/P24T"); break; case 0x40: strcat(cpu_model, "/P55C"); break; case 0x70: strcat(cpu_model, "/P54C"); break; case 0x80: strcat(cpu_model, "/P55C (quarter-micron)"); break; default: /* nothing */ break; } #if defined(I586_CPU) && !defined(NO_F00F_HACK) /* * XXX - If/when Intel fixes the bug, this * should also check the version of the * CPU, not just that it's a Pentium. */ has_f00f_bug = 1; #endif break; case 0x600: /* Check the particular flavor of 686 */ switch (cpu_id & 0xf0) { case 0x00: strcat(cpu_model, "Pentium Pro A-step"); break; case 0x10: strcat(cpu_model, "Pentium Pro"); break; case 0x30: case 0x50: case 0x60: strcat(cpu_model, "Pentium II/Pentium II Xeon/Celeron"); cpu = CPU_PII; break; case 0x70: case 0x80: case 0xa0: case 0xb0: strcat(cpu_model, "Pentium III/Pentium III Xeon/Celeron"); cpu = CPU_PIII; break; default: strcat(cpu_model, "Unknown 80686"); break; } break; case 0xf00: strcat(cpu_model, "Pentium 4"); cpu = CPU_P4; break; default: strcat(cpu_model, "unknown"); break; } /* * If we didn't get a brand name from the extended * CPUID, try to look it up in the brand table. */ if (cpu_high > 0 && *cpu_brand == '\0') { brand_index = cpu_procinfo & CPUID_BRAND_INDEX; if (brand_index <= MAX_BRAND_INDEX && cpu_brandtable[brand_index] != NULL) strcpy(cpu_brand, cpu_brandtable[brand_index]); } } #else /* Please make up your mind folks! */ strcat(cpu_model, "EM64T"); #endif break; case CPU_VENDOR_AMD: /* * Values taken from AMD Processor Recognition * http://www.amd.com/K6/k6docs/pdf/20734g.pdf * (also describes ``Features'' encodings. 
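/*
 * A brief reminder of the cpu_id layout relied on by the nibble tests
 * above (this is the usual CPUID leaf 1 EAX convention; the kernel's
 * CPUID_TO_FAMILY()/CPUID_TO_MODEL() macros encapsulate essentially the
 * same arithmetic):
 *
 *	stepping = cpu_id & 0xf;
 *	model    = (cpu_id >> 4) & 0xf;
 *	family   = (cpu_id >> 8) & 0xf;
 *	if (family == 0xf)
 *		family += (cpu_id >> 20) & 0xff;	(extended family)
 *	if (family == 0x6 || family == 0xf)
 *		model |= ((cpu_id >> 16) & 0xf) << 4;	(extended model)
 *
 * Example: cpu_id 0x000906ea decodes to Family=0x6, Model=0x9e,
 * Stepping=10.
 */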
*/ strcpy(cpu_model, "AMD "); #ifdef __i386__ switch (cpu_id & 0xFF0) { case 0x410: strcat(cpu_model, "Standard Am486DX"); break; case 0x430: strcat(cpu_model, "Enhanced Am486DX2 Write-Through"); break; case 0x470: strcat(cpu_model, "Enhanced Am486DX2 Write-Back"); break; case 0x480: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Through"); break; case 0x490: strcat(cpu_model, "Enhanced Am486DX4/Am5x86 Write-Back"); break; case 0x4E0: strcat(cpu_model, "Am5x86 Write-Through"); break; case 0x4F0: strcat(cpu_model, "Am5x86 Write-Back"); break; case 0x500: strcat(cpu_model, "K5 model 0"); break; case 0x510: strcat(cpu_model, "K5 model 1"); break; case 0x520: strcat(cpu_model, "K5 PR166 (model 2)"); break; case 0x530: strcat(cpu_model, "K5 PR200 (model 3)"); break; case 0x560: strcat(cpu_model, "K6"); break; case 0x570: strcat(cpu_model, "K6 266 (model 1)"); break; case 0x580: strcat(cpu_model, "K6-2"); break; case 0x590: strcat(cpu_model, "K6-III"); break; case 0x5a0: strcat(cpu_model, "Geode LX"); break; default: strcat(cpu_model, "Unknown"); break; } #else if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); #endif break; #ifdef __i386__ case CPU_VENDOR_CYRIX: strcpy(cpu_model, "Cyrix "); switch (cpu_id & 0xff0) { case 0x440: strcat(cpu_model, "MediaGX"); break; case 0x520: strcat(cpu_model, "6x86"); break; case 0x540: cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); break; case 0x600: strcat(cpu_model, "6x86MX"); break; default: /* * Even though CPU supports the cpuid * instruction, it can be disabled. * Therefore, this routine supports all Cyrix * CPUs. */ switch (cyrix_did & 0xf0) { case 0x00: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486SLC"); break; case 0x01: strcat(cpu_model, "486DLC"); break; case 0x02: strcat(cpu_model, "486SLC2"); break; case 0x03: strcat(cpu_model, "486DLC2"); break; case 0x04: strcat(cpu_model, "486SRx"); break; case 0x05: strcat(cpu_model, "486DRx"); break; case 0x06: strcat(cpu_model, "486SRx2"); break; case 0x07: strcat(cpu_model, "486DRx2"); break; case 0x08: strcat(cpu_model, "486SRu"); break; case 0x09: strcat(cpu_model, "486DRu"); break; case 0x0a: strcat(cpu_model, "486SRu2"); break; case 0x0b: strcat(cpu_model, "486DRu2"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x10: switch (cyrix_did & 0x0f) { case 0x00: strcat(cpu_model, "486S"); break; case 0x01: strcat(cpu_model, "486S2"); break; case 0x02: strcat(cpu_model, "486Se"); break; case 0x03: strcat(cpu_model, "486S2e"); break; case 0x0a: strcat(cpu_model, "486DX"); break; case 0x0b: strcat(cpu_model, "486DX2"); break; case 0x0f: strcat(cpu_model, "486DX4"); break; default: strcat(cpu_model, "Unknown"); break; } break; case 0x20: if ((cyrix_did & 0x0f) < 8) strcat(cpu_model, "6x86"); /* Where did you get it? 
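/*
 * For the Cyrix branches in this switch: when the part lacks cpuid, or
 * has it disabled by the BIOS, identification falls back on cyrix_did.
 * As documented at identifycyrix() further down, DIR1 (step and
 * revision IDs) occupies the upper byte and DIR0 (the device ID) the
 * lower byte, so the (cyrix_did & 0xf0) and (cyrix_did & 0x0f) tests
 * here are decoding the DIR0 device ID.
 */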
*/ else strcat(cpu_model, "5x86"); break; case 0x30: strcat(cpu_model, "6x86"); break; case 0x40: if ((cyrix_did & 0xf000) == 0x3000) { cpu_class = CPUCLASS_586; strcat(cpu_model, "GXm"); } else strcat(cpu_model, "MediaGX"); break; case 0x50: strcat(cpu_model, "6x86MX"); break; case 0xf0: switch (cyrix_did & 0x0f) { case 0x0d: strcat(cpu_model, "Overdrive CPU"); break; case 0x0e: strcpy(cpu_model, "Texas Instruments 486SXL"); break; case 0x0f: strcat(cpu_model, "486SLC/DLC"); break; default: strcat(cpu_model, "Unknown"); break; } break; default: strcat(cpu_model, "Unknown"); break; } break; } break; case CPU_VENDOR_RISE: strcpy(cpu_model, "Rise "); switch (cpu_id & 0xff0) { case 0x500: /* 6401 and 6441 (Kirin) */ case 0x520: /* 6510 (Lynx) */ strcat(cpu_model, "mP6"); break; default: strcat(cpu_model, "Unknown"); } break; #endif case CPU_VENDOR_CENTAUR: #ifdef __i386__ switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "IDT WinChip C6"); break; case 0x580: strcpy(cpu_model, "IDT WinChip 2"); break; case 0x590: strcpy(cpu_model, "IDT WinChip 3"); break; case 0x660: strcpy(cpu_model, "VIA C3 Samuel"); break; case 0x670: if (cpu_id & 0x8) strcpy(cpu_model, "VIA C3 Ezra"); else strcpy(cpu_model, "VIA C3 Samuel 2"); break; case 0x680: strcpy(cpu_model, "VIA C3 Ezra-T"); break; case 0x690: strcpy(cpu_model, "VIA C3 Nehemiah"); break; case 0x6a0: case 0x6d0: strcpy(cpu_model, "VIA C7 Esther"); break; case 0x6f0: strcpy(cpu_model, "VIA Nano"); break; default: strcpy(cpu_model, "VIA/IDT Unknown"); } #else strcpy(cpu_model, "VIA "); if ((cpu_id & 0xff0) == 0x6f0) strcat(cpu_model, "Nano Processor"); else strcat(cpu_model, "Unknown"); #endif break; #ifdef __i386__ case CPU_VENDOR_IBM: strcpy(cpu_model, "Blue Lightning CPU"); break; case CPU_VENDOR_NSC: switch (cpu_id & 0xff0) { case 0x540: strcpy(cpu_model, "Geode SC1100"); cpu = CPU_GEODE1100; break; default: strcpy(cpu_model, "Geode/NSC unknown"); break; } break; #endif case CPU_VENDOR_HYGON: strcpy(cpu_model, "Hygon "); #ifdef __i386__ strcat(cpu_model, "Unknown"); #else if ((cpu_id & 0xf00) == 0xf00) strcat(cpu_model, "AMD64 Processor"); else strcat(cpu_model, "Unknown"); #endif break; default: strcat(cpu_model, "Unknown"); break; } /* * Replace cpu_model with cpu_brand minus leading spaces if * we have one. */ brand = cpu_brand; while (*brand == ' ') ++brand; if (*brand != '\0') strcpy(cpu_model, brand); printf("%s (", cpu_model); if (tsc_freq != 0) { hw_clockrate = (tsc_freq + 5000) / 1000000; printf("%jd.%02d-MHz ", (intmax_t)(tsc_freq + 4999) / 1000000, (u_int)((tsc_freq + 4999) / 10000) % 100); } #ifdef __i386__ switch(cpu_class) { case CPUCLASS_286: printf("286"); break; case CPUCLASS_386: printf("386"); break; #if defined(I486_CPU) case CPUCLASS_486: printf("486"); break; #endif #if defined(I586_CPU) case CPUCLASS_586: printf("586"); break; #endif #if defined(I686_CPU) case CPUCLASS_686: printf("686"); break; #endif default: printf("Unknown"); /* will panic below... 
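/*
 * Worked example for the frequency formatting above: with
 * tsc_freq = 2394567890 Hz, hw_clockrate = (2394567890 + 5000) / 1000000
 * = 2394, and the printf emits "2394.57-MHz" because
 * (2394567890 + 4999) / 1000000 = 2394 and
 * ((2394567890 + 4999) / 10000) % 100 = 57.
 */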
*/ } #else printf("K8"); #endif printf("-class CPU)\n"); if (*cpu_vendor) printf(" Origin=\"%s\"", cpu_vendor); if (cpu_id) printf(" Id=0x%x", cpu_id); if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON || cpu_vendor_id == CPU_VENDOR_CENTAUR || #ifdef __i386__ cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_RISE || cpu_vendor_id == CPU_VENDOR_NSC || (cpu_vendor_id == CPU_VENDOR_CYRIX && ((cpu_id & 0xf00) > 0x500)) || #endif 0) { printf(" Family=0x%x", CPUID_TO_FAMILY(cpu_id)); printf(" Model=0x%x", CPUID_TO_MODEL(cpu_id)); printf(" Stepping=%u", cpu_id & CPUID_STEPPING); #ifdef __i386__ if (cpu_vendor_id == CPU_VENDOR_CYRIX) printf("\n DIR=0x%04x", cyrix_did); #endif /* * AMD CPUID Specification * http://support.amd.com/us/Embedded_TechDocs/25481.pdf * * Intel Processor Identification and CPUID Instruction * http://www.intel.com/assets/pdf/appnote/241618.pdf */ if (cpu_high > 0) { /* * Here we should probably set up flags indicating * whether or not various features are available. * The interesting ones are probably VME, PSE, PAE, * and PGE. The code already assumes without bothering * to check that all CPUs >= Pentium have a TSC and * MSRs. */ printf("\n Features=0x%b", cpu_feature, "\020" "\001FPU" /* Integral FPU */ "\002VME" /* Extended VM86 mode support */ "\003DE" /* Debugging Extensions (CR4.DE) */ "\004PSE" /* 4MByte page tables */ "\005TSC" /* Timestamp counter */ "\006MSR" /* Machine specific registers */ "\007PAE" /* Physical address extension */ "\010MCE" /* Machine Check support */ "\011CX8" /* CMPEXCH8 instruction */ "\012APIC" /* SMP local APIC */ "\013oldMTRR" /* Previous implementation of MTRR */ "\014SEP" /* Fast System Call */ "\015MTRR" /* Memory Type Range Registers */ "\016PGE" /* PG_G (global bit) support */ "\017MCA" /* Machine Check Architecture */ "\020CMOV" /* CMOV instruction */ "\021PAT" /* Page attributes table */ "\022PSE36" /* 36 bit address space support */ "\023PN" /* Processor Serial number */ "\024CLFLUSH" /* Has the CLFLUSH instruction */ "\025" "\026DTS" /* Debug Trace Store */ "\027ACPI" /* ACPI support */ "\030MMX" /* MMX instructions */ "\031FXSR" /* FXSAVE/FXRSTOR */ "\032SSE" /* Streaming SIMD Extensions */ "\033SSE2" /* Streaming SIMD Extensions #2 */ "\034SS" /* Self snoop */ "\035HTT" /* Hyperthreading (see EBX bit 16-23) */ "\036TM" /* Thermal Monitor clock slowdown */ "\037IA64" /* CPU can execute IA64 instructions */ "\040PBE" /* Pending Break Enable */ ); if (cpu_feature2 != 0) { printf("\n Features2=0x%b", cpu_feature2, "\020" "\001SSE3" /* SSE3 */ "\002PCLMULQDQ" /* Carry-Less Mul Quadword */ "\003DTES64" /* 64-bit Debug Trace */ "\004MON" /* MONITOR/MWAIT Instructions */ "\005DS_CPL" /* CPL Qualified Debug Store */ "\006VMX" /* Virtual Machine Extensions */ "\007SMX" /* Safer Mode Extensions */ "\010EST" /* Enhanced SpeedStep */ "\011TM2" /* Thermal Monitor 2 */ "\012SSSE3" /* SSSE3 */ "\013CNXT-ID" /* L1 context ID available */ "\014SDBG" /* IA32 silicon debug */ "\015FMA" /* Fused Multiply Add */ "\016CX16" /* CMPXCHG16B Instruction */ "\017xTPR" /* Send Task Priority Messages*/ "\020PDCM" /* Perf/Debug Capability MSR */ "\021" "\022PCID" /* Process-context Identifiers*/ "\023DCA" /* Direct Cache Access */ "\024SSE4.1" /* SSE 4.1 */ "\025SSE4.2" /* SSE 4.2 */ "\026x2APIC" /* xAPIC Extensions */ "\027MOVBE" /* MOVBE Instruction */ "\030POPCNT" /* POPCNT Instruction */ "\031TSCDLT" /* TSC-Deadline Timer */ "\032AESNI" /* AES Crypto */ "\033XSAVE" /* 
XSAVE/XRSTOR States */ "\034OSXSAVE" /* OS-Enabled State Management*/ "\035AVX" /* Advanced Vector Extensions */ "\036F16C" /* Half-precision conversions */ "\037RDRAND" /* RDRAND Instruction */ "\040HV" /* Hypervisor */ ); } if (amd_feature != 0) { printf("\n AMD Features=0x%b", amd_feature, "\020" /* in hex */ "\001" /* Same */ "\002" /* Same */ "\003" /* Same */ "\004" /* Same */ "\005" /* Same */ "\006" /* Same */ "\007" /* Same */ "\010" /* Same */ "\011" /* Same */ "\012" /* Same */ "\013" /* Undefined */ "\014SYSCALL" /* Have SYSCALL/SYSRET */ "\015" /* Same */ "\016" /* Same */ "\017" /* Same */ "\020" /* Same */ "\021" /* Same */ "\022" /* Same */ "\023" /* Reserved, unknown */ "\024MP" /* Multiprocessor Capable */ "\025NX" /* Has EFER.NXE, NX */ "\026" /* Undefined */ "\027MMX+" /* AMD MMX Extensions */ "\030" /* Same */ "\031" /* Same */ "\032FFXSR" /* Fast FXSAVE/FXRSTOR */ "\033Page1GB" /* 1-GB large page support */ "\034RDTSCP" /* RDTSCP */ "\035" /* Undefined */ "\036LM" /* 64 bit long mode */ "\0373DNow!+" /* AMD 3DNow! Extensions */ "\0403DNow!" /* AMD 3DNow! */ ); } if (amd_feature2 != 0) { printf("\n AMD Features2=0x%b", amd_feature2, "\020" "\001LAHF" /* LAHF/SAHF in long mode */ "\002CMP" /* CMP legacy */ "\003SVM" /* Secure Virtual Mode */ "\004ExtAPIC" /* Extended APIC register */ "\005CR8" /* CR8 in legacy mode */ "\006ABM" /* LZCNT instruction */ "\007SSE4A" /* SSE4A */ "\010MAS" /* Misaligned SSE mode */ "\011Prefetch" /* 3DNow! Prefetch/PrefetchW */ "\012OSVW" /* OS visible workaround */ "\013IBS" /* Instruction based sampling */ "\014XOP" /* XOP extended instructions */ "\015SKINIT" /* SKINIT/STGI */ "\016WDT" /* Watchdog timer */ "\017" "\020LWP" /* Lightweight Profiling */ "\021FMA4" /* 4-operand FMA instructions */ "\022TCE" /* Translation Cache Extension */ "\023" "\024NodeId" /* NodeId MSR support */ "\025" "\026TBM" /* Trailing Bit Manipulation */ "\027Topology" /* Topology Extensions */ "\030PCXC" /* Core perf count */ "\031PNXC" /* NB perf count */ "\032" "\033DBE" /* Data Breakpoint extension */ "\034PTSC" /* Performance TSC */ "\035PL2I" /* L2I perf count */ "\036MWAITX" /* MONITORX/MWAITX instructions */ "\037ADMSKX" /* Address mask extension */ "\040" ); } if (cpu_stdext_feature != 0) { printf("\n Structured Extended Features=0x%b", cpu_stdext_feature, "\020" /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ "\001FSGSBASE" "\002TSCADJ" "\003SGX" /* Bit Manipulation Instructions */ "\004BMI1" /* Hardware Lock Elision */ "\005HLE" /* Advanced Vector Instructions 2 */ "\006AVX2" /* FDP_EXCPTN_ONLY */ "\007FDPEXC" /* Supervisor Mode Execution Prot. 
*/ "\010SMEP" /* Bit Manipulation Instructions */ "\011BMI2" "\012ERMS" /* Invalidate Processor Context ID */ "\013INVPCID" /* Restricted Transactional Memory */ "\014RTM" "\015PQM" "\016NFPUSG" /* Intel Memory Protection Extensions */ "\017MPX" "\020PQE" /* AVX512 Foundation */ "\021AVX512F" "\022AVX512DQ" /* Enhanced NRBG */ "\023RDSEED" /* ADCX + ADOX */ "\024ADX" /* Supervisor Mode Access Prevention */ "\025SMAP" "\026AVX512IFMA" /* Formerly PCOMMIT */ "\027" "\030CLFLUSHOPT" "\031CLWB" "\032PROCTRACE" "\033AVX512PF" "\034AVX512ER" "\035AVX512CD" "\036SHA" "\037AVX512BW" "\040AVX512VL" ); } if (cpu_stdext_feature2 != 0) { printf("\n Structured Extended Features2=0x%b", cpu_stdext_feature2, "\020" "\001PREFETCHWT1" "\002AVX512VBMI" "\003UMIP" "\004PKU" "\005OSPKE" "\006WAITPKG" "\007AVX512VBMI2" "\011GFNI" "\012VAES" "\013VPCLMULQDQ" "\014AVX512VNNI" "\015AVX512BITALG" "\016TME" "\017AVX512VPOPCNTDQ" "\021LA57" "\027RDPID" "\032CLDEMOTE" "\034MOVDIRI" "\035MOVDIR64B" "\036ENQCMD" "\037SGXLC" ); } if (cpu_stdext_feature3 != 0) { printf("\n Structured Extended Features3=0x%b", cpu_stdext_feature3, "\020" "\003AVX512_4VNNIW" "\004AVX512_4FMAPS" "\005FSRM" "\011AVX512VP2INTERSECT" "\012MCUOPT" "\013MD_CLEAR" "\016TSXFA" "\023PCONFIG" "\025IBT" "\033IBPB" "\034STIBP" "\035L1DFL" "\036ARCH_CAP" "\037CORE_CAP" "\040SSBD" ); } if ((cpu_feature2 & CPUID2_XSAVE) != 0) { cpuid_count(0xd, 0x1, regs); if (regs[0] != 0) { printf("\n XSAVE Features=0x%b", regs[0], "\020" "\001XSAVEOPT" "\002XSAVEC" "\003XINUSE" "\004XSAVES"); } } if (cpu_ia32_arch_caps != 0) { printf("\n IA32_ARCH_CAPS=0x%b", (u_int)cpu_ia32_arch_caps, "\020" "\001RDCL_NO" "\002IBRS_ALL" "\003RSBA" "\004SKIP_L1DFL_VME" "\005SSB_NO" "\006MDS_NO" "\010TSX_CTRL" "\011TAA_NO" ); } if (amd_extended_feature_extensions != 0) { u_int amd_fe_masked; amd_fe_masked = amd_extended_feature_extensions; if ((amd_fe_masked & AMDFEID_IBRS) == 0) amd_fe_masked &= ~(AMDFEID_IBRS_ALWAYSON | AMDFEID_PREFER_IBRS); if ((amd_fe_masked & AMDFEID_STIBP) == 0) amd_fe_masked &= ~AMDFEID_STIBP_ALWAYSON; printf("\n " "AMD Extended Feature Extensions ID EBX=" "0x%b", amd_fe_masked, "\020" "\001CLZERO" "\002IRPerf" "\003XSaveErPtr" "\005RDPRU" "\011MCOMMIT" "\012WBNOINVD" "\015IBPB" "\017IBRS" "\020STIBP" "\021IBRS_ALWAYSON" "\022STIBP_ALWAYSON" "\023PREFER_IBRS" "\030PPIN" "\031SSBD" "\032VIRT_SSBD" "\033SSB_NO" ); } if (via_feature_rng != 0 || via_feature_xcrypt != 0) print_via_padlock_info(); if (cpu_feature2 & CPUID2_VMX) print_vmx_info(); if (amd_feature2 & AMDID2_SVM) print_svm_info(); if ((cpu_feature & CPUID_HTT) && (cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON)) cpu_feature &= ~CPUID_HTT; /* * If this CPU supports P-state invariant TSC then * mention the capability. */ if (tsc_is_invariant) { printf("\n TSC: P-state invariant"); if (tsc_perf_stat) printf(", performance statistics"); } } #ifdef __i386__ } else if (cpu_vendor_id == CPU_VENDOR_CYRIX) { printf(" DIR=0x%04x", cyrix_did); printf(" Stepping=%u", (cyrix_did & 0xf000) >> 12); printf(" Revision=%u", (cyrix_did & 0x0f00) >> 8); #ifndef CYRIX_CACHE_REALLY_WORKS if (cpu == CPU_M1 && (cyrix_did & 0xff00) < 0x1700) printf("\n CPU cache: write-through mode"); #endif #endif } /* Avoid ugly blank lines: only print newline when we have to. 
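/*
 * A short reminder of the printf(9) %b convention used by the feature
 * dumps above: %b consumes the value plus a bit-description string
 * whose first byte selects the output radix (\020 is 16, so the value
 * is printed in hex) and whose remaining groups are a 1-origin bit
 * number byte followed by that bit's name.  For example,
 *
 *	printf("Features=0x%b", 3, "\020\001FPU\002VME");
 *
 * prints "Features=0x3<FPU,VME>".
 */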
*/ if (*cpu_vendor || cpu_id) printf("\n"); if (bootverbose) { if (cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON) print_AMD_info(); else if (cpu_vendor_id == CPU_VENDOR_INTEL) print_INTEL_info(); #ifdef __i386__ else if (cpu_vendor_id == CPU_VENDOR_TRANSMETA) print_transmeta_info(); #endif } print_hypervisor_info(); } #ifdef __i386__ void panicifcpuunsupported(void) { #if !defined(lint) #if !defined(I486_CPU) && !defined(I586_CPU) && !defined(I686_CPU) #error This kernel is not configured for one of the supported CPUs #endif #else /* lint */ #endif /* lint */ /* * Now that we have told the user what they have, * let them know if that machine type isn't configured. */ switch (cpu_class) { case CPUCLASS_286: /* a 286 should not make it this far, anyway */ case CPUCLASS_386: #if !defined(I486_CPU) case CPUCLASS_486: #endif #if !defined(I586_CPU) case CPUCLASS_586: #endif #if !defined(I686_CPU) case CPUCLASS_686: #endif panic("CPU class not configured"); default: break; } } static volatile u_int trap_by_rdmsr; /* * Special exception 6 handler. * The rdmsr instruction generates invalid opcodes fault on 486-class * Cyrix CPU. Stacked eip register points the rdmsr instruction in the * function identblue() when this handler is called. Stacked eip should * be advanced. */ inthand_t bluetrap6; __asm (" \n\ .text \n\ .p2align 2,0x90 \n\ .type " __XSTRING(CNAME(bluetrap6)) ",@function \n\ " __XSTRING(CNAME(bluetrap6)) ": \n\ ss \n\ movl $0xa8c1d," __XSTRING(CNAME(trap_by_rdmsr)) " \n\ addl $2, (%esp) /* rdmsr is a 2-byte instruction */ \n\ iret \n\ "); /* * Special exception 13 handler. * Accessing non-existent MSR generates general protection fault. */ inthand_t bluetrap13; __asm (" \n\ .text \n\ .p2align 2,0x90 \n\ .type " __XSTRING(CNAME(bluetrap13)) ",@function \n\ " __XSTRING(CNAME(bluetrap13)) ": \n\ ss \n\ movl $0xa89c4," __XSTRING(CNAME(trap_by_rdmsr)) " \n\ popl %eax /* discard error code */ \n\ addl $2, (%esp) /* rdmsr is a 2-byte instruction */ \n\ iret \n\ "); /* * Distinguish IBM Blue Lightning CPU from Cyrix CPUs that does not * support cpuid instruction. This function should be called after * loading interrupt descriptor table register. * * I don't like this method that handles fault, but I couldn't get * information for any other methods. Does blue giant know? */ static int identblue(void) { trap_by_rdmsr = 0; /* * Cyrix 486-class CPU does not support rdmsr instruction. * The rdmsr instruction generates invalid opcode fault, and exception * will be trapped by bluetrap6() on Cyrix 486-class CPU. The * bluetrap6() set the magic number to trap_by_rdmsr. */ setidt(IDT_UD, bluetrap6, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); /* * Certain BIOS disables cpuid instruction of Cyrix 6x86MX CPU. * In this case, rdmsr generates general protection fault, and * exception will be trapped by bluetrap13(). */ setidt(IDT_GP, bluetrap13, SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); rdmsr(0x1002); /* Cyrix CPU generates fault. 
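/*
 * To summarize the probe above: the rdmsr of MSR 0x1002 has three
 * possible outcomes.  A Cyrix 486-class part raises an invalid-opcode
 * fault, so bluetrap6 stores 0xa8c1d and identblue() reports
 * IDENTBLUE_CYRIX486; a 6x86MX whose cpuid was disabled by the BIOS
 * raises a general protection fault, so bluetrap13 stores 0xa89c4 and
 * the result is IDENTBLUE_CYRIXM2; if neither trap fires, the CPU is
 * assumed to be an IBM Blue Lightning (IDENTBLUE_IBMCPU).
 */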
*/ if (trap_by_rdmsr == 0xa8c1d) return IDENTBLUE_CYRIX486; else if (trap_by_rdmsr == 0xa89c4) return IDENTBLUE_CYRIXM2; return IDENTBLUE_IBMCPU; } /* * identifycyrix() set lower 16 bits of cyrix_did as follows: * * F E D C B A 9 8 7 6 5 4 3 2 1 0 * +-------+-------+---------------+ * | SID | RID | Device ID | * | (DIR 1) | (DIR 0) | * +-------+-------+---------------+ */ static void identifycyrix(void) { register_t saveintr; int ccr2_test = 0, dir_test = 0; u_char ccr2, ccr3; saveintr = intr_disable(); ccr2 = read_cyrix_reg(CCR2); write_cyrix_reg(CCR2, ccr2 ^ CCR2_LOCK_NW); read_cyrix_reg(CCR2); if (read_cyrix_reg(CCR2) != ccr2) ccr2_test = 1; write_cyrix_reg(CCR2, ccr2); ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, ccr3 ^ CCR3_MAPEN3); read_cyrix_reg(CCR3); if (read_cyrix_reg(CCR3) != ccr3) dir_test = 1; /* CPU supports DIRs. */ write_cyrix_reg(CCR3, ccr3); if (dir_test) { /* Device ID registers are available. */ cyrix_did = read_cyrix_reg(DIR1) << 8; cyrix_did += read_cyrix_reg(DIR0); } else if (ccr2_test) cyrix_did = 0x0010; /* 486S A-step */ else cyrix_did = 0x00ff; /* Old 486SLC/DLC and TI486SXLC/SXL */ intr_restore(saveintr); } #endif /* Update TSC freq with the value indicated by the caller. */ static void tsc_freq_changed(void *arg __unused, const struct cf_level *level, int status) { /* If there was an error during the transition, don't do anything. */ if (status != 0) return; /* Total setting for this level gives the new frequency in MHz. */ hw_clockrate = level->total_set.freq; } static void hook_tsc_freq(void *arg __unused) { if (tsc_is_invariant) return; tsc_post_tag = EVENTHANDLER_REGISTER(cpufreq_post_change, tsc_freq_changed, NULL, EVENTHANDLER_PRI_ANY); } SYSINIT(hook_tsc_freq, SI_SUB_CONFIGURE, SI_ORDER_ANY, hook_tsc_freq, NULL); static struct { const char *vm_cpuid; int vm_guest; } vm_cpuids[] = { { "XenVMMXenVMM", VM_GUEST_XEN }, /* XEN */ { "Microsoft Hv", VM_GUEST_HV }, /* Microsoft Hyper-V */ { "VMwareVMware", VM_GUEST_VMWARE }, /* VMware VM */ { "KVMKVMKVM", VM_GUEST_KVM }, /* KVM */ { "bhyve bhyve ", VM_GUEST_BHYVE }, /* bhyve */ { "VBoxVBoxVBox", VM_GUEST_VBOX }, /* VirtualBox */ }; static void identify_hypervisor_cpuid_base(void) { u_int leaf, regs[4]; int i; /* * [RFC] CPUID usage for interaction between Hypervisors and Linux. * http://lkml.org/lkml/2008/10/1/246 * * KB1009458: Mechanisms to determine if software is running in * a VMware virtual machine * http://kb.vmware.com/kb/1009458 * * Search for a hypervisor that we recognize. If we cannot find * a specific hypervisor, return the first information about the * hypervisor that we found, as others may be able to use. */ for (leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) { do_cpuid(leaf, regs); /* * KVM from Linux kernels prior to commit * 57c22e5f35aa4b9b2fe11f73f3e62bbf9ef36190 set %eax * to 0 rather than a valid hv_high value. Check for * the KVM signature bytes and fixup %eax to the * highest supported leaf in that case. */ if (regs[0] == 0 && regs[1] == 0x4b4d564b && regs[2] == 0x564b4d56 && regs[3] == 0x0000004d) regs[0] = leaf + 1; if (regs[0] >= leaf) { for (i = 0; i < nitems(vm_cpuids); i++) if (strncmp((const char *)®s[1], vm_cpuids[i].vm_cpuid, 12) == 0) { vm_guest = vm_cpuids[i].vm_guest; break; } /* * If this is the first entry or we found a * specific hypervisor, record the base, high value, * and vendor identifier. 
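/*
 * For reference, a minimal userland sketch of the same signature probe,
 * using the compiler-provided <cpuid.h> helpers rather than the
 * kernel's do_cpuid(); it is only meaningful when running under a
 * hypervisor (CPUID2_HV set).  Leaf 0x40000000 returns the maximum
 * hypervisor leaf in EAX and a 12-byte vendor signature in EBX:ECX:EDX,
 * e.g. "KVMKVMKVM" or "Microsoft Hv".
 */
#include <cpuid.h>

#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	__cpuid(0x40000000, eax, ebx, ecx, edx);
	memcpy(sig, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = '\0';
	printf("max hv leaf 0x%x, signature \"%s\"\n", eax, sig);
	return (0);
}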
*/ if (vm_guest != VM_GUEST_VM || leaf == 0x40000000) { hv_base = leaf; hv_high = regs[0]; ((u_int *)&hv_vendor)[0] = regs[1]; ((u_int *)&hv_vendor)[1] = regs[2]; ((u_int *)&hv_vendor)[2] = regs[3]; hv_vendor[12] = '\0'; /* * If we found a specific hypervisor, then * we are finished. */ if (vm_guest != VM_GUEST_VM) return; } } } } void identify_hypervisor(void) { u_int regs[4]; char *p; TSENTER(); /* * If CPUID2_HV is set, we are running in a hypervisor environment. */ if (cpu_feature2 & CPUID2_HV) { vm_guest = VM_GUEST_VM; identify_hypervisor_cpuid_base(); /* If we have a definitive vendor, we can return now. */ if (*hv_vendor != '\0') { TSEXIT(); return; } } /* * Examine SMBIOS strings for older hypervisors. */ p = kern_getenv("smbios.system.serial"); if (p != NULL) { if (strncmp(p, "VMware-", 7) == 0 || strncmp(p, "VMW", 3) == 0) { vmware_hvcall(VMW_HVCMD_GETVERSION, regs); if (regs[1] == VMW_HVMAGIC) { vm_guest = VM_GUEST_VMWARE; freeenv(p); TSEXIT(); return; } } freeenv(p); } TSEXIT(); } bool fix_cpuid(void) { uint64_t msr; /* * Clear "Limit CPUID Maxval" bit and return true if the caller should * get the largest standard CPUID function number again if it is set * from BIOS. It is necessary for probing correct CPU topology later * and for the correct operation of the AVX-aware userspace. */ if (cpu_vendor_id == CPU_VENDOR_INTEL && ((CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x3) || (CPUID_TO_FAMILY(cpu_id) == 0x6 && CPUID_TO_MODEL(cpu_id) >= 0xe))) { msr = rdmsr(MSR_IA32_MISC_ENABLE); if ((msr & IA32_MISC_EN_LIMCPUID) != 0) { msr &= ~IA32_MISC_EN_LIMCPUID; wrmsr(MSR_IA32_MISC_ENABLE, msr); return (true); } } /* * Re-enable AMD Topology Extension that could be disabled by BIOS * on some notebook processors. Without the extension it's really * hard to determine the correct CPU cache topology. * See BIOS and Kernel Developer’s Guide (BKDG) for AMD Family 15h * Models 60h-6Fh Processors, Publication # 50742. */ if (vm_guest == VM_GUEST_NO && cpu_vendor_id == CPU_VENDOR_AMD && CPUID_TO_FAMILY(cpu_id) == 0x15) { msr = rdmsr(MSR_EXTFEATURES); if ((msr & ((uint64_t)1 << 54)) == 0) { msr |= (uint64_t)1 << 54; wrmsr(MSR_EXTFEATURES, msr); return (true); } } return (false); } void identify_cpu1(void) { u_int regs[4]; do_cpuid(0, regs); cpu_high = regs[0]; ((u_int *)&cpu_vendor)[0] = regs[1]; ((u_int *)&cpu_vendor)[1] = regs[3]; ((u_int *)&cpu_vendor)[2] = regs[2]; cpu_vendor[12] = '\0'; do_cpuid(1, regs); cpu_id = regs[0]; cpu_procinfo = regs[1]; cpu_feature = regs[3]; cpu_feature2 = regs[2]; } void identify_cpu2(void) { u_int regs[4], cpu_stdext_disable; if (cpu_high >= 6) { cpuid_count(6, 0, regs); cpu_power_eax = regs[0]; cpu_power_ebx = regs[1]; cpu_power_ecx = regs[2]; cpu_power_edx = regs[3]; } if (cpu_high >= 7) { cpuid_count(7, 0, regs); cpu_stdext_feature = regs[1]; /* * Some hypervisors failed to filter out unsupported * extended features. Allow to disable the * extensions, activation of which requires setting a * bit in CR4, and which VM monitors do not support. 
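/*
 * Note the register order in identify_cpu1() above: CPUID leaf 0
 * returns the vendor string in EBX:EDX:ECX rather than EBX:ECX:EDX,
 * which is why regs[1], regs[3], regs[2] are copied in that order.
 * For a GenuineIntel part, EBX holds "Genu", EDX "ineI" and ECX "ntel".
 */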
*/ cpu_stdext_disable = 0; TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable); cpu_stdext_feature &= ~cpu_stdext_disable; cpu_stdext_feature2 = regs[2]; cpu_stdext_feature3 = regs[3]; if ((cpu_stdext_feature3 & CPUID_STDEXT3_ARCH_CAP) != 0) cpu_ia32_arch_caps = rdmsr(MSR_IA32_ARCH_CAP); } } void identify_cpu_ext_features(void) { u_int regs[4]; if (cpu_high >= 7) { cpuid_count(7, 0, regs); cpu_stdext_feature2 = regs[2]; cpu_stdext_feature3 = regs[3]; } } void identify_cpu_fixup_bsp(void) { u_int regs[4]; cpu_vendor_id = find_cpu_vendor_id(); if (fix_cpuid()) { do_cpuid(0, regs); cpu_high = regs[0]; } } /* * Final stage of CPU identification. */ void finishidentcpu(void) { u_int regs[4]; #ifdef __i386__ u_char ccr3; #endif identify_cpu_fixup_bsp(); if (cpu_high >= 5 && (cpu_feature2 & CPUID2_MON) != 0) { do_cpuid(5, regs); cpu_mon_mwait_flags = regs[2]; cpu_mon_min_size = regs[0] & CPUID5_MON_MIN_SIZE; cpu_mon_max_size = regs[1] & CPUID5_MON_MAX_SIZE; } identify_cpu2(); #ifdef __i386__ if (cpu_high > 0 && (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON || cpu_vendor_id == CPU_VENDOR_TRANSMETA || cpu_vendor_id == CPU_VENDOR_CENTAUR || cpu_vendor_id == CPU_VENDOR_NSC)) { do_cpuid(0x80000000, regs); if (regs[0] >= 0x80000000) cpu_exthigh = regs[0]; } #else if (cpu_vendor_id == CPU_VENDOR_INTEL || cpu_vendor_id == CPU_VENDOR_AMD || cpu_vendor_id == CPU_VENDOR_HYGON || cpu_vendor_id == CPU_VENDOR_CENTAUR) { do_cpuid(0x80000000, regs); cpu_exthigh = regs[0]; } #endif if (cpu_exthigh >= 0x80000001) { do_cpuid(0x80000001, regs); amd_feature = regs[3] & ~(cpu_feature & 0x0183f3ff); amd_feature2 = regs[2]; } if (cpu_exthigh >= 0x80000007) { do_cpuid(0x80000007, regs); amd_rascap = regs[1]; amd_pminfo = regs[3]; } if (cpu_exthigh >= 0x80000008) { do_cpuid(0x80000008, regs); cpu_maxphyaddr = regs[0] & 0xff; amd_extended_feature_extensions = regs[1]; cpu_procinfo2 = regs[2]; } else { cpu_maxphyaddr = (cpu_feature & CPUID_PAE) != 0 ? 36 : 32; } #ifdef __i386__ if (cpu_vendor_id == CPU_VENDOR_CYRIX) { if (cpu == CPU_486) { /* * These conditions are equivalent to: * - CPU does not support cpuid instruction. * - Cyrix/IBM CPU is detected. */ if (identblue() == IDENTBLUE_IBMCPU) { strcpy(cpu_vendor, "IBM"); cpu_vendor_id = CPU_VENDOR_IBM; cpu = CPU_BLUE; return; } } switch (cpu_id & 0xf00) { case 0x600: /* * Cyrix's datasheet does not describe DIRs. * Therefor, I assume it does not have them * and use the result of the cpuid instruction. * XXX they seem to have it for now at least. -Peter */ identifycyrix(); cpu = CPU_M2; break; default: identifycyrix(); /* * This routine contains a trick. * Don't check (cpu_id & 0x00f0) == 0x50 to detect M2, now. */ switch (cyrix_did & 0x00f0) { case 0x00: case 0xf0: cpu = CPU_486DLC; break; case 0x10: cpu = CPU_CY486DX; break; case 0x20: if ((cyrix_did & 0x000f) < 8) cpu = CPU_M1; else cpu = CPU_M1SC; break; case 0x30: cpu = CPU_M1; break; case 0x40: /* MediaGX CPU */ cpu = CPU_M1SC; break; default: /* M2 and later CPUs are treated as M2. */ cpu = CPU_M2; /* * enable cpuid instruction. 
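/*
 * Usage note for the tunable consumed above: hw.cpu_stdext_disable can
 * be set from the loader to mask leaf-7 EBX features that a buggy
 * hypervisor advertises but does not actually support.  As an example
 * (the value is illustrative), hiding SMAP, which is bit 20 of
 * CPUID.(EAX=7,ECX=0):EBX, could be done with the following line in
 * loader.conf:
 *
 *	hw.cpu_stdext_disable="0x00100000"
 */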
*/ ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, CCR3_MAPEN0); write_cyrix_reg(CCR4, read_cyrix_reg(CCR4) | CCR4_CPUID); write_cyrix_reg(CCR3, ccr3); do_cpuid(0, regs); cpu_high = regs[0]; /* eax */ do_cpuid(1, regs); cpu_id = regs[0]; /* eax */ cpu_feature = regs[3]; /* edx */ break; } } } else if (cpu == CPU_486 && *cpu_vendor == '\0') { /* * There are BlueLightning CPUs that do not change * undefined flags by dividing 5 by 2. In this case, * the CPU identification routine in locore.s leaves * cpu_vendor null string and puts CPU_486 into the * cpu. */ if (identblue() == IDENTBLUE_IBMCPU) { strcpy(cpu_vendor, "IBM"); cpu_vendor_id = CPU_VENDOR_IBM; cpu = CPU_BLUE; return; } } #endif } int pti_get_default(void) { if (strcmp(cpu_vendor, AMD_VENDOR_ID) == 0 || strcmp(cpu_vendor, HYGON_VENDOR_ID) == 0) return (0); if ((cpu_ia32_arch_caps & IA32_ARCH_CAP_RDCL_NO) != 0) return (0); return (1); } static u_int find_cpu_vendor_id(void) { int i; for (i = 0; i < nitems(cpu_vendors); i++) if (strcmp(cpu_vendor, cpu_vendors[i].vendor) == 0) return (cpu_vendors[i].vendor_id); return (0); } static void print_AMD_assoc(int i) { if (i == 255) printf(", fully associative\n"); else printf(", %d-way associative\n", i); } static void print_AMD_l2_assoc(int i) { switch (i & 0x0f) { case 0: printf(", disabled/not present\n"); break; case 1: printf(", direct mapped\n"); break; case 2: printf(", 2-way associative\n"); break; case 4: printf(", 4-way associative\n"); break; case 6: printf(", 8-way associative\n"); break; case 8: printf(", 16-way associative\n"); break; case 15: printf(", fully associative\n"); break; default: printf(", reserved configuration\n"); break; } } static void print_AMD_info(void) { #ifdef __i386__ uint64_t amd_whcr; #endif u_int regs[4]; if (cpu_exthigh >= 0x80000005) { do_cpuid(0x80000005, regs); printf("L1 2MB data TLB: %d entries", (regs[0] >> 16) & 0xff); print_AMD_assoc(regs[0] >> 24); printf("L1 2MB instruction TLB: %d entries", regs[0] & 0xff); print_AMD_assoc((regs[0] >> 8) & 0xff); printf("L1 4KB data TLB: %d entries", (regs[1] >> 16) & 0xff); print_AMD_assoc(regs[1] >> 24); printf("L1 4KB instruction TLB: %d entries", regs[1] & 0xff); print_AMD_assoc((regs[1] >> 8) & 0xff); printf("L1 data cache: %d kbytes", regs[2] >> 24); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0xff); print_AMD_assoc((regs[2] >> 16) & 0xff); printf("L1 instruction cache: %d kbytes", regs[3] >> 24); printf(", %d bytes/line", regs[3] & 0xff); printf(", %d lines/tag", (regs[3] >> 8) & 0xff); print_AMD_assoc((regs[3] >> 16) & 0xff); } if (cpu_exthigh >= 0x80000006) { do_cpuid(0x80000006, regs); if ((regs[0] >> 16) != 0) { printf("L2 2MB data TLB: %d entries", (regs[0] >> 16) & 0xfff); print_AMD_l2_assoc(regs[0] >> 28); printf("L2 2MB instruction TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } else { printf("L2 2MB unified TLB: %d entries", regs[0] & 0xfff); print_AMD_l2_assoc((regs[0] >> 28) & 0xf); } if ((regs[1] >> 16) != 0) { printf("L2 4KB data TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc(regs[1] >> 28); printf("L2 4KB instruction TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } else { printf("L2 4KB unified TLB: %d entries", (regs[1] >> 16) & 0xfff); print_AMD_l2_assoc((regs[1] >> 28) & 0xf); } printf("L2 unified cache: %d kbytes", regs[2] >> 16); printf(", %d bytes/line", regs[2] & 0xff); printf(", %d lines/tag", (regs[2] >> 8) & 0x0f); print_AMD_l2_assoc((regs[2] >> 
12) & 0x0f); } #ifdef __i386__ if (((cpu_id & 0xf00) == 0x500) && (((cpu_id & 0x0f0) > 0x80) || (((cpu_id & 0x0f0) == 0x80) && (cpu_id & 0x00f) > 0x07))) { /* K6-2(new core [Stepping 8-F]), K6-III or later */ amd_whcr = rdmsr(0xc0000082); if (!(amd_whcr & (0x3ff << 22))) { printf("Write Allocate Disable\n"); } else { printf("Write Allocate Enable Limit: %dM bytes\n", (u_int32_t)((amd_whcr & (0x3ff << 22)) >> 22) * 4); printf("Write Allocate 15-16M bytes: %s\n", (amd_whcr & (1 << 16)) ? "Enable" : "Disable"); } } else if (((cpu_id & 0xf00) == 0x500) && ((cpu_id & 0x0f0) > 0x50)) { /* K6, K6-2(old core) */ amd_whcr = rdmsr(0xc0000082); if (!(amd_whcr & (0x7f << 1))) { printf("Write Allocate Disable\n"); } else { printf("Write Allocate Enable Limit: %dM bytes\n", (u_int32_t)((amd_whcr & (0x7f << 1)) >> 1) * 4); printf("Write Allocate 15-16M bytes: %s\n", (amd_whcr & 0x0001) ? "Enable" : "Disable"); printf("Hardware Write Allocate Control: %s\n", (amd_whcr & 0x0100) ? "Enable" : "Disable"); } } #endif /* * Opteron Rev E shows a bug as in very rare occasions a read memory * barrier is not performed as expected if it is followed by a * non-atomic read-modify-write instruction. * As long as that bug pops up very rarely (intensive machine usage * on other operating systems generally generates one unexplainable * crash any 2 months) and as long as a model specific fix would be * impractical at this stage, print out a warning string if the broken * model and family are identified. */ if (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) >= 0x20 && CPUID_TO_MODEL(cpu_id) <= 0x3f) printf("WARNING: This architecture revision has known SMP " "hardware bugs which may cause random instability\n"); } static void print_INTEL_info(void) { u_int regs[4]; u_int rounds, regnum; u_int nwaycode, nway; if (cpu_high >= 2) { rounds = 0; do { do_cpuid(0x2, regs); if (rounds == 0 && (rounds = (regs[0] & 0xff)) == 0) break; /* we have a buggy CPU */ for (regnum = 0; regnum <= 3; ++regnum) { if (regs[regnum] & (1<<31)) continue; if (regnum != 0) print_INTEL_TLB(regs[regnum] & 0xff); print_INTEL_TLB((regs[regnum] >> 8) & 0xff); print_INTEL_TLB((regs[regnum] >> 16) & 0xff); print_INTEL_TLB((regs[regnum] >> 24) & 0xff); } } while (--rounds > 0); } if (cpu_exthigh >= 0x80000006) { do_cpuid(0x80000006, regs); nwaycode = (regs[2] >> 12) & 0x0f; if (nwaycode >= 0x02 && nwaycode <= 0x08) nway = 1 << (nwaycode / 2); else nway = 0; printf("L2 cache: %u kbytes, %u-way associative, %u bytes/line\n", (regs[2] >> 16) & 0xffff, nway, regs[2] & 0xff); } } static void print_INTEL_TLB(u_int data) { switch (data) { case 0x0: case 0x40: default: break; case 0x1: printf("Instruction TLB: 4 KB pages, 4-way set associative, 32 entries\n"); break; case 0x2: printf("Instruction TLB: 4 MB pages, fully associative, 2 entries\n"); break; case 0x3: printf("Data TLB: 4 KB pages, 4-way set associative, 64 entries\n"); break; case 0x4: printf("Data TLB: 4 MB Pages, 4-way set associative, 8 entries\n"); break; case 0x6: printf("1st-level instruction cache: 8 KB, 4-way set associative, 32 byte line size\n"); break; case 0x8: printf("1st-level instruction cache: 16 KB, 4-way set associative, 32 byte line size\n"); break; case 0x9: printf("1st-level instruction cache: 32 KB, 4-way set associative, 64 byte line size\n"); break; case 0xa: printf("1st-level data cache: 8 KB, 2-way set associative, 32 byte line size\n"); break; case 0xb: printf("Instruction TLB: 4 MByte pages, 4-way set associative, 4 entries\n"); break; case 0xc: printf("1st-level 
data cache: 16 KB, 4-way set associative, 32 byte line size\n"); break; case 0xd: printf("1st-level data cache: 16 KBytes, 4-way set associative, 64 byte line size"); break; case 0xe: printf("1st-level data cache: 24 KBytes, 6-way set associative, 64 byte line size\n"); break; case 0x1d: printf("2nd-level cache: 128 KBytes, 2-way set associative, 64 byte line size\n"); break; case 0x21: printf("2nd-level cache: 256 KBytes, 8-way set associative, 64 byte line size\n"); break; case 0x22: printf("3rd-level cache: 512 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x23: printf("3rd-level cache: 1 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x24: printf("2nd-level cache: 1 MBytes, 16-way set associative, 64 byte line size\n"); break; case 0x25: printf("3rd-level cache: 2 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x29: printf("3rd-level cache: 4 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x2c: printf("1st-level data cache: 32 KB, 8-way set associative, 64 byte line size\n"); break; case 0x30: printf("1st-level instruction cache: 32 KB, 8-way set associative, 64 byte line size\n"); break; case 0x39: /* De-listed in SDM rev. 54 */ printf("2nd-level cache: 128 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x3b: /* De-listed in SDM rev. 54 */ printf("2nd-level cache: 128 KB, 2-way set associative, sectored cache, 64 byte line size\n"); break; case 0x3c: /* De-listed in SDM rev. 54 */ printf("2nd-level cache: 256 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x41: printf("2nd-level cache: 128 KB, 4-way set associative, 32 byte line size\n"); break; case 0x42: printf("2nd-level cache: 256 KB, 4-way set associative, 32 byte line size\n"); break; case 0x43: printf("2nd-level cache: 512 KB, 4-way set associative, 32 byte line size\n"); break; case 0x44: printf("2nd-level cache: 1 MB, 4-way set associative, 32 byte line size\n"); break; case 0x45: printf("2nd-level cache: 2 MB, 4-way set associative, 32 byte line size\n"); break; case 0x46: printf("3rd-level cache: 4 MB, 4-way set associative, 64 byte line size\n"); break; case 0x47: printf("3rd-level cache: 8 MB, 8-way set associative, 64 byte line size\n"); break; case 0x48: printf("2nd-level cache: 3MByte, 12-way set associative, 64 byte line size\n"); break; case 0x49: if (CPUID_TO_FAMILY(cpu_id) == 0xf && CPUID_TO_MODEL(cpu_id) == 0x6) printf("3rd-level cache: 4MB, 16-way set associative, 64-byte line size\n"); else printf("2nd-level cache: 4 MByte, 16-way set associative, 64 byte line size"); break; case 0x4a: printf("3rd-level cache: 6MByte, 12-way set associative, 64 byte line size\n"); break; case 0x4b: printf("3rd-level cache: 8MByte, 16-way set associative, 64 byte line size\n"); break; case 0x4c: printf("3rd-level cache: 12MByte, 12-way set associative, 64 byte line size\n"); break; case 0x4d: printf("3rd-level cache: 16MByte, 16-way set associative, 64 byte line size\n"); break; case 0x4e: printf("2nd-level cache: 6MByte, 24-way set associative, 64 byte line size\n"); break; case 0x4f: printf("Instruction TLB: 4 KByte pages, 32 entries\n"); break; case 0x50: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 64 entries\n"); break; case 0x51: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 128 entries\n"); break; case 0x52: printf("Instruction TLB: 4 KB, 2 MB or 4 MB pages, fully associative, 256 
entries\n"); break; case 0x55: printf("Instruction TLB: 2-MByte or 4-MByte pages, fully associative, 7 entries\n"); break; case 0x56: printf("Data TLB0: 4 MByte pages, 4-way set associative, 16 entries\n"); break; case 0x57: printf("Data TLB0: 4 KByte pages, 4-way associative, 16 entries\n"); break; case 0x59: printf("Data TLB0: 4 KByte pages, fully associative, 16 entries\n"); break; case 0x5a: printf("Data TLB0: 2-MByte or 4 MByte pages, 4-way set associative, 32 entries\n"); break; case 0x5b: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 64 entries\n"); break; case 0x5c: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 128 entries\n"); break; case 0x5d: printf("Data TLB: 4 KB or 4 MB pages, fully associative, 256 entries\n"); break; case 0x60: printf("1st-level data cache: 16 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x61: printf("Instruction TLB: 4 KByte pages, fully associative, 48 entries\n"); break; case 0x63: printf("Data TLB: 2 MByte or 4 MByte pages, 4-way set associative, 32 entries and a separate array with 1 GByte pages, 4-way set associative, 4 entries\n"); break; case 0x64: printf("Data TLB: 4 KBytes pages, 4-way set associative, 512 entries\n"); break; case 0x66: printf("1st-level data cache: 8 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x67: printf("1st-level data cache: 16 KB, 4-way set associative, sectored cache, 64 byte line size\n"); break; case 0x68: printf("1st-level data cache: 32 KB, 4 way set associative, sectored cache, 64 byte line size\n"); break; case 0x6a: printf("uTLB: 4KByte pages, 8-way set associative, 64 entries\n"); break; case 0x6b: printf("DTLB: 4KByte pages, 8-way set associative, 256 entries\n"); break; case 0x6c: printf("DTLB: 2M/4M pages, 8-way set associative, 128 entries\n"); break; case 0x6d: printf("DTLB: 1 GByte pages, fully associative, 16 entries\n"); break; case 0x70: printf("Trace cache: 12K-uops, 8-way set associative\n"); break; case 0x71: printf("Trace cache: 16K-uops, 8-way set associative\n"); break; case 0x72: printf("Trace cache: 32K-uops, 8-way set associative\n"); break; case 0x76: printf("Instruction TLB: 2M/4M pages, fully associative, 8 entries\n"); break; case 0x78: printf("2nd-level cache: 1 MB, 4-way set associative, 64-byte line size\n"); break; case 0x79: printf("2nd-level cache: 128 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7a: printf("2nd-level cache: 256 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7b: printf("2nd-level cache: 512 KB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7c: printf("2nd-level cache: 1 MB, 8-way set associative, sectored cache, 64 byte line size\n"); break; case 0x7d: printf("2nd-level cache: 2-MB, 8-way set associative, 64-byte line size\n"); break; case 0x7f: printf("2nd-level cache: 512-KB, 2-way set associative, 64-byte line size\n"); break; case 0x80: printf("2nd-level cache: 512 KByte, 8-way set associative, 64-byte line size\n"); break; case 0x82: printf("2nd-level cache: 256 KB, 8-way set associative, 32 byte line size\n"); break; case 0x83: printf("2nd-level cache: 512 KB, 8-way set associative, 32 byte line size\n"); break; case 0x84: printf("2nd-level cache: 1 MB, 8-way set associative, 32 byte line size\n"); break; case 0x85: printf("2nd-level cache: 2 MB, 8-way set associative, 32 byte line size\n"); break; case 0x86: printf("2nd-level cache: 512 KB, 4-way set associative, 64 byte line 
size\n"); break; case 0x87: printf("2nd-level cache: 1 MB, 8-way set associative, 64 byte line size\n"); break; case 0xa0: printf("DTLB: 4k pages, fully associative, 32 entries\n"); break; case 0xb0: printf("Instruction TLB: 4 KB Pages, 4-way set associative, 128 entries\n"); break; case 0xb1: printf("Instruction TLB: 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries\n"); break; case 0xb2: printf("Instruction TLB: 4KByte pages, 4-way set associative, 64 entries\n"); break; case 0xb3: printf("Data TLB: 4 KB Pages, 4-way set associative, 128 entries\n"); break; case 0xb4: printf("Data TLB1: 4 KByte pages, 4-way associative, 256 entries\n"); break; case 0xb5: printf("Instruction TLB: 4KByte pages, 8-way set associative, 64 entries\n"); break; case 0xb6: printf("Instruction TLB: 4KByte pages, 8-way set associative, 128 entries\n"); break; case 0xba: printf("Data TLB1: 4 KByte pages, 4-way associative, 64 entries\n"); break; case 0xc0: printf("Data TLB: 4 KByte and 4 MByte pages, 4-way associative, 8 entries\n"); break; case 0xc1: printf("Shared 2nd-Level TLB: 4 KByte/2MByte pages, 8-way associative, 1024 entries\n"); break; case 0xc2: printf("DTLB: 4 KByte/2 MByte pages, 4-way associative, 16 entries\n"); break; case 0xc3: printf("Shared 2nd-Level TLB: 4 KByte /2 MByte pages, 6-way associative, 1536 entries. Also 1GBbyte pages, 4-way, 16 entries\n"); break; case 0xc4: printf("DTLB: 2M/4M Byte pages, 4-way associative, 32 entries\n"); break; case 0xca: printf("Shared 2nd-Level TLB: 4 KByte pages, 4-way associative, 512 entries\n"); break; case 0xd0: printf("3rd-level cache: 512 KByte, 4-way set associative, 64 byte line size\n"); break; case 0xd1: printf("3rd-level cache: 1 MByte, 4-way set associative, 64 byte line size\n"); break; case 0xd2: printf("3rd-level cache: 2 MByte, 4-way set associative, 64 byte line size\n"); break; case 0xd6: printf("3rd-level cache: 1 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xd7: printf("3rd-level cache: 2 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xd8: printf("3rd-level cache: 4 MByte, 8-way set associative, 64 byte line size\n"); break; case 0xdc: printf("3rd-level cache: 1.5 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xdd: printf("3rd-level cache: 3 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xde: printf("3rd-level cache: 6 MByte, 12-way set associative, 64 byte line size\n"); break; case 0xe2: printf("3rd-level cache: 2 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xe3: printf("3rd-level cache: 4 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xe4: printf("3rd-level cache: 8 MByte, 16-way set associative, 64 byte line size\n"); break; case 0xea: printf("3rd-level cache: 12MByte, 24-way set associative, 64 byte line size\n"); break; case 0xeb: printf("3rd-level cache: 18MByte, 24-way set associative, 64 byte line size\n"); break; case 0xec: printf("3rd-level cache: 24MByte, 24-way set associative, 64 byte line size\n"); break; case 0xf0: printf("64-Byte prefetching\n"); break; case 0xf1: printf("128-Byte prefetching\n"); break; } } static void print_svm_info(void) { u_int features, regs[4]; uint64_t msr; int comma; printf("\n SVM: "); do_cpuid(0x8000000A, regs); features = regs[3]; msr = rdmsr(MSR_VM_CR); if ((msr & VM_CR_SVMDIS) == VM_CR_SVMDIS) printf("(disabled in BIOS) "); if (!bootverbose) { comma = 0; if (features & (1 << 0)) { printf("%sNP", comma ? 
"," : ""); comma = 1; } if (features & (1 << 3)) { printf("%sNRIP", comma ? "," : ""); comma = 1; } if (features & (1 << 5)) { printf("%sVClean", comma ? "," : ""); comma = 1; } if (features & (1 << 6)) { printf("%sAFlush", comma ? "," : ""); comma = 1; } if (features & (1 << 7)) { printf("%sDAssist", comma ? "," : ""); comma = 1; } printf("%sNAsids=%d", comma ? "," : "", regs[1]); return; } printf("Features=0x%b", features, "\020" "\001NP" /* Nested paging */ "\002LbrVirt" /* LBR virtualization */ "\003SVML" /* SVM lock */ "\004NRIPS" /* NRIP save */ "\005TscRateMsr" /* MSR based TSC rate control */ "\006VmcbClean" /* VMCB clean bits */ "\007FlushByAsid" /* Flush by ASID */ "\010DecodeAssist" /* Decode assist */ "\011" "\012" "\013PauseFilter" /* PAUSE intercept filter */ "\014EncryptedMcodePatch" "\015PauseFilterThreshold" /* PAUSE filter threshold */ "\016AVIC" /* virtual interrupt controller */ "\017" "\020V_VMSAVE_VMLOAD" "\021vGIF" "\022GMET" /* Guest Mode Execute Trap */ "\023" "\024" "\025GuesSpecCtl" /* Guest Spec_ctl */ "\026" "\027" "\030" "\031" "\032" "\033" "\034" "\035" "\036" "\037" "\040" ); printf("\nRevision=%d, ASIDs=%d", regs[0] & 0xff, regs[1]); } #ifdef __i386__ static void print_transmeta_info(void) { u_int regs[4], nreg = 0; do_cpuid(0x80860000, regs); nreg = regs[0]; if (nreg >= 0x80860001) { do_cpuid(0x80860001, regs); printf(" Processor revision %u.%u.%u.%u\n", (regs[1] >> 24) & 0xff, (regs[1] >> 16) & 0xff, (regs[1] >> 8) & 0xff, regs[1] & 0xff); } if (nreg >= 0x80860002) { do_cpuid(0x80860002, regs); printf(" Code Morphing Software revision %u.%u.%u-%u-%u\n", (regs[1] >> 24) & 0xff, (regs[1] >> 16) & 0xff, (regs[1] >> 8) & 0xff, regs[1] & 0xff, regs[2]); } if (nreg >= 0x80860006) { char info[65]; do_cpuid(0x80860003, (u_int*) &info[0]); do_cpuid(0x80860004, (u_int*) &info[16]); do_cpuid(0x80860005, (u_int*) &info[32]); do_cpuid(0x80860006, (u_int*) &info[48]); info[64] = 0; printf(" %s\n", info); } } #endif static void print_via_padlock_info(void) { u_int regs[4]; do_cpuid(0xc0000001, regs); printf("\n VIA Padlock Features=0x%b", regs[3], "\020" "\003RNG" /* RNG */ "\007AES" /* ACE */ "\011AES-CTR" /* ACE2 */ "\013SHA1,SHA256" /* PHE */ "\015RSA" /* PMM */ ); } static uint32_t vmx_settable(uint64_t basic, int msr, int true_msr) { uint64_t val; if (basic & (1ULL << 55)) val = rdmsr(true_msr); else val = rdmsr(msr); /* Just report the controls that can be set to 1. */ return (val >> 32); } static void print_vmx_info(void) { uint64_t basic, msr; uint32_t entry, exit, mask, pin, proc, proc2; int comma; printf("\n VT-x: "); msr = rdmsr(MSR_IA32_FEATURE_CONTROL); if (!(msr & IA32_FEATURE_CONTROL_VMX_EN)) printf("(disabled in BIOS) "); basic = rdmsr(MSR_VMX_BASIC); pin = vmx_settable(basic, MSR_VMX_PINBASED_CTLS, MSR_VMX_TRUE_PINBASED_CTLS); proc = vmx_settable(basic, MSR_VMX_PROCBASED_CTLS, MSR_VMX_TRUE_PROCBASED_CTLS); if (proc & PROCBASED_SECONDARY_CONTROLS) proc2 = vmx_settable(basic, MSR_VMX_PROCBASED_CTLS2, MSR_VMX_PROCBASED_CTLS2); else proc2 = 0; exit = vmx_settable(basic, MSR_VMX_EXIT_CTLS, MSR_VMX_TRUE_EXIT_CTLS); entry = vmx_settable(basic, MSR_VMX_ENTRY_CTLS, MSR_VMX_TRUE_ENTRY_CTLS); if (!bootverbose) { comma = 0; if (exit & VM_EXIT_SAVE_PAT && exit & VM_EXIT_LOAD_PAT && entry & VM_ENTRY_LOAD_PAT) { printf("%sPAT", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_HLT_EXITING) { printf("%sHLT", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_MTF) { printf("%sMTF", comma ? 
"," : ""); comma = 1; } if (proc & PROCBASED_PAUSE_EXITING) { printf("%sPAUSE", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_ENABLE_EPT) { printf("%sEPT", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_UNRESTRICTED_GUEST) { printf("%sUG", comma ? "," : ""); comma = 1; } if (proc2 & PROCBASED2_ENABLE_VPID) { printf("%sVPID", comma ? "," : ""); comma = 1; } if (proc & PROCBASED_USE_TPR_SHADOW && proc2 & PROCBASED2_VIRTUALIZE_APIC_ACCESSES && proc2 & PROCBASED2_VIRTUALIZE_X2APIC_MODE && proc2 & PROCBASED2_APIC_REGISTER_VIRTUALIZATION && proc2 & PROCBASED2_VIRTUAL_INTERRUPT_DELIVERY) { printf("%sVID", comma ? "," : ""); comma = 1; if (pin & PINBASED_POSTED_INTERRUPT) printf(",PostIntr"); } return; } mask = basic >> 32; printf("Basic Features=0x%b", mask, "\020" "\02132PA" /* 32-bit physical addresses */ "\022SMM" /* SMM dual-monitor */ "\027INS/OUTS" /* VM-exit info for INS and OUTS */ "\030TRUE" /* TRUE_CTLS MSRs */ ); printf("\n Pin-Based Controls=0x%b", pin, "\020" "\001ExtINT" /* External-interrupt exiting */ "\004NMI" /* NMI exiting */ "\006VNMI" /* Virtual NMIs */ "\007PreTmr" /* Activate VMX-preemption timer */ "\010PostIntr" /* Process posted interrupts */ ); printf("\n Primary Processor Controls=0x%b", proc, "\020" "\003INTWIN" /* Interrupt-window exiting */ "\004TSCOff" /* Use TSC offsetting */ "\010HLT" /* HLT exiting */ "\012INVLPG" /* INVLPG exiting */ "\013MWAIT" /* MWAIT exiting */ "\014RDPMC" /* RDPMC exiting */ "\015RDTSC" /* RDTSC exiting */ "\020CR3-LD" /* CR3-load exiting */ "\021CR3-ST" /* CR3-store exiting */ "\024CR8-LD" /* CR8-load exiting */ "\025CR8-ST" /* CR8-store exiting */ "\026TPR" /* Use TPR shadow */ "\027NMIWIN" /* NMI-window exiting */ "\030MOV-DR" /* MOV-DR exiting */ "\031IO" /* Unconditional I/O exiting */ "\032IOmap" /* Use I/O bitmaps */ "\034MTF" /* Monitor trap flag */ "\035MSRmap" /* Use MSR bitmaps */ "\036MONITOR" /* MONITOR exiting */ "\037PAUSE" /* PAUSE exiting */ ); if (proc & PROCBASED_SECONDARY_CONTROLS) printf("\n Secondary Processor Controls=0x%b", proc2, "\020" "\001APIC" /* Virtualize APIC accesses */ "\002EPT" /* Enable EPT */ "\003DT" /* Descriptor-table exiting */ "\004RDTSCP" /* Enable RDTSCP */ "\005x2APIC" /* Virtualize x2APIC mode */ "\006VPID" /* Enable VPID */ "\007WBINVD" /* WBINVD exiting */ "\010UG" /* Unrestricted guest */ "\011APIC-reg" /* APIC-register virtualization */ "\012VID" /* Virtual-interrupt delivery */ "\013PAUSE-loop" /* PAUSE-loop exiting */ "\014RDRAND" /* RDRAND exiting */ "\015INVPCID" /* Enable INVPCID */ "\016VMFUNC" /* Enable VM functions */ "\017VMCS" /* VMCS shadowing */ "\020EPT#VE" /* EPT-violation #VE */ "\021XSAVES" /* Enable XSAVES/XRSTORS */ ); printf("\n Exit Controls=0x%b", mask, "\020" "\003DR" /* Save debug controls */ /* Ignore Host address-space size */ "\015PERF" /* Load MSR_PERF_GLOBAL_CTRL */ "\020AckInt" /* Acknowledge interrupt on exit */ "\023PAT-SV" /* Save MSR_PAT */ "\024PAT-LD" /* Load MSR_PAT */ "\025EFER-SV" /* Save MSR_EFER */ "\026EFER-LD" /* Load MSR_EFER */ "\027PTMR-SV" /* Save VMX-preemption timer value */ ); printf("\n Entry Controls=0x%b", mask, "\020" "\003DR" /* Save debug controls */ /* Ignore IA-32e mode guest */ /* Ignore Entry to SMM */ /* Ignore Deactivate dual-monitor treatment */ "\016PERF" /* Load MSR_PERF_GLOBAL_CTRL */ "\017PAT" /* Load MSR_PAT */ "\020EFER" /* Load MSR_EFER */ ); if (proc & PROCBASED_SECONDARY_CONTROLS && (proc2 & (PROCBASED2_ENABLE_EPT | PROCBASED2_ENABLE_VPID)) != 0) { msr = rdmsr(MSR_VMX_EPT_VPID_CAP); mask = msr; 
printf("\n EPT Features=0x%b", mask, "\020" "\001XO" /* Execute-only translations */ "\007PW4" /* Page-walk length of 4 */ "\011UC" /* EPT paging-structure mem can be UC */ "\017WB" /* EPT paging-structure mem can be WB */ "\0212M" /* EPT PDE can map a 2-Mbyte page */ "\0221G" /* EPT PDPTE can map a 1-Gbyte page */ "\025INVEPT" /* INVEPT is supported */ "\026AD" /* Accessed and dirty flags for EPT */ "\032single" /* INVEPT single-context type */ "\033all" /* INVEPT all-context type */ ); mask = msr >> 32; printf("\n VPID Features=0x%b", mask, "\020" "\001INVVPID" /* INVVPID is supported */ "\011individual" /* INVVPID individual-address type */ "\012single" /* INVVPID single-context type */ "\013all" /* INVVPID all-context type */ /* INVVPID single-context-retaining-globals type */ "\014single-globals" ); } } static void print_hypervisor_info(void) { if (*hv_vendor != '\0') printf("Hypervisor: Origin = \"%s\"\n", hv_vendor); } /* * Returns the maximum physical address that can be used with the * current system. */ vm_paddr_t cpu_getmaxphyaddr(void) { #if defined(__i386__) if (!pae_mode) return (0xffffffff); #endif return ((1ULL << cpu_maxphyaddr) - 1); }
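/*
 * Worked example for cpu_getmaxphyaddr(): cpu_maxphyaddr comes from
 * CPUID leaf 0x80000008 when available, and otherwise defaults to 36
 * with PAE or 32 without, so a part reporting 36 physical address bits
 * yields (1ULL << 36) - 1 = 0xfffffffff, while an i386 kernel with
 * pae_mode disabled is capped at 0xffffffff.
 */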