Index: head/sys/arm64/arm64/identcpu.c =================================================================== --- head/sys/arm64/arm64/identcpu.c (revision 354170) +++ head/sys/arm64/arm64/identcpu.c (revision 354171) @@ -1,1263 +1,1263 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include static int ident_lock; static void print_cpu_features(u_int cpu); static u_long parse_cpu_features_hwcap(u_int cpu); char machine[] = "arm64"; #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "arm"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); static char cpu_model[64]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, sizeof(cpu_model), "Machine model"); /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. 
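 *
 * As an example (values are hypothetical), an MPIDR_EL1 of
 * 0x0000000000000101 has Aff1 = 1 and Aff0 = 1, so CPU_AFF1() names
 * the second cluster and CPU_AFF0() the second CPU within that cluster.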
* * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { u_int cpu_impl; u_int cpu_part_num; u_int cpu_variant; u_int cpu_revision; const char *cpu_impl_name; const char *cpu_part_name; uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; }; struct cpu_desc cpu_desc[MAXCPU]; struct cpu_desc user_cpu_desc; static u_int cpu_print_regs; #define PRINT_ID_AA64_AFR0 0x00000001 #define PRINT_ID_AA64_AFR1 0x00000002 #define PRINT_ID_AA64_DFR0 0x00000010 #define PRINT_ID_AA64_DFR1 0x00000020 #define PRINT_ID_AA64_ISAR0 0x00000100 #define PRINT_ID_AA64_ISAR1 0x00000200 #define PRINT_ID_AA64_MMFR0 0x00001000 #define PRINT_ID_AA64_MMFR1 0x00002000 #define PRINT_ID_AA64_MMFR2 0x00004000 #define PRINT_ID_AA64_PFR0 0x00010000 #define PRINT_ID_AA64_PFR1 0x00020000 struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, "Unknown Processor" } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { { CPU_PART_THUNDERX, "ThunderX" }, { CPU_PART_THUNDERX2, "ThunderX2" }, CPU_PART_NONE, }; /* APM / Ampere */ static const struct cpu_parts cpu_parts_apm[] = { { CPU_PART_EMAG8180, "eMAG 8180" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_apm }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, CPU_IMPLEMENTER_NONE, }; #define MRS_TYPE_MASK 0xf #define MRS_INVALID 0 #define MRS_EXACT 1 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4)) #define MRS_EXACT_FIELD(x) ((x) >> 4) #define MRS_LOWER 2 struct mrs_field_value { uint64_t value; const char *desc; }; #define MRS_FIELD_VALUE(_value, _desc) \ { \ .value = (_value), \ .desc = (_desc), \ } #define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field) #define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s") #define MRS_FIELD_VALUE_END { .desc = NULL } struct mrs_field { const char *name; struct mrs_field_value *values; uint64_t mask; bool sign; u_int type; u_int shift; }; #define MRS_FIELD(_register, _name, _sign, _type, _values) \ { \ .name = #_name, \ .sign = (_sign), \ .type = (_type), \ .shift = _register ## _ ## _name ## _SHIFT, \ .mask = _register ## _ ## _name ## _MASK, \ .values = (_values), \ } #define MRS_FIELD_END { .type = MRS_INVALID, } /* ID_AA64AFR0_EL1 */ static struct mrs_field id_aa64afr0_fields[] = { MRS_FIELD_END, }; /* ID_AA64AFR1_EL1 */ static struct mrs_field id_aa64afr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64DFR0_EL1 */ static struct mrs_field_value id_aa64dfr0_pmsver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_wrps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_brps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"), MRS_FIELD_VALUE_END, }; static 
struct mrs_field_value id_aa64dfr0_pmuver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_tracever[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_debugver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64dfr0_fields[] = { MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver), MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT, id_aa64dfr0_ctx_cmps), MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps), MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_EXACT, id_aa64dfr0_brps), MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver), MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT, id_aa64dfr0_tracever), MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6), id_aa64dfr0_debugver), MRS_FIELD_END, }; /* ID_AA64DFR1 */ static struct mrs_field id_aa64dfr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64ISAR0_EL1 */ static struct mrs_field_value id_aa64isar0_dp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sm3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_atomic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64isar0_fields[] = { MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp), MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4), MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3), MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3), MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm), MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic), MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32), MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2), MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1), MRS_FIELD(ID_AA64ISAR0, AES, 
false, MRS_LOWER, id_aa64isar0_aes), MRS_FIELD_END, }; /* ID_AA64ISAR1_EL1 */ static struct mrs_field_value id_aa64isar1_gpi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_gpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_lrcpc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LRCPC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_fcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_jscvt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_api[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_apa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_dpb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DPB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64isar1_fields[] = { MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi), MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa), MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc), MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma), MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt), MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api), MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa), MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb), MRS_FIELD_END, }; /* ID_AA64MMFR0_EL1 */ static struct mrs_field_value id_aa64mmfr0_tgran4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_tgran64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_tgran16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_snsmem[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_bigend[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_asid_bits[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"), MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_parange[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64mmfr0_fields[] = { MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4), MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT, id_aa64mmfr0_tgran64), MRS_FIELD(ID_AA64MMFR0, TGran16, false, 
	    MRS_EXACT, id_aa64mmfr0_tgran16),
	MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT,
	    id_aa64mmfr0_bigend_el0),
	MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem),
	MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend),
	MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT,
	    id_aa64mmfr0_asid_bits),
	MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT,
	    id_aa64mmfr0_parange),
	MRS_FIELD_END,
};


/* ID_AA64MMFR1_EL1 */
static struct mrs_field_value id_aa64mmfr1_xnx[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_specsei[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_pan[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL),
	MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_lo[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_hpds[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_vh[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_vmidbits[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr1_hafdbs[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"),
	MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field id_aa64mmfr1_fields[] = {
	MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx),
	MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT,
	    id_aa64mmfr1_specsei),
	MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan),
	MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo),
	MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds),
	MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh),
	MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT,
	    id_aa64mmfr1_vmidbits),
	MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs),
	MRS_FIELD_END,
};


/* ID_AA64MMFR2_EL1 */
static struct mrs_field_value id_aa64mmfr2_nv[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_ccidx[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"),
	MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_varange[] = {
	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"),
	MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_iesb[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_lsm[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_uao[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field_value id_aa64mmfr2_cnp[] = {
	MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL),
	MRS_FIELD_VALUE_END,
};

static struct mrs_field
id_aa64mmfr2_fields[] = { MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv), MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx), MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT, id_aa64mmfr2_varange), MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb), MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm), MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao), MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp), MRS_FIELD_END, }; /* ID_AA64PFR0_EL1 */ static struct mrs_field_value id_aa64pfr0_sve[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_ras[] = { MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_gic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_advsimd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_fp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el1[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"), MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el0[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"), MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64pfr0_fields[] = { MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, id_aa64pfr0_sve), MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras), MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic), MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd), MRS_FIELD(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp), MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3), MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2), MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1), MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0), MRS_FIELD_END, }; /* ID_AA64PFR1_EL1 */ static struct mrs_field id_aa64pfr1_fields[] = { MRS_FIELD_END, }; struct mrs_user_reg { u_int CRm; u_int Op2; size_t offset; struct mrs_field *fields; }; static struct mrs_user_reg user_regs[] = { { /* id_aa64isar0_el1 */ .CRm = 6, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64isar0), .fields = id_aa64isar0_fields, }, { /* id_aa64isar1_el1 */ .CRm = 6, .Op2 = 1, .offset = __offsetof(struct cpu_desc, id_aa64isar1), .fields = id_aa64isar1_fields, }, { /* id_aa64pfr0_el1 */ .CRm = 4, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64pfr0), .fields = id_aa64pfr0_fields, }, { /* id_aa64dfr0_el1 */ .CRm = 5, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64dfr0), .fields = id_aa64dfr0_fields, }, }; #define CPU_DESC_FIELD(desc, idx) \ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset) static int 
user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
    uint32_t esr)
{
	uint64_t value;
	int CRm, Op2, i, reg;

	if ((insn & MRS_MASK) != MRS_VALUE)
		return (0);

	/*
	 * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}.
	 * These are in the EL1 CPU identification space.
	 * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVIDR_EL1.
	 * CRm == {4-7} holds the ID_AA64 registers.
	 *
	 * For full details see the ARMv8 ARM (ARM DDI 0487C.a)
	 * Table D9-2 System instruction encodings for non-Debug System
	 * register accesses.
	 */
	if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0)
		return (0);

	CRm = mrs_CRm(insn);
	if (CRm > 7 || (CRm < 4 && CRm != 0))
		return (0);

	Op2 = mrs_Op2(insn);
	value = 0;

	for (i = 0; i < nitems(user_regs); i++) {
		if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) {
			value = CPU_DESC_FIELD(user_cpu_desc, i);
			break;
		}
	}

	if (CRm == 0) {
		switch (Op2) {
		case 0:
			value = READ_SPECIALREG(midr_el1);
			break;
		case 5:
			value = READ_SPECIALREG(mpidr_el1);
			break;
		case 6:
			value = READ_SPECIALREG(revidr_el1);
			break;
		default:
			return (0);
		}
	}

	/*
	 * We will handle this instruction, move to the next so we
	 * don't trap here again.
	 */
	frame->tf_elr += INSN_SIZE;

	reg = MRS_REGISTER(insn);
	/* If reg is 31 then write to xzr, i.e. do nothing */
	if (reg == 31)
		return (1);

	if (reg < nitems(frame->tf_x))
		frame->tf_x[reg] = value;
	else if (reg == 30)
		frame->tf_lr = value;

	return (1);
}

static void
update_user_regs(u_int cpu)
{
	struct mrs_field *fields;
	uint64_t cur, value;
	int i, j, cur_field, new_field;

	for (i = 0; i < nitems(user_regs); i++) {
		value = CPU_DESC_FIELD(cpu_desc[cpu], i);
		if (cpu == 0)
			cur = value;
		else
			cur = CPU_DESC_FIELD(user_cpu_desc, i);

		fields = user_regs[i].fields;
		for (j = 0; fields[j].type != 0; j++) {
			switch (fields[j].type & MRS_TYPE_MASK) {
			case MRS_EXACT:
				cur &= ~(0xfu << fields[j].shift);
				cur |= (uint64_t)MRS_EXACT_FIELD(fields[j].type)
				    << fields[j].shift;
				break;
			case MRS_LOWER:
				new_field = (value >> fields[j].shift) & 0xf;
				cur_field = (cur >> fields[j].shift) & 0xf;
				if ((fields[j].sign &&
				     (int)new_field < (int)cur_field) ||
				    (!fields[j].sign &&
				     (u_int)new_field < (u_int)cur_field)) {
					cur &= ~(0xfu << fields[j].shift);
					cur |= new_field << fields[j].shift;
				}
				break;
			default:
				panic("Invalid field type: %d",
				    fields[j].type);
			}
		}

		CPU_DESC_FIELD(user_cpu_desc, i) = cur;
	}
}

/* HWCAP */
extern u_long elf_hwcap;

static void
identify_cpu_sysinit(void *dummy __unused)
{
	int cpu;
	u_long hwcap;

	/* Create a user visible cpu description with safe values */
	memset(&user_cpu_desc, 0, sizeof(user_cpu_desc));
	/* Safe values for these registers */
	user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE |
	    ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64;
	user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8;

	CPU_FOREACH(cpu) {
		print_cpu_features(cpu);
		hwcap = parse_cpu_features_hwcap(cpu);
		if (elf_hwcap == 0)
			elf_hwcap = hwcap;
		else
			elf_hwcap &= hwcap;
		update_user_regs(cpu);
	}

	install_undef_handler(true, user_mrs_handler);
}
SYSINIT(identity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL);

static u_long
parse_cpu_features_hwcap(u_int cpu)
{
	u_long hwcap = 0;

-	if (ID_AA64ISAR0_DP(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
+	if (ID_AA64ISAR0_DP_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL)
		hwcap |= HWCAP_ASIMDDP;

-	if (ID_AA64ISAR0_SM4(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
+	if (ID_AA64ISAR0_SM4_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL)
		hwcap |= HWCAP_SM4;
-	if (ID_AA64ISAR0_SM3(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
+	if (ID_AA64ISAR0_SM3_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL)
		hwcap |= HWCAP_SM3;

-	if (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
+	if (ID_AA64ISAR0_RDM_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_RDM_IMPL)
		hwcap |= HWCAP_ASIMDRDM;

-	if (ID_AA64ISAR0_Atomic(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
+	if (ID_AA64ISAR0_Atomic_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL)
		hwcap |= HWCAP_ATOMICS;

-	if (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
+	if (ID_AA64ISAR0_CRC32_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE)
		hwcap |= HWCAP_CRC32;

-	switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
+	switch (ID_AA64ISAR0_SHA2_VAL(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_SHA2_BASE:
		hwcap |= HWCAP_SHA2;
		break;
	case ID_AA64ISAR0_SHA2_512:
		hwcap |= HWCAP_SHA2 | HWCAP_SHA512;
		break;
	default:
		break;
	}

-	if (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0))
+	if (ID_AA64ISAR0_SHA1_VAL(cpu_desc[cpu].id_aa64isar0))
		hwcap |= HWCAP_SHA1;

-	switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
+	switch (ID_AA64ISAR0_AES_VAL(cpu_desc[cpu].id_aa64isar0)) {
	case ID_AA64ISAR0_AES_BASE:
		hwcap |= HWCAP_AES;
		break;
	case ID_AA64ISAR0_AES_PMULL:
		hwcap |= HWCAP_PMULL | HWCAP_AES;
		break;
	default:
		break;
	}

-	if (ID_AA64ISAR1_LRCPC(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
+	if (ID_AA64ISAR1_LRCPC_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL)
		hwcap |= HWCAP_LRCPC;

-	if (ID_AA64ISAR1_FCMA(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
+	if (ID_AA64ISAR1_FCMA_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL)
		hwcap |= HWCAP_FCMA;

-	if (ID_AA64ISAR1_JSCVT(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
+	if (ID_AA64ISAR1_JSCVT_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL)
		hwcap |= HWCAP_JSCVT;

-	if (ID_AA64ISAR1_DPB(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
+	if (ID_AA64ISAR1_DPB_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL)
		hwcap |= HWCAP_DCPOP;

-	if (ID_AA64PFR0_SVE(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
+	if (ID_AA64PFR0_SVE_VAL(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL)
		hwcap |= HWCAP_SVE;

-	switch (ID_AA64PFR0_AdvSIMD(cpu_desc[cpu].id_aa64pfr0)) {
+	switch (ID_AA64PFR0_AdvSIMD_VAL(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_AdvSIMD_IMPL:
		hwcap |= HWCAP_ASIMD;
		break;
	case ID_AA64PFR0_AdvSIMD_HP:
		hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP;
		break;
	default:
		break;
	}

-	switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
+	switch (ID_AA64PFR0_FP_VAL(cpu_desc[cpu].id_aa64pfr0)) {
	case ID_AA64PFR0_FP_IMPL:
		hwcap |= HWCAP_FP;
		break;
	case ID_AA64PFR0_FP_HP:
		hwcap |= HWCAP_FP | HWCAP_FPHP;
		break;
	default:
		break;
	}

	return (hwcap);
}

static void
print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg,
    struct mrs_field *fields)
{
	struct mrs_field_value *fv;
	int field, i, j, printed;

	sbuf_printf(sb, "%29s = <", reg_name);
#define	SEP_STR	((printed++) == 0) ?
"" : "," printed = 0; for (i = 0; fields[i].type != 0; i++) { fv = fields[i].values; /* TODO: Handle with an unknown message */ if (fv == NULL) continue; field = (reg & fields[i].mask) >> fields[i].shift; for (j = 0; fv[j].desc != NULL; j++) { if ((fv[j].value >> fields[i].shift) != field) continue; if (fv[j].desc[0] != '\0') sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc); break; } if (fv[j].desc == NULL) sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR, fields[i].name, field); reg &= ~(0xful << fields[i].shift); } if (reg != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, reg); #undef SEP_STR sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } static void print_cpu_features(u_int cpu) { struct sbuf *sb; sb = sbuf_new_auto(); sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); sbuf_cat(sb, " affinity:"); switch(cpu_aff_levels) { default: case 4: sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 3: sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 2: sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr)); break; } sbuf_finish(sb); printf("%s\n", sbuf_data(sb)); sbuf_clear(sb); /* * There is a hardware errata where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive the * store-exclusive may return the wrong status. A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. 
*/ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); /* AArch64 Instruction Set Attribute Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) print_id_register(sb, "Instruction Set Attributes 0", cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields); /* AArch64 Instruction Set Attribute Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) print_id_register(sb, "Instruction Set Attributes 1", cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields); /* AArch64 Processor Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) print_id_register(sb, "Processor Features 0", cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields); /* AArch64 Processor Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) print_id_register(sb, "Processor Features 1", cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields); /* AArch64 Memory Model Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) print_id_register(sb, "Memory Model Features 0", cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) print_id_register(sb, "Memory Model Features 1", cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields); /* AArch64 Memory Model Feature Register 2 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) print_id_register(sb, "Memory Model Features 2", cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields); /* AArch64 Debug Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) print_id_register(sb, "Debug Features 0", cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) print_id_register(sb, "Debug Features 1", cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields); /* AArch64 Auxiliary Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) print_id_register(sb, "Auxiliary Features 0", cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields); /* AArch64 Auxiliary Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) print_id_register(sb, "Auxiliary Features 1", cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields); sbuf_delete(sb); sb = NULL; #undef SEP_STR } void identify_cpu(void) { u_int midr; u_int impl_id; u_int part_id; u_int cpu; size_t i; const struct cpu_parts *cpu_partsp = NULL; cpu = PCPU_GET(cpuid); midr = get_midr(); /* * Store midr to pcpu to allow fast reading * from EL0, EL1 and assembly code. 
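	 *
	 * Consumers can then use PCPU_GET(midr), as the errata check in
	 * print_cpu_features() does, instead of re-reading midr_el1.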
*/ PCPU_SET(midr, midr); impl_id = CPU_IMPL(midr); for (i = 0; i < nitems(cpu_implementers); i++) { if (impl_id == cpu_implementers[i].impl_id || cpu_implementers[i].impl_id == 0) { cpu_desc[cpu].cpu_impl = impl_id; cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } part_id = CPU_PART(midr); for (i = 0; &cpu_partsp[i] != NULL; i++) { if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) { cpu_desc[cpu].cpu_part_num = part_id; cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name; break; } } cpu_desc[cpu].cpu_revision = CPU_REV(midr); cpu_desc[cpu].cpu_variant = CPU_VAR(midr); snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d", cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); /* Save affinity for current CPU */ cpu_desc[cpu].mpidr = get_mpidr(); CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK; cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1); cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1); cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1); cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1); cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1); cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1); cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1); cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1); if (cpu != 0) { /* * This code must run on one cpu at a time, but we are * not scheduling on the current core so implement a * simple spinlock. */ while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0) __asm __volatile("wfe" ::: "memory"); switch (cpu_aff_levels) { case 0: if (CPU_AFF0(cpu_desc[cpu].mpidr) != CPU_AFF0(cpu_desc[0].mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(cpu_desc[cpu].mpidr) != CPU_AFF1(cpu_desc[0].mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(cpu_desc[cpu].mpidr) != CPU_AFF2(cpu_desc[0].mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(cpu_desc[cpu].mpidr) != CPU_AFF3(cpu_desc[0].mpidr)) cpu_aff_levels = 4; break; } if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0) cpu_print_regs |= PRINT_ID_AA64_AFR0; if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1) cpu_print_regs |= PRINT_ID_AA64_AFR1; if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0) cpu_print_regs |= PRINT_ID_AA64_DFR0; if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1) cpu_print_regs |= PRINT_ID_AA64_DFR1; if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0) cpu_print_regs |= PRINT_ID_AA64_ISAR0; if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1) cpu_print_regs |= PRINT_ID_AA64_ISAR1; if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0) cpu_print_regs |= PRINT_ID_AA64_MMFR0; if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1) cpu_print_regs |= PRINT_ID_AA64_MMFR1; if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2) cpu_print_regs |= PRINT_ID_AA64_MMFR2; if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0) cpu_print_regs |= PRINT_ID_AA64_PFR0; if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1) cpu_print_regs |= PRINT_ID_AA64_PFR1; /* Wake up the other CPUs */ atomic_store_rel_int(&ident_lock, 0); __asm __volatile("sev" ::: "memory"); } } Index: head/sys/arm64/arm64/machdep.c =================================================================== --- head/sys/arm64/arm64/machdep.c 
(revision 354170) +++ head/sys/arm64/arm64/machdep.c (revision 354171) @@ -1,1255 +1,1255 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "opt_acpi.h" #include "opt_platform.h" #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef DEV_ACPI #include #include #endif #ifdef FDT #include #include #endif enum arm64_bus arm64_bus_method = ARM64_BUS_NONE; struct pcpu __pcpu[MAXCPU]; static struct trapframe proc0_tf; int early_boot = 1; int cold = 1; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ int64_t dczva_line_size; /* The size of cache line the dc zva zeroes */ int has_pan; /* * Physical address of the EFI System Table. Stashed from the metadata hints * passed into the kernel and used by the EFI code to call runtime services. */ vm_paddr_t efi_systbl_phys; /* pagezero_* implementations are provided in support.S */ void pagezero_simple(void *); void pagezero_cache(void *); /* pagezero_simple is default pagezero */ void (*pagezero)(void *p) = pagezero_simple; static void pan_setup(void) { uint64_t id_aa64mfr1; id_aa64mfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); - if (ID_AA64MMFR1_PAN(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE) + if (ID_AA64MMFR1_PAN_VAL(id_aa64mfr1) != ID_AA64MMFR1_PAN_NONE) has_pan = 1; } void pan_enable(void) { /* * The LLVM integrated assembler doesn't understand the PAN * PSTATE field. Because of this we need to manually create * the instruction in an asm block. This is equivalent to: * msr pan, #1 * * This sets the PAN bit, stopping the kernel from accessing * memory when userspace can also access it unless the kernel * uses the userspace load/store instructions. 
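	 *
	 * In the MSR (immediate) encoding 0xd500409f, bits [11:8] are the
	 * CRm field that carries the immediate, so OR-ing in (0x1 << 8)
	 * supplies the #1 operand.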
*/ if (has_pan) { WRITE_SPECIALREG(sctlr_el1, READ_SPECIALREG(sctlr_el1) & ~SCTLR_SPAN); __asm __volatile(".inst 0xd500409f | (0x1 << 8)"); } } static void cpu_startup(void *dummy) { undef_init(); identify_cpu(); install_cpu_errata(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sp = frame->tf_sp; regs->lr = frame->tf_lr; regs->elr = frame->tf_elr; regs->spsr = frame->tf_spsr; memcpy(regs->x, frame->tf_x, sizeof(regs->x)); #ifdef COMPAT_FREEBSD32 /* * We may be called here for a 32bits process, if we're using a * 64bits debugger. If so, put PC and SPSR where it expects it. */ if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { regs->x[15] = frame->tf_elr; regs->x[16] = frame->tf_spsr; } #endif return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sp = regs->sp; frame->tf_lr = regs->lr; frame->tf_elr = regs->elr; frame->tf_spsr &= ~PSR_FLAGS; frame->tf_spsr |= regs->spsr & PSR_FLAGS; memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x)); #ifdef COMPAT_FREEBSD32 if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) { /* * We may be called for a 32bits process if we're using * a 64bits debugger. If so, get PC and SPSR from where * it put it. */ frame->tf_elr = regs->x[15]; frame->tf_spsr = regs->x[16] & PSR_FLAGS; } #endif return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. */ if (td == curthread) vfp_save_state(td, pcb); KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called fill_fpregs while the kernel is using the VFP")); memcpy(regs->fp_q, pcb->pcb_fpustate.vfp_regs, sizeof(regs->fp_q)); regs->fp_cr = pcb->pcb_fpustate.vfp_fpcr; regs->fp_sr = pcb->pcb_fpustate.vfp_fpsr; } else #endif memset(regs, 0, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { #ifdef VFP struct pcb *pcb; pcb = td->td_pcb; KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate, ("Called set_fpregs while the kernel is using the VFP")); memcpy(pcb->pcb_fpustate.vfp_regs, regs->fp_q, sizeof(regs->fp_q)); pcb->pcb_fpustate.vfp_fpcr = regs->fp_cr; pcb->pcb_fpustate.vfp_fpsr = regs->fp_sr; #endif return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: fill_dbregs"); return (EDOOFUS); } int set_dbregs(struct thread *td, struct dbreg *regs) { printf("ARM64TODO: set_dbregs"); return (EDOOFUS); } #ifdef COMPAT_FREEBSD32 int fill_regs32(struct thread *td, struct reg32 *regs) { int i; struct trapframe *tf; tf = td->td_frame; for (i = 0; i < 13; i++) regs->r[i] = tf->tf_x[i]; /* For arm32, SP is r13 and LR is r14 */ regs->r_sp = tf->tf_x[13]; regs->r_lr = tf->tf_x[14]; regs->r_pc = tf->tf_elr; regs->r_cpsr = tf->tf_spsr; return (0); } int set_regs32(struct thread *td, struct reg32 *regs) { int i; struct trapframe *tf; tf = td->td_frame; for (i = 0; i < 13; i++) tf->tf_x[i] = regs->r[i]; /* For arm 32, SP is r13 an LR is r14 */ tf->tf_x[13] = regs->r_sp; tf->tf_x[14] = regs->r_lr; tf->tf_elr = regs->r_pc; tf->tf_spsr = regs->r_cpsr; return (0); } int fill_fpregs32(struct thread *td, struct fpreg32 *regs) { printf("ARM64TODO: fill_fpregs32"); return (EDOOFUS); } int set_fpregs32(struct 
thread *td, struct fpreg32 *regs) { printf("ARM64TODO: set_fpregs32"); return (EDOOFUS); } int fill_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: fill_dbregs32"); return (EDOOFUS); } int set_dbregs32(struct thread *td, struct dbreg32 *regs) { printf("ARM64TODO: set_dbregs32"); return (EDOOFUS); } #endif int ptrace_set_pc(struct thread *td, u_long addr) { printf("ARM64TODO: ptrace_set_pc"); return (EDOOFUS); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_spsr |= PSR_SS; td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; return (0); } int ptrace_clear_single_step(struct thread *td) { td->td_frame->tf_spsr &= ~PSR_SS; td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); tf->tf_x[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_lr = imgp->entry_addr; tf->tf_elr = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct gpregs *)0)->gp_x); CTASSERT(sizeof(((struct trapframe *)0)->tf_x) == sizeof((struct reg *)0)->x); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_x[0] = 0; mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C; } else { mcp->mc_gpregs.gp_x[0] = tf->tf_x[0]; mcp->mc_gpregs.gp_spsr = tf->tf_spsr; } memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1], sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1)); mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_lr = tf->tf_lr; mcp->mc_gpregs.gp_elr = tf->tf_elr; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; uint32_t spsr; spsr = mcp->mc_gpregs.gp_spsr; if ((spsr & PSR_M_MASK) != PSR_M_EL0t || (spsr & (PSR_AARCH32 | PSR_F | PSR_I | PSR_A | PSR_D)) != 0) return (EINVAL); memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x)); tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_lr = mcp->mc_gpregs.gp_lr; tf->tf_elr = mcp->mc_gpregs.gp_elr; tf->tf_spsr = mcp->mc_gpregs.gp_spsr; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); curpcb = curthread->td_pcb; if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) { /* * If we have just been running VFP instructions we will * need to save the state to memcpy it below. */ vfp_save_state(td, curpcb); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called get_fpcontext while the kernel is using the VFP")); KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0, ("Non-userspace FPU flags set in get_fpcontext")); memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_fpustate.vfp_regs, sizeof(mcp->mc_fpregs)); mcp->mc_fpregs.fp_cr = curpcb->pcb_fpustate.vfp_fpcr; mcp->mc_fpregs.fp_sr = curpcb->pcb_fpustate.vfp_fpsr; mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags; mcp->mc_flags |= _MC_FP_VALID; } critical_exit(); #endif } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { #ifdef VFP struct pcb *curpcb; critical_enter(); if ((mcp->mc_flags & _MC_FP_VALID) != 0) { curpcb = curthread->td_pcb; /* * Discard any vfp state for the current thread, we * are about to override it. 
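		 *
		 * If the old state were left live in the VFP unit it
		 * could later be written back over the values installed
		 * below, so drop it before the copy.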
*/ vfp_discard(td); KASSERT(curpcb->pcb_fpusaved == &curpcb->pcb_fpustate, ("Called set_fpcontext while the kernel is using the VFP")); memcpy(curpcb->pcb_fpustate.vfp_regs, mcp->mc_fpregs.fp_q, sizeof(mcp->mc_fpregs)); curpcb->pcb_fpustate.vfp_fpcr = mcp->mc_fpregs.fp_cr; curpcb->pcb_fpustate.vfp_fpsr = mcp->mc_fpregs.fp_sr; curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK; } critical_exit(); #endif } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "dsb sy \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { /* We should have shutdown by now, if not enter a low power sleep */ intr_disable(); while (1) { __asm __volatile("wfi"); } } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* ARM64TODO TBD */ } /* Get current clock frequency for the given CPU ID. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { struct pcpu *pc; pc = pcpu_find(cpu_id); if (pc == NULL || rate == NULL) return (EINVAL); if (pc->pc_clock == 0) return (EOPNOTSUPP); *rate = pc->pc_clock; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; register_t daif; td = curthread; if (td->td_md.md_spinlock_count == 0) { daif = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_daif = daif; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t daif; td = curthread; critical_exit(); daif = td->td_md.md_saved_daif; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(daif); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { int i; for (i = 0; i < PCB_LR; i++) pcb->pcb_x[i] = tf->tf_x[i]; pcb->pcb_x[PCB_LR] = tf->tf_lr; pcb->pcb_pc = tf->tf_elr; pcb->pcb_sp = tf->tf_sp; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack, sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. 
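	 *
	 * The frame lands on the alternate signal stack when one is
	 * configured and the signal asks for it, otherwise on the
	 * interrupted thread's stack at tf_sp.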
*/ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ bzero(&frame, sizeof(frame)); get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack = td->td_sigstk; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ? (onstack ? SS_ONSTACK : 0) : SS_DISABLE; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_x[0]= sig; tf->tf_x[1] = (register_t)&fp->sf_si; tf->tf_x[2] = (register_t)&fp->sf_uc; tf->tf_elr = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { struct pcpu *pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_kstack_pages = KSTACK_PAGES; thread0.td_pcb = (struct pcb *)(thread0.td_kstack + thread0.td_kstack_pages * KSTACK_PAGES) - 1; thread0.td_pcb->pcb_fpflags = 0; thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate; thread0.td_pcb->pcb_vfpcpu = UINT_MAX; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; /* Set the base address of translation table 0. */ thread0.td_proc->p_md.md_l0addr = READ_SPECIALREG(ttbr0_el1); } typedef struct { uint32_t type; uint64_t phys_start; uint64_t virt_start; uint64_t num_pages; uint64_t attr; } EFI_MEMORY_DESCRIPTOR; typedef void (*efi_map_entry_cb)(struct efi_md *); static void foreach_efi_map_entry(struct efi_map_header *efihdr, efi_map_entry_cb cb) { struct efi_md *map, *p; size_t efisz; int ndesc, i; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { cb(p); } } static void exclude_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: arm_physmem_exclude_region(p->md_phys, p->md_pages * PAGE_SIZE, EXFLAG_NOALLOC); } } static void exclude_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, exclude_efi_map_entry); } static void add_efi_map_entry(struct efi_md *p) { switch (p->md_type) { case EFI_MD_TYPE_RT_DATA: /* * Runtime data will be excluded after the DMAP * region is created to stop it from being added * to phys_avail. 
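		 *
		 * The region is still added here so the DMAP will cover
		 * it; exclude_efi_map_entries() later marks it
		 * EXFLAG_NOALLOC rather than dropping the mapping.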
*/ case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ arm_physmem_hardware_region(p->md_phys, p->md_pages * PAGE_SIZE); break; } } static void add_efi_map_entries(struct efi_map_header *efihdr) { foreach_efi_map_entry(efihdr, add_efi_map_entry); } static void print_efi_map_entry(struct efi_md *p) { const char *type; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode", "PersistentMemory" }; if (p->md_type < nitems(types)) type = types[p->md_type]; else type = ""; printf("%23s %012lx %12p %08lx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_NV) printf("NV "); if (p->md_attr & EFI_MD_ATTR_MORE_RELIABLE) printf("MORE_RELIABLE "); if (p->md_attr & EFI_MD_ATTR_RO) printf("RO "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } static void print_efi_map_entries(struct efi_map_header *efihdr) { printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); foreach_efi_map_entry(efihdr, print_efi_map_entry); } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static bool bus_probe(void) { bool has_acpi, has_fdt; char *order, *env; has_acpi = has_fdt = false; #ifdef FDT has_fdt = (OF_peer(0) != 0); #endif #ifdef DEV_ACPI has_acpi = (acpi_find_table(ACPI_SIG_SPCR) != 0); #endif env = kern_getenv("kern.cfg.order"); if (env != NULL) { order = env; while (order != NULL) { if (has_acpi && strncmp(order, "acpi", 4) == 0 && (order[4] == ',' || order[4] == '\0')) { arm64_bus_method = ARM64_BUS_ACPI; break; } if (has_fdt && strncmp(order, "fdt", 3) == 0 && (order[3] == ',' || order[3] == '\0')) { arm64_bus_method = ARM64_BUS_FDT; break; } order = strchr(order, ','); } freeenv(env); /* If we set the bus method it is valid */ if (arm64_bus_method != ARM64_BUS_NONE) return (true); } /* If no order or an invalid order was set use the default */ if (arm64_bus_method == ARM64_BUS_NONE) { if (has_fdt) arm64_bus_method = ARM64_BUS_FDT; else if (has_acpi) arm64_bus_method = ARM64_BUS_ACPI; } /* * If no option was set the default is valid, otherwise we are * setting one to get cninit() working, then calling panic to tell * the user about the invalid bus setup. 
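	 *
	 * For example, a loader.conf line of kern.cfg.order="acpi,fdt"
	 * (values hypothetical) selects ACPI when its tables are present
	 * and falls back to FDT otherwise.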
static void
cache_setup(void)
{
	int dcache_line_shift, icache_line_shift, dczva_line_shift;
	uint32_t ctr_el0;
	uint32_t dczid_el0;

	ctr_el0 = READ_SPECIALREG(ctr_el0);

	/* Read the log2 words in each D cache line */
	dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
	/* Get the D cache line size */
	dcache_line_size = sizeof(int) << dcache_line_shift;
	/* And the same for the I cache */
	icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
	icache_line_size = sizeof(int) << icache_line_shift;

	idcache_line_size = MIN(dcache_line_size, icache_line_size);

	dczid_el0 = READ_SPECIALREG(dczid_el0);

	/* Check if dc zva is not prohibited */
	if (dczid_el0 & DCZID_DZP)
		dczva_line_size = 0;
	else {
		/* Same as with above calculations */
		dczva_line_shift = DCZID_BS_SIZE(dczid_el0);
		dczva_line_size = sizeof(int) << dczva_line_shift;

		/* Change pagezero function */
		pagezero = pagezero_cache;
	}
}
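/*
 * Worked example: CTR_EL0.DminLine/IminLine and DCZID_EL0.BS hold log2
 * of a size counted in 4-byte words, hence the sizeof(int) << shift
 * above.  A field value of 4 gives 4 << 4 = 64 bytes, the common cache
 * line and "dc zva" block size on arm64 parts.
 */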
void
initarm(struct arm64_bootparams *abp)
{
	struct efi_fb *efifb;
	struct efi_map_header *efihdr;
	struct pcpu *pcpup;
	char *env;
#ifdef FDT
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	int mem_regions_sz;
#endif
	vm_offset_t lastaddr;
	caddr_t kmdp;
	bool valid;

	/* Set the module data location */
	preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
	link_elf_ireloc(kmdp);

#ifdef FDT
	try_load_dtb(kmdp);
#endif

	efi_systbl_phys = MD_FETCH(kmdp, MODINFOMD_FW_HANDLE, vm_paddr_t);

	/* Find the address to start allocating from */
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

	/* Load the physical memory ranges */
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL)
		add_efi_map_entries(efihdr);
#ifdef FDT
	else {
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz,
		    NULL) != 0)
			panic("Cannot get physical memory regions");
		arm_physmem_hardware_regions(mem_regions, mem_regions_sz);
	}
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0)
		arm_physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
#endif

	/* Exclude the EFI framebuffer from our view of physical memory. */
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb != NULL)
		arm_physmem_exclude_region(efifb->fb_addr, efifb->fb_size,
		    EXFLAG_NOALLOC);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/*
	 * Set the pcpu pointer with a backup in tpidr_el1 to be
	 * loaded when entering the kernel from userland.
	 */
	__asm __volatile(
	    "mov x18, %0 \n"
	    "msr tpidr_el1, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();
	pan_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	pmap_bootstrap(abp->kern_l0pt, abp->kern_l1pt,
	    KERNBASE - abp->kern_delta, lastaddr - KERNBASE);
	/* Exclude entries needed in the DMAP region, but not phys_avail */
	if (efihdr != NULL)
		exclude_efi_map_entries(efihdr);
	arm_physmem_init_kernel_globals();

	devmap_bootstrap(0, NULL);

	valid = bus_probe();

	cninit();

	if (!valid)
		panic("Invalid bus configuration: %s",
		    kern_getenv("kern.cfg.order"));

	init_proc0(abp->kern_stack);
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);

	dbg_init();
	kdb_init();
	pan_enable();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (boothowto & RB_VERBOSE) {
		/* An FDT-only boot has no EFI map to print. */
		if (efihdr != NULL)
			print_efi_map_entries(efihdr);
		arm_physmem_print_tables();
	}

	early_boot = 0;
}

void
dbg_init(void)
{

	/* Clear OS lock */
	WRITE_SPECIALREG(OSLAR_EL1, 0);

	/* This permits DDB to use debug registers for watchpoints. */
	dbg_monitor_init();

	/* TODO: Eventually will need to initialize debug registers here. */
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(specialregs, db_show_spregs)
{
#define	PRINT_REG(reg)	\
    db_printf(__STRING(reg) " = %#016lx\n", READ_SPECIALREG(reg))

	PRINT_REG(actlr_el1);
	PRINT_REG(afsr0_el1);
	PRINT_REG(afsr1_el1);
	PRINT_REG(aidr_el1);
	PRINT_REG(amair_el1);
	PRINT_REG(ccsidr_el1);
	PRINT_REG(clidr_el1);
	PRINT_REG(contextidr_el1);
	PRINT_REG(cpacr_el1);
	PRINT_REG(csselr_el1);
	PRINT_REG(ctr_el0);
	PRINT_REG(currentel);
	PRINT_REG(daif);
	PRINT_REG(dczid_el0);
	PRINT_REG(elr_el1);
	PRINT_REG(esr_el1);
	PRINT_REG(far_el1);
#if 0
	/* ARM64TODO: Enable VFP before reading floating-point registers */
	PRINT_REG(fpcr);
	PRINT_REG(fpsr);
#endif
	PRINT_REG(id_aa64afr0_el1);
	PRINT_REG(id_aa64afr1_el1);
	PRINT_REG(id_aa64dfr0_el1);
	PRINT_REG(id_aa64dfr1_el1);
	PRINT_REG(id_aa64isar0_el1);
	PRINT_REG(id_aa64isar1_el1);
	PRINT_REG(id_aa64pfr0_el1);
	PRINT_REG(id_aa64pfr1_el1);
	PRINT_REG(id_afr0_el1);
	PRINT_REG(id_dfr0_el1);
	PRINT_REG(id_isar0_el1);
	PRINT_REG(id_isar1_el1);
	PRINT_REG(id_isar2_el1);
	PRINT_REG(id_isar3_el1);
	PRINT_REG(id_isar4_el1);
	PRINT_REG(id_isar5_el1);
	PRINT_REG(id_mmfr0_el1);
	PRINT_REG(id_mmfr1_el1);
	PRINT_REG(id_mmfr2_el1);
	PRINT_REG(id_mmfr3_el1);
#if 0
	/* Missing from llvm */
	PRINT_REG(id_mmfr4_el1);
#endif
	PRINT_REG(id_pfr0_el1);
	PRINT_REG(id_pfr1_el1);
	PRINT_REG(isr_el1);
	PRINT_REG(mair_el1);
	PRINT_REG(midr_el1);
	PRINT_REG(mpidr_el1);
	PRINT_REG(mvfr0_el1);
	PRINT_REG(mvfr1_el1);
	PRINT_REG(mvfr2_el1);
	PRINT_REG(revidr_el1);
	PRINT_REG(sctlr_el1);
	PRINT_REG(sp_el0);
	PRINT_REG(spsel);
	PRINT_REG(spsr_el1);
	PRINT_REG(tcr_el1);
	PRINT_REG(tpidr_el0);
	PRINT_REG(tpidr_el1);
	PRINT_REG(tpidrro_el0);
	PRINT_REG(ttbr0_el1);
	PRINT_REG(ttbr1_el1);
	PRINT_REG(vbar_el1);
#undef PRINT_REG
}

DB_SHOW_COMMAND(vtop, db_show_vtop)
{
	uint64_t phys;

	if (have_addr) {
		phys = arm64_address_translate_s1e1r(addr);
		db_printf("EL1 physical address reg (read): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e1w(addr);
		db_printf("EL1 physical address reg (write): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0r(addr);
		db_printf("EL0 physical address reg (read): 0x%016lx\n", phys);
		phys = arm64_address_translate_s1e0w(addr);
		db_printf("EL0 physical address reg (write): 0x%016lx\n", phys);
	} else
		db_printf("show vtop <virt_addr>\n");
}
#endif
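/*
 * Reading the "show vtop" output (a sketch, assuming the
 * arm64_address_translate_s1e* helpers return PAR_EL1 after the
 * corresponding AT instruction): if bit 0 (PAR_EL1.F) is set the
 * translation faulted and the rest of the value is fault syndrome;
 * otherwise bits [47:12] hold the output physical page address.
 */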