Index: head/sys/arm64/arm64/identcpu.c =================================================================== --- head/sys/arm64/arm64/identcpu.c (revision 357459) +++ head/sys/arm64/arm64/identcpu.c (revision 357460) @@ -1,1361 +1,1371 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Semihalf * under sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include static int ident_lock; static void print_cpu_features(u_int cpu); static u_long parse_cpu_features_hwcap(u_int cpu); char machine[] = "arm64"; #ifdef SCTL_MASK32 extern int adaptive_machine_arch; #endif static int sysctl_hw_machine(SYSCTL_HANDLER_ARGS) { #ifdef SCTL_MASK32 static const char machine32[] = "arm"; #endif int error; #ifdef SCTL_MASK32 if ((req->flags & SCTL_MASK32) != 0 && adaptive_machine_arch) error = SYSCTL_OUT(req, machine32, sizeof(machine32)); else #endif error = SYSCTL_OUT(req, machine, sizeof(machine)); return (error); } SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, sysctl_hw_machine, "A", "Machine class"); static char cpu_model[64]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, sizeof(cpu_model), "Machine model"); /* * Per-CPU affinity as provided in MPIDR_EL1 * Indexed by CPU number in logical order selected by the system. * Relevant fields can be extracted using CPU_AFFn macros, * Aff3.Aff2.Aff1.Aff0 construct a unique CPU address in the system. 
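 *
 * A minimal decoding sketch, using the CPU_AFFn macros named above and the
 * __cpu_affinity[] array declared below:
 *
 *	uint64_t mpidr = __cpu_affinity[cpu];
 *	u_int cluster = CPU_AFF1(mpidr);
 *	u_int cpu_in_cluster = CPU_AFF0(mpidr);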
* * Fields used by us: * Aff1 - Cluster number * Aff0 - CPU number in Aff1 cluster */ uint64_t __cpu_affinity[MAXCPU]; static u_int cpu_aff_levels; struct cpu_desc { u_int cpu_impl; u_int cpu_part_num; u_int cpu_variant; u_int cpu_revision; const char *cpu_impl_name; const char *cpu_part_name; uint64_t mpidr; uint64_t id_aa64afr0; uint64_t id_aa64afr1; uint64_t id_aa64dfr0; uint64_t id_aa64dfr1; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; uint64_t id_aa64pfr0; uint64_t id_aa64pfr1; }; -struct cpu_desc cpu_desc[MAXCPU]; -struct cpu_desc user_cpu_desc; +static struct cpu_desc cpu_desc[MAXCPU]; +static struct cpu_desc user_cpu_desc; static u_int cpu_print_regs; #define PRINT_ID_AA64_AFR0 0x00000001 #define PRINT_ID_AA64_AFR1 0x00000002 #define PRINT_ID_AA64_DFR0 0x00000010 #define PRINT_ID_AA64_DFR1 0x00000020 #define PRINT_ID_AA64_ISAR0 0x00000100 #define PRINT_ID_AA64_ISAR1 0x00000200 #define PRINT_ID_AA64_MMFR0 0x00001000 #define PRINT_ID_AA64_MMFR1 0x00002000 #define PRINT_ID_AA64_MMFR2 0x00004000 #define PRINT_ID_AA64_PFR0 0x00010000 #define PRINT_ID_AA64_PFR1 0x00020000 struct cpu_parts { u_int part_id; const char *part_name; }; #define CPU_PART_NONE { 0, "Unknown Processor" } struct cpu_implementers { u_int impl_id; const char *impl_name; /* * Part number is implementation defined * so each vendor will have its own set of values and names. */ const struct cpu_parts *cpu_parts; }; #define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none } /* * Per-implementer table of (PartNum, CPU Name) pairs. */ /* ARM Ltd. */ static const struct cpu_parts cpu_parts_arm[] = { { CPU_PART_FOUNDATION, "Foundation-Model" }, { CPU_PART_CORTEX_A35, "Cortex-A35" }, { CPU_PART_CORTEX_A53, "Cortex-A53" }, { CPU_PART_CORTEX_A55, "Cortex-A55" }, { CPU_PART_CORTEX_A57, "Cortex-A57" }, { CPU_PART_CORTEX_A65, "Cortex-A65" }, { CPU_PART_CORTEX_A72, "Cortex-A72" }, { CPU_PART_CORTEX_A73, "Cortex-A73" }, { CPU_PART_CORTEX_A75, "Cortex-A75" }, { CPU_PART_CORTEX_A76, "Cortex-A76" }, { CPU_PART_CORTEX_A76AE, "Cortex-A76AE" }, { CPU_PART_CORTEX_A77, "Cortex-A77" }, { CPU_PART_NEOVERSE_N1, "Neoverse-N1" }, CPU_PART_NONE, }; /* Cavium */ static const struct cpu_parts cpu_parts_cavium[] = { { CPU_PART_THUNDERX, "ThunderX" }, { CPU_PART_THUNDERX2, "ThunderX2" }, CPU_PART_NONE, }; /* APM / Ampere */ static const struct cpu_parts cpu_parts_apm[] = { { CPU_PART_EMAG8180, "eMAG 8180" }, CPU_PART_NONE, }; /* Unknown */ static const struct cpu_parts cpu_parts_none[] = { CPU_PART_NONE, }; /* * Implementers table. 
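 *
 * Both this table and the per-implementer parts tables above end in an
 * id-0 sentinel (CPU_IMPLEMENTER_NONE / CPU_PART_NONE), so the linear
 * lookups in identify_cpu() always terminate on an "Unknown" entry. A
 * sketch of the search performed there:
 *
 *	for (i = 0; ; i++)
 *		if (impl_id == cpu_implementers[i].impl_id ||
 *		    cpu_implementers[i].impl_id == 0)
 *			break;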
*/ const struct cpu_implementers cpu_implementers[] = { { CPU_IMPL_ARM, "ARM", cpu_parts_arm }, { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none }, { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium }, { CPU_IMPL_DEC, "DEC", cpu_parts_none }, { CPU_IMPL_INFINEON, "IFX", cpu_parts_none }, { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none }, { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none }, { CPU_IMPL_APM, "APM", cpu_parts_apm }, { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none }, { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none }, { CPU_IMPL_INTEL, "Intel", cpu_parts_none }, CPU_IMPLEMENTER_NONE, }; #define MRS_TYPE_MASK 0xf #define MRS_INVALID 0 #define MRS_EXACT 1 #define MRS_EXACT_VAL(x) (MRS_EXACT | ((x) << 4)) #define MRS_EXACT_FIELD(x) ((x) >> 4) #define MRS_LOWER 2 struct mrs_field_value { uint64_t value; const char *desc; }; #define MRS_FIELD_VALUE(_value, _desc) \ { \ .value = (_value), \ .desc = (_desc), \ } #define MRS_FIELD_VALUE_NONE_IMPL(_reg, _field, _none, _impl) \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _none, ""), \ MRS_FIELD_VALUE(_reg ## _ ## _field ## _ ## _impl, #_field) #define MRS_FIELD_VALUE_COUNT(_reg, _field, _desc) \ MRS_FIELD_VALUE(0ul << _reg ## _ ## _field ## _SHIFT, "1 " _desc), \ MRS_FIELD_VALUE(1ul << _reg ## _ ## _field ## _SHIFT, "2 " _desc "s"), \ MRS_FIELD_VALUE(2ul << _reg ## _ ## _field ## _SHIFT, "3 " _desc "s"), \ MRS_FIELD_VALUE(3ul << _reg ## _ ## _field ## _SHIFT, "4 " _desc "s"), \ MRS_FIELD_VALUE(4ul << _reg ## _ ## _field ## _SHIFT, "5 " _desc "s"), \ MRS_FIELD_VALUE(5ul << _reg ## _ ## _field ## _SHIFT, "6 " _desc "s"), \ MRS_FIELD_VALUE(6ul << _reg ## _ ## _field ## _SHIFT, "7 " _desc "s"), \ MRS_FIELD_VALUE(7ul << _reg ## _ ## _field ## _SHIFT, "8 " _desc "s"), \ MRS_FIELD_VALUE(8ul << _reg ## _ ## _field ## _SHIFT, "9 " _desc "s"), \ MRS_FIELD_VALUE(9ul << _reg ## _ ## _field ## _SHIFT, "10 "_desc "s"), \ MRS_FIELD_VALUE(10ul<< _reg ## _ ## _field ## _SHIFT, "11 "_desc "s"), \ MRS_FIELD_VALUE(11ul<< _reg ## _ ## _field ## _SHIFT, "12 "_desc "s"), \ MRS_FIELD_VALUE(12ul<< _reg ## _ ## _field ## _SHIFT, "13 "_desc "s"), \ MRS_FIELD_VALUE(13ul<< _reg ## _ ## _field ## _SHIFT, "14 "_desc "s"), \ MRS_FIELD_VALUE(14ul<< _reg ## _ ## _field ## _SHIFT, "15 "_desc "s"), \ MRS_FIELD_VALUE(15ul<< _reg ## _ ## _field ## _SHIFT, "16 "_desc "s") #define MRS_FIELD_VALUE_END { .desc = NULL } struct mrs_field { const char *name; struct mrs_field_value *values; uint64_t mask; bool sign; u_int type; u_int shift; }; #define MRS_FIELD(_register, _name, _sign, _type, _values) \ { \ .name = #_name, \ .sign = (_sign), \ .type = (_type), \ .shift = _register ## _ ## _name ## _SHIFT, \ .mask = _register ## _ ## _name ## _MASK, \ .values = (_values), \ } #define MRS_FIELD_END { .type = MRS_INVALID, } /* ID_AA64AFR0_EL1 */ static struct mrs_field id_aa64afr0_fields[] = { MRS_FIELD_END, }; /* ID_AA64AFR1_EL1 */ static struct mrs_field id_aa64afr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64DFR0_EL1 */ static struct mrs_field_value id_aa64dfr0_pmsver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMSVer_V1, "SPE"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_ctx_cmps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, CTX_CMPs, "CTX BKPT"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_wrps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, WRPs, "Watchpoint"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_brps[] = { MRS_FIELD_VALUE_COUNT(ID_AA64DFR0, BRPs, "Breakpoint"), MRS_FIELD_VALUE_END, }; static 
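/*
 * A worked example of the MRS_EXACT encoding defined above:
 * MRS_EXACT_VAL(0x6) expands to (MRS_EXACT | (0x6 << 4)) == 0x61, and
 * MRS_EXACT_FIELD(0x61) recovers the 0x6 that update_user_regs() forces
 * into that field of the register image shown to userland; plain
 * MRS_EXACT is MRS_EXACT_VAL(0), i.e. the field reads as zero. MRS_LOWER
 * fields instead report the lowest value seen across all CPUs.
 */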
struct mrs_field_value id_aa64dfr0_pmuver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3, "PMUv3"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_3_1, "PMUv3+16 bit evtCount"), MRS_FIELD_VALUE(ID_AA64DFR0_PMUVer_IMPL, "IMPL PMU"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_tracever[] = { MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_NONE, ""), MRS_FIELD_VALUE(ID_AA64DFR0_TraceVer_IMPL, "Trace"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64dfr0_debugver[] = { MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8, "Debugv8"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_VHE, "Debugv8_VHE"), MRS_FIELD_VALUE(ID_AA64DFR0_DebugVer_8_2, "Debugv8.2"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64dfr0_fields[] = { MRS_FIELD(ID_AA64DFR0, PMSVer, false, MRS_EXACT, id_aa64dfr0_pmsver), MRS_FIELD(ID_AA64DFR0, CTX_CMPs, false, MRS_EXACT, id_aa64dfr0_ctx_cmps), MRS_FIELD(ID_AA64DFR0, WRPs, false, MRS_EXACT, id_aa64dfr0_wrps), MRS_FIELD(ID_AA64DFR0, BRPs, false, MRS_LOWER, id_aa64dfr0_brps), MRS_FIELD(ID_AA64DFR0, PMUVer, false, MRS_EXACT, id_aa64dfr0_pmuver), MRS_FIELD(ID_AA64DFR0, TraceVer, false, MRS_EXACT, id_aa64dfr0_tracever), MRS_FIELD(ID_AA64DFR0, DebugVer, false, MRS_EXACT_VAL(0x6), id_aa64dfr0_debugver), MRS_FIELD_END, }; /* ID_AA64DFR1 */ static struct mrs_field id_aa64dfr1_fields[] = { MRS_FIELD_END, }; /* ID_AA64ISAR0_EL1 */ static struct mrs_field_value id_aa64isar0_dp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, DP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sm4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sm3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SM3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA3, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_rdm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, RDM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_atomic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, Atomic, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_crc32[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, CRC32, NONE, BASE), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA2, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_SHA2_512, "SHA2+SHA512"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_sha1[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, SHA1, NONE, BASE), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar0_aes[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR0, AES, NONE, BASE), MRS_FIELD_VALUE(ID_AA64ISAR0_AES_PMULL, "AES+PMULL"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64isar0_fields[] = { MRS_FIELD(ID_AA64ISAR0, DP, false, MRS_LOWER, id_aa64isar0_dp), MRS_FIELD(ID_AA64ISAR0, SM4, false, MRS_LOWER, id_aa64isar0_sm4), MRS_FIELD(ID_AA64ISAR0, SM3, false, MRS_LOWER, id_aa64isar0_sm3), MRS_FIELD(ID_AA64ISAR0, SHA3, false, MRS_LOWER, id_aa64isar0_sha3), MRS_FIELD(ID_AA64ISAR0, RDM, false, MRS_LOWER, id_aa64isar0_rdm), MRS_FIELD(ID_AA64ISAR0, Atomic, false, MRS_LOWER, id_aa64isar0_atomic), MRS_FIELD(ID_AA64ISAR0, CRC32, false, MRS_LOWER, id_aa64isar0_crc32), MRS_FIELD(ID_AA64ISAR0, SHA2, false, MRS_LOWER, id_aa64isar0_sha2), MRS_FIELD(ID_AA64ISAR0, SHA1, false, MRS_LOWER, id_aa64isar0_sha1), MRS_FIELD(ID_AA64ISAR0, AES, 
false, MRS_LOWER, id_aa64isar0_aes), MRS_FIELD_END, }; /* ID_AA64ISAR1_EL1 */ static struct mrs_field_value id_aa64isar1_gpi[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_gpa[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_lrcpc[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, LRCPC, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_fcma[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, FCMA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_jscvt[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, JSCVT, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_api[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, API, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_apa[] = { /* XXX: copy/paste from id_aa64isar1_gpa above; this should list the ID_AA64ISAR1 APA field values, not the GPA ones. */ MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64isar1_dpb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, DPB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64isar1_fields[] = { MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi), MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa), MRS_FIELD(ID_AA64ISAR1, LRCPC, false, MRS_LOWER, id_aa64isar1_lrcpc), MRS_FIELD(ID_AA64ISAR1, FCMA, false, MRS_LOWER, id_aa64isar1_fcma), MRS_FIELD(ID_AA64ISAR1, JSCVT, false, MRS_LOWER, id_aa64isar1_jscvt), MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api), MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa), MRS_FIELD(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb), MRS_FIELD_END, }; /* ID_AA64MMFR0_EL1 */ static struct mrs_field_value id_aa64mmfr0_tgran4[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran4, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_tgran64[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran64, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_tgran16[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, TGran16, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_bigend_el0[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEndEL0, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_snsmem[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, SNSMem, NONE, DISTINCT), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_bigend[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR0, BigEnd, FIXED, MIXED), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_asid_bits[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_8, "8bit ASID"), MRS_FIELD_VALUE(ID_AA64MMFR0_ASIDBits_16, "16bit ASID"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr0_parange[] = { MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4G, "4GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_64G, "64GB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_1T, "1TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4T, "4TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_16T, "16TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_256T, "256TB PA"), MRS_FIELD_VALUE(ID_AA64MMFR0_PARange_4P, "4PB PA"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64mmfr0_fields[] = { MRS_FIELD(ID_AA64MMFR0, TGran4, false, MRS_EXACT, id_aa64mmfr0_tgran4), MRS_FIELD(ID_AA64MMFR0, TGran64, false, MRS_EXACT, id_aa64mmfr0_tgran64), MRS_FIELD(ID_AA64MMFR0, TGran16, false,
MRS_EXACT, id_aa64mmfr0_tgran16), MRS_FIELD(ID_AA64MMFR0, BigEndEL0, false, MRS_EXACT, id_aa64mmfr0_bigend_el0), MRS_FIELD(ID_AA64MMFR0, SNSMem, false, MRS_EXACT, id_aa64mmfr0_snsmem), MRS_FIELD(ID_AA64MMFR0, BigEnd, false, MRS_EXACT, id_aa64mmfr0_bigend), MRS_FIELD(ID_AA64MMFR0, ASIDBits, false, MRS_EXACT, id_aa64mmfr0_asid_bits), MRS_FIELD(ID_AA64MMFR0, PARange, false, MRS_EXACT, id_aa64mmfr0_parange), MRS_FIELD_END, }; /* ID_AA64MMFR1_EL1 */ static struct mrs_field_value id_aa64mmfr1_xnx[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, XNX, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_specsei[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, SpecSEI, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_pan[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, PAN, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64MMFR1_PAN_ATS1E1, "PAN+ATS1E1"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_lo[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, LO, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_hpds[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_HPD, "HPD"), MRS_FIELD_VALUE(ID_AA64MMFR1_HPDS_TTPBHA, "HPD+TTPBHA"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_vh[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR1, VH, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_vmidbits[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_8, "8bit VMID"), MRS_FIELD_VALUE(ID_AA64MMFR1_VMIDBits_16, "16bit VMID"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr1_hafdbs[] = { MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF, "HAF"), MRS_FIELD_VALUE(ID_AA64MMFR1_HAFDBS_AF_DBS, "HAF+DS"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64mmfr1_fields[] = { MRS_FIELD(ID_AA64MMFR1, XNX, false, MRS_EXACT, id_aa64mmfr1_xnx), MRS_FIELD(ID_AA64MMFR1, SpecSEI, false, MRS_EXACT, id_aa64mmfr1_specsei), MRS_FIELD(ID_AA64MMFR1, PAN, false, MRS_EXACT, id_aa64mmfr1_pan), MRS_FIELD(ID_AA64MMFR1, LO, false, MRS_EXACT, id_aa64mmfr1_lo), MRS_FIELD(ID_AA64MMFR1, HPDS, false, MRS_EXACT, id_aa64mmfr1_hpds), MRS_FIELD(ID_AA64MMFR1, VH, false, MRS_EXACT, id_aa64mmfr1_vh), MRS_FIELD(ID_AA64MMFR1, VMIDBits, false, MRS_EXACT, id_aa64mmfr1_vmidbits), MRS_FIELD(ID_AA64MMFR1, HAFDBS, false, MRS_EXACT, id_aa64mmfr1_hafdbs), MRS_FIELD_END, }; /* ID_AA64MMFR2_EL1 */ static struct mrs_field_value id_aa64mmfr2_nv[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, NV, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_ccidx[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_32, "32bit CCIDX"), MRS_FIELD_VALUE(ID_AA64MMFR2_CCIDX_64, "64bit CCIDX"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_varange[] = { MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_48, "48bit VA"), MRS_FIELD_VALUE(ID_AA64MMFR2_VARange_52, "52bit VA"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_iesb[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, IESB, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_lsm[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, LSM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_uao[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, UAO, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64mmfr2_cnp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64MMFR2, CnP, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field
id_aa64mmfr2_fields[] = { MRS_FIELD(ID_AA64MMFR2, NV, false, MRS_EXACT, id_aa64mmfr2_nv), MRS_FIELD(ID_AA64MMFR2, CCIDX, false, MRS_EXACT, id_aa64mmfr2_ccidx), MRS_FIELD(ID_AA64MMFR2, VARange, false, MRS_EXACT, id_aa64mmfr2_varange), MRS_FIELD(ID_AA64MMFR2, IESB, false, MRS_EXACT, id_aa64mmfr2_iesb), MRS_FIELD(ID_AA64MMFR2, LSM, false, MRS_EXACT, id_aa64mmfr2_lsm), MRS_FIELD(ID_AA64MMFR2, UAO, false, MRS_EXACT, id_aa64mmfr2_uao), MRS_FIELD(ID_AA64MMFR2, CnP, false, MRS_EXACT, id_aa64mmfr2_cnp), MRS_FIELD_END, }; /* ID_AA64PFR0_EL1 */ static struct mrs_field_value id_aa64pfr0_csv3[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_CSV3_ISOLATED, "CSV3"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_csv2[] = { MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_ISOLATED, "CSV2"), MRS_FIELD_VALUE(ID_AA64PFR0_CSV2_SCXTNUM, "SCXTNUM"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_dit[] = { MRS_FIELD_VALUE(ID_AA64PFR0_DIT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_DIT_PSTATE, "PSTATE.DIT"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_amu[] = { MRS_FIELD_VALUE(ID_AA64PFR0_AMU_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_AMU_V1, "AMUv1"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_mpam[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, MPAM, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_sel2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SEL2, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_sve[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, SVE, NONE, IMPL), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_ras[] = { MRS_FIELD_VALUE(ID_AA64PFR0_RAS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR0_RAS_V1, "RASv1"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_gic[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, GIC, CPUIF_NONE, CPUIF_EN), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_advsimd[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, AdvSIMD, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_AdvSIMD_HP, "AdvSIMD+HP"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_fp[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, FP, NONE, IMPL), MRS_FIELD_VALUE(ID_AA64PFR0_FP_HP, "FP+HP"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el3[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL3, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL3_64_32, "EL3 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el2[] = { MRS_FIELD_VALUE_NONE_IMPL(ID_AA64PFR0, EL2, NONE, 64), MRS_FIELD_VALUE(ID_AA64PFR0_EL2_64_32, "EL2 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el1[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64, "EL1"), MRS_FIELD_VALUE(ID_AA64PFR0_EL1_64_32, "EL1 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr0_el0[] = { MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64, "EL0"), MRS_FIELD_VALUE(ID_AA64PFR0_EL0_64_32, "EL0 32"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64pfr0_fields[] = { MRS_FIELD(ID_AA64PFR0, CSV3, false, MRS_EXACT, id_aa64pfr0_csv3), MRS_FIELD(ID_AA64PFR0, CSV2, false, MRS_EXACT, id_aa64pfr0_csv2), MRS_FIELD(ID_AA64PFR0, DIT, false, MRS_EXACT, id_aa64pfr0_dit), MRS_FIELD(ID_AA64PFR0, AMU, false, MRS_EXACT, id_aa64pfr0_amu), MRS_FIELD(ID_AA64PFR0, MPAM, false, MRS_EXACT, id_aa64pfr0_mpam), MRS_FIELD(ID_AA64PFR0, SEL2, false, MRS_EXACT, id_aa64pfr0_sel2), MRS_FIELD(ID_AA64PFR0, SVE, false, MRS_EXACT, 
id_aa64pfr0_sve), MRS_FIELD(ID_AA64PFR0, RAS, false, MRS_EXACT, id_aa64pfr0_ras), MRS_FIELD(ID_AA64PFR0, GIC, false, MRS_EXACT, id_aa64pfr0_gic), MRS_FIELD(ID_AA64PFR0, AdvSIMD, true, MRS_LOWER, id_aa64pfr0_advsimd), MRS_FIELD(ID_AA64PFR0, FP, true, MRS_LOWER, id_aa64pfr0_fp), MRS_FIELD(ID_AA64PFR0, EL3, false, MRS_EXACT, id_aa64pfr0_el3), MRS_FIELD(ID_AA64PFR0, EL2, false, MRS_EXACT, id_aa64pfr0_el2), MRS_FIELD(ID_AA64PFR0, EL1, false, MRS_LOWER, id_aa64pfr0_el1), MRS_FIELD(ID_AA64PFR0, EL0, false, MRS_LOWER, id_aa64pfr0_el0), MRS_FIELD_END, }; /* ID_AA64PFR1_EL1 */ static struct mrs_field_value id_aa64pfr1_bt[] = { MRS_FIELD_VALUE(ID_AA64PFR1_BT_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_BT_IMPL, "BTI"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr1_ssbs[] = { MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE, "PSTATE.SSBS"), MRS_FIELD_VALUE(ID_AA64PFR1_SSBS_PSTATE_MSR, "PSTATE.SSBS MSR"), MRS_FIELD_VALUE_END, }; static struct mrs_field_value id_aa64pfr1_mte[] = { MRS_FIELD_VALUE(ID_AA64PFR1_MTE_NONE, ""), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL_EL0, "MTE EL0"), MRS_FIELD_VALUE(ID_AA64PFR1_MTE_IMPL, "MTE"), MRS_FIELD_VALUE_END, }; static struct mrs_field id_aa64pfr1_fields[] = { MRS_FIELD(ID_AA64PFR1, BT, false, MRS_EXACT, id_aa64pfr1_bt), MRS_FIELD(ID_AA64PFR1, SSBS, false, MRS_EXACT, id_aa64pfr1_ssbs), MRS_FIELD(ID_AA64PFR1, MTE, false, MRS_EXACT, id_aa64pfr1_mte), MRS_FIELD_END, }; struct mrs_user_reg { u_int reg; u_int CRm; u_int Op2; size_t offset; struct mrs_field *fields; }; static struct mrs_user_reg user_regs[] = { { /* id_aa64isar0_el1 */ .reg = ID_AA64ISAR0_EL1, .CRm = 6, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64isar0), .fields = id_aa64isar0_fields, }, { /* id_aa64isar1_el1 */ .reg = ID_AA64ISAR1_EL1, .CRm = 6, .Op2 = 1, .offset = __offsetof(struct cpu_desc, id_aa64isar1), .fields = id_aa64isar1_fields, }, { /* id_aa64pfr0_el1 */ .reg = ID_AA64PFR0_EL1, .CRm = 4, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64pfr0), .fields = id_aa64pfr0_fields, }, { /* id_aa64pfr0_el1 */ .reg = ID_AA64PFR1_EL1, .CRm = 4, .Op2 = 1, .offset = __offsetof(struct cpu_desc, id_aa64pfr1), .fields = id_aa64pfr1_fields, }, { /* id_aa64dfr0_el1 */ .reg = ID_AA64DFR0_EL1, .CRm = 5, .Op2 = 0, .offset = __offsetof(struct cpu_desc, id_aa64dfr0), .fields = id_aa64dfr0_fields, }, }; #define CPU_DESC_FIELD(desc, idx) \ *(uint64_t *)((char *)&(desc) + user_regs[(idx)].offset) static int user_mrs_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame, uint32_t esr) { uint64_t value; int CRm, Op2, i, reg; if ((insn & MRS_MASK) != MRS_VALUE) return (0); /* * We only emulate Op0 == 3, Op1 == 0, CRn == 0, CRm == {0, 4-7}. * These are in the EL1 CPU identification space. * CRm == 0 holds MIDR_EL1, MPIDR_EL1, and REVID_EL1. * CRm == {4-7} holds the ID_AA64 registers. * * For full details see the ARMv8 ARM (ARM DDI 0487C.a) * Table D9-2 System instruction encodings for non-Debug System * register accesses. 
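 *
 * For example, a userland "mrs x1, id_aa64isar0_el1" encodes Op0 3,
 * Op1 0, CRn 0, CRm 6, Op2 0. It traps as an undefined instruction,
 * the handler matches (CRm, Op2) against user_regs[], writes the
 * sanitised user_cpu_desc.id_aa64isar0 value to x1, and steps tf_elr
 * past the faulting instruction.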
*/ if (mrs_Op0(insn) != 3 || mrs_Op1(insn) != 0 || mrs_CRn(insn) != 0) return (0); CRm = mrs_CRm(insn); if (CRm > 7 || (CRm < 4 && CRm != 0)) return (0); Op2 = mrs_Op2(insn); value = 0; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].CRm == CRm && user_regs[i].Op2 == Op2) { value = CPU_DESC_FIELD(user_cpu_desc, i); break; } } if (CRm == 0) { switch (Op2) { case 0: value = READ_SPECIALREG(midr_el1); break; case 5: value = READ_SPECIALREG(mpidr_el1); break; case 6: value = READ_SPECIALREG(revidr_el1); break; default: return (0); } } /* * We will handle this instruction, move to the next so we * don't trap here again. */ frame->tf_elr += INSN_SIZE; reg = MRS_REGISTER(insn); /* If reg is 31 then write to xzr, i.e. do nothing */ if (reg == 31) return (1); if (reg < nitems(frame->tf_x)) frame->tf_x[reg] = value; else if (reg == 30) frame->tf_lr = value; return (1); } bool extract_user_id_field(u_int reg, u_int field_shift, uint8_t *val) { uint64_t value; int i; for (i = 0; i < nitems(user_regs); i++) { if (user_regs[i].reg == reg) { value = CPU_DESC_FIELD(user_cpu_desc, i); *val = value >> field_shift; return (true); } } return (false); } static void update_user_regs(u_int cpu) { struct mrs_field *fields; uint64_t cur, value; int i, j, cur_field, new_field; for (i = 0; i < nitems(user_regs); i++) { value = CPU_DESC_FIELD(cpu_desc[cpu], i); if (cpu == 0) cur = value; else cur = CPU_DESC_FIELD(user_cpu_desc, i); fields = user_regs[i].fields; for (j = 0; fields[j].type != 0; j++) { switch (fields[j].type & MRS_TYPE_MASK) { case MRS_EXACT: cur &= ~(0xful << fields[j].shift); cur |= (uint64_t)MRS_EXACT_FIELD(fields[j].type) << fields[j].shift; break; case MRS_LOWER: new_field = (value >> fields[j].shift) & 0xf; cur_field = (cur >> fields[j].shift) & 0xf; if ((fields[j].sign && (int)new_field < (int)cur_field) || (!fields[j].sign && (u_int)new_field < (u_int)cur_field)) { cur &= ~(0xful << fields[j].shift); cur |= (uint64_t)new_field << fields[j].shift; } break; default: panic("Invalid field type: %d", fields[j].type); } } CPU_DESC_FIELD(user_cpu_desc, i) = cur; } } /* HWCAP */ extern u_long elf_hwcap; +bool __read_frequently lse_supported = false; static void identify_cpu_sysinit(void *dummy __unused) { int cpu; u_long hwcap; /* Create a user visible cpu description with safe values */ memset(&user_cpu_desc, 0, sizeof(user_cpu_desc)); /* Safe values for these registers */ user_cpu_desc.id_aa64pfr0 = ID_AA64PFR0_AdvSIMD_NONE | ID_AA64PFR0_FP_NONE | ID_AA64PFR0_EL1_64 | ID_AA64PFR0_EL0_64; user_cpu_desc.id_aa64dfr0 = ID_AA64DFR0_DebugVer_8; - CPU_FOREACH(cpu) { print_cpu_features(cpu); hwcap = parse_cpu_features_hwcap(cpu); if (elf_hwcap == 0) elf_hwcap = hwcap; else elf_hwcap &= hwcap; update_user_regs(cpu); } + + if ((elf_hwcap & HWCAP_ATOMICS) != 0) { + lse_supported = true; + if (bootverbose) + printf("Enabling LSE atomics in the kernel\n"); + } +#ifdef LSE_ATOMICS + if (!lse_supported) + panic("CPU does not support LSE atomic instructions"); +#endif install_undef_handler(true, user_mrs_handler); } SYSINIT(identity_cpu, SI_SUB_SMP, SI_ORDER_ANY, identify_cpu_sysinit, NULL); static u_long parse_cpu_features_hwcap(u_int cpu) { u_long hwcap = 0; if (ID_AA64ISAR0_DP_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_DP_IMPL) hwcap |= HWCAP_ASIMDDP; if (ID_AA64ISAR0_SM4_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM4_IMPL) hwcap |= HWCAP_SM4; if (ID_AA64ISAR0_SM3_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SM3_IMPL) hwcap |= HWCAP_SM3; if (ID_AA64ISAR0_RDM_VAL(cpu_desc[cpu].id_aa64isar0) ==
ID_AA64ISAR0_RDM_IMPL) hwcap |= HWCAP_ASIMDRDM; if (ID_AA64ISAR0_Atomic_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_Atomic_IMPL) hwcap |= HWCAP_ATOMICS; if (ID_AA64ISAR0_CRC32_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_CRC32_BASE) hwcap |= HWCAP_CRC32; switch (ID_AA64ISAR0_SHA2_VAL(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_SHA2_BASE: hwcap |= HWCAP_SHA2; break; case ID_AA64ISAR0_SHA2_512: hwcap |= HWCAP_SHA2 | HWCAP_SHA512; break; default: break; } if (ID_AA64ISAR0_SHA1_VAL(cpu_desc[cpu].id_aa64isar0) == ID_AA64ISAR0_SHA1_BASE) hwcap |= HWCAP_SHA1; switch (ID_AA64ISAR0_AES_VAL(cpu_desc[cpu].id_aa64isar0)) { case ID_AA64ISAR0_AES_BASE: hwcap |= HWCAP_AES; break; case ID_AA64ISAR0_AES_PMULL: hwcap |= HWCAP_PMULL | HWCAP_AES; break; default: break; } if (ID_AA64ISAR1_LRCPC_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_LRCPC_IMPL) hwcap |= HWCAP_LRCPC; if (ID_AA64ISAR1_FCMA_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_FCMA_IMPL) hwcap |= HWCAP_FCMA; if (ID_AA64ISAR1_JSCVT_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_JSCVT_IMPL) hwcap |= HWCAP_JSCVT; if (ID_AA64ISAR1_DPB_VAL(cpu_desc[cpu].id_aa64isar1) == ID_AA64ISAR1_DPB_IMPL) hwcap |= HWCAP_DCPOP; if (ID_AA64PFR0_SVE_VAL(cpu_desc[cpu].id_aa64pfr0) == ID_AA64PFR0_SVE_IMPL) hwcap |= HWCAP_SVE; switch (ID_AA64PFR0_AdvSIMD_VAL(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_AdvSIMD_IMPL: hwcap |= HWCAP_ASIMD; break; case ID_AA64PFR0_AdvSIMD_HP: hwcap |= HWCAP_ASIMD | HWCAP_ASIMDHP; break; default: break; } switch (ID_AA64PFR0_FP_VAL(cpu_desc[cpu].id_aa64pfr0)) { case ID_AA64PFR0_FP_IMPL: hwcap |= HWCAP_FP; break; case ID_AA64PFR0_FP_HP: hwcap |= HWCAP_FP | HWCAP_FPHP; break; default: break; } return (hwcap); } static void print_id_register(struct sbuf *sb, const char *reg_name, uint64_t reg, struct mrs_field *fields) { struct mrs_field_value *fv; int field, i, j, printed; sbuf_printf(sb, "%29s = <", reg_name); #define SEP_STR ((printed++) == 0) ? "" : "," printed = 0; for (i = 0; fields[i].type != 0; i++) { fv = fields[i].values; /* TODO: Handle with an unknown message */ if (fv == NULL) continue; field = (reg & fields[i].mask) >> fields[i].shift; for (j = 0; fv[j].desc != NULL; j++) { if ((fv[j].value >> fields[i].shift) != field) continue; if (fv[j].desc[0] != '\0') sbuf_printf(sb, "%s%s", SEP_STR, fv[j].desc); break; } if (fv[j].desc == NULL) sbuf_printf(sb, "%sUnknown %s(%x)", SEP_STR, fields[i].name, field); reg &= ~(0xful << fields[i].shift); } if (reg != 0) sbuf_printf(sb, "%s%#lx", SEP_STR, reg); #undef SEP_STR sbuf_finish(sb); printf("%s>\n", sbuf_data(sb)); sbuf_clear(sb); } static void print_cpu_features(u_int cpu) { struct sbuf *sb; sb = sbuf_new_auto(); sbuf_printf(sb, "CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); sbuf_cat(sb, " affinity:"); switch (cpu_aff_levels) { default: case 4: sbuf_printf(sb, " %2d", CPU_AFF3(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 3: sbuf_printf(sb, " %2d", CPU_AFF2(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 2: sbuf_printf(sb, " %2d", CPU_AFF1(cpu_desc[cpu].mpidr)); /* FALLTHROUGH */ case 1: case 0: /* On UP this will be zero */ sbuf_printf(sb, " %2d", CPU_AFF0(cpu_desc[cpu].mpidr)); break; } sbuf_finish(sb); printf("%s\n", sbuf_data(sb)); sbuf_clear(sb); /* * There is a hardware erratum where, if one CPU is performing a TLB * invalidation while another is performing a store-exclusive, the * store-exclusive may return the wrong status.
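 * (The sequence at risk is the plain LL/SC retry loop used by the
 * atomic(9) implementations in atomic.h below, e.g.
 *	1: ldxr w0, [x2]; ...; stxr w1, w0, [x2]; cbnz w1, 1b
 * where the status that stxr writes to w1 may be wrong.)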
A workaround seems * to be to use an IPI to invalidate on each CPU, however given the * limited number of affected units (pass 1.1 is the evaluation * hardware revision), and the lack of information from Cavium * this has not been implemented. * * At the time of writing this the only information is from: * https://lkml.org/lkml/2016/8/4/722 */ /* * XXX: CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1 on its own also * triggers on pass 2.0+. */ if (cpu == 0 && CPU_VAR(PCPU_GET(midr)) == 0 && CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1) printf("WARNING: ThunderX Pass 1.1 detected.\nThis has known " "hardware bugs that may cause the incorrect operation of " "atomic operations.\n"); /* AArch64 Instruction Set Attribute Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) print_id_register(sb, "Instruction Set Attributes 0", cpu_desc[cpu].id_aa64isar0, id_aa64isar0_fields); /* AArch64 Instruction Set Attribute Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) print_id_register(sb, "Instruction Set Attributes 1", cpu_desc[cpu].id_aa64isar1, id_aa64isar1_fields); /* AArch64 Processor Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) print_id_register(sb, "Processor Features 0", cpu_desc[cpu].id_aa64pfr0, id_aa64pfr0_fields); /* AArch64 Processor Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) print_id_register(sb, "Processor Features 1", cpu_desc[cpu].id_aa64pfr1, id_aa64pfr1_fields); /* AArch64 Memory Model Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) print_id_register(sb, "Memory Model Features 0", cpu_desc[cpu].id_aa64mmfr0, id_aa64mmfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) print_id_register(sb, "Memory Model Features 1", cpu_desc[cpu].id_aa64mmfr1, id_aa64mmfr1_fields); /* AArch64 Memory Model Feature Register 2 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR2) != 0) print_id_register(sb, "Memory Model Features 2", cpu_desc[cpu].id_aa64mmfr2, id_aa64mmfr2_fields); /* AArch64 Debug Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) print_id_register(sb, "Debug Features 0", cpu_desc[cpu].id_aa64dfr0, id_aa64dfr0_fields); /* AArch64 Memory Model Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) print_id_register(sb, "Debug Features 1", cpu_desc[cpu].id_aa64dfr1, id_aa64dfr1_fields); /* AArch64 Auxiliary Feature Register 0 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) print_id_register(sb, "Auxiliary Features 0", cpu_desc[cpu].id_aa64afr0, id_aa64afr0_fields); /* AArch64 Auxiliary Feature Register 1 */ if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) print_id_register(sb, "Auxiliary Features 1", cpu_desc[cpu].id_aa64afr1, id_aa64afr1_fields); sbuf_delete(sb); sb = NULL; #undef SEP_STR } void identify_cpu(void) { u_int midr; u_int impl_id; u_int part_id; u_int cpu; size_t i; const struct cpu_parts *cpu_partsp = NULL; cpu = PCPU_GET(cpuid); midr = get_midr(); /* * Store midr to pcpu to allow fast reading * from EL0, EL1 and assembly code. 
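 *
 * For example, the ThunderX pass 1.1 warning in print_cpu_features()
 * above reads the cached copy via CPU_VAR(PCPU_GET(midr)) rather than
 * issuing another mrs read of MIDR_EL1.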
*/ PCPU_SET(midr, midr); impl_id = CPU_IMPL(midr); for (i = 0; i < nitems(cpu_implementers); i++) { if (impl_id == cpu_implementers[i].impl_id || cpu_implementers[i].impl_id == 0) { cpu_desc[cpu].cpu_impl = impl_id; cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name; cpu_partsp = cpu_implementers[i].cpu_parts; break; } } part_id = CPU_PART(midr); for (i = 0; &cpu_partsp[i] != NULL; i++) { if (part_id == cpu_partsp[i].part_id || cpu_partsp[i].part_id == 0) { cpu_desc[cpu].cpu_part_num = part_id; cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name; break; } } cpu_desc[cpu].cpu_revision = CPU_REV(midr); cpu_desc[cpu].cpu_variant = CPU_VAR(midr); snprintf(cpu_model, sizeof(cpu_model), "%s %s r%dp%d", cpu_desc[cpu].cpu_impl_name, cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant, cpu_desc[cpu].cpu_revision); /* Save affinity for current CPU */ cpu_desc[cpu].mpidr = get_mpidr(); CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK; cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1); cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1); cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1); cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1); cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1); cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1); cpu_desc[cpu].id_aa64mmfr2 = READ_SPECIALREG(id_aa64mmfr2_el1); cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1); cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1); if (cpu != 0) { /* * This code must run on one cpu at a time, but we are * not scheduling on the current core so implement a * simple spinlock. */ while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0) __asm __volatile("wfe" ::: "memory"); switch (cpu_aff_levels) { case 0: if (CPU_AFF0(cpu_desc[cpu].mpidr) != CPU_AFF0(cpu_desc[0].mpidr)) cpu_aff_levels = 1; /* FALLTHROUGH */ case 1: if (CPU_AFF1(cpu_desc[cpu].mpidr) != CPU_AFF1(cpu_desc[0].mpidr)) cpu_aff_levels = 2; /* FALLTHROUGH */ case 2: if (CPU_AFF2(cpu_desc[cpu].mpidr) != CPU_AFF2(cpu_desc[0].mpidr)) cpu_aff_levels = 3; /* FALLTHROUGH */ case 3: if (CPU_AFF3(cpu_desc[cpu].mpidr) != CPU_AFF3(cpu_desc[0].mpidr)) cpu_aff_levels = 4; break; } if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0) cpu_print_regs |= PRINT_ID_AA64_AFR0; if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1) cpu_print_regs |= PRINT_ID_AA64_AFR1; if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0) cpu_print_regs |= PRINT_ID_AA64_DFR0; if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1) cpu_print_regs |= PRINT_ID_AA64_DFR1; if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0) cpu_print_regs |= PRINT_ID_AA64_ISAR0; if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1) cpu_print_regs |= PRINT_ID_AA64_ISAR1; if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0) cpu_print_regs |= PRINT_ID_AA64_MMFR0; if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1) cpu_print_regs |= PRINT_ID_AA64_MMFR1; if (cpu_desc[cpu].id_aa64mmfr2 != cpu_desc[0].id_aa64mmfr2) cpu_print_regs |= PRINT_ID_AA64_MMFR2; if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0) cpu_print_regs |= PRINT_ID_AA64_PFR0; if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1) cpu_print_regs |= PRINT_ID_AA64_PFR1; /* Wake up the other CPUs */ atomic_store_rel_int(&ident_lock, 0); __asm __volatile("sev" ::: "memory"); } } Index: head/sys/arm64/include/atomic.h =================================================================== --- head/sys/arm64/include/atomic.h 
(revision 357459) +++ head/sys/arm64/include/atomic.h (revision 357460) @@ -1,563 +1,600 @@ /*- * Copyright (c) 2013 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ATOMIC_H_ #define _MACHINE_ATOMIC_H_ #define isb() __asm __volatile("isb" : : : "memory") /* * Options for DMB and DSB: * oshld Outer Shareable, load * oshst Outer Shareable, store * osh Outer Shareable, all * nshld Non-shareable, load * nshst Non-shareable, store * nsh Non-shareable, all * ishld Inner Shareable, load * ishst Inner Shareable, store * ish Inner Shareable, all * ld Full system, load * st Full system, store * sy Full system, all */ #define dsb(opt) __asm __volatile("dsb " __STRING(opt) : : : "memory") #define dmb(opt) __asm __volatile("dmb " __STRING(opt) : : : "memory") #define mb() dmb(sy) /* Full system memory barrier all */ #define wmb() dmb(st) /* Full system memory barrier store */ #define rmb() dmb(ld) /* Full system memory barrier load */ #if defined(KCSAN) && !defined(KCSAN_RUNTIME) #include #else #include +#ifdef _KERNEL +extern bool lse_supported; + +#ifdef LSE_ATOMICS +#define _ATOMIC_LSE_SUPPORTED 1 +#else +#define _ATOMIC_LSE_SUPPORTED lse_supported +#endif +#else +#define _ATOMIC_LSE_SUPPORTED 0 +#endif + #define _ATOMIC_OP_PROTO(t, op, bar, flav) \ static __inline void \ atomic_##op##_##bar##t##flav(volatile uint##t##_t *p, uint##t##_t val) #define _ATOMIC_OP_IMPL(t, w, s, op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \ _ATOMIC_OP_PROTO(t, op, bar, _llsc) \ { \ uint##t##_t tmp; \ int res; \ \ pre; \ __asm __volatile( \ "1: ld"#a"xr"#s" %"#w"0, [%2]\n" \ " "#llsc_asm_op" %"#w"0, %"#w"0, %"#w"3\n" \ " st"#l"xr"#s" %w1, %"#w"0, [%2]\n" \ " cbnz %w1, 1b\n" \ : "=&r"(tmp), "=&r"(res) \ : "r" (p), "r" (val) \ : "memory" \ ); \ } \ \ _ATOMIC_OP_PROTO(t, op, bar, _lse) \ { \ uint##t##_t tmp; \ \ pre; \ __asm __volatile( \ ".arch_extension lse\n" \ "ld"#lse_asm_op#a#l#s" %"#w"2, %"#w"0, [%1]\n" \ ".arch_extension nolse\n" \ : "=r" (tmp) \ : "r" (p), "r" (val) \ : "memory" \ ); \ } \ \ _ATOMIC_OP_PROTO(t, op, bar, ) \ { \ - atomic_##op##_##bar##t##_llsc(p, val); \ + if (_ATOMIC_LSE_SUPPORTED) \ + atomic_##op##_##bar##t##_lse(p, val); \ + else \ + atomic_##op##_##bar##t##_llsc(p, val); \ } #define __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, bar, a, l) \ 
_ATOMIC_OP_IMPL(8, w, b, op, llsc_asm_op, lse_asm_op, pre, \ bar, a, l) \ _ATOMIC_OP_IMPL(16, w, h, op, llsc_asm_op, lse_asm_op, pre, \ bar, a, l) \ _ATOMIC_OP_IMPL(32, w, , op, llsc_asm_op, lse_asm_op, pre, \ bar, a, l) \ _ATOMIC_OP_IMPL(64, , , op, llsc_asm_op, lse_asm_op, pre, \ bar, a, l) #define _ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre) \ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, , , ) \ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, acq_, a, ) \ __ATOMIC_OP(op, llsc_asm_op, lse_asm_op, pre, rel_, , l) _ATOMIC_OP(add, add, add, ) _ATOMIC_OP(clear, bic, clr, ) _ATOMIC_OP(set, orr, set, ) _ATOMIC_OP(subtract, add, add, val = -val) #define _ATOMIC_CMPSET_PROTO(t, bar, flav) \ static __inline int \ atomic_cmpset_##bar##t##flav(volatile uint##t##_t *p, \ uint##t##_t cmpval, uint##t##_t newval) #define _ATOMIC_FCMPSET_PROTO(t, bar, flav) \ static __inline int \ atomic_fcmpset_##bar##t##flav(volatile uint##t##_t *p, \ uint##t##_t *cmpval, uint##t##_t newval) #define _ATOMIC_CMPSET_IMPL(t, w, s, bar, a, l) \ _ATOMIC_CMPSET_PROTO(t, bar, _llsc) \ { \ uint##t##_t tmp; \ int res; \ \ __asm __volatile( \ "1: mov %w1, #1\n" \ " ld"#a"xr"#s" %"#w"0, [%2]\n" \ " cmp %"#w"0, %"#w"3\n" \ " b.ne 2f\n" \ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \ " cbnz %w1, 1b\n" \ "2:" \ : "=&r"(tmp), "=&r"(res) \ : "r" (p), "r" (cmpval), "r" (newval) \ : "cc", "memory" \ ); \ \ return (!res); \ } \ \ _ATOMIC_CMPSET_PROTO(t, bar, _lse) \ { \ uint##t##_t oldval; \ int res; \ \ oldval = cmpval; \ __asm __volatile( \ ".arch_extension lse\n" \ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \ "cmp %"#w"1, %"#w"2\n" \ "cset %w0, eq\n" \ ".arch_extension nolse\n" \ : "=r" (res), "+&r" (cmpval) \ : "r" (oldval), "r" (p), "r" (newval) \ : "cc", "memory" \ ); \ \ return (res); \ } \ \ _ATOMIC_CMPSET_PROTO(t, bar, ) \ { \ - return (atomic_cmpset_##bar##t##_llsc(p, cmpval, newval)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return (atomic_cmpset_##bar##t##_lse(p, cmpval, \ + newval)); \ + else \ + return (atomic_cmpset_##bar##t##_llsc(p, cmpval, \ + newval)); \ } \ \ _ATOMIC_FCMPSET_PROTO(t, bar, _llsc) \ { \ uint##t##_t _cmpval, tmp; \ int res; \ \ _cmpval = *cmpval; \ __asm __volatile( \ " mov %w1, #1\n" \ " ld"#a"xr"#s" %"#w"0, [%2]\n" \ " cmp %"#w"0, %"#w"3\n" \ " b.ne 1f\n" \ " st"#l"xr"#s" %w1, %"#w"4, [%2]\n" \ "1:" \ : "=&r"(tmp), "=&r"(res) \ : "r" (p), "r" (_cmpval), "r" (newval) \ : "cc", "memory" \ ); \ *cmpval = tmp; \ \ return (!res); \ } \ \ _ATOMIC_FCMPSET_PROTO(t, bar, _lse) \ { \ uint##t##_t _cmpval, tmp; \ int res; \ \ _cmpval = tmp = *cmpval; \ __asm __volatile( \ ".arch_extension lse\n" \ "cas"#a#l#s" %"#w"1, %"#w"4, [%3]\n" \ "cmp %"#w"1, %"#w"2\n" \ "cset %w0, eq\n" \ ".arch_extension nolse\n" \ : "=r" (res), "+&r" (tmp) \ : "r" (_cmpval), "r" (p), "r" (newval) \ : "cc", "memory" \ ); \ *cmpval = tmp; \ \ return (res); \ } \ \ _ATOMIC_FCMPSET_PROTO(t, bar, ) \ { \ - return (atomic_fcmpset_##bar##t##_llsc(p, cmpval, newval)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return (atomic_fcmpset_##bar##t##_lse(p, cmpval, \ + newval)); \ + else \ + return (atomic_fcmpset_##bar##t##_llsc(p, cmpval, \ + newval)); \ } #define _ATOMIC_CMPSET(bar, a, l) \ _ATOMIC_CMPSET_IMPL(8, w, b, bar, a, l) \ _ATOMIC_CMPSET_IMPL(16, w, h, bar, a, l) \ _ATOMIC_CMPSET_IMPL(32, w, , bar, a, l) \ _ATOMIC_CMPSET_IMPL(64, , , bar, a, l) _ATOMIC_CMPSET( , , ) _ATOMIC_CMPSET(acq_, a, ) _ATOMIC_CMPSET(rel_, ,l) #define _ATOMIC_FETCHADD_PROTO(t, flav) \ static __inline uint##t##_t \ atomic_fetchadd_##t##flav(volatile uint##t##_t *p, uint##t##_t val) #define 
_ATOMIC_FETCHADD_IMPL(t, w) \ _ATOMIC_FETCHADD_PROTO(t, _llsc) \ { \ uint##t##_t ret, tmp; \ int res; \ \ __asm __volatile( \ "1: ldxr %"#w"2, [%3]\n" \ " add %"#w"0, %"#w"2, %"#w"4\n" \ " stxr %w1, %"#w"0, [%3]\n" \ " cbnz %w1, 1b\n" \ : "=&r" (tmp), "=&r" (res), "=&r" (ret) \ : "r" (p), "r" (val) \ : "memory" \ ); \ \ return (ret); \ } \ \ _ATOMIC_FETCHADD_PROTO(t, _lse) \ { \ uint##t##_t ret; \ \ __asm __volatile( \ ".arch_extension lse\n" \ "ldadd %"#w"2, %"#w"0, [%1]\n" \ ".arch_extension nolse\n" \ : "=r" (ret) \ : "r" (p), "r" (val) \ : "memory" \ ); \ \ return (ret); \ } \ \ _ATOMIC_FETCHADD_PROTO(t, ) \ { \ - return (atomic_fetchadd_##t##_llsc(p, val)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return (atomic_fetchadd_##t##_lse(p, val)); \ + else \ + return (atomic_fetchadd_##t##_llsc(p, val)); \ } _ATOMIC_FETCHADD_IMPL(32, w) _ATOMIC_FETCHADD_IMPL(64, ) #define _ATOMIC_SWAP_PROTO(t, flav) \ static __inline uint##t##_t \ atomic_swap_##t##flav(volatile uint##t##_t *p, uint##t##_t val) #define _ATOMIC_READANDCLEAR_PROTO(t, flav) \ static __inline uint##t##_t \ atomic_readandclear_##t##flav(volatile uint##t##_t *p) #define _ATOMIC_SWAP_IMPL(t, w, zreg) \ _ATOMIC_SWAP_PROTO(t, _llsc) \ { \ uint##t##_t ret; \ int res; \ \ __asm __volatile( \ "1: ldxr %"#w"1, [%2]\n" \ " stxr %w0, %"#w"3, [%2]\n" \ " cbnz %w0, 1b\n" \ : "=&r" (res), "=&r" (ret) \ : "r" (p), "r" (val) \ : "memory" \ ); \ \ return (ret); \ } \ \ _ATOMIC_SWAP_PROTO(t, _lse) \ { \ uint##t##_t ret; \ \ __asm __volatile( \ ".arch_extension lse\n" \ "swp %"#w"2, %"#w"0, [%1]\n" \ ".arch_extension nolse\n" \ : "=r" (ret) \ : "r" (p), "r" (val) \ : "memory" \ ); \ \ return (ret); \ } \ \ _ATOMIC_SWAP_PROTO(t, ) \ { \ - return (atomic_swap_##t##_llsc(p, val)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return (atomic_swap_##t##_lse(p, val)); \ + else \ + return (atomic_swap_##t##_llsc(p, val)); \ } \ \ _ATOMIC_READANDCLEAR_PROTO(t, _llsc) \ { \ uint##t##_t ret; \ int res; \ \ __asm __volatile( \ "1: ldxr %"#w"1, [%2]\n" \ " stxr %w0, "#zreg", [%2]\n" \ " cbnz %w0, 1b\n" \ : "=&r" (res), "=&r" (ret) \ : "r" (p) \ : "memory" \ ); \ \ return (ret); \ } \ \ _ATOMIC_READANDCLEAR_PROTO(t, _lse) \ { \ return (atomic_swap_##t##_lse(p, 0)); \ } \ \ _ATOMIC_READANDCLEAR_PROTO(t, ) \ { \ - return (atomic_readandclear_##t##_llsc(p)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return (atomic_readandclear_##t##_lse(p)); \ + else \ + return (atomic_readandclear_##t##_llsc(p)); \ } _ATOMIC_SWAP_IMPL(32, w, wzr) _ATOMIC_SWAP_IMPL(64, , xzr) #define _ATOMIC_TEST_OP_PROTO(t, op, flav) \ static __inline int \ atomic_testand##op##_##t##flav(volatile uint##t##_t *p, u_int val) #define _ATOMIC_TEST_OP_IMPL(t, w, op, llsc_asm_op, lse_asm_op) \ _ATOMIC_TEST_OP_PROTO(t, op, _llsc) \ { \ uint##t##_t mask, old, tmp; \ int res; \ \ mask = 1u << (val & 0x1f); \ __asm __volatile( \ "1: ldxr %"#w"2, [%3]\n" \ " "#llsc_asm_op" %"#w"0, %"#w"2, %"#w"4\n" \ " stxr %w1, %"#w"0, [%3]\n" \ " cbnz %w1, 1b\n" \ : "=&r" (tmp), "=&r" (res), "=&r" (old) \ : "r" (p), "r" (mask) \ : "memory" \ ); \ \ return ((old & mask) != 0); \ } \ \ _ATOMIC_TEST_OP_PROTO(t, op, _lse) \ { \ uint##t##_t mask, old; \ \ mask = 1u << (val & 0x1f); \ __asm __volatile( \ ".arch_extension lse\n" \ "ld"#lse_asm_op" %"#w"2, %"#w"0, [%1]\n" \ ".arch_extension nolse\n" \ : "=r" (old) \ : "r" (p), "r" (mask) \ : "memory" \ ); \ \ return ((old & mask) != 0); \ } \ \ _ATOMIC_TEST_OP_PROTO(t, op, ) \ { \ - return (atomic_testand##op##_##t##_llsc(p, val)); \ + if (_ATOMIC_LSE_SUPPORTED) \ + return 
(atomic_testand##op##_##t##_lse(p, val)); \ + else \ + return (atomic_testand##op##_##t##_llsc(p, val)); \ } #define _ATOMIC_TEST_OP(op, llsc_asm_op, lse_asm_op) \ _ATOMIC_TEST_OP_IMPL(32, w, op, llsc_asm_op, lse_asm_op) \ _ATOMIC_TEST_OP_IMPL(64, , op, llsc_asm_op, lse_asm_op) _ATOMIC_TEST_OP(clear, bic, clr) _ATOMIC_TEST_OP(set, orr, set) #define _ATOMIC_LOAD_ACQ_IMPL(t, w, s) \ static __inline uint##t##_t \ atomic_load_acq_##t(volatile uint##t##_t *p) \ { \ uint##t##_t ret; \ \ __asm __volatile( \ "ldar"#s" %"#w"0, [%1]\n" \ : "=&r" (ret) \ : "r" (p) \ : "memory"); \ \ return (ret); \ } _ATOMIC_LOAD_ACQ_IMPL(8, w, b) _ATOMIC_LOAD_ACQ_IMPL(16, w, h) _ATOMIC_LOAD_ACQ_IMPL(32, w, ) _ATOMIC_LOAD_ACQ_IMPL(64, , ) #define _ATOMIC_STORE_REL_IMPL(t, w, s) \ static __inline void \ atomic_store_rel_##t(volatile uint##t##_t *p, uint##t##_t val) \ { \ __asm __volatile( \ "stlr"#s" %"#w"0, [%1]\n" \ : \ : "r" (val), "r" (p) \ : "memory"); \ } _ATOMIC_STORE_REL_IMPL(8, w, b) _ATOMIC_STORE_REL_IMPL(16, w, h) _ATOMIC_STORE_REL_IMPL(32, w, ) _ATOMIC_STORE_REL_IMPL(64, , ) #define atomic_add_int atomic_add_32 #define atomic_fcmpset_int atomic_fcmpset_32 #define atomic_clear_int atomic_clear_32 #define atomic_cmpset_int atomic_cmpset_32 #define atomic_fetchadd_int atomic_fetchadd_32 #define atomic_readandclear_int atomic_readandclear_32 #define atomic_set_int atomic_set_32 #define atomic_swap_int atomic_swap_32 #define atomic_subtract_int atomic_subtract_32 #define atomic_testandclear_int atomic_testandclear_32 #define atomic_testandset_int atomic_testandset_32 #define atomic_add_acq_int atomic_add_acq_32 #define atomic_fcmpset_acq_int atomic_fcmpset_acq_32 #define atomic_clear_acq_int atomic_clear_acq_32 #define atomic_cmpset_acq_int atomic_cmpset_acq_32 #define atomic_load_acq_int atomic_load_acq_32 #define atomic_set_acq_int atomic_set_acq_32 #define atomic_subtract_acq_int atomic_subtract_acq_32 #define atomic_add_rel_int atomic_add_rel_32 #define atomic_fcmpset_rel_int atomic_fcmpset_rel_32 #define atomic_clear_rel_int atomic_clear_rel_32 #define atomic_cmpset_rel_int atomic_cmpset_rel_32 #define atomic_set_rel_int atomic_set_rel_32 #define atomic_subtract_rel_int atomic_subtract_rel_32 #define atomic_store_rel_int atomic_store_rel_32 #define atomic_add_long atomic_add_64 #define atomic_fcmpset_long atomic_fcmpset_64 #define atomic_clear_long atomic_clear_64 #define atomic_cmpset_long atomic_cmpset_64 #define atomic_fetchadd_long atomic_fetchadd_64 #define atomic_readandclear_long atomic_readandclear_64 #define atomic_set_long atomic_set_64 #define atomic_swap_long atomic_swap_64 #define atomic_subtract_long atomic_subtract_64 #define atomic_testandclear_long atomic_testandclear_64 #define atomic_testandset_long atomic_testandset_64 #define atomic_add_ptr atomic_add_64 #define atomic_fcmpset_ptr atomic_fcmpset_64 #define atomic_clear_ptr atomic_clear_64 #define atomic_cmpset_ptr atomic_cmpset_64 #define atomic_fetchadd_ptr atomic_fetchadd_64 #define atomic_readandclear_ptr atomic_readandclear_64 #define atomic_set_ptr atomic_set_64 #define atomic_swap_ptr atomic_swap_64 #define atomic_subtract_ptr atomic_subtract_64 #define atomic_add_acq_long atomic_add_acq_64 #define atomic_fcmpset_acq_long atomic_fcmpset_acq_64 #define atomic_clear_acq_long atomic_clear_acq_64 #define atomic_cmpset_acq_long atomic_cmpset_acq_64 #define atomic_load_acq_long atomic_load_acq_64 #define atomic_set_acq_long atomic_set_acq_64 #define atomic_subtract_acq_long atomic_subtract_acq_64 #define atomic_add_acq_ptr 
atomic_add_acq_64 #define atomic_fcmpset_acq_ptr atomic_fcmpset_acq_64 #define atomic_clear_acq_ptr atomic_clear_acq_64 #define atomic_cmpset_acq_ptr atomic_cmpset_acq_64 #define atomic_load_acq_ptr atomic_load_acq_64 #define atomic_set_acq_ptr atomic_set_acq_64 #define atomic_subtract_acq_ptr atomic_subtract_acq_64 #define atomic_add_rel_long atomic_add_rel_64 #define atomic_fcmpset_rel_long atomic_fcmpset_rel_64 #define atomic_clear_rel_long atomic_clear_rel_64 #define atomic_cmpset_rel_long atomic_cmpset_rel_64 #define atomic_set_rel_long atomic_set_rel_64 #define atomic_subtract_rel_long atomic_subtract_rel_64 #define atomic_store_rel_long atomic_store_rel_64 #define atomic_add_rel_ptr atomic_add_rel_64 #define atomic_fcmpset_rel_ptr atomic_fcmpset_rel_64 #define atomic_clear_rel_ptr atomic_clear_rel_64 #define atomic_cmpset_rel_ptr atomic_cmpset_rel_64 #define atomic_set_rel_ptr atomic_set_rel_64 #define atomic_subtract_rel_ptr atomic_subtract_rel_64 #define atomic_store_rel_ptr atomic_store_rel_64 static __inline void atomic_thread_fence_acq(void) { dmb(ld); } static __inline void atomic_thread_fence_rel(void) { dmb(sy); } static __inline void atomic_thread_fence_acq_rel(void) { dmb(sy); } static __inline void atomic_thread_fence_seq_cst(void) { dmb(sy); } #endif /* KCSAN && !KCSAN_RUNTIME */ #endif /* _MACHINE_ATOMIC_H_ */ Index: head/sys/conf/options.arm64 =================================================================== --- head/sys/conf/options.arm64 (revision 357459) +++ head/sys/conf/options.arm64 (revision 357460) @@ -1,29 +1,30 @@ # $FreeBSD$ ARM64 opt_global.h INTRNG opt_global.h SOCDEV_PA opt_global.h SOCDEV_VA opt_global.h THUNDERX_PASS_1_1_ERRATA opt_global.h VFP opt_global.h LINUX_BOOT_ABI opt_global.h +LSE_ATOMICS opt_global.h # Binary compatibility COMPAT_FREEBSD32 opt_global.h # EFI Runtime services support EFIRT opt_efirt.h # SoC Support SOC_ALLWINNER_A64 opt_soc.h SOC_ALLWINNER_H5 opt_soc.h SOC_ALLWINNER_H6 opt_soc.h SOC_BRCM_BCM2837 opt_soc.h SOC_BRCM_BCM2838 opt_soc.h SOC_CAVM_THUNDERX opt_soc.h SOC_HISI_HI6220 opt_soc.h SOC_INTEL_STRATIX10 opt_soc.h SOC_MARVELL_8K opt_soc.h SOC_ROCKCHIP_RK3328 opt_soc.h SOC_ROCKCHIP_RK3399 opt_soc.h SOC_XILINX_ZYNQ opt_soc.h
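For reference, every wrapper generated by the atomic.h macros above now dispatches on _ATOMIC_LSE_SUPPORTED instead of unconditionally calling the LL/SC variant. A sketch of the expansion for one operation (the _lse/_llsc names are exactly those the macros generate; this is illustration, not additional code in the change):

	static __inline void
	atomic_add_32(volatile uint32_t *p, uint32_t val)
	{
		if (_ATOMIC_LSE_SUPPORTED)
			/* one "ldadd" instruction on LSE-capable parts */
			atomic_add_32_lse(p, val);
		else
			/* classic ldxr/add/stxr retry loop */
			atomic_add_32_llsc(p, val);
	}

With "options LSE_ATOMICS" in the kernel config the test is the compile-time constant 1, the LL/SC path is optimised away, and identify_cpu_sysinit() panics at boot on hardware that lacks LSE. Without the option, the runtime lse_supported flag (set once HWCAP_ATOMICS has been derived from ID_AA64ISAR0_EL1.Atomic on every CPU) selects the path. Userland can make the same decision from elf_aux_info(AT_HWCAP, ...) and HWCAP_ATOMICS.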