Index: sys/amd64/amd64/elf_machdep.c
===================================================================
--- sys/amd64/amd64/elf_machdep.c
+++ sys/amd64/amd64/elf_machdep.c
@@ -84,6 +84,20 @@
 };
 INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec);
 
+static void
+elf64_freebsd_sysentvec_fixup(struct sysentvec *sv)
+{
+	if (hw_lower_amd64_sharedpage != 0) {
+		sv->sv_shared_page_base = SHAREDPAGE - PAGE_SIZE;
+		sv->sv_usrstack = USRSTACK - PAGE_SIZE;
+		sv->sv_psstrings = PS_STRINGS - PAGE_SIZE;
+	}
+}
+
+SYSINIT(elf64_sysvec_fixup, SI_SUB_EXEC, SI_ORDER_FIRST,
+	(sysinit_cfunc_t) elf64_freebsd_sysentvec_fixup,
+	&elf64_freebsd_sysvec);
+
 static Elf64_Brandinfo freebsd_brand_info = {
 	.brand		= ELFOSABI_FREEBSD,
 	.machine	= EM_X86_64,
@@ -96,7 +110,7 @@
 	.flags		= BI_CAN_EXEC_DYN | BI_BRAND_NOTE
 };
 
-SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_SECOND,
 	(sysinit_cfunc_t) elf64_insert_brand_entry,
 	&freebsd_brand_info);
 
Index: sys/amd64/amd64/initcpu.c
===================================================================
--- sys/amd64/amd64/initcpu.c
+++ sys/amd64/amd64/initcpu.c
@@ -48,6 +48,11 @@
 static int	hw_instruction_sse;
 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
     &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
+static int	lower_sharedpage_init;
+int		hw_lower_amd64_sharedpage;
+SYSCTL_INT(_hw, OID_AUTO, lower_amd64_sharedpage, CTLFLAG_RDTUN,
+    &hw_lower_amd64_sharedpage, 0,
+    "Lower sharedpage to work around Ryzen issue with executing code near the top of user memory");
 /*
  * -1: automatic (default)
  *  0: keep enable CLFLUSH
@@ -120,6 +125,21 @@
 			msr = rdmsr(0xc0011020);
 			msr |= (uint64_t)1 << 15;
 			wrmsr(0xc0011020, msr);
+		}
+	}
+
+	/*
+	 * Work around a problem on Ryzen that is triggered by executing
+	 * code near the top of user memory, in our case the signal
+	 * trampoline code in the shared page on amd64.
+	 *
+	 * This is executed once before tunables take effect so it can
+	 * be overridden, and again as each AP is spun up and after resume.
+	 */
+	if (lower_sharedpage_init == 0) {
+		lower_sharedpage_init = 1;
+		if (CPUID_TO_FAMILY(cpu_id) == 0x17) {
+			hw_lower_amd64_sharedpage = 1;
 		}
 	}
 }
Index: sys/amd64/include/md_var.h
===================================================================
--- sys/amd64/include/md_var.h
+++ sys/amd64/include/md_var.h
@@ -34,7 +34,8 @@
 
 #include <x86/x86_var.h>
 
-extern uint64_t	*vm_page_dump;
+extern uint64_t	*vm_page_dump;
+extern int	hw_lower_amd64_sharedpage;
 
 struct savefpu;
Index: sys/amd64/linux/linux_sysvec.c
===================================================================
--- sys/amd64/linux/linux_sysvec.c
+++ sys/amd64/linux/linux_sysvec.c
@@ -844,14 +844,17 @@
 	linux_shared_page_obj = __elfN(linux_shared_page_init)
 	    (&linux_shared_page_mapping);
 
-	__elfN(linux_vdso_reloc)(&elf_linux_sysvec, SHAREDPAGE);
+	__elfN(linux_vdso_reloc)(&elf_linux_sysvec,
+	    (hw_lower_amd64_sharedpage != 0) ? (SHAREDPAGE - PAGE_SIZE) :
+	    SHAREDPAGE);
 
 	bcopy(elf_linux_sysvec.sv_sigcode, linux_shared_page_mapping,
 	    linux_szsigcode);
 	elf_linux_sysvec.sv_shared_page_obj = linux_shared_page_obj;
 
 	linux_kplatform = linux_shared_page_mapping +
-	    (linux_platform - (caddr_t)SHAREDPAGE);
+	    (linux_platform - (caddr_t)((hw_lower_amd64_sharedpage != 0) ?
+	    (SHAREDPAGE - PAGE_SIZE) : SHAREDPAGE));
 }
 
 SYSINIT(elf_linux_vdso_init, SI_SUB_EXEC, SI_ORDER_ANY,
     (sysinit_cfunc_t)linux_vdso_install, NULL);
@@ -953,6 +956,12 @@
 
 	switch(type) {
 	case MOD_LOAD:
+		if (hw_lower_amd64_sharedpage != 0) {
+			elf_linux_sysvec.sv_usrstack = USRSTACK - PAGE_SIZE;
+			elf_linux_sysvec.sv_psstrings = PS_STRINGS - PAGE_SIZE;
+			elf_linux_sysvec.sv_shared_page_base = SHAREDPAGE -
+			    PAGE_SIZE;
+		}
 		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
 		    ++brandinfo)
 			if (elf64_insert_brand_entry(*brandinfo) < 0)
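Since hw.lower_amd64_sharedpage is declared CTLFLAG_RDTUN, the value auto-detected
for family 0x17 can be overridden only at boot (e.g. hw.lower_amd64_sharedpage=0 in
/boot/loader.conf) and is read-only afterwards. For testing, the knob can be queried
from userland with sysctlbyname(3). The program below is a minimal sketch, not part
of the patch; only the sysctl name comes from the diff above, everything else is
illustrative:

/*
 * Sketch: report whether the running kernel has lowered the shared
 * page, the user stack, and ps_strings by one page.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int lowered;
	size_t len = sizeof(lowered);

	/* hw.lower_amd64_sharedpage is the CTLFLAG_RDTUN knob added above. */
	if (sysctlbyname("hw.lower_amd64_sharedpage", &lowered, &len,
	    NULL, 0) == -1) {
		/* Kernels without the patch lack the OID entirely. */
		perror("hw.lower_amd64_sharedpage");
		exit(1);
	}
	printf("shared page %s lowered by one page\n",
	    lowered != 0 ? "is" : "is not");
	return (0);
}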