Index: head/sys/amd64/vmm/io/iommu.c
===================================================================
--- head/sys/amd64/vmm/io/iommu.c
+++ head/sys/amd64/vmm/io/iommu.c
@@ -184,7 +184,7 @@
 
 	if (vmm_is_intel())
 		ops = &iommu_ops_intel;
-	else if (vmm_is_amd())
+	else if (vmm_is_svm())
 		ops = &iommu_ops_amd;
 	else
 		ops = NULL;
Index: head/sys/amd64/vmm/vmm.c
===================================================================
--- head/sys/amd64/vmm/vmm.c
+++ head/sys/amd64/vmm/vmm.c
@@ -347,7 +347,7 @@
 
 	if (vmm_is_intel())
 		ops = &vmm_ops_intel;
-	else if (vmm_is_amd())
+	else if (vmm_is_svm())
 		ops = &vmm_ops_amd;
 	else
 		return (ENXIO);
Index: head/sys/amd64/vmm/vmm_stat.c
===================================================================
--- head/sys/amd64/vmm/vmm_stat.c
+++ head/sys/amd64/vmm/vmm_stat.c
@@ -67,7 +67,7 @@
 
 	if (vst->scope == VMM_STAT_SCOPE_INTEL && !vmm_is_intel())
 		return;
-	if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd())
+	if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_svm())
 		return;
 
 	if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
Index: head/sys/amd64/vmm/vmm_util.h
===================================================================
--- head/sys/amd64/vmm/vmm_util.h
+++ head/sys/amd64/vmm/vmm_util.h
@@ -34,7 +34,7 @@
 struct trapframe;
 
 bool	vmm_is_intel(void);
-bool	vmm_is_amd(void);
+bool	vmm_is_svm(void);
 bool	vmm_supports_1G_pages(void);
 
 void	dump_trapframe(struct trapframe *tf);
Index: head/sys/amd64/vmm/vmm_util.c
===================================================================
--- head/sys/amd64/vmm/vmm_util.c
+++ head/sys/amd64/vmm/vmm_util.c
@@ -46,9 +46,10 @@
 }
 
 bool
-vmm_is_amd(void)
+vmm_is_svm(void)
 {
-	return (strcmp(cpu_vendor, "AuthenticAMD") == 0);
+	return (strcmp(cpu_vendor, "AuthenticAMD") == 0 ||
+	    strcmp(cpu_vendor, "HygonGenuine") == 0);
 }
 
 bool
Index: head/sys/amd64/vmm/x86.c
===================================================================
--- head/sys/amd64/vmm/x86.c
+++ head/sys/amd64/vmm/x86.c
@@ -135,7 +135,7 @@
 			break;
 		case CPUID_8000_0008:
 			cpuid_count(*eax, *ecx, regs);
-			if (vmm_is_amd()) {
+			if (vmm_is_svm()) {
 				/*
 				 * As on Intel (0000_0007:0, EDX), mask out
 				 * unsupported or unsafe AMD extended features
@@ -234,7 +234,7 @@
 
 		case CPUID_8000_001D:
 			/* AMD Cache topology, like 0000_0004 for Intel. */
-			if (!vmm_is_amd())
+			if (!vmm_is_svm())
 				goto default_leaf;
 
 			/*
@@ -276,8 +276,11 @@
 			break;
 
 		case CPUID_8000_001E:
-			/* AMD Family 16h+ additional identifiers */
-			if (!vmm_is_amd() || CPUID_TO_FAMILY(cpu_id) < 0x16)
+			/*
+			 * AMD Family 16h+ and Hygon Family 18h additional
+			 * identifiers.
+			 */
+			if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
 				goto default_leaf;
 
 			vm_get_topology(vm, &sockets, &cores, &threads,
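
For reference, a minimal standalone userland sketch of the vendor test that the renamed vmm_is_svm() applies to the kernel's cpu_vendor string. It reads the vendor string directly via CPUID leaf 0 using the compiler-provided <cpuid.h>; the is_svm_capable_vendor() name is illustrative and not part of this change.

/*
 * Sketch only (not part of the patch): mirror the AuthenticAMD/HygonGenuine
 * acceptance test of vmm_is_svm(), but obtain the vendor string from CPUID
 * leaf 0 in userland instead of the kernel's cpu_vendor.
 */
#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
is_svm_capable_vendor(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return (false);

	/* CPUID stores the vendor string in EBX, EDX, ECX order. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	/* Same check as the patched vmm_is_svm(). */
	return (strcmp(vendor, "AuthenticAMD") == 0 ||
	    strcmp(vendor, "HygonGenuine") == 0);
}

int
main(void)
{
	printf("SVM-vendor CPU: %s\n", is_svm_capable_vendor() ? "yes" : "no");
	return (0);
}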