Index: sys/amd64/vmm/io/iommu.c
===================================================================
--- sys/amd64/vmm/io/iommu.c
+++ sys/amd64/vmm/io/iommu.c
@@ -184,7 +184,7 @@
 
 	if (vmm_is_intel())
 		ops = &iommu_ops_intel;
-	else if (vmm_is_amd())
+	else if (vmm_is_amd() || vmm_is_hygon())
 		ops = &iommu_ops_amd;
 	else
 		ops = NULL;
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c
+++ sys/amd64/vmm/vmm.c
@@ -347,7 +347,7 @@
 
 	if (vmm_is_intel())
 		ops = &vmm_ops_intel;
-	else if (vmm_is_amd())
+	else if (vmm_is_amd() || vmm_is_hygon())
 		ops = &vmm_ops_amd;
 	else
 		return (ENXIO);
Index: sys/amd64/vmm/vmm_stat.c
===================================================================
--- sys/amd64/vmm/vmm_stat.c
+++ sys/amd64/vmm/vmm_stat.c
@@ -67,7 +67,7 @@
 	if (vst->scope == VMM_STAT_SCOPE_INTEL && !vmm_is_intel())
 		return;
 
-	if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd())
+	if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd() && !vmm_is_hygon())
 		return;
 
 	if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
Index: sys/amd64/vmm/vmm_util.h
===================================================================
--- sys/amd64/vmm/vmm_util.h
+++ sys/amd64/vmm/vmm_util.h
@@ -35,6 +35,7 @@
 
 bool	vmm_is_intel(void);
 bool	vmm_is_amd(void);
+bool	vmm_is_hygon(void);
 bool	vmm_supports_1G_pages(void);
 
 void	dump_trapframe(struct trapframe *tf);
Index: sys/amd64/vmm/vmm_util.c
===================================================================
--- sys/amd64/vmm/vmm_util.c
+++ sys/amd64/vmm/vmm_util.c
@@ -51,6 +51,12 @@
 	return (strcmp(cpu_vendor, "AuthenticAMD") == 0);
 }
 
+bool
+vmm_is_hygon(void)
+{
+	return (strcmp(cpu_vendor, "HygonGenuine") == 0);
+}
+
 bool
 vmm_supports_1G_pages(void)
 {
Index: sys/amd64/vmm/x86.c
===================================================================
--- sys/amd64/vmm/x86.c
+++ sys/amd64/vmm/x86.c
@@ -135,7 +135,7 @@
 		break;
 	case CPUID_8000_0008:
 		cpuid_count(*eax, *ecx, regs);
-		if (vmm_is_amd()) {
+		if (vmm_is_amd() || vmm_is_hygon()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
@@ -234,7 +234,7 @@
 
 	case CPUID_8000_001D:
 		/* AMD Cache topology, like 0000_0004 for Intel. */
-		if (!vmm_is_amd())
+		if (!vmm_is_amd() && !vmm_is_hygon())
			goto default_leaf;
 
 		/*
@@ -276,8 +276,9 @@
 		break;
 
 	case CPUID_8000_001E:
-		/* AMD Family 16h+ additional identifiers */
-		if (!vmm_is_amd() || CPUID_TO_FAMILY(cpu_id) < 0x16)
+		/* AMD Family 16h+ and Hygon Family 18h additional identifiers */
+		if ((!vmm_is_amd() || CPUID_TO_FAMILY(cpu_id) < 0x16) &&
+		    !vmm_is_hygon())
			goto default_leaf;
 
 		vm_get_topology(vm, &sockets, &cores, &threads,
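
Note (illustrative, not part of the patch): vmm_is_hygon() works because CPUID leaf 0 returns the 12-byte vendor ID in EBX, EDX, ECX, which the kernel caches in cpu_vendor at boot; on Hygon Dhyana parts that string is "HygonGenuine". The userland sketch below reproduces the same comparison so the string can be checked on a given host. It assumes a GCC/Clang toolchain providing <cpuid.h> and its __get_cpuid() helper; the program and file name are hypothetical and not part of the change.

/*
 * cpuid_vendor.c (hypothetical): print the CPUID leaf 0 vendor string and
 * report whether it matches the "HygonGenuine" value that vmm_is_hygon()
 * compares against.
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13];

	/* Leaf 0: highest standard leaf in EAX, vendor ID in EBX/EDX/ECX. */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return (1);

	/* The 12-byte vendor ID is assembled from EBX, EDX, ECX, in that order. */
	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("vendor: %s (Hygon: %s)\n", vendor,
	    strcmp(vendor, "HygonGenuine") == 0 ? "yes" : "no");
	return (0);
}

Hypothetical usage: cc -O2 -o cpuid_vendor cpuid_vendor.c && ./cpuid_vendor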