diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
--- a/sys/amd64/amd64/initcpu.c
+++ b/sys/amd64/amd64/initcpu.c
@@ -196,6 +196,23 @@
         }
 }
 
+void init_amd_sem(void *dummy)
+{
+        uint64_t msr;
+
+        /* FIXME: should probably check how much memory the system actually has. */
+        if (amd_encrypted_memory & AMD_SEM_SUPPORTED) {
+                /* The PTE bit that is OR'd in to request encryption. */
+                // FIXME: every core writes this field at the same time?!
+                pg_sem_c = (pt_entry_t)1 << (amd_encrypted_memory2 & AMD_SEM_CBIT_MASK);
+
+                /* Enable the C-bit for PTEs. */
+                msr = rdmsr(MSR_SYSCFG);
+                msr |= 1 << 23;
+                wrmsr(MSR_SYSCFG, msr);
+        }
+}
+
 /*
  * Initialize special VIA features
  */
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -172,6 +172,19 @@
     char *xfpustate, size_t xfpustate_len);
 
 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+
+static void cpu_amd_sem_init(void *dummy)
+{
+        printf("********* amd_sem_init\n");
+
+        /* Enable SEM on all cores at the same time. */
+        smp_rendezvous(NULL, init_amd_sem, NULL, NULL);
+}
+
+SYSINIT(amdsem, SI_SUB_SMP, SI_ORDER_EIGHTH, cpu_amd_sem_init, NULL);
+
+
 /* Preload data parse function */
 static caddr_t native_parse_preload_data(u_int64_t);
 
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -367,6 +367,9 @@
 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 
 pt_entry_t pg_nx;
+/* The C-bit used to mark pages as encrypted. */
+pt_entry_t pg_sem_c;
+
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
 
 /* Unused, kept for ABI stability on the stable branch. */
@@ -5211,6 +5214,11 @@
 
                 pml4e = pmap_pml4e(pmap, sva);
                 if ((*pml4e & PG_V) == 0) {
+                        /*
+                         * If the PML4 entry is not present, advance past the
+                         * entire range it covers (a lot of pages!) instead of
+                         * walking it one leaf page at a time.
+                         */
                         va_next = (sva + NBPML4) & ~PML4MASK;
                         if (va_next < sva)
                                 va_next = eva;
@@ -5219,6 +5227,9 @@
 
                 pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
                 if ((*pdpe & PG_V) == 0) {
+                        /*
+                         * Likewise: if the PDPE is not present, skip ahead.
+                         */
                         va_next = (sva + NBPDP) & ~PDPMASK;
                         if (va_next < sva)
                                 va_next = eva;
@@ -5782,7 +5793,8 @@
         if ((flags & PMAP_ENTER_WIRED) != 0)
                 newpte |= PG_W;
         if (va < VM_MAXUSER_ADDRESS)
-                newpte |= PG_U;
+                /* As a first test, encrypt all user mode pages. */
+                newpte |= PG_U | pg_sem_c;
         if (pmap == kernel_pmap)
                 newpte |= PG_G;
         newpte |= pmap_cache_bits(pmap, m->md.pat_mode, psind > 0);
@@ -6344,7 +6356,8 @@
         if ((prot & VM_PROT_EXECUTE) == 0)
                 newpte |= pg_nx;
         if (va < VM_MAXUSER_ADDRESS)
-                newpte |= PG_U | pmap_pkru_get(pmap, va);
+                /* As a first test, encrypt all user mode pages. */
+                newpte |= PG_U | pg_sem_c | pmap_pkru_get(pmap, va);
         pte_store(pte, newpte);
         return (mpte);
 }
@@ -6444,6 +6457,7 @@
                 pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
                 pde = &pde[pmap_pde_index(addr)];
                 if ((*pde & PG_V) == 0) {
+                        /* FIXME: not sure whether pg_sem_c belongs here too. */
                         pde_store(pde, pa | PG_PS | PG_M | PG_A |
                             PG_U | PG_RW | PG_V);
                         pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
@@ -10204,6 +10218,7 @@
                         break;
                 }
 
+                /* FIXME: marker for jo: add C-bit debugging output here. */
                 sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %s %d %d %d\n",
                     range->sva, eva,
                     (range->attrs & X86_PG_RW) != 0 ? 'w' : '-',
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -117,16 +117,25 @@
 #define EPT_PG_EMUL_V           X86_PG_AVAIL(52)
 #define EPT_PG_EMUL_RW          X86_PG_AVAIL(53)
 #define PG_PROMOTED             X86_PG_AVAIL(54)        /* PDE only */
-#define PG_FRAME                (0x000ffffffffff000ul)
-#define PG_PS_FRAME             (0x000fffffffe00000ul)
-#define PG_PS_PDP_FRAME         (0x000fffffc0000000ul)
+
+// FIXME: for AMD SEM this bitmask needs to be trimmed! Right now it covers the full address bit range, all the way up to bit 51,
+// but with SEM the actual C-bit position and address width reduction are dynamic, so we have to make a guess here. The reduction
+// is usually (?) 5 bits, so 52 - 5 = 47: bit 46 is the last address bit and bit 47 is the C-bit.
+//#define PG_FRAME              (0x000ffffffffff000ul)
+#define PG_FRAME                (0x00007ffffffff000ul)
+//#define PG_PS_FRAME           (0x000fffffffe00000ul)
+#define PG_PS_FRAME             (0x00007fffffe00000ul)
+//#define PG_PS_PDP_FRAME       (0x000fffffc0000000ul)
+#define PG_PS_PDP_FRAME         (0x00007fffc0000000ul)
+
 
 /*
  * Promotion to a 2MB (PDE) page mapping requires that the corresponding 4KB
  * (PTE) page mappings have identical settings for the following fields:
  */
+// FIXME: this needs to take the SEM C-bit into account too!
 #define PG_PTE_PROMOTE  (PG_NX | PG_MANAGED | PG_W | PG_G | PG_PTE_CACHE | \
-    PG_M | PG_A | PG_U | PG_RW | PG_V | PG_PKU_MASK)
+    PG_M | PG_A | PG_U | PG_RW | PG_V | PG_PKU_MASK | pg_sem_c)
 
 /*
  * Page Protection Exception bits
@@ -297,6 +306,8 @@
 
 extern pt_entry_t pg_nx;
 
+extern pt_entry_t pg_sem_c;
+
 #endif /* _KERNEL */
 
 /*
diff --git a/sys/x86/include/specialreg.h b/sys/x86/include/specialreg.h
--- a/sys/x86/include/specialreg.h
+++ b/sys/x86/include/specialreg.h
@@ -384,6 +384,12 @@
 #define AMDID_COREID_SIZE       0x0000f000
 #define AMDID_COREID_SIZE_SHIFT 12
 
+/*
+ * AMD encrypted memory capabilities, CPUID function 8000_001F
+ */
+#define AMD_SEM_SUPPORTED       0x00000001
+#define AMD_SEM_CBIT_MASK       0x0000003F
+
 /*
  * CPUID instruction 7 Structured Extended Features, leaf 0 ebx info
  */
diff --git a/sys/x86/include/x86_var.h b/sys/x86/include/x86_var.h
--- a/sys/x86/include/x86_var.h
+++ b/sys/x86/include/x86_var.h
@@ -47,6 +47,8 @@
 extern  u_int   amd_rascap;
 extern  u_int   amd_pminfo;
 extern  u_int   amd_extended_feature_extensions;
+extern  u_int   amd_encrypted_memory;
+extern  u_int   amd_encrypted_memory2;
 extern  u_int   via_feature_rng;
 extern  u_int   via_feature_xcrypt;
 extern  u_int   cpu_clflush_line_size;
@@ -138,6 +140,7 @@
 void    identify_hypervisor(void);
 void    initializecpu(void);
 void    initializecpucache(void);
+void    init_amd_sem(void *dummy);
 bool    fix_cpuid(void);
 void    fillw(int /*u_short*/ pat, void *base, size_t cnt);
 int     is_physical_memory(vm_paddr_t addr);
diff --git a/sys/x86/x86/identcpu.c b/sys/x86/x86/identcpu.c
--- a/sys/x86/x86/identcpu.c
+++ b/sys/x86/x86/identcpu.c
@@ -94,6 +94,8 @@
 u_int   amd_rascap;             /* AMD RAS capabilities */
 u_int   amd_pminfo;             /* AMD advanced power management info */
 u_int   amd_extended_feature_extensions;
+u_int   amd_encrypted_memory;   /* eax: SEM/SEV support flags */
+u_int   amd_encrypted_memory2;  /* ebx: C-bit position, phys addr reduction */
 u_int   via_feature_rng;        /* VIA RNG features */
 u_int   via_feature_xcrypt;     /* VIA ACE features */
 u_int   cpu_high;               /* Highest arg to CPUID */
@@ -930,6 +932,17 @@
                         );
                 }
 
+                if (amd_encrypted_memory != 0) {
+                        printf("\n  AMD Encrypted Memory=0x%b, C-bit=%u",
+                            amd_encrypted_memory,
+                            "\020"
+                            "\001SEM"
+                            "\002SEV"
+                            /* FIXME: decode more bits */,
+                            (amd_encrypted_memory2 & AMD_SEM_CBIT_MASK)
+                        );
+                }
+
                 if (cpu_stdext_feature != 0) {
                         printf("\n  Structured Extended Features=0x%b",
                             cpu_stdext_feature,
@@ -1584,6 +1597,16 @@
                 cpu_maxphyaddr = (cpu_feature & CPUID_PAE) != 0 ? 36 : 32;
         }
 
+        if (cpu_exthigh >= 0x8000001F) {
+                do_cpuid(0x8000001F, regs);
+                amd_encrypted_memory = regs[0];
+                amd_encrypted_memory2 = regs[1];
+                /* FIXME: we should probably have a way of disabling this. */
+                // FIXME: this is not right! The address width is 52 bits, of which 5 are trimmed for SEM,
+                // but this CPU already reports 48 bits, so do NOT subtract another 5 bits from that!
+                //cpu_maxphyaddr -= (amd_encrypted_memory2 >> 6) & 0x3F;
+        }
+
 #ifdef __i386__
         if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
                 if (cpu == CPU_486) {
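For reference, here is a small userland sketch, not part of the patch, that decodes CPUID function 0x8000001F the same way the identcpu.c hunk and init_amd_sem() do and derives the pg_sem_c PTE mask from it. The harness itself (the file name semprobe.c, the use of GCC/Clang's <cpuid.h>, and the local copies of the AMD_SEM_SUPPORTED/AMD_SEM_CBIT_MASK values) is illustrative only; the 64-bit shift also shows why pg_sem_c must not be built with a plain int shift when the C-bit sits at bit 47.

/* semprobe.c -- illustrative only; build with: cc -O2 -o semprobe semprobe.c */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

#define AMD_SEM_SUPPORTED       0x00000001      /* EAX bit 0: memory encryption supported */
#define AMD_SEM_CBIT_MASK       0x0000003F      /* EBX[5:0]: C-bit position */

int
main(void)
{
        unsigned int eax, ebx, ecx, edx, cbit, reduction;
        uint64_t pg_sem_c;

        /* __get_cpuid() fails if the leaf is above the CPU's highest extended leaf. */
        if (!__get_cpuid(0x8000001F, &eax, &ebx, &ecx, &edx)) {
                printf("CPUID leaf 0x8000001F not available\n");
                return (1);
        }
        if ((eax & AMD_SEM_SUPPORTED) == 0) {
                printf("memory encryption not supported\n");
                return (1);
        }

        /* EBX[5:0] is the C-bit position, EBX[11:6] the physical address reduction. */
        cbit = ebx & AMD_SEM_CBIT_MASK;
        reduction = (ebx >> 6) & 0x3F;

        /* 64-bit shift: with cbit == 47, a plain "1 << cbit" would overflow an int. */
        pg_sem_c = (uint64_t)1 << cbit;

        printf("C-bit %u -> PTE mask 0x%016jx, phys addr reduction %u bits\n",
            cbit, (uintmax_t)pg_sem_c, reduction);
        return (0);
}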
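The pmap.h FIXME about trimming PG_FRAME can also be checked numerically. Below is a hedged sketch, again not part of the patch: sem_frame_mask() is a hypothetical helper that derives the frame masks from the C-bit position reported by CPUID instead of hard-coding bit 47. With cbit == 47 it reproduces exactly the constants the patch substitutes for PG_FRAME, PG_PS_FRAME and PG_PS_PDP_FRAME.

/* semmask.c -- illustrative only; build with: cc -O2 -o semmask semmask.c */
#include <assert.h>
#include <stdint.h>

static inline uint64_t
sem_frame_mask(unsigned int cbit, unsigned int page_shift)
{
        /* Keep address bits [page_shift, cbit); the C-bit and everything above it is dropped. */
        return ((((uint64_t)1 << cbit) - 1) & ~(((uint64_t)1 << page_shift) - 1));
}

int
main(void)
{
        /* With the C-bit at 47 these reproduce the constants used in the patch. */
        assert(sem_frame_mask(47, 12) == 0x00007ffffffff000ul);  /* PG_FRAME */
        assert(sem_frame_mask(47, 21) == 0x00007fffffe00000ul);  /* PG_PS_FRAME */
        assert(sem_frame_mask(47, 30) == 0x00007fffc0000000ul);  /* PG_PS_PDP_FRAME */
        return (0);
}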