Index: sys/amd64/amd64/machdep.c
===================================================================
--- sys/amd64/amd64/machdep.c
+++ sys/amd64/amd64/machdep.c
@@ -215,7 +215,8 @@
 static struct trapframe proc0_tf;
 struct region_descriptor r_gdt, r_idt;
 
-struct pcpu __pcpu[MAXCPU];
+struct pcpu *__pcpu;
+struct pcpu temp_bsp_pcpu;
 
 struct mtx icu_lock;
 
@@ -1543,13 +1544,68 @@
 	wrmsr(MSR_SF_MASK, PSL_NT | PSL_T | PSL_I | PSL_C | PSL_D | PSL_AC);
 }
 
+void
+amd64_bsp_pcpu_init1(struct pcpu *pc)
+{
+
+	PCPU_SET(prvspace, pc);
+	PCPU_SET(curthread, &thread0);
+	PCPU_SET(tssp, &common_tss[0]);
+	PCPU_SET(commontssp, &common_tss[0]);
+	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
+	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
+	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
+	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
+}
+
+void
+amd64_bsp_pcpu_init2(uint64_t rsp0)
+{
+
+	PCPU_SET(rsp0, rsp0);
+	PCPU_SET(pti_rsp0, ((vm_offset_t)PCPU_PTR(pti_stack) +
+	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
+	PCPU_SET(curpcb, thread0.td_pcb);
+}
+
+void
+amd64_bsp_ist_init(struct pcpu *pc)
+{
+	struct nmi_pcpu *np;
+
+	/* doublefault stack space, runs on ist1 */
+	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
+
+	/*
+	 * NMI stack, runs on ist2.  The pcpu pointer is stored just
+	 * above the start of the ist2 stack.
+	 */
+	np = ((struct nmi_pcpu *)&nmi0_stack[sizeof(nmi0_stack)]) - 1;
+	np->np_pcpu = (register_t)pc;
+	common_tss[0].tss_ist2 = (long)np;
+
+	/*
+	 * MC# stack, runs on ist3.  The pcpu pointer is stored just
+	 * above the start of the ist3 stack.
+	 */
+	np = ((struct nmi_pcpu *)&mce0_stack[sizeof(mce0_stack)]) - 1;
+	np->np_pcpu = (register_t)pc;
+	common_tss[0].tss_ist3 = (long)np;
+
+	/*
+	 * DB# stack, runs on ist4.
+	 */
+	np = ((struct nmi_pcpu *)&dbg0_stack[sizeof(dbg0_stack)]) - 1;
+	np->np_pcpu = (register_t)pc;
+	common_tss[0].tss_ist4 = (long)np;
+}
+
 u_int64_t
 hammer_time(u_int64_t modulep, u_int64_t physfree)
 {
 	caddr_t kmdp;
 	int gsel_tss, x;
 	struct pcpu *pc;
-	struct nmi_pcpu *np;
 	struct xstate_hdr *xhdr;
 	u_int64_t rsp0;
 	char *env;
@@ -1623,7 +1679,7 @@
 	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
 	r_gdt.rd_base = (long) gdt;
 	lgdt(&r_gdt);
-	pc = &__pcpu[0];
+	pc = &temp_bsp_pcpu;
 
 	wrmsr(MSR_FSBASE, 0);		/* User value */
 	wrmsr(MSR_GSBASE, (u_int64_t)pc);
@@ -1632,15 +1688,8 @@
 	pcpu_init(pc, 0, sizeof(struct pcpu));
 	dpcpu_init((void *)(physfree + KERNBASE), 0);
 	physfree += DPCPU_SIZE;
-	PCPU_SET(prvspace, pc);
-	PCPU_SET(curthread, &thread0);
+	amd64_bsp_pcpu_init1(pc);
 	/* Non-late cninit() and printf() can be moved up to here. */
-	PCPU_SET(tssp, &common_tss[0]);
-	PCPU_SET(commontssp, &common_tss[0]);
-	PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
-	PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]);
-	PCPU_SET(fs32p, &gdt[GUFS32_SEL]);
-	PCPU_SET(gs32p, &gdt[GUGS32_SEL]);
 
 	/*
 	 * Initialize mutexes.
@@ -1729,31 +1778,7 @@
 	finishidentcpu();	/* Final stage of CPU initialization */
 	initializecpu();	/* Initialize CPU registers */
 
-	/* doublefault stack space, runs on ist1 */
-	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
-
-	/*
-	 * NMI stack, runs on ist2.  The pcpu pointer is stored just
-	 * above the start of the ist2 stack.
-	 */
-	np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1;
-	np->np_pcpu = (register_t) pc;
-	common_tss[0].tss_ist2 = (long) np;
-
-	/*
-	 * MC# stack, runs on ist3.  The pcpu pointer is stored just
-	 * above the start of the ist3 stack.
-	 */
-	np = ((struct nmi_pcpu *) &mce0_stack[sizeof(mce0_stack)]) - 1;
-	np->np_pcpu = (register_t) pc;
-	common_tss[0].tss_ist3 = (long) np;
-
-	/*
-	 * DB# stack, runs on ist4.
-	 */
-	np = ((struct nmi_pcpu *) &dbg0_stack[sizeof(dbg0_stack)]) - 1;
-	np->np_pcpu = (register_t) pc;
-	common_tss[0].tss_ist4 = (long) np;
+	amd64_bsp_ist_init(pc);
 
 	/* Set the IO permission bitmap (empty due to tss seg limit) */
 	common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE;
@@ -1842,10 +1867,7 @@
 	/* Ensure the stack is aligned to 16 bytes */
 	rsp0 &= ~0xFul;
 	common_tss[0].tss_rsp0 = rsp0;
-	PCPU_SET(rsp0, rsp0);
-	PCPU_SET(pti_rsp0, ((vm_offset_t)PCPU_PTR(pti_stack) +
-	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
-	PCPU_SET(curpcb, thread0.td_pcb);
+	amd64_bsp_pcpu_init2(rsp0);
 
 	/* transfer to user mode */
 
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -94,7 +94,7 @@
 
 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 3)
 
-extern struct pcpu __pcpu[];
+extern struct pcpu *__pcpu;
 
 /* Temporary variables for init_secondary() */
 char *doublefault_stack;
@@ -401,10 +401,10 @@
 	if (_vm_phys_domain(pmap_kextract(oa)) == domain)
 		return;
 	m = vm_page_alloc_domain(NULL, 0, domain,
-	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_ZERO);
+	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
 	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
 	pagecopy((void *)oa, (void *)na);
-	pmap_enter(kernel_pmap, oa, m, VM_PROT_READ | VM_PROT_WRITE, 0, 0);
+	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
 	/* XXX old pcpu page leaked. */
 }
 #endif
@@ -475,13 +475,11 @@
 		domain = acpi_pxm_get_cpu_locality(apic_id);
 #endif
 		/* allocate and set up an idle stack data page */
-		bootstacks[cpu] = (void *)kmem_malloc_domainset(
-		    DOMAINSET_FIXED(domain), kstack_pages * PAGE_SIZE,
+		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
 		    M_WAITOK | M_ZERO);
-		doublefault_stack = (char *)kmem_malloc_domainset(
-		    DOMAINSET_FIXED(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
-		mce_stack = (char *)kmem_malloc_domainset(
-		    DOMAINSET_FIXED(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
+		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
+		    M_ZERO);
+		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
 		nmi_stack = (char *)kmem_malloc_domainset(
 		    DOMAINSET_FIXED(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
 		dbg_stack = (char *)kmem_malloc_domainset(
@@ -510,7 +508,7 @@
 	outb(CMOS_DATA, mpbiosreason);
 
 	/* number of APs actually started */
-	return mp_naps;
+	return (mp_naps);
 }
 
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -443,6 +443,10 @@
 static vm_pindex_t pti_pg_idx;
 static bool pti_finalized;
 
+extern struct pcpu *__pcpu;
+extern struct pcpu temp_bsp_pcpu;
+extern pt_entry_t *pcpu_pte;
+
 struct pmap_pkru_range {
 	struct rs_el	pkru_rs_el;
 	u_int		pkru_keyidx;
@@ -1608,8 +1612,8 @@
 pmap_bootstrap(vm_paddr_t *firstaddr)
 {
 	vm_offset_t va;
-	pt_entry_t *pte;
-	uint64_t cr4;
+	pt_entry_t *pte, *pcpu_pte;
+	uint64_t cr4, pcpu_phys;
 	u_long res;
 	int i;
 
@@ -1624,6 +1628,8 @@
 	 */
 	create_pagetables(firstaddr);
 
+	pcpu_phys = allocpages(firstaddr, MAXCPU);
+
 	/*
 	 * Add a physical memory segment (vm_phys_seg) corresponding to the
 	 * preallocated kernel page table pages so that vm_page structures
@@ -1632,6 +1638,7 @@
 	 * addresses to superpage mappings.
 	 */
 	vm_phys_add_seg(KPTphys, KPTphys + ptoa(nkpt));
+	vm_phys_add_seg(pcpu_phys, pcpu_phys + ptoa(MAXCPU));
 
 	/*
 	 * Account for the virtual addresses mapped by create_pagetables().
@@ -1691,8 +1698,20 @@
 	SYSMAP(caddr_t, CMAP1, crashdumpmap, MAXDUMPPGS)
 	CADDR1 = crashdumpmap;
 
+	SYSMAP(struct pcpu *, pcpu_pte, __pcpu, MAXCPU);
 	virtual_avail = va;
 
+	for (i = 0; i < MAXCPU; i++) {
+		pcpu_pte[i] = (pcpu_phys + ptoa(i)) | X86_PG_V | X86_PG_RW |
+		    pg_g | pg_nx | X86_PG_M | X86_PG_A;
+	}
+	STAILQ_INIT(&cpuhead);
+	wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[0]);
+	pcpu_init(&__pcpu[0], 0, sizeof(struct pcpu));
+	amd64_bsp_pcpu_init1(&__pcpu[0]);
+	amd64_bsp_ist_init(&__pcpu[0]);
+	__pcpu[0].pc_dynamic = temp_bsp_pcpu.pc_dynamic;
+
 	/*
 	 * Initialize the PAT MSR.
 	 * pmap_init_pat() clears and sets CR4_PGE, which, as a
Index: sys/amd64/include/counter.h
===================================================================
--- sys/amd64/include/counter.h
+++ sys/amd64/include/counter.h
@@ -33,9 +33,10 @@
 
 #include <sys/pcpu.h>
 
-extern struct pcpu __pcpu[];
+extern struct pcpu *__pcpu;
+extern struct pcpu temp_bsp_pcpu;
 
-#define	EARLY_COUNTER	&__pcpu[0].pc_early_dummy_counter
+#define	EARLY_COUNTER	&temp_bsp_pcpu.pc_early_dummy_counter
 
 #define	counter_enter()	do {} while (0)
 #define	counter_exit()	do {} while (0)
Index: sys/amd64/include/md_var.h
===================================================================
--- sys/amd64/include/md_var.h
+++ sys/amd64/include/md_var.h
@@ -58,6 +58,9 @@
 void	amd64_conf_fast_syscall(void);
 void	amd64_db_resume_dbreg(void);
 void	amd64_lower_shared_page(struct sysentvec *);
+void	amd64_bsp_pcpu_init1(struct pcpu *pc);
+void	amd64_bsp_pcpu_init2(uint64_t rsp0);
+void	amd64_bsp_ist_init(struct pcpu *pc);
 void	amd64_syscall(struct thread *td, int traced);
 void	amd64_syscall_ret_flush_l1d(int error);
 void	amd64_syscall_ret_flush_l1d_recalc(void);
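Reviewer note (commentary, not part of the patch): the expression np = ((struct nmi_pcpu *)&nmi0_stack[sizeof(nmi0_stack)]) - 1 in the new amd64_bsp_ist_init() reserves the topmost sizeof(struct nmi_pcpu) bytes of each IST stack and stores the pcpu pointer there, so the NMI, MC# and DB# handlers can find their pcpu even when GSBASE cannot be trusted. Because the TSS IST slot is set to np rather than to the true top of the stack, the handler's pushes grow downward from np and never overwrite the saved record. The standalone user-space sketch below reproduces only that pointer arithmetic; the toy_* names are invented for illustration and do not appear in the patch or in the kernel.

#include <stdint.h>
#include <stdio.h>

struct toy_pcpu {			/* stands in for struct pcpu */
	int	cpuid;
};

struct toy_np {				/* stands in for struct nmi_pcpu */
	uintptr_t	np_pcpu;
};

static char toy_stack[4096];		/* stands in for nmi0_stack */

int
main(void)
{
	struct toy_pcpu bsp = { .cpuid = 0 };
	struct toy_np *np;

	/*
	 * Same arithmetic as amd64_bsp_ist_init(): point one element past
	 * the end of the stack, then back up by one record, so the record
	 * occupies the highest sizeof(*np) bytes of the stack.
	 */
	np = ((struct toy_np *)&toy_stack[sizeof(toy_stack)]) - 1;
	np->np_pcpu = (uintptr_t)&bsp;

	/* The address of np is what the kernel loads into tss_ist2. */
	printf("stack top:     %p\n", (void *)&toy_stack[sizeof(toy_stack)]);
	printf("ist entry rsp: %p\n", (void *)np);
	printf("saved pcpu:    %p (cpu %d)\n", (void *)np->np_pcpu,
	    ((struct toy_pcpu *)np->np_pcpu)->cpuid);
	return (0);
}

When compiled and run, "ist entry rsp" prints sizeof(struct toy_np) bytes below "stack top", which is the layout the hunks above establish for tss_ist2, tss_ist3 and tss_ist4.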