Index: head/sys/riscv/include/pcb.h
===================================================================
--- head/sys/riscv/include/pcb.h
+++ head/sys/riscv/include/pcb.h
@@ -55,7 +55,6 @@
 #define	PCB_FP_STARTED	0x1
 #define	PCB_FP_USERMASK	0x1
 
 	uint64_t	pcb_sepc;	/* Supervisor exception pc */
-	vm_offset_t	pcb_l1addr;	/* L1 page tables base address */
 	vm_offset_t	pcb_onfault;	/* Copyinout fault handler */
 };
Index: head/sys/riscv/include/pcpu.h
===================================================================
--- head/sys/riscv/include/pcpu.h
+++ head/sys/riscv/include/pcpu.h
@@ -45,6 +45,7 @@
 #define	ALT_STACK_SIZE	128
 
 #define	PCPU_MD_FIELDS						\
+	struct pmap *pc_curpmap;	/* Currently active pmap */	\
 	uint32_t pc_pending_ipis;	/* IPIs pending to this CPU */	\
 	char __pad[61]
Index: head/sys/riscv/include/pmap.h
===================================================================
--- head/sys/riscv/include/pmap.h
+++ head/sys/riscv/include/pmap.h
@@ -41,6 +41,7 @@
 #ifndef LOCORE
 
 #include <sys/queue.h>
+#include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
@@ -80,6 +81,8 @@
 	struct mtx		pm_mtx;
 	struct pmap_statistics	pm_stats;	/* pmap statictics */
 	pd_entry_t		*pm_l1;
+	u_long			pm_satp;	/* value for SATP register */
+	cpuset_t		pm_active;	/* active on cpus */
 	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
 	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
 	struct vm_radix		pm_root;
@@ -137,6 +140,10 @@
 #define	L1_MAPPABLE_P(va, pa, size)					\
 	((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
 
+struct thread;
+
+void	pmap_activate_boot(pmap_t);
+void	pmap_activate_sw(struct thread *);
 void	pmap_bootstrap(vm_offset_t, vm_paddr_t, vm_size_t);
 void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
 vm_paddr_t pmap_kextract(vm_offset_t va);
Index: head/sys/riscv/riscv/genassym.c
===================================================================
--- head/sys/riscv/riscv/genassym.c
+++ head/sys/riscv/riscv/genassym.c
@@ -63,7 +63,6 @@
 ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
 
 ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
-ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr));
 ASSYM(PCB_SIZE, sizeof(struct pcb));
 ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra));
 ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
Index: head/sys/riscv/riscv/machdep.c
===================================================================
--- head/sys/riscv/riscv/machdep.c
+++ head/sys/riscv/riscv/machdep.c
@@ -871,10 +871,6 @@
 
 	init_proc0(rvbp->kern_stack);
 
-	/* set page table base register for thread0 */
-	thread0.td_pcb->pcb_l1addr = \
-	    (rvbp->kern_l1pt - KERNBASE + rvbp->kern_phys);
-
 	msgbufinit(msgbufp, msgbufsize);
 	mutex_init();
 	init_param2(physmem);
Index: head/sys/riscv/riscv/mp_machdep.c
===================================================================
--- head/sys/riscv/riscv/mp_machdep.c
+++ head/sys/riscv/riscv/mp_machdep.c
@@ -58,6 +58,7 @@
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/vm_extern.h>
+#include <vm/vm_map.h>
 
 #include <machine/intr.h>
 #include <machine/smp.h>
@@ -254,6 +255,9 @@
 
 	/* Enable external (PLIC) interrupts */
 	csr_set(sie, SIE_SEIE);
+
+	/* Activate process 0's pmap. */
+	pmap_activate_boot(vmspace_pmap(proc0.p_vmspace));
 
 	mtx_lock_spin(&ap_boot_mtx);
Index: head/sys/riscv/riscv/pmap.c
===================================================================
--- head/sys/riscv/riscv/pmap.c
+++ head/sys/riscv/riscv/pmap.c
@@ -118,9 +118,10 @@
  */
 #include 
+#include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -566,6 +567,8 @@
 
 	rw_init(&pvh_global_lock, "pmap pv global");
 
+	CPU_FILL(&kernel_pmap->pm_active);
+
 	/* Assume the address we were loaded to is a valid physical address.
	 */
 	min_pa = max_pa = kernstart;
@@ -723,9 +726,6 @@
  * In general, the calling thread uses a plain fence to order the
  * writes to the page tables before invoking an SBI callback to invoke
  * sfence_vma() on remote CPUs.
- *
- * Since the riscv pmap does not yet have a pm_active field, IPIs are
- * sent to all CPUs in the system.
  */
 static void
 pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
@@ -733,10 +733,11 @@
 	cpuset_t mask;
 
 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_sfence_vma(mask.__bits, va, 1);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, va, 1);
 	sfence_vma_page(va);
 	sched_unpin();
 }
@@ -747,10 +748,11 @@
 	cpuset_t mask;
 
 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, sva, eva - sva + 1);
 
 	/*
 	 * Might consider a loop of sfence_vma_page() for a small
@@ -766,16 +768,17 @@
 	cpuset_t mask;
 
 	sched_pin();
-	mask = all_cpus;
+	mask = pmap->pm_active;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
-	fence();
 
 	/*
 	 * XXX: The SBI doc doesn't detail how to specify x0 as the
 	 * address to perform a global fence.  BBL currently treats
 	 * all sfence_vma requests as global however.
 	 */
-	sbi_remote_sfence_vma(mask.__bits, 0, 0);
+	fence();
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_sfence_vma(mask.__bits, 0, 0);
 	sfence_vma();
 	sched_unpin();
 }
@@ -1199,6 +1202,9 @@
 
 	PMAP_LOCK_INIT(pmap);
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 	pmap->pm_l1 = kernel_pmap->pm_l1;
+	pmap->pm_satp = SATP_MODE_SV39 | (vtophys(pmap->pm_l1) >> PAGE_SHIFT);
+	CPU_ZERO(&pmap->pm_active);
+	pmap_activate_boot(pmap);
 }
@@ -1216,12 +1222,15 @@
 
 	l1phys = VM_PAGE_TO_PHYS(l1pt);
 	pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
+	pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT);
 
 	if ((l1pt->flags & PG_ZERO) == 0)
 		pagezero(pmap->pm_l1);
 
 	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
 
+	CPU_ZERO(&pmap->pm_active);
+
 	/* Install kernel pagetables */
 	memcpy(pmap->pm_l1, kernel_pmap->pm_l1, PAGE_SIZE);
@@ -1411,6 +1420,8 @@
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
+	KASSERT(CPU_EMPTY(&pmap->pm_active),
+	    ("releasing active pmap %p", pmap));
 
 	mtx_lock(&allpmaps_lock);
 	LIST_REMOVE(pmap, pm_list);
@@ -4252,26 +4263,56 @@
 }
 
 void
-pmap_activate(struct thread *td)
+pmap_activate_sw(struct thread *td)
 {
-	pmap_t pmap;
-	uint64_t reg;
+	pmap_t oldpmap, pmap;
+	u_int cpu;
 
-	critical_enter();
+	oldpmap = PCPU_GET(curpmap);
 	pmap = vmspace_pmap(td->td_proc->p_vmspace);
-	td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
+	if (pmap == oldpmap)
+		return;
+	load_satp(pmap->pm_satp);
 
-	reg = SATP_MODE_SV39;
-	reg |= (td->td_pcb->pcb_l1addr >> PAGE_SHIFT);
-	load_satp(reg);
+	cpu = PCPU_GET(cpuid);
+#ifdef SMP
+	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+	CPU_CLR_ATOMIC(cpu, &oldpmap->pm_active);
+#else
+	CPU_SET(cpu, &pmap->pm_active);
+	CPU_CLR(cpu, &oldpmap->pm_active);
+#endif
+	PCPU_SET(curpmap, pmap);
 
-	pmap_invalidate_all(pmap);
+	sfence_vma();
+}
+
+void
+pmap_activate(struct thread *td)
+{
+
+	critical_enter();
+	pmap_activate_sw(td);
 	critical_exit();
 }
 
 void
-pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+pmap_activate_boot(pmap_t pmap)
 {
+	u_int cpu;
+
+	cpu = PCPU_GET(cpuid);
+#ifdef SMP
+	CPU_SET_ATOMIC(cpu, &pmap->pm_active);
+#else
+	CPU_SET(cpu, &pmap->pm_active);
+#endif
+	PCPU_SET(curpmap, pmap);
+}
+
+void
+pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
+{
 	cpuset_t mask;
 
 	/*
@@ -4286,7 +4327,8 @@
 	mask = all_cpus;
 	CPU_CLR(PCPU_GET(cpuid), &mask);
 	fence();
-	sbi_remote_fence_i(mask.__bits);
+	if (!CPU_EMPTY(&mask) && smp_started)
+		sbi_remote_fence_i(mask.__bits);
 	sched_unpin();
 }
Index: head/sys/riscv/riscv/swtch.S
===================================================================
--- head/sys/riscv/riscv/swtch.S
+++ head/sys/riscv/riscv/swtch.S
@@ -207,28 +207,21 @@
 END(fpe_state_clear)
 
 /*
- * void cpu_throw(struct thread *old, struct thread *new)
+ * void cpu_throw(struct thread *old __unused, struct thread *new)
 */
ENTRY(cpu_throw)
+	/* Activate the new thread's pmap. */
+	mv	s0, a1
+	mv	a0, a1
+	call	_C_LABEL(pmap_activate_sw)
+	mv	a0, s0
+
 	/* Store the new curthread */
-	sd	a1, PC_CURTHREAD(gp)
+	sd	a0, PC_CURTHREAD(gp)
 	/* And the new pcb */
-	ld	x13, TD_PCB(a1)
+	ld	x13, TD_PCB(a0)
 	sd	x13, PC_CURPCB(gp)
 
-	sfence.vma
-
-	/* Switch to the new pmap */
-	ld	t0, PCB_L1ADDR(x13)
-	srli	t0, t0, PAGE_SHIFT
-	li	t1, SATP_MODE_SV39
-	or	t0, t0, t1
-	csrw	satp, t0
-
-	/* TODO: Invalidate the TLB */
-
-	sfence.vma
-
 	/* Load registers */
 	ld	ra, (PCB_RA)(x13)
 	ld	sp, (PCB_SP)(x13)
@@ -250,7 +243,7 @@
 
 #ifdef FPE
 	/* Is FPE enabled for new thread? */
-	ld	t0, TD_FRAME(a1)
+	ld	t0, TD_FRAME(a0)
 	ld	t1, (TF_SSTATUS)(t0)
 	li	t2, SSTATUS_FS_MASK
 	and	t3, t1, t2
@@ -324,38 +317,27 @@
1:
 #endif
 
-	/*
-	 * Restore the saved context.
-	 */
-	ld	x13, TD_PCB(a1)
+	/* Activate the new thread's pmap */
+	mv	s0, a0
+	mv	s1, a1
+	mv	s2, a2
+	mv	a0, a1
+	call	_C_LABEL(pmap_activate_sw)
+	mv	a1, s1
 
-	/*
-	 * TODO: We may need to flush the cache here if switching
-	 * to a user process.
-	 */
-
-	sfence.vma
-
-	/* Switch to the new pmap */
-	ld	t0, PCB_L1ADDR(x13)
-	srli	t0, t0, PAGE_SHIFT
-	li	t1, SATP_MODE_SV39
-	or	t0, t0, t1
-	csrw	satp, t0
-
-	/* TODO: Invalidate the TLB */
-
-	sfence.vma
-
 	/* Release the old thread */
-	sd	a2, TD_LOCK(a0)
+	sd	s2, TD_LOCK(s0)
 #if defined(SCHED_ULE) && defined(SMP)
 	/* Spin if TD_LOCK points to a blocked_lock */
-	la	a2, _C_LABEL(blocked_lock)
+	la	s2, _C_LABEL(blocked_lock)
1:
 	ld	t0, TD_LOCK(a1)
-	beq	t0, a2, 1b
+	beq	t0, s2, 1b
 #endif
 
+	/*
+	 * Restore the saved context.
+	 */
+	ld	x13, TD_PCB(a1)
+
 	/* Restore the registers */
 	ld	tp, (PCB_TP)(x13)
Index: head/sys/riscv/riscv/vm_machdep.c
===================================================================
--- head/sys/riscv/riscv/vm_machdep.c
+++ head/sys/riscv/riscv/vm_machdep.c
@@ -92,9 +92,6 @@
 	td2->td_pcb = pcb2;
 	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
 
-	td2->td_pcb->pcb_l1addr =
-	    vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1);
-
 	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
 	bcopy(td1->td_frame, tf, sizeof(*tf));
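
For readers unfamiliar with the encoding the patch caches in pm_satp: SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT) packs the Sv39 mode selector (value 8, bits 63:60 of satp on RV64) together with the physical page number of the root (L1) page table, so a context switch only needs a single load_satp() of the precomputed value. The following is a minimal standalone sketch of that arithmetic, not part of the patch: the constants mirror the kernel's definitions but are redefined locally so the example compiles on its own, and the helper make_satp() is hypothetical.

/*
 * Sketch of the Sv39 SATP value construction used above
 * (pmap->pm_satp = SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT)).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12		/* 4 KiB base pages */
#define SATP_MODE_SV39	(8ULL << 60)	/* MODE field (bits 63:60) = 8: Sv39 */

/* Encode a page-aligned root page-table physical address as satp's PPN. */
static uint64_t
make_satp(uint64_t l1phys)
{
	return (SATP_MODE_SV39 | (l1phys >> PAGE_SHIFT));
}

int
main(void)
{
	/* Example root page-table address; must be page-aligned. */
	uint64_t l1phys = 0x80210000ULL;

	/* Prints satp = 0x8000000000080210 for this input. */
	printf("satp = 0x%016llx\n", (unsigned long long)make_satp(l1phys));
	return (0);
}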