Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -486,8 +486,13 @@
 	struct pmap *user_pmap;
 	pd_entry_t *l1;
 
-	/* Distribute new kernel L1 entry to all the user pmaps */
-	if (pmap != kernel_pmap)
+	/*
+	 * Distribute new kernel L1 entry to all the user pmaps. This is only
+	 * necessary with three-level paging configured: with four-level paging
+	 * the kernel's half of the top-level page table page is static and can
+	 * simply be copied at pmap initialization time.
+	 */
+	if (pmap != kernel_pmap || pmap_mode != PMAP_MODE_SV39)
 		return;
 
 	mtx_lock(&allpmaps_lock);
@@ -1278,11 +1283,15 @@
 	CPU_ZERO(&pmap->pm_active);
 
-	mtx_lock(&allpmaps_lock);
-	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
-	mtx_unlock(&allpmaps_lock);
+	if (pmap_mode == PMAP_MODE_SV39) {
+		mtx_lock(&allpmaps_lock);
+		LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
+		mtx_unlock(&allpmaps_lock);
 
-	memcpy(pmap->pm_top, kernel_pmap->pm_top, PAGE_SIZE);
+		memcpy(pmap->pm_top, kernel_pmap->pm_top, PAGE_SIZE);
+	} else {
+		memcpy(pmap->pm_top, kernel_pmap->pm_top, PAGE_SIZE);
+	}
 
 	vm_radix_init(&pmap->pm_root);
@@ -1472,9 +1481,11 @@
 	KASSERT(CPU_EMPTY(&pmap->pm_active), ("releasing active pmap %p",
 	    pmap));
 
-	mtx_lock(&allpmaps_lock);
-	LIST_REMOVE(pmap, pm_list);
-	mtx_unlock(&allpmaps_lock);
+	if (pmap_mode == PMAP_MODE_SV39) {
+		mtx_lock(&allpmaps_lock);
+		LIST_REMOVE(pmap, pm_list);
+		mtx_unlock(&allpmaps_lock);
+	}
 
 	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_top));
 	vm_page_unwire_noq(m);
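
For context, the following is a minimal standalone sketch (not FreeBSD code; the toy_* names, struct layout, and constants are invented for illustration) of the behaviour the patch relies on: under SV39 every user pmap carries its own copy of the kernel's L1 entries, so a newly installed kernel L1 entry must be pushed to each pmap on the allpmaps list, whereas under SV48 the kernel's half of the top-level page is fixed at initialization and a one-time copy in pmap_pinit() is enough.

/*
 * Standalone illustration only.  Models "distribute new kernel top-level
 * entries to all user pmaps" (SV39) versus "copy the kernel top level once
 * at pmap creation" (SV48).  All names here are simplified placeholders.
 */
#include <stdio.h>
#include <string.h>

#define NTOP 512			/* entries per top-level page table page */

enum toy_mode { MODE_SV39, MODE_SV48 };

struct toy_pmap {
	unsigned long top[NTOP];	/* top-level page table (L1 or L0) */
	struct toy_pmap *next;		/* link on the "allpmaps" list */
};

static enum toy_mode toy_mode = MODE_SV39;
static struct toy_pmap toy_kernel_pmap;
static struct toy_pmap *toy_allpmaps;	/* user pmaps needing L1 updates */

/* Create a user pmap, mirroring the patch's pmap_pinit() change. */
static void
toy_pinit(struct toy_pmap *p)
{
	if (toy_mode == MODE_SV39) {
		/* SV39: track the pmap so future kernel L1 entries reach it. */
		p->next = toy_allpmaps;
		toy_allpmaps = p;
	}
	/* Both modes start from a copy of the kernel's top-level page. */
	memcpy(p->top, toy_kernel_pmap.top, sizeof(p->top));
}

/* Install a new kernel top-level entry, mirroring pmap_distribute_l1(). */
static void
toy_add_kernel_entry(int idx, unsigned long pte)
{
	toy_kernel_pmap.top[idx] = pte;
	if (toy_mode != MODE_SV39)
		return;			/* SV48: kernel top level is static. */
	for (struct toy_pmap *p = toy_allpmaps; p != NULL; p = p->next)
		p->top[idx] = pte;	/* propagate to every user pmap */
}

int
main(void)
{
	struct toy_pmap user;

	toy_pinit(&user);
	toy_add_kernel_entry(256, 0x1234);
	printf("user sees new kernel entry: %#lx\n", user.top[256]);
	return (0);
}

Running the sketch in MODE_SV39 shows the new entry appearing in the user pmap via the allpmaps walk; switching toy_mode to MODE_SV48 models the patch's assumption that no kernel top-level entries are added after initialization, so the copy made in toy_pinit() is the only one needed.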