Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -2381,45 +2381,45 @@
 int
 pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype)
 {
-	pt_entry_t orig_l3;
-	pt_entry_t new_l3;
-	pt_entry_t *l3;
+	pd_entry_t *l2, l2e;
+	pt_entry_t bits, *pte, oldpte;
 	int rv;
 
 	rv = 0;
-
 	PMAP_LOCK(pmap);
-
-	l3 = pmap_l3(pmap, va);
-	if (l3 == NULL)
+	l2 = pmap_l2(pmap, va);
+	if (l2 == NULL || ((l2e = pmap_load(l2)) & PTE_V) == 0)
 		goto done;
+	if ((l2e & PTE_RWX) == 0) {
+		pte = pmap_l2_to_l3(l2, va);
+		if (pte == NULL || ((oldpte = pmap_load(pte)) & PTE_V) == 0)
+			goto done;
+	} else {
+		pte = l2;
+		oldpte = l2e;
+	}
 
-	orig_l3 = pmap_load(l3);
-	if ((orig_l3 & PTE_V) == 0 ||
-	    (ftype == VM_PROT_WRITE && (orig_l3 & PTE_W) == 0) ||
-	    (ftype == VM_PROT_EXECUTE && (orig_l3 & PTE_X) == 0) ||
-	    (ftype == VM_PROT_READ && (orig_l3 & PTE_R) == 0))
+	if ((pmap != kernel_pmap && (oldpte & PTE_U) == 0) ||
+	    (ftype == VM_PROT_WRITE && (oldpte & PTE_W) == 0) ||
+	    (ftype == VM_PROT_EXECUTE && (oldpte & PTE_X) == 0) ||
+	    (ftype == VM_PROT_READ && (oldpte & PTE_R) == 0))
		goto done;
 
-	new_l3 = orig_l3 | PTE_A;
+	bits = PTE_A;
 	if (ftype == VM_PROT_WRITE)
-		new_l3 |= PTE_D;
+		bits |= PTE_D;
 
-	if (orig_l3 != new_l3) {
-		pmap_store(l3, new_l3);
-		pmap_invalidate_page(pmap, va);
-		rv = 1;
-		goto done;
-	}
-
-	/*
-	 * XXX: This case should never happen since it means
-	 * the PTE shouldn't have resulted in a fault.
+	/*
+	 * Spurious faults can occur if the implementation caches invalid
+	 * entries in the TLB, or if simultaneous accesses on multiple CPUs
+	 * race with each other.
 	 */
-
+	if ((oldpte & bits) != bits)
+		pmap_store_bits(pte, bits);
+	sfence_vma();
+	rv = 1;
 done:
 	PMAP_UNLOCK(pmap);
-
 	return (rv);
 }
 
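For context on the control flow above: on RISC-V, a page-table entry at any level is a leaf if any of its R/W/X bits are set, so an L2 entry with (l2e & PTE_RWX) == 0 points to an L3 page table, while a leaf L2 entry maps a 2 MB superpage and can be fixed up directly. The sketch below is a minimal user-space model of the fixup decision only, not the kernel code: the PTE bit positions are taken from the RISC-V privileged specification, but the function and parameter names here are hypothetical.

/*
 * User-space model of the A/D-bit fault-fixup decision.  PTE bit
 * positions follow the RISC-V privileged spec (Sv39/Sv48); the rest
 * of this program is an illustrative mock, not FreeBSD kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_V	(1u << 0)	/* valid */
#define PTE_R	(1u << 1)	/* readable */
#define PTE_W	(1u << 2)	/* writable */
#define PTE_X	(1u << 3)	/* executable */
#define PTE_U	(1u << 4)	/* user-accessible */
#define PTE_A	(1u << 6)	/* accessed */
#define PTE_D	(1u << 7)	/* dirty */
#define PTE_RWX	(PTE_R | PTE_W | PTE_X)

enum prot { PROT_R, PROT_W, PROT_X };

/*
 * Decide whether a fault on a leaf PTE can be resolved by setting the
 * accessed/dirty bits, and compute the bits to set.  Mirrors the
 * permission checks in the rewritten pmap_fault_fixup() above.
 */
static bool
fixup_bits(uint64_t pte, enum prot ftype, bool user, uint64_t *bits)
{
	if ((pte & PTE_V) == 0)
		return (false);
	if (user && (pte & PTE_U) == 0)
		return (false);
	if ((ftype == PROT_W && (pte & PTE_W) == 0) ||
	    (ftype == PROT_X && (pte & PTE_X) == 0) ||
	    (ftype == PROT_R && (pte & PTE_R) == 0))
		return (false);
	*bits = PTE_A;
	if (ftype == PROT_W)
		*bits |= PTE_D;
	return (true);
}

int
main(void)
{
	/* A valid, writable user page that has not yet been accessed. */
	uint64_t pte = PTE_V | PTE_R | PTE_W | PTE_U;
	uint64_t bits;

	if (fixup_bits(pte, PROT_W, true, &bits)) {
		pte |= bits;	/* the kernel sets these atomically */
		printf("fixed up: A=%d D=%d\n",
		    (pte & PTE_A) != 0, (pte & PTE_D) != 0);
	}
	return (0);
}

Note the two concurrency-related choices visible in the patch itself: pmap_store_bits() only ever sets bits in the PTE (so it cannot clobber updates made concurrently by another CPU or a hardware page-table walker), and sfence_vma() is issued even when the bits were already set, since the fault may have been caused by a stale or cached-invalid TLB entry rather than by missing A/D bits.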