Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -7818,7 +7818,8 @@
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NPTEPG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (pde == NULL)
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -6052,7 +6052,8 @@
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NL3PG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (l2 == NULL)
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -4250,7 +4250,8 @@
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NPTEPG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (pde == NULL)
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c
+++ sys/powerpc/aim/mmu_oea64.c
@@ -1755,10 +1755,14 @@
 	 * If the VA of the entered page is not aligned with its PA,
 	 * don't try page promotion as it is not possible.
 	 * This reduces the number of promotion failures dramatically.
+	 *
+	 * Ignore VM_PROT_NO_PROMOTE unless PMAP_ENTER_QUICK_LOCKED.
 	 */
 	if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
 	    (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
 	    (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
+	    ((prot & VM_PROT_NO_PROMOTE) == 0 ||
+	    (flags & PMAP_ENTER_QUICK_LOCKED) == 0) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		moea64_sp_promote(pmap, va, m);
@@ -1850,8 +1854,9 @@
     vm_prot_t prot)
 {
 
-	moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
+	moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE |
+	    VM_PROT_NO_PROMOTE), PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED,
+	    0);
 }
 
 vm_paddr_t
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -3519,7 +3519,8 @@
 	 * If both the PTP and the reservation are fully populated, then attempt
 	 * promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (l2 == NULL)
Index: sys/vm/vm.h
===================================================================
--- sys/vm/vm.h
+++ sys/vm/vm.h
@@ -76,6 +76,7 @@
 #define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
 #define	VM_PROT_PRIV_FLAG	((vm_prot_t) 0x10)
 #define	VM_PROT_FAULT_LOOKUP	VM_PROT_PRIV_FLAG
+#define	VM_PROT_NO_PROMOTE	VM_PROT_PRIV_FLAG
 #define	VM_PROT_QUICK_NOFAULT	VM_PROT_PRIV_FLAG	/* same to save bits */
 
 #define	VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -1891,6 +1891,7 @@
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
 	vm_page_t m;
+	vm_prot_t prot;
 	int i;
 
 	pmap = fs->map->pmap;
@@ -1906,6 +1907,14 @@
 		if (starta < entry->start)
 			starta = entry->start;
 	}
+	prot = entry->protection;
+
+	/*
+	 * If pmap_enter() has enabled write access on a nearby mapping, then
+	 * don't attempt promotion, because it will fail.
+	 */
+	if ((fs->prot & VM_PROT_WRITE) != 0)
+		prot |= VM_PROT_NO_PROMOTE;
 
 	/*
 	 * Generate the sequence of virtual addresses that are candidates for
@@ -1949,7 +1958,7 @@
 		}
 		if (vm_page_all_valid(m) &&
 		    (m->flags & PG_FICTITIOUS) == 0)
-			pmap_enter_quick(pmap, addr, m, entry->protection);
+			pmap_enter_quick(pmap, addr, m, prot);
 		if (!obj_locked || lobject != entry->object.vm_object)
 			VM_OBJECT_RUNLOCK(lobject);
 	}
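
The guard that this patch places in front of each promotion attempt can be
read as a standalone predicate.  The sketch below is illustrative and not
part of the patch: should_attempt_promotion(), its parameters, and the
userspace typedefs are hypothetical stand-ins for the kernel's types and
state, the mpte == NULL (kernel pmap) case is elided, and NPTEPG is the
amd64 value (512 PTEs per page-table page).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t vm_prot_t;		/* FreeBSD's vm_prot_t is u_char */

#define	VM_PROT_PRIV_FLAG	((vm_prot_t)0x10)
#define	VM_PROT_NO_PROMOTE	VM_PROT_PRIV_FLAG	/* shares the private bit */

#define	NPTEPG	512	/* PTEs per page-table page on amd64 */

/*
 * Promotion is attempted only when the caller did not pass
 * VM_PROT_NO_PROMOTE and the preexisting conditions still hold:
 * the page-table page is fully populated, the page is not
 * fictitious, and the reservation is fully populated.
 */
static bool
should_attempt_promotion(vm_prot_t prot, int ptp_ref_count,
    bool fictitious, bool reserv_fullpop)
{
	return ((prot & VM_PROT_NO_PROMOTE) == 0 &&
	    ptp_ref_count == NPTEPG && !fictitious && reserv_fullpop);
}

int
main(void)
{
	/* vm_fault_prefault() ORs in the flag after a write fault... */
	vm_prot_t prot = VM_PROT_NO_PROMOTE;

	/* ...so promotion is skipped even for a fully populated PTP. */
	printf("%d\n", should_attempt_promotion(prot, NPTEPG, false, true));

	/* Without the flag, the existing conditions decide as before. */
	printf("%d\n", should_attempt_promotion(0, NPTEPG, false, true));
	return (0);
}

Note the encoding choice: rather than widening vm_prot_t, the patch reuses
VM_PROT_PRIV_FLAG, following the convention already visible in vm.h, where
VM_PROT_FAULT_LOOKUP and VM_PROT_QUICK_NOFAULT alias the same bit "to save
bits"; the private bit's meaning is fixed by the call path that passes it.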